diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/CONTRIBUTORS b/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/README.md b/README.md index 5cd8f0ac6e9..d9d7edd7332 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,9 @@ Selected commands: - `cmd/toolstash` is a utility to simplify working with multiple versions of the Go toolchain. These commands may be fetched with a command such as -`go install golang.org/x/tools/cmd/goimports@latest`. +``` +go install golang.org/x/tools/cmd/goimports@latest +``` Selected packages: diff --git a/cmd/auth/cookieauth/cookieauth.go b/cmd/auth/cookieauth/cookieauth.go index feefaff0b6e..8b0ff17664b 100644 --- a/cmd/auth/cookieauth/cookieauth.go +++ b/cmd/auth/cookieauth/cookieauth.go @@ -40,7 +40,6 @@ func main() { f, err := os.Open(os.Args[1]) if err != nil { log.Fatalf("failed to read cookie file: %v\n", os.Args[1]) - os.Exit(1) } defer f.Close() diff --git a/cmd/bundle/main.go b/cmd/bundle/main.go index 96cbce9a131..194797bd822 100644 --- a/cmd/bundle/main.go +++ b/cmd/bundle/main.go @@ -84,6 +84,7 @@ import ( "os" "strconv" "strings" + "unicode" "golang.org/x/tools/go/packages" ) @@ -233,7 +234,7 @@ func bundle(src, dst, dstpkg, prefix, buildTags string) ([]byte, error) { fmt.Fprintf(&out, "// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.\n") if *outputFile != "" && buildTags == "" { - fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(os.Args[1:], " ")) + fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(quoteArgs(os.Args[1:]), " ")) } else { fmt.Fprintf(&out, "// $ bundle %s\n", strings.Join(os.Args[1:], " ")) } @@ -447,6 +448,35 @@ func printSameLineComment(out *bytes.Buffer, comments []*ast.CommentGroup, fset return pos } +func quoteArgs(ss []string) []string { + // From go help generate: + // + // > The arguments to the directive are space-separated tokens or + // > double-quoted strings passed to the generator as individual + // > arguments when it is run. + // + // > Quoted strings use Go syntax and are evaluated before execution; a + // > quoted string appears as a single argument to the generator. + // + var qs []string + for _, s := range ss { + if s == "" || containsSpace(s) { + s = strconv.Quote(s) + } + qs = append(qs, s) + } + return qs +} + +func containsSpace(s string) bool { + for _, r := range s { + if unicode.IsSpace(r) { + return true + } + } + return false +} + type flagFunc func(string) func (f flagFunc) Set(s string) error { diff --git a/cmd/compilebench/main.go b/cmd/compilebench/main.go index 28dc450e9e0..754acdca0e4 100644 --- a/cmd/compilebench/main.go +++ b/cmd/compilebench/main.go @@ -335,10 +335,10 @@ type compile struct{ dir string } func (compile) long() bool { return false } func (c compile) run(name string, count int) error { - // Make sure dependencies needed by go tool compile are installed to GOROOT/pkg. - out, err := exec.Command(*flagGoCmd, "build", "-i", c.dir).CombinedOutput() + // Make sure dependencies needed by go tool compile are built. + out, err := exec.Command(*flagGoCmd, "build", c.dir).CombinedOutput() if err != nil { - return fmt.Errorf("go build -i %s: %v\n%s", c.dir, err, out) + return fmt.Errorf("go build %s: %v\n%s", c.dir, err, out) } // Find dir and source file list. @@ -347,6 +347,11 @@ func (c compile) run(name string, count int) error { return err } + importcfg, err := genImportcfgFile(c.dir, false) + if err != nil { + return err + } + // If this package has assembly files, we'll need to pass a symabis // file to the compiler; call a helper to invoke the assembler // to do that. @@ -371,6 +376,10 @@ func (c compile) run(name string, count int) error { if symAbisFile != "" { args = append(args, "-symabis", symAbisFile) } + if importcfg != "" { + args = append(args, "-importcfg", importcfg) + defer os.Remove(importcfg) + } args = append(args, pkg.GoFiles...) if err := runBuildCmd(name, count, pkg.Dir, compiler, args); err != nil { return err @@ -406,18 +415,28 @@ func (r link) run(name string, count int) error { } // Build dependencies. - out, err := exec.Command(*flagGoCmd, "build", "-i", "-o", "/dev/null", r.dir).CombinedOutput() + out, err := exec.Command(*flagGoCmd, "build", "-o", "/dev/null", r.dir).CombinedOutput() if err != nil { - return fmt.Errorf("go build -i %s: %v\n%s", r.dir, err, out) + return fmt.Errorf("go build -a %s: %v\n%s", r.dir, err, out) } + importcfg, err := genImportcfgFile(r.dir, true) + if err != nil { + return err + } + defer os.Remove(importcfg) + // Build the main package. pkg, err := goList(r.dir) if err != nil { return err } - args := []string{"-o", "_compilebench_.o"} + args := []string{"-o", "_compilebench_.o", "-importcfg", importcfg} args = append(args, pkg.GoFiles...) + if *flagTrace { + fmt.Fprintf(os.Stderr, "running: %s %+v\n", + compiler, args) + } cmd := exec.Command(compiler, args...) cmd.Dir = pkg.Dir cmd.Stdout = os.Stderr @@ -429,7 +448,7 @@ func (r link) run(name string, count int) error { defer os.Remove(pkg.Dir + "/_compilebench_.o") // Link the main package. - args = []string{"-o", "_compilebench_.exe"} + args = []string{"-o", "_compilebench_.exe", "-importcfg", importcfg} args = append(args, strings.Fields(*flagLinkerFlags)...) args = append(args, strings.Fields(r.flags)...) args = append(args, "_compilebench_.o") @@ -578,3 +597,49 @@ func genSymAbisFile(pkg *Pkg, symAbisFile, incdir string) error { } return nil } + +// genImportcfgFile generates an importcfg file for building package +// dir. Returns the generated importcfg file path (or empty string +// if the package has no dependency). +func genImportcfgFile(dir string, full bool) (string, error) { + need := "{{.Imports}}" + if full { + // for linking, we need transitive dependencies + need = "{{.Deps}}" + } + + // find imported/dependent packages + cmd := exec.Command(*flagGoCmd, "list", "-f", need, dir) + cmd.Stderr = os.Stderr + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("go list -f %s %s: %v", need, dir, err) + } + // trim [ ]\n + if len(out) < 3 || out[0] != '[' || out[len(out)-2] != ']' || out[len(out)-1] != '\n' { + return "", fmt.Errorf("unexpected output from go list -f %s %s: %s", need, dir, out) + } + out = out[1 : len(out)-2] + if len(out) == 0 { + return "", nil + } + + // build importcfg for imported packages + cmd = exec.Command(*flagGoCmd, "list", "-export", "-f", "{{if .Export}}packagefile {{.ImportPath}}={{.Export}}{{end}}") + cmd.Args = append(cmd.Args, strings.Fields(string(out))...) + cmd.Stderr = os.Stderr + out, err = cmd.Output() + if err != nil { + return "", fmt.Errorf("generating importcfg for %s: %s: %v", dir, cmd, err) + } + + f, err := os.CreateTemp("", "importcfg") + if err != nil { + return "", fmt.Errorf("creating tmp importcfg file failed: %v", err) + } + defer f.Close() + if _, err := f.Write(out); err != nil { + return "", fmt.Errorf("writing importcfg file %s failed: %v", f.Name(), err) + } + return f.Name(), nil +} diff --git a/cmd/cover/README.md b/cmd/cover/README.md deleted file mode 100644 index 62e60279a9b..00000000000 --- a/cmd/cover/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Deprecated - -NOTE: For Go releases 1.5 and later, this tool lives in the standard repository. The code here is not maintained. diff --git a/cmd/cover/cover.go b/cmd/cover/cover.go deleted file mode 100644 index 0c7db1025ad..00000000000 --- a/cmd/cover/cover.go +++ /dev/null @@ -1,721 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strconv" - "strings" -) - -const usageMessage = "" + - `Usage of 'go tool cover': -Given a coverage profile produced by 'go test': - go test -coverprofile=c.out - -Open a web browser displaying annotated source code: - go tool cover -html=c.out - -Write out an HTML file instead of launching a web browser: - go tool cover -html=c.out -o coverage.html - -Display coverage percentages to stdout for each function: - go tool cover -func=c.out - -Finally, to generate modified source code with coverage annotations -(what go test -cover does): - go tool cover -mode=set -var=CoverageVariableName program.go -` - -func usage() { - fmt.Fprint(os.Stderr, usageMessage) - fmt.Fprintln(os.Stderr, "\nFlags:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\n Only one of -html, -func, or -mode may be set.") - os.Exit(2) -} - -var ( - mode = flag.String("mode", "", "coverage mode: set, count, atomic") - varVar = flag.String("var", "GoCover", "name of coverage variable to generate") - output = flag.String("o", "", "file for output; default: stdout") - htmlOut = flag.String("html", "", "generate HTML representation of coverage profile") - funcOut = flag.String("func", "", "output coverage profile information for each function") -) - -var profile string // The profile to read; the value of -html or -func - -var counterStmt func(*File, ast.Expr) ast.Stmt - -const ( - atomicPackagePath = "sync/atomic" - atomicPackageName = "_cover_atomic_" -) - -func main() { - flag.Usage = usage - flag.Parse() - - // Usage information when no arguments. - if flag.NFlag() == 0 && flag.NArg() == 0 { - flag.Usage() - } - - err := parseFlags() - if err != nil { - fmt.Fprintln(os.Stderr, err) - fmt.Fprintln(os.Stderr, `For usage information, run "go tool cover -help"`) - os.Exit(2) - } - - // Generate coverage-annotated source. - if *mode != "" { - annotate(flag.Arg(0)) - return - } - - // Output HTML or function coverage information. - if *htmlOut != "" { - err = htmlOutput(profile, *output) - } else { - err = funcOutput(profile, *output) - } - - if err != nil { - fmt.Fprintf(os.Stderr, "cover: %v\n", err) - os.Exit(2) - } -} - -// parseFlags sets the profile and counterStmt globals and performs validations. -func parseFlags() error { - profile = *htmlOut - if *funcOut != "" { - if profile != "" { - return fmt.Errorf("too many options") - } - profile = *funcOut - } - - // Must either display a profile or rewrite Go source. - if (profile == "") == (*mode == "") { - return fmt.Errorf("too many options") - } - - if *mode != "" { - switch *mode { - case "set": - counterStmt = setCounterStmt - case "count": - counterStmt = incCounterStmt - case "atomic": - counterStmt = atomicCounterStmt - default: - return fmt.Errorf("unknown -mode %v", *mode) - } - - if flag.NArg() == 0 { - return fmt.Errorf("missing source file") - } else if flag.NArg() == 1 { - return nil - } - } else if flag.NArg() == 0 { - return nil - } - return fmt.Errorf("too many arguments") -} - -// Block represents the information about a basic block to be recorded in the analysis. -// Note: Our definition of basic block is based on control structures; we don't break -// apart && and ||. We could but it doesn't seem important enough to bother. -type Block struct { - startByte token.Pos - endByte token.Pos - numStmt int -} - -// File is a wrapper for the state of a file used in the parser. -// The basic parse tree walker is a method of this type. -type File struct { - fset *token.FileSet - name string // Name of file. - astFile *ast.File - blocks []Block - atomicPkg string // Package name for "sync/atomic" in this file. -} - -// Visit implements the ast.Visitor interface. -func (f *File) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.BlockStmt: - // If it's a switch or select, the body is a list of case clauses; don't tag the block itself. - if len(n.List) > 0 { - switch n.List[0].(type) { - case *ast.CaseClause: // switch - for _, n := range n.List { - clause := n.(*ast.CaseClause) - clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false) - } - return f - case *ast.CommClause: // select - for _, n := range n.List { - clause := n.(*ast.CommClause) - clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false) - } - return f - } - } - n.List = f.addCounters(n.Lbrace, n.Rbrace+1, n.List, true) // +1 to step past closing brace. - case *ast.IfStmt: - ast.Walk(f, n.Body) - if n.Else == nil { - return nil - } - // The elses are special, because if we have - // if x { - // } else if y { - // } - // we want to cover the "if y". To do this, we need a place to drop the counter, - // so we add a hidden block: - // if x { - // } else { - // if y { - // } - // } - switch stmt := n.Else.(type) { - case *ast.IfStmt: - block := &ast.BlockStmt{ - Lbrace: n.Body.End(), // Start at end of the "if" block so the covered part looks like it starts at the "else". - List: []ast.Stmt{stmt}, - Rbrace: stmt.End(), - } - n.Else = block - case *ast.BlockStmt: - stmt.Lbrace = n.Body.End() // Start at end of the "if" block so the covered part looks like it starts at the "else". - default: - panic("unexpected node type in if") - } - ast.Walk(f, n.Else) - return nil - case *ast.SelectStmt: - // Don't annotate an empty select - creates a syntax error. - if n.Body == nil || len(n.Body.List) == 0 { - return nil - } - case *ast.SwitchStmt: - // Don't annotate an empty switch - creates a syntax error. - if n.Body == nil || len(n.Body.List) == 0 { - return nil - } - case *ast.TypeSwitchStmt: - // Don't annotate an empty type switch - creates a syntax error. - if n.Body == nil || len(n.Body.List) == 0 { - return nil - } - } - return f -} - -// unquote returns the unquoted string. -func unquote(s string) string { - t, err := strconv.Unquote(s) - if err != nil { - log.Fatalf("cover: improperly quoted string %q\n", s) - } - return t -} - -// addImport adds an import for the specified path, if one does not already exist, and returns -// the local package name. -func (f *File) addImport(path string) string { - // Does the package already import it? - for _, s := range f.astFile.Imports { - if unquote(s.Path.Value) == path { - if s.Name != nil { - return s.Name.Name - } - return filepath.Base(path) - } - } - newImport := &ast.ImportSpec{ - Name: ast.NewIdent(atomicPackageName), - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: fmt.Sprintf("%q", path), - }, - } - impDecl := &ast.GenDecl{ - Tok: token.IMPORT, - Specs: []ast.Spec{ - newImport, - }, - } - // Make the new import the first Decl in the file. - astFile := f.astFile - astFile.Decls = append(astFile.Decls, nil) - copy(astFile.Decls[1:], astFile.Decls[0:]) - astFile.Decls[0] = impDecl - astFile.Imports = append(astFile.Imports, newImport) - - // Now refer to the package, just in case it ends up unused. - // That is, append to the end of the file the declaration - // var _ = _cover_atomic_.AddUint32 - reference := &ast.GenDecl{ - Tok: token.VAR, - Specs: []ast.Spec{ - &ast.ValueSpec{ - Names: []*ast.Ident{ - ast.NewIdent("_"), - }, - Values: []ast.Expr{ - &ast.SelectorExpr{ - X: ast.NewIdent(atomicPackageName), - Sel: ast.NewIdent("AddUint32"), - }, - }, - }, - }, - } - astFile.Decls = append(astFile.Decls, reference) - return atomicPackageName -} - -var slashslash = []byte("//") - -// initialComments returns the prefix of content containing only -// whitespace and line comments. Any +build directives must appear -// within this region. This approach is more reliable than using -// go/printer to print a modified AST containing comments. -func initialComments(content []byte) []byte { - // Derived from go/build.Context.shouldBuild. - end := 0 - p := content - for len(p) > 0 { - line := p - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, p = line[:i], p[i+1:] - } else { - p = p[len(p):] - } - line = bytes.TrimSpace(line) - if len(line) == 0 { // Blank line. - end = len(content) - len(p) - continue - } - if !bytes.HasPrefix(line, slashslash) { // Not comment line. - break - } - } - return content[:end] -} - -func annotate(name string) { - fset := token.NewFileSet() - content, err := ioutil.ReadFile(name) - if err != nil { - log.Fatalf("cover: %s: %s", name, err) - } - parsedFile, err := parser.ParseFile(fset, name, content, parser.ParseComments) - if err != nil { - log.Fatalf("cover: %s: %s", name, err) - } - parsedFile.Comments = trimComments(parsedFile, fset) - - file := &File{ - fset: fset, - name: name, - astFile: parsedFile, - } - if *mode == "atomic" { - file.atomicPkg = file.addImport(atomicPackagePath) - } - ast.Walk(file, file.astFile) - fd := os.Stdout - if *output != "" { - var err error - fd, err = os.Create(*output) - if err != nil { - log.Fatalf("cover: %s", err) - } - } - fd.Write(initialComments(content)) // Retain '// +build' directives. - file.print(fd) - // After printing the source tree, add some declarations for the counters etc. - // We could do this by adding to the tree, but it's easier just to print the text. - file.addVariables(fd) -} - -// trimComments drops all but the //go: comments, some of which are semantically important. -// We drop all others because they can appear in places that cause our counters -// to appear in syntactically incorrect places. //go: appears at the beginning of -// the line and is syntactically safe. -func trimComments(file *ast.File, fset *token.FileSet) []*ast.CommentGroup { - var comments []*ast.CommentGroup - for _, group := range file.Comments { - var list []*ast.Comment - for _, comment := range group.List { - if strings.HasPrefix(comment.Text, "//go:") && fset.Position(comment.Slash).Column == 1 { - list = append(list, comment) - } - } - if list != nil { - comments = append(comments, &ast.CommentGroup{List: list}) - } - } - return comments -} - -func (f *File) print(w io.Writer) { - printer.Fprint(w, f.fset, f.astFile) -} - -// intLiteral returns an ast.BasicLit representing the integer value. -func (f *File) intLiteral(i int) *ast.BasicLit { - node := &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprint(i), - } - return node -} - -// index returns an ast.BasicLit representing the number of counters present. -func (f *File) index() *ast.BasicLit { - return f.intLiteral(len(f.blocks)) -} - -// setCounterStmt returns the expression: __count[23] = 1. -func setCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.AssignStmt{ - Lhs: []ast.Expr{counter}, - Tok: token.ASSIGN, - Rhs: []ast.Expr{f.intLiteral(1)}, - } -} - -// incCounterStmt returns the expression: __count[23]++. -func incCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.IncDecStmt{ - X: counter, - Tok: token.INC, - } -} - -// atomicCounterStmt returns the expression: atomic.AddUint32(&__count[23], 1) -func atomicCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.ExprStmt{ - X: &ast.CallExpr{ - Fun: &ast.SelectorExpr{ - X: ast.NewIdent(f.atomicPkg), - Sel: ast.NewIdent("AddUint32"), - }, - Args: []ast.Expr{&ast.UnaryExpr{ - Op: token.AND, - X: counter, - }, - f.intLiteral(1), - }, - }, - } -} - -// newCounter creates a new counter expression of the appropriate form. -func (f *File) newCounter(start, end token.Pos, numStmt int) ast.Stmt { - counter := &ast.IndexExpr{ - X: &ast.SelectorExpr{ - X: ast.NewIdent(*varVar), - Sel: ast.NewIdent("Count"), - }, - Index: f.index(), - } - stmt := counterStmt(f, counter) - f.blocks = append(f.blocks, Block{start, end, numStmt}) - return stmt -} - -// addCounters takes a list of statements and adds counters to the beginning of -// each basic block at the top level of that list. For instance, given -// -// S1 -// if cond { -// S2 -// } -// S3 -// -// counters will be added before S1 and before S3. The block containing S2 -// will be visited in a separate call. -// TODO: Nested simple blocks get unnecessary (but correct) counters -func (f *File) addCounters(pos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) []ast.Stmt { - // Special case: make sure we add a counter to an empty block. Can't do this below - // or we will add a counter to an empty statement list after, say, a return statement. - if len(list) == 0 { - return []ast.Stmt{f.newCounter(pos, blockEnd, 0)} - } - // We have a block (statement list), but it may have several basic blocks due to the - // appearance of statements that affect the flow of control. - var newList []ast.Stmt - for { - // Find first statement that affects flow of control (break, continue, if, etc.). - // It will be the last statement of this basic block. - var last int - end := blockEnd - for last = 0; last < len(list); last++ { - end = f.statementBoundary(list[last]) - if f.endsBasicSourceBlock(list[last]) { - extendToClosingBrace = false // Block is broken up now. - last++ - break - } - } - if extendToClosingBrace { - end = blockEnd - } - if pos != end { // Can have no source to cover if e.g. blocks abut. - newList = append(newList, f.newCounter(pos, end, last)) - } - newList = append(newList, list[0:last]...) - list = list[last:] - if len(list) == 0 { - break - } - pos = list[0].Pos() - } - return newList -} - -// hasFuncLiteral reports the existence and position of the first func literal -// in the node, if any. If a func literal appears, it usually marks the termination -// of a basic block because the function body is itself a block. -// Therefore we draw a line at the start of the body of the first function literal we find. -// TODO: what if there's more than one? Probably doesn't matter much. -func hasFuncLiteral(n ast.Node) (bool, token.Pos) { - if n == nil { - return false, 0 - } - var literal funcLitFinder - ast.Walk(&literal, n) - return literal.found(), token.Pos(literal) -} - -// statementBoundary finds the location in s that terminates the current basic -// block in the source. -func (f *File) statementBoundary(s ast.Stmt) token.Pos { - // Control flow statements are easy. - switch s := s.(type) { - case *ast.BlockStmt: - // Treat blocks like basic blocks to avoid overlapping counters. - return s.Lbrace - case *ast.IfStmt: - found, pos := hasFuncLiteral(s.Init) - if found { - return pos - } - found, pos = hasFuncLiteral(s.Cond) - if found { - return pos - } - return s.Body.Lbrace - case *ast.ForStmt: - found, pos := hasFuncLiteral(s.Init) - if found { - return pos - } - found, pos = hasFuncLiteral(s.Cond) - if found { - return pos - } - found, pos = hasFuncLiteral(s.Post) - if found { - return pos - } - return s.Body.Lbrace - case *ast.LabeledStmt: - return f.statementBoundary(s.Stmt) - case *ast.RangeStmt: - found, pos := hasFuncLiteral(s.X) - if found { - return pos - } - return s.Body.Lbrace - case *ast.SwitchStmt: - found, pos := hasFuncLiteral(s.Init) - if found { - return pos - } - found, pos = hasFuncLiteral(s.Tag) - if found { - return pos - } - return s.Body.Lbrace - case *ast.SelectStmt: - return s.Body.Lbrace - case *ast.TypeSwitchStmt: - found, pos := hasFuncLiteral(s.Init) - if found { - return pos - } - return s.Body.Lbrace - } - // If not a control flow statement, it is a declaration, expression, call, etc. and it may have a function literal. - // If it does, that's tricky because we want to exclude the body of the function from this block. - // Draw a line at the start of the body of the first function literal we find. - // TODO: what if there's more than one? Probably doesn't matter much. - found, pos := hasFuncLiteral(s) - if found { - return pos - } - return s.End() -} - -// endsBasicSourceBlock reports whether s changes the flow of control: break, if, etc., -// or if it's just problematic, for instance contains a function literal, which will complicate -// accounting due to the block-within-an expression. -func (f *File) endsBasicSourceBlock(s ast.Stmt) bool { - switch s := s.(type) { - case *ast.BlockStmt: - // Treat blocks like basic blocks to avoid overlapping counters. - return true - case *ast.BranchStmt: - return true - case *ast.ForStmt: - return true - case *ast.IfStmt: - return true - case *ast.LabeledStmt: - return f.endsBasicSourceBlock(s.Stmt) - case *ast.RangeStmt: - return true - case *ast.SwitchStmt: - return true - case *ast.SelectStmt: - return true - case *ast.TypeSwitchStmt: - return true - case *ast.ExprStmt: - // Calls to panic change the flow. - // We really should verify that "panic" is the predefined function, - // but without type checking we can't and the likelihood of it being - // an actual problem is vanishingly small. - if call, ok := s.X.(*ast.CallExpr); ok { - if ident, ok := call.Fun.(*ast.Ident); ok && ident.Name == "panic" && len(call.Args) == 1 { - return true - } - } - } - found, _ := hasFuncLiteral(s) - return found -} - -// funcLitFinder implements the ast.Visitor pattern to find the location of any -// function literal in a subtree. -type funcLitFinder token.Pos - -func (f *funcLitFinder) Visit(node ast.Node) (w ast.Visitor) { - if f.found() { - return nil // Prune search. - } - switch n := node.(type) { - case *ast.FuncLit: - *f = funcLitFinder(n.Body.Lbrace) - return nil // Prune search. - } - return f -} - -func (f *funcLitFinder) found() bool { - return token.Pos(*f) != token.NoPos -} - -// Sort interface for []block1; used for self-check in addVariables. - -type block1 struct { - Block - index int -} - -type blockSlice []block1 - -func (b blockSlice) Len() int { return len(b) } -func (b blockSlice) Less(i, j int) bool { return b[i].startByte < b[j].startByte } -func (b blockSlice) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -// offset translates a token position into a 0-indexed byte offset. -func (f *File) offset(pos token.Pos) int { - return f.fset.Position(pos).Offset -} - -// addVariables adds to the end of the file the declarations to set up the counter and position variables. -func (f *File) addVariables(w io.Writer) { - // Self-check: Verify that the instrumented basic blocks are disjoint. - t := make([]block1, len(f.blocks)) - for i := range f.blocks { - t[i].Block = f.blocks[i] - t[i].index = i - } - sort.Sort(blockSlice(t)) - for i := 1; i < len(t); i++ { - if t[i-1].endByte > t[i].startByte { - fmt.Fprintf(os.Stderr, "cover: internal error: block %d overlaps block %d\n", t[i-1].index, t[i].index) - // Note: error message is in byte positions, not token positions. - fmt.Fprintf(os.Stderr, "\t%s:#%d,#%d %s:#%d,#%d\n", - f.name, f.offset(t[i-1].startByte), f.offset(t[i-1].endByte), - f.name, f.offset(t[i].startByte), f.offset(t[i].endByte)) - } - } - - // Declare the coverage struct as a package-level variable. - fmt.Fprintf(w, "\nvar %s = struct {\n", *varVar) - fmt.Fprintf(w, "\tCount [%d]uint32\n", len(f.blocks)) - fmt.Fprintf(w, "\tPos [3 * %d]uint32\n", len(f.blocks)) - fmt.Fprintf(w, "\tNumStmt [%d]uint16\n", len(f.blocks)) - fmt.Fprintf(w, "} {\n") - - // Initialize the position array field. - fmt.Fprintf(w, "\tPos: [3 * %d]uint32{\n", len(f.blocks)) - - // A nice long list of positions. Each position is encoded as follows to reduce size: - // - 32-bit starting line number - // - 32-bit ending line number - // - (16 bit ending column number << 16) | (16-bit starting column number). - for i, block := range f.blocks { - start := f.fset.Position(block.startByte) - end := f.fset.Position(block.endByte) - fmt.Fprintf(w, "\t\t%d, %d, %#x, // [%d]\n", start.Line, end.Line, (end.Column&0xFFFF)<<16|(start.Column&0xFFFF), i) - } - - // Close the position array. - fmt.Fprintf(w, "\t},\n") - - // Initialize the position array field. - fmt.Fprintf(w, "\tNumStmt: [%d]uint16{\n", len(f.blocks)) - - // A nice long list of statements-per-block, so we can give a conventional - // valuation of "percent covered". To save space, it's a 16-bit number, so we - // clamp it if it overflows - won't matter in practice. - for i, block := range f.blocks { - n := block.numStmt - if n > 1<<16-1 { - n = 1<<16 - 1 - } - fmt.Fprintf(w, "\t\t%d, // %d\n", n, i) - } - - // Close the statements-per-block array. - fmt.Fprintf(w, "\t},\n") - - // Close the struct initialization. - fmt.Fprintf(w, "}\n") -} diff --git a/cmd/cover/cover_test.go b/cmd/cover/cover_test.go deleted file mode 100644 index 91c7695b44d..00000000000 --- a/cmd/cover/cover_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// No testdata on Android. - -//go:build !android -// +build !android - -package main_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" - - "golang.org/x/tools/internal/testenv" -) - -const ( - // Data directory, also the package directory for the test. - testdata = "testdata" -) - -var debug = false // Keeps the rewritten files around if set. - -// Run this shell script, but do it in Go so it can be run by "go test". -// -// replace the word LINE with the line number < testdata/test.go > testdata/test_line.go -// go build -o ./testcover -// ./testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go -// go run ./testdata/main.go ./testdata/test.go -func TestCover(t *testing.T) { - testenv.NeedsTool(t, "go") - - tmpdir, err := ioutil.TempDir("", "TestCover") - if err != nil { - t.Fatal(err) - } - defer func() { - if debug { - fmt.Printf("test files left in %s\n", tmpdir) - } else { - os.RemoveAll(tmpdir) - } - }() - - testcover := filepath.Join(tmpdir, "testcover.exe") - testMain := filepath.Join(tmpdir, "main.go") - testTest := filepath.Join(tmpdir, "test.go") - coverInput := filepath.Join(tmpdir, "test_line.go") - coverOutput := filepath.Join(tmpdir, "test_cover.go") - - for _, f := range []string{testMain, testTest} { - data, err := ioutil.ReadFile(filepath.Join(testdata, filepath.Base(f))) - if err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(f, data, 0644); err != nil { - t.Fatal(err) - } - } - - // Read in the test file (testTest) and write it, with LINEs specified, to coverInput. - file, err := ioutil.ReadFile(testTest) - if err != nil { - t.Fatal(err) - } - lines := bytes.Split(file, []byte("\n")) - for i, line := range lines { - lines[i] = bytes.Replace(line, []byte("LINE"), []byte(fmt.Sprint(i+1)), -1) - } - err = ioutil.WriteFile(coverInput, bytes.Join(lines, []byte("\n")), 0666) - if err != nil { - t.Fatal(err) - } - - // go build -o testcover - cmd := exec.Command("go", "build", "-o", testcover) - run(cmd, t) - - // ./testcover -mode=count -var=coverTest -o ./testdata/test_cover.go testdata/test_line.go - cmd = exec.Command(testcover, "-mode=count", "-var=coverTest", "-o", coverOutput, coverInput) - run(cmd, t) - - // defer removal of ./testdata/test_cover.go - if !debug { - defer os.Remove(coverOutput) - } - - // go run ./testdata/main.go ./testdata/test.go - cmd = exec.Command("go", "run", testMain, coverOutput) - run(cmd, t) -} - -func run(c *exec.Cmd, t *testing.T) { - c.Stdout = os.Stdout - c.Stderr = os.Stderr - err := c.Run() - if err != nil { - t.Fatal(err) - } -} diff --git a/cmd/cover/doc.go b/cmd/cover/doc.go deleted file mode 100644 index 77dce442f15..00000000000 --- a/cmd/cover/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Cover is a program for analyzing the coverage profiles generated by -'go test -coverprofile=cover.out'. - -Deprecated: For Go releases 1.5 and later, this tool lives in the -standard repository. The code here is not maintained. - -Cover is also used by 'go test -cover' to rewrite the source code with -annotations to track which parts of each function are executed. -It operates on one Go source file at a time, computing approximate -basic block information by studying the source. It is thus more portable -than binary-rewriting coverage tools, but also a little less capable. -For instance, it does not probe inside && and || expressions, and can -be mildly confused by single statements with multiple function literals. - -For usage information, please see: - - go help testflag - go tool cover -help -*/ -package main // import "golang.org/x/tools/cmd/cover" diff --git a/cmd/cover/func.go b/cmd/cover/func.go deleted file mode 100644 index 41d9fceca58..00000000000 --- a/cmd/cover/func.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements the visitor that computes the (line, column)-(line-column) range for each function. - -package main - -import ( - "bufio" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "os" - "path/filepath" - "text/tabwriter" - - "golang.org/x/tools/cover" -) - -// funcOutput takes two file names as arguments, a coverage profile to read as input and an output -// file to write ("" means to write to standard output). The function reads the profile and produces -// as output the coverage data broken down by function, like this: -// -// fmt/format.go:30: init 100.0% -// fmt/format.go:57: clearflags 100.0% -// ... -// fmt/scan.go:1046: doScan 100.0% -// fmt/scan.go:1075: advance 96.2% -// fmt/scan.go:1119: doScanf 96.8% -// total: (statements) 91.9% - -func funcOutput(profile, outputFile string) error { - profiles, err := cover.ParseProfiles(profile) - if err != nil { - return err - } - - var out *bufio.Writer - if outputFile == "" { - out = bufio.NewWriter(os.Stdout) - } else { - fd, err := os.Create(outputFile) - if err != nil { - return err - } - defer fd.Close() - out = bufio.NewWriter(fd) - } - defer out.Flush() - - tabber := tabwriter.NewWriter(out, 1, 8, 1, '\t', 0) - defer tabber.Flush() - - var total, covered int64 - for _, profile := range profiles { - fn := profile.FileName - file, err := findFile(fn) - if err != nil { - return err - } - funcs, err := findFuncs(file) - if err != nil { - return err - } - // Now match up functions and profile blocks. - for _, f := range funcs { - c, t := f.coverage(profile) - fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n", fn, f.startLine, f.name, 100.0*float64(c)/float64(t)) - total += t - covered += c - } - } - fmt.Fprintf(tabber, "total:\t(statements)\t%.1f%%\n", 100.0*float64(covered)/float64(total)) - - return nil -} - -// findFuncs parses the file and returns a slice of FuncExtent descriptors. -func findFuncs(name string) ([]*FuncExtent, error) { - fset := token.NewFileSet() - parsedFile, err := parser.ParseFile(fset, name, nil, 0) - if err != nil { - return nil, err - } - visitor := &FuncVisitor{ - fset: fset, - name: name, - astFile: parsedFile, - } - ast.Walk(visitor, visitor.astFile) - return visitor.funcs, nil -} - -// FuncExtent describes a function's extent in the source by file and position. -type FuncExtent struct { - name string - startLine int - startCol int - endLine int - endCol int -} - -// FuncVisitor implements the visitor that builds the function position list for a file. -type FuncVisitor struct { - fset *token.FileSet - name string // Name of file. - astFile *ast.File - funcs []*FuncExtent -} - -// Visit implements the ast.Visitor interface. -func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.FuncDecl: - start := v.fset.Position(n.Pos()) - end := v.fset.Position(n.End()) - fe := &FuncExtent{ - name: n.Name.Name, - startLine: start.Line, - startCol: start.Column, - endLine: end.Line, - endCol: end.Column, - } - v.funcs = append(v.funcs, fe) - } - return v -} - -// coverage returns the fraction of the statements in the function that were covered, as a numerator and denominator. -func (f *FuncExtent) coverage(profile *cover.Profile) (num, den int64) { - // We could avoid making this n^2 overall by doing a single scan and annotating the functions, - // but the sizes of the data structures is never very large and the scan is almost instantaneous. - var covered, total int64 - // The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block. - for _, b := range profile.Blocks { - if b.StartLine > f.endLine || (b.StartLine == f.endLine && b.StartCol >= f.endCol) { - // Past the end of the function. - break - } - if b.EndLine < f.startLine || (b.EndLine == f.startLine && b.EndCol <= f.startCol) { - // Before the beginning of the function - continue - } - total += int64(b.NumStmt) - if b.Count > 0 { - covered += int64(b.NumStmt) - } - } - if total == 0 { - total = 1 // Avoid zero denominator. - } - return covered, total -} - -// findFile finds the location of the named file in GOROOT, GOPATH etc. -func findFile(file string) (string, error) { - dir, file := filepath.Split(file) - pkg, err := build.Import(dir, ".", build.FindOnly) - if err != nil { - return "", fmt.Errorf("can't find %q: %v", file, err) - } - return filepath.Join(pkg.Dir, file), nil -} diff --git a/cmd/cover/html.go b/cmd/cover/html.go deleted file mode 100644 index 0f8c72542b8..00000000000 --- a/cmd/cover/html.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "bytes" - "fmt" - exec "golang.org/x/sys/execabs" - "html/template" - "io" - "io/ioutil" - "math" - "os" - "path/filepath" - "runtime" - - "golang.org/x/tools/cover" -) - -// htmlOutput reads the profile data from profile and generates an HTML -// coverage report, writing it to outfile. If outfile is empty, -// it writes the report to a temporary file and opens it in a web browser. -func htmlOutput(profile, outfile string) error { - profiles, err := cover.ParseProfiles(profile) - if err != nil { - return err - } - - var d templateData - - for _, profile := range profiles { - fn := profile.FileName - if profile.Mode == "set" { - d.Set = true - } - file, err := findFile(fn) - if err != nil { - return err - } - src, err := ioutil.ReadFile(file) - if err != nil { - return fmt.Errorf("can't read %q: %v", fn, err) - } - var buf bytes.Buffer - err = htmlGen(&buf, src, profile.Boundaries(src)) - if err != nil { - return err - } - d.Files = append(d.Files, &templateFile{ - Name: fn, - Body: template.HTML(buf.String()), - Coverage: percentCovered(profile), - }) - } - - var out *os.File - if outfile == "" { - var dir string - dir, err = ioutil.TempDir("", "cover") - if err != nil { - return err - } - out, err = os.Create(filepath.Join(dir, "coverage.html")) - } else { - out, err = os.Create(outfile) - } - if err != nil { - return err - } - err = htmlTemplate.Execute(out, d) - if err == nil { - err = out.Close() - } - if err != nil { - return err - } - - if outfile == "" { - if !startBrowser("file://" + out.Name()) { - fmt.Fprintf(os.Stderr, "HTML output written to %s\n", out.Name()) - } - } - - return nil -} - -// percentCovered returns, as a percentage, the fraction of the statements in -// the profile covered by the test run. -// In effect, it reports the coverage of a given source file. -func percentCovered(p *cover.Profile) float64 { - var total, covered int64 - for _, b := range p.Blocks { - total += int64(b.NumStmt) - if b.Count > 0 { - covered += int64(b.NumStmt) - } - } - if total == 0 { - return 0 - } - return float64(covered) / float64(total) * 100 -} - -// htmlGen generates an HTML coverage report with the provided filename, -// source code, and tokens, and writes it to the given Writer. -func htmlGen(w io.Writer, src []byte, boundaries []cover.Boundary) error { - dst := bufio.NewWriter(w) - for i := range src { - for len(boundaries) > 0 && boundaries[0].Offset == i { - b := boundaries[0] - if b.Start { - n := 0 - if b.Count > 0 { - n = int(math.Floor(b.Norm*9)) + 1 - } - fmt.Fprintf(dst, ``, n, b.Count) - } else { - dst.WriteString("") - } - boundaries = boundaries[1:] - } - switch b := src[i]; b { - case '>': - dst.WriteString(">") - case '<': - dst.WriteString("<") - case '&': - dst.WriteString("&") - case '\t': - dst.WriteString(" ") - default: - dst.WriteByte(b) - } - } - return dst.Flush() -} - -// startBrowser tries to open the URL in a browser -// and reports whether it succeeds. -func startBrowser(url string) bool { - // try to start the browser - var args []string - switch runtime.GOOS { - case "darwin": - args = []string{"open"} - case "windows": - args = []string{"cmd", "/c", "start"} - default: - args = []string{"xdg-open"} - } - cmd := exec.Command(args[0], append(args[1:], url)...) - return cmd.Start() == nil -} - -// rgb returns an rgb value for the specified coverage value -// between 0 (no coverage) and 10 (max coverage). -func rgb(n int) string { - if n == 0 { - return "rgb(192, 0, 0)" // Red - } - // Gradient from gray to green. - r := 128 - 12*(n-1) - g := 128 + 12*(n-1) - b := 128 + 3*(n-1) - return fmt.Sprintf("rgb(%v, %v, %v)", r, g, b) -} - -// colors generates the CSS rules for coverage colors. -func colors() template.CSS { - var buf bytes.Buffer - for i := 0; i < 11; i++ { - fmt.Fprintf(&buf, ".cov%v { color: %v }\n", i, rgb(i)) - } - return template.CSS(buf.String()) -} - -var htmlTemplate = template.Must(template.New("html").Funcs(template.FuncMap{ - "colors": colors, -}).Parse(tmplHTML)) - -type templateData struct { - Files []*templateFile - Set bool -} - -type templateFile struct { - Name string - Body template.HTML - Coverage float64 -} - -const tmplHTML = ` - - - - - - - -
- -
- not tracked - {{if .Set}} - not covered - covered - {{else}} - no coverage - low coverage - * - * - * - * - * - * - * - * - high coverage - {{end}} -
-
-
- {{range $i, $f := .Files}} -
{{$f.Body}}
- {{end}} -
- - - -` diff --git a/cmd/cover/testdata/main.go b/cmd/cover/testdata/main.go deleted file mode 100644 index 6ed39c4f230..00000000000 --- a/cmd/cover/testdata/main.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Test runner for coverage test. This file is not coverage-annotated; test.go is. -// It knows the coverage counter is called "coverTest". - -package main - -import ( - "fmt" - "os" -) - -func main() { - testAll() - verify() -} - -type block struct { - count uint32 - line uint32 -} - -var counters = make(map[block]bool) - -// check records the location and expected value for a counter. -func check(line, count uint32) { - b := block{ - count, - line, - } - counters[b] = true -} - -// checkVal is a version of check that returns its extra argument, -// so it can be used in conditionals. -func checkVal(line, count uint32, val int) int { - b := block{ - count, - line, - } - counters[b] = true - return val -} - -var PASS = true - -// verify checks the expected counts against the actual. It runs after the test has completed. -func verify() { - for b := range counters { - got, index := count(b.line) - if b.count == anything && got != 0 { - got = anything - } - if got != b.count { - fmt.Fprintf(os.Stderr, "test_go:%d expected count %d got %d [counter %d]\n", b.line, b.count, got, index) - PASS = false - } - } - verifyPanic() - if !PASS { - fmt.Fprintf(os.Stderr, "FAIL\n") - os.Exit(2) - } -} - -// verifyPanic is a special check for the known counter that should be -// after the panic call in testPanic. -func verifyPanic() { - if coverTest.Count[panicIndex-1] != 1 { - // Sanity check for test before panic. - fmt.Fprintf(os.Stderr, "bad before panic") - PASS = false - } - if coverTest.Count[panicIndex] != 0 { - fmt.Fprintf(os.Stderr, "bad at panic: %d should be 0\n", coverTest.Count[panicIndex]) - PASS = false - } - if coverTest.Count[panicIndex+1] != 1 { - fmt.Fprintf(os.Stderr, "bad after panic") - PASS = false - } -} - -// count returns the count and index for the counter at the specified line. -func count(line uint32) (uint32, int) { - // Linear search is fine. Choose perfect fit over approximate. - // We can have a closing brace for a range on the same line as a condition for an "else if" - // and we don't want that brace to steal the count for the condition on the "if". - // Therefore we test for a perfect (lo==line && hi==line) match, but if we can't - // find that we take the first imperfect match. - index := -1 - indexLo := uint32(1e9) - for i := range coverTest.Count { - lo, hi := coverTest.Pos[3*i], coverTest.Pos[3*i+1] - if lo == line && line == hi { - return coverTest.Count[i], i - } - // Choose the earliest match (the counters are in unpredictable order). - if lo <= line && line <= hi && indexLo > lo { - index = i - indexLo = lo - } - } - if index == -1 { - fmt.Fprintln(os.Stderr, "cover_test: no counter for line", line) - PASS = false - return 0, 0 - } - return coverTest.Count[index], index -} diff --git a/cmd/cover/testdata/test.go b/cmd/cover/testdata/test.go deleted file mode 100644 index 9013950a2b3..00000000000 --- a/cmd/cover/testdata/test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This program is processed by the cover command, and then testAll is called. -// The test driver in main.go can then compare the coverage statistics with expectation. - -// The word LINE is replaced by the line number in this file. When the file is executed, -// the coverage processing has changed the line numbers, so we can't use runtime.Caller. - -package main - -const anything = 1e9 // Just some unlikely value that means "we got here, don't care how often" - -func testAll() { - testSimple() - testBlockRun() - testIf() - testFor() - testRange() - testSwitch() - testTypeSwitch() - testSelect1() - testSelect2() - testPanic() - testEmptySwitches() -} - -// The indexes of the counters in testPanic are known to main.go -const panicIndex = 3 - -// This test appears first because the index of its counters is known to main.go -func testPanic() { - defer func() { - recover() - }() - check(LINE, 1) - panic("should not get next line") - check(LINE, 0) // this is GoCover.Count[panicIndex] - // The next counter is in testSimple and it will be non-zero. - // If the panic above does not trigger a counter, the test will fail - // because GoCover.Count[panicIndex] will be the one in testSimple. -} - -func testSimple() { - check(LINE, 1) -} - -func testIf() { - if true { - check(LINE, 1) - } else { - check(LINE, 0) - } - if false { - check(LINE, 0) - } else { - check(LINE, 1) - } - for i := 0; i < 3; i++ { - if checkVal(LINE, 3, i) <= 2 { - check(LINE, 3) - } - if checkVal(LINE, 3, i) <= 1 { - check(LINE, 2) - } - if checkVal(LINE, 3, i) <= 0 { - check(LINE, 1) - } - } - for i := 0; i < 3; i++ { - if checkVal(LINE, 3, i) <= 1 { - check(LINE, 2) - } else { - check(LINE, 1) - } - } - for i := 0; i < 3; i++ { - if checkVal(LINE, 3, i) <= 0 { - check(LINE, 1) - } else if checkVal(LINE, 2, i) <= 1 { - check(LINE, 1) - } else if checkVal(LINE, 1, i) <= 2 { - check(LINE, 1) - } else if checkVal(LINE, 0, i) <= 3 { - check(LINE, 0) - } - } - if func(a, b int) bool { return a < b }(3, 4) { - check(LINE, 1) - } -} - -func testFor() { - for i := 0; i < 10; func() { i++; check(LINE, 10) }() { - check(LINE, 10) - } -} - -func testRange() { - for _, f := range []func(){ - func() { check(LINE, 1) }, - } { - f() - check(LINE, 1) - } -} - -func testBlockRun() { - check(LINE, 1) - { - check(LINE, 1) - } - { - check(LINE, 1) - } - check(LINE, 1) - { - check(LINE, 1) - } - { - check(LINE, 1) - } - check(LINE, 1) -} - -func testSwitch() { - for i := 0; i < 5; func() { i++; check(LINE, 5) }() { - switch i { - case 0: - check(LINE, 1) - case 1: - check(LINE, 1) - case 2: - check(LINE, 1) - default: - check(LINE, 2) - } - } -} - -func testTypeSwitch() { - var x = []interface{}{1, 2.0, "hi"} - for _, v := range x { - switch func() { check(LINE, 3) }(); v.(type) { - case int: - check(LINE, 1) - case float64: - check(LINE, 1) - case string: - check(LINE, 1) - case complex128: - check(LINE, 0) - default: - check(LINE, 0) - } - } -} - -func testSelect1() { - c := make(chan int) - go func() { - for i := 0; i < 1000; i++ { - c <- i - } - }() - for { - select { - case <-c: - check(LINE, anything) - case <-c: - check(LINE, anything) - default: - check(LINE, 1) - return - } - } -} - -func testSelect2() { - c1 := make(chan int, 1000) - c2 := make(chan int, 1000) - for i := 0; i < 1000; i++ { - c1 <- i - c2 <- i - } - for { - select { - case <-c1: - check(LINE, 1000) - case <-c2: - check(LINE, 1000) - default: - check(LINE, 1) - return - } - } -} - -// Empty control statements created syntax errors. This function -// is here just to be sure that those are handled correctly now. -func testEmptySwitches() { - check(LINE, 1) - switch 3 { - } - check(LINE, 1) - switch i := (interface{})(3).(int); i { - } - check(LINE, 1) - c := make(chan int) - go func() { - check(LINE, 1) - c <- 1 - select {} - }() - <-c - check(LINE, 1) -} diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go index 62cb08d23a8..0e50ad18dcb 100644 --- a/cmd/digraph/digraph.go +++ b/cmd/digraph/digraph.go @@ -34,7 +34,7 @@ The support commands are: sccs all strongly connected components (one per line) scc - the set of nodes nodes strongly connected to the specified one + the set of nodes strongly connected to the specified one focus the subgraph containing all directed paths that pass through the specified node @@ -351,20 +351,29 @@ func parse(rd io.Reader) (graph, error) { g := make(graph) var linenum int - in := bufio.NewScanner(rd) - for in.Scan() { + // We avoid bufio.Scanner as it imposes a (configurable) limit + // on line length, whereas Reader.ReadString does not. + in := bufio.NewReader(rd) + for { linenum++ + line, err := in.ReadString('\n') + eof := false + if err == io.EOF { + eof = true + } else if err != nil { + return nil, err + } // Split into words, honoring double-quotes per Go spec. - words, err := split(in.Text()) + words, err := split(line) if err != nil { return nil, fmt.Errorf("at line %d: %v", linenum, err) } if len(words) > 0 { g.addEdges(words[0], words[1:]...) } - } - if err := in.Err(); err != nil { - return nil, err + if eof { + break + } } return g, nil } diff --git a/cmd/digraph/digraph_test.go b/cmd/digraph/digraph_test.go index cff46735b2d..60b8e75eb72 100644 --- a/cmd/digraph/digraph_test.go +++ b/cmd/digraph/digraph_test.go @@ -45,6 +45,7 @@ e e {"scss", g2, "sccs", nil, "c d\ne\n"}, {"scc", g2, "scc", []string{"d"}, "c\nd\n"}, {"succs", g2, "succs", []string{"a"}, "b\nc\n"}, + {"succs-long-token", g2 + "x " + strings.Repeat("x", 96*1024), "succs", []string{"x"}, strings.Repeat("x", 96*1024) + "\n"}, {"preds", g2, "preds", []string{"c"}, "a\nd\n"}, {"preds multiple args", g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"}, } { diff --git a/cmd/fiximports/main.go b/cmd/fiximports/main.go index 82e5fb57296..8eeacd1eda3 100644 --- a/cmd/fiximports/main.go +++ b/cmd/fiximports/main.go @@ -72,11 +72,9 @@ import ( "flag" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/token" - exec "golang.org/x/sys/execabs" "io" "io/ioutil" "log" @@ -86,6 +84,8 @@ import ( "sort" "strconv" "strings" + + exec "golang.org/x/sys/execabs" ) // flags @@ -137,16 +137,16 @@ type canonicalName struct{ path, name string } // Invariant: a false result implies an error was already printed. func fiximports(packages ...string) bool { // importedBy is the transpose of the package import graph. - importedBy := make(map[string]map[*build.Package]bool) + importedBy := make(map[string]map[*listPackage]bool) // addEdge adds an edge to the import graph. - addEdge := func(from *build.Package, to string) { + addEdge := func(from *listPackage, to string) { if to == "C" || to == "unsafe" { return // fake } pkgs := importedBy[to] if pkgs == nil { - pkgs = make(map[*build.Package]bool) + pkgs = make(map[*listPackage]bool) importedBy[to] = pkgs } pkgs[from] = true @@ -162,7 +162,7 @@ func fiximports(packages ...string) bool { // packageName maps each package's path to its name. packageName := make(map[string]string) for _, p := range pkgs { - packageName[p.ImportPath] = p.Package.Name + packageName[p.ImportPath] = p.Name } // canonical maps each non-canonical package path to @@ -207,21 +207,21 @@ func fiximports(packages ...string) bool { } for _, imp := range p.Imports { - addEdge(&p.Package, imp) + addEdge(p, imp) } for _, imp := range p.TestImports { - addEdge(&p.Package, imp) + addEdge(p, imp) } for _, imp := range p.XTestImports { - addEdge(&p.Package, imp) + addEdge(p, imp) } // Does package have an explicit import comment? if p.ImportComment != "" { if p.ImportComment != p.ImportPath { canonical[p.ImportPath] = canonicalName{ - path: p.Package.ImportComment, - name: p.Package.Name, + path: p.ImportComment, + name: p.Name, } } } else { @@ -273,7 +273,7 @@ func fiximports(packages ...string) bool { // Find all clients (direct importers) of canonical packages. // These are the packages that need fixing up. - clients := make(map[*build.Package]bool) + clients := make(map[*listPackage]bool) for path := range canonical { for client := range importedBy[path] { clients[client] = true @@ -350,7 +350,7 @@ func fiximports(packages ...string) bool { } // Invariant: false result => error already printed. -func rewritePackage(client *build.Package, canonical map[string]canonicalName) bool { +func rewritePackage(client *listPackage, canonical map[string]canonicalName) bool { ok := true used := make(map[string]bool) @@ -389,7 +389,7 @@ func rewritePackage(client *build.Package, canonical map[string]canonicalName) b return ok } -// rewrite reads, modifies, and writes filename, replacing all imports +// rewriteFile reads, modifies, and writes filename, replacing all imports // of packages P in canonical by canonical[P]. // It records in used which canonical packages were imported. // used[P]=="" indicates that P was imported but its canonical path is unknown. @@ -450,11 +450,20 @@ func rewriteFile(filename string, canonical map[string]canonicalName, used map[s return nil } -// listPackage is a copy of cmd/go/list.Package. -// It has more fields than build.Package and we need some of them. +// listPackage corresponds to the output of go list -json, +// but only the fields we need. type listPackage struct { - build.Package - Error *packageError // error loading package + Name string + Dir string + ImportPath string + GoFiles []string + TestGoFiles []string + XTestGoFiles []string + Imports []string + TestImports []string + XTestImports []string + ImportComment string + Error *packageError // error loading package } // A packageError describes an error loading information about a package. diff --git a/cmd/godoc/godoc_test.go b/cmd/godoc/godoc_test.go index 4eb341af1e1..3e91ac6f94c 100644 --- a/cmd/godoc/godoc_test.go +++ b/cmd/godoc/godoc_test.go @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package main_test +package main import ( "bytes" + "context" "fmt" "go/build" "io/ioutil" @@ -13,10 +14,10 @@ import ( "net/http" "os" "os/exec" - "path/filepath" "regexp" "runtime" "strings" + "sync" "testing" "time" @@ -24,42 +25,39 @@ import ( "golang.org/x/tools/internal/testenv" ) -// buildGodoc builds the godoc executable. -// It returns its path, and a cleanup function. -// -// TODO(adonovan): opt: do this at most once, and do the cleanup -// exactly once. How though? There's no atexit. -func buildGodoc(t *testing.T) (bin string, cleanup func()) { - t.Helper() - - if runtime.GOARCH == "arm" { - t.Skip("skipping test on arm platforms; too slow") - } - if runtime.GOOS == "android" { - t.Skipf("the dependencies are not available on android") +func TestMain(m *testing.M) { + if os.Getenv("GODOC_TEST_IS_GODOC") != "" { + main() + os.Exit(0) } - testenv.NeedsTool(t, "go") - tmp, err := ioutil.TempDir("", "godoc-regtest-") - if err != nil { - t.Fatal(err) - } - defer func() { - if cleanup == nil { // probably, go build failed. - os.RemoveAll(tmp) - } - }() + // Inform subprocesses that they should run the cmd/godoc main instead of + // running tests. It's a close approximation to building and running the real + // command, and much less complicated and expensive to build and clean up. + os.Setenv("GODOC_TEST_IS_GODOC", "1") - bin = filepath.Join(tmp, "godoc") - if runtime.GOOS == "windows" { - bin += ".exe" - } - cmd := exec.Command("go", "build", "-o", bin) - if err := cmd.Run(); err != nil { - t.Fatalf("Building godoc: %v", err) + os.Exit(m.Run()) +} + +var exe struct { + path string + err error + once sync.Once +} + +func godocPath(t *testing.T) string { + switch runtime.GOOS { + case "js", "ios": + t.Skipf("skipping test that requires exec") } - return bin, func() { os.RemoveAll(tmp) } + exe.once.Do(func() { + exe.path, exe.err = os.Executable() + }) + if exe.err != nil { + t.Fatal(exe.err) + } + return exe.path } func serverAddress(t *testing.T) string { @@ -74,60 +72,42 @@ func serverAddress(t *testing.T) string { return ln.Addr().String() } -func waitForServerReady(t *testing.T, cmd *exec.Cmd, addr string) { - ch := make(chan error, 1) - go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }() - go waitForServer(t, ch, +func waitForServerReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) { + waitForServer(t, ctx, fmt.Sprintf("http://%v/", addr), "Go Documentation Server", - 15*time.Second, false) - if err := <-ch; err != nil { - t.Skipf("skipping due to https://go.dev/issue/50014: %v", err) - } } -func waitForSearchReady(t *testing.T, cmd *exec.Cmd, addr string) { - ch := make(chan error, 1) - go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }() - go waitForServer(t, ch, +func waitForSearchReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) { + waitForServer(t, ctx, fmt.Sprintf("http://%v/search?q=FALLTHROUGH", addr), "The list of tokens.", - 2*time.Minute, false) - if err := <-ch; err != nil { - t.Skipf("skipping due to https://go.dev/issue/50014: %v", err) - } } -func waitUntilScanComplete(t *testing.T, addr string) { - ch := make(chan error) - go waitForServer(t, ch, +func waitUntilScanComplete(t *testing.T, ctx context.Context, addr string) { + waitForServer(t, ctx, fmt.Sprintf("http://%v/pkg", addr), "Scan is not yet complete", - 2*time.Minute, // setting reverse as true, which means this waits // until the string is not returned in the response anymore - true, - ) - if err := <-ch; err != nil { - t.Skipf("skipping due to https://go.dev/issue/50014: %v", err) - } + true) } -const pollInterval = 200 * time.Millisecond +const pollInterval = 50 * time.Millisecond -// waitForServer waits for server to meet the required condition. -// It sends a single error value to ch, unless the test has failed. -// The error value is nil if the required condition was met within -// timeout, or non-nil otherwise. -func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout time.Duration, reverse bool) { - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - time.Sleep(pollInterval) - if t.Failed() { - return +// waitForServer waits for server to meet the required condition, +// failing the test if ctx is canceled before that occurs. +func waitForServer(t *testing.T, ctx context.Context, url, match string, reverse bool) { + start := time.Now() + for { + if ctx.Err() != nil { + t.Helper() + t.Fatalf("server failed to respond in %v", time.Since(start)) } + + time.Sleep(pollInterval) res, err := http.Get(url) if err != nil { continue @@ -140,11 +120,9 @@ func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout tim switch { case !reverse && bytes.Contains(body, []byte(match)), reverse && !bytes.Contains(body, []byte(match)): - ch <- nil return } } - ch <- fmt.Errorf("server failed to respond in %v", timeout) } // hasTag checks whether a given release tag is contained in the current version @@ -158,24 +136,18 @@ func hasTag(t string) bool { return false } -func killAndWait(cmd *exec.Cmd) { - cmd.Process.Kill() - cmd.Process.Wait() -} - func TestURL(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; fails to start up quickly enough") } - bin, cleanup := buildGodoc(t) - defer cleanup() + bin := godocPath(t) testcase := func(url string, contents string) func(t *testing.T) { return func(t *testing.T) { stdout, stderr := new(bytes.Buffer), new(bytes.Buffer) args := []string{fmt.Sprintf("-url=%s", url)} - cmd := exec.Command(bin, args...) + cmd := testenv.Command(t, bin, args...) cmd.Stdout = stdout cmd.Stderr = stderr cmd.Args[0] = "godoc" @@ -205,8 +177,8 @@ func TestURL(t *testing.T) { // Basic integration test for godoc HTTP interface. func TestWeb(t *testing.T) { - bin, cleanup := buildGodoc(t) - defer cleanup() + bin := godocPath(t) + for _, x := range packagestest.All { t.Run(x.Name(), func(t *testing.T) { testWeb(t, x, bin, false) @@ -217,17 +189,19 @@ func TestWeb(t *testing.T) { // Basic integration test for godoc HTTP interface. func TestWebIndex(t *testing.T) { if testing.Short() { - t.Skip("skipping test in -short mode") + t.Skip("skipping slow test in -short mode") } - bin, cleanup := buildGodoc(t) - defer cleanup() + bin := godocPath(t) testWeb(t, packagestest.GOPATH, bin, true) } // Basic integration test for godoc HTTP interface. func testWeb(t *testing.T, x packagestest.Exporter, bin string, withIndex bool) { - if runtime.GOOS == "plan9" { - t.Skip("skipping on plan9; fails to start up quickly enough") + switch runtime.GOOS { + case "plan9": + t.Skip("skipping on plan9: fails to start up quickly enough") + case "android", "ios": + t.Skip("skipping on mobile: lacks GOROOT/api in test environment") } // Write a fake GOROOT/GOPATH with some third party packages. @@ -256,23 +230,39 @@ package a; import _ "godoc.test/repo2/a"; const Name = "repo1a"`, if withIndex { args = append(args, "-index", "-index_interval=-1s") } - cmd := exec.Command(bin, args...) + cmd := testenv.Command(t, bin, args...) cmd.Dir = e.Config.Dir cmd.Env = e.Config.Env - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr + cmdOut := new(strings.Builder) + cmd.Stdout = cmdOut + cmd.Stderr = cmdOut cmd.Args[0] = "godoc" if err := cmd.Start(); err != nil { t.Fatalf("failed to start godoc: %s", err) } - defer killAndWait(cmd) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + err := cmd.Wait() + t.Logf("%v: %v", cmd, err) + cancel() + }() + defer func() { + // Shut down the server cleanly if possible. + if runtime.GOOS == "windows" { + cmd.Process.Kill() // Windows doesn't support os.Interrupt. + } else { + cmd.Process.Signal(os.Interrupt) + } + <-ctx.Done() + t.Logf("server output:\n%s", cmdOut) + }() if withIndex { - waitForSearchReady(t, cmd, addr) + waitForSearchReady(t, ctx, cmd, addr) } else { - waitForServerReady(t, cmd, addr) - waitUntilScanComplete(t, addr) + waitForServerReady(t, ctx, cmd, addr) + waitUntilScanComplete(t, ctx, addr) } tests := []struct { @@ -454,22 +444,17 @@ func TestNoMainModule(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; for consistency with other tests that build godoc binary") } - bin, cleanup := buildGodoc(t) - defer cleanup() - tempDir, err := ioutil.TempDir("", "godoc-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) + bin := godocPath(t) + tempDir := t.TempDir() // Run godoc in an empty directory with module mode explicitly on, // so that 'go env GOMOD' reports os.DevNull. - cmd := exec.Command(bin, "-url=/") + cmd := testenv.Command(t, bin, "-url=/") cmd.Dir = tempDir cmd.Env = append(os.Environ(), "GO111MODULE=on") var stderr bytes.Buffer cmd.Stderr = &stderr - err = cmd.Run() + err := cmd.Run() if err != nil { t.Fatalf("godoc command failed: %v\nstderr=%q", err, stderr.String()) } diff --git a/cmd/guru/guru.go b/cmd/guru/guru.go index fdb13f92932..7a42aaa3ae3 100644 --- a/cmd/guru/guru.go +++ b/cmd/guru/guru.go @@ -55,12 +55,12 @@ type queryPos struct { info *loader.PackageInfo // type info for the queried package (nil for fastQueryPos) } -// TypeString prints type T relative to the query position. +// typeString prints type T relative to the query position. func (qpos *queryPos) typeString(T types.Type) string { return types.TypeString(T, types.RelativeTo(qpos.info.Pkg)) } -// ObjectString prints object obj relative to the query position. +// objectString prints object obj relative to the query position. func (qpos *queryPos) objectString(obj types.Object) string { return types.ObjectString(obj, types.RelativeTo(qpos.info.Pkg)) } @@ -207,7 +207,7 @@ func pkgContainsFile(bp *build.Package, filename string) byte { return 0 // not found } -// ParseQueryPos parses the source query position pos and returns the +// parseQueryPos parses the source query position pos and returns the // AST node of the loaded program lprog that it identifies. // If needExact, it must identify a single AST subtree; // this is appropriate for queries that allow fairly arbitrary syntax, diff --git a/cmd/guru/referrers.go b/cmd/guru/referrers.go index 78c8ef91a8c..d75196bf93a 100644 --- a/cmd/guru/referrers.go +++ b/cmd/guru/referrers.go @@ -703,7 +703,7 @@ type referrersPackageResult struct { refs []*ast.Ident // set of all other references to it } -// forEachRef calls f(id, text) for id in r.refs, in order. +// foreachRef calls f(id, text) for id in r.refs, in order. // Text is the text of the line on which id appears. func (r *referrersPackageResult) foreachRef(f func(id *ast.Ident, text string)) { // Show referring lines, like grep. diff --git a/cmd/signature-fuzzer/fuzz-runner/runner.go b/cmd/signature-fuzzer/fuzz-runner/runner.go index 4e5b413f3ff..b77b218f5a8 100644 --- a/cmd/signature-fuzzer/fuzz-runner/runner.go +++ b/cmd/signature-fuzzer/fuzz-runner/runner.go @@ -107,7 +107,7 @@ func docmd(cmd []string, dir string) int { return st } -// docodmout forks and execs command 'cmd' in dir 'dir', redirecting +// docmdout forks and execs command 'cmd' in dir 'dir', redirecting // stderr and stdout from the execution to file 'outfile'. func docmdout(cmd []string, dir string, outfile string) int { of, err := os.OpenFile(outfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) diff --git a/cmd/splitdwarf/splitdwarf.go b/cmd/splitdwarf/splitdwarf.go index 13888aa512e..9729b0b7a6a 100644 --- a/cmd/splitdwarf/splitdwarf.go +++ b/cmd/splitdwarf/splitdwarf.go @@ -182,7 +182,7 @@ for input_exe need to allow writing. oldsym := symtab.Syms[ii] newsymtab.Syms = append(newsymtab.Syms, oldsym) - linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: uint32(linkeditstringcur), + linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: linkeditstringcur, Type: oldsym.Type, Sect: oldsym.Sect, Desc: oldsym.Desc, Value: oldsym.Value}) linkeditstringcur += uint32(len(oldsym.Name)) + 1 linkeditstrings = append(linkeditstrings, oldsym.Name) diff --git a/cmd/ssadump/main.go b/cmd/ssadump/main.go index 138e7f69ff2..cfb9122b24d 100644 --- a/cmd/ssadump/main.go +++ b/cmd/ssadump/main.go @@ -157,12 +157,15 @@ func doMain() error { // Build SSA for all packages. prog.Build() - // The interpreter needs the runtime package. - // It is a limitation of go/packages that - // we cannot add "runtime" to its initial set, - // we can only check that it is present. - if prog.ImportedPackage("runtime") == nil { - return fmt.Errorf("-run: program does not depend on runtime") + // Earlier versions of the interpreter needed the runtime + // package; however, interp cannot handle unsafe constructs + // used during runtime's package initialization at the moment. + // The key construct blocking support is: + // *((*T)(unsafe.Pointer(p))) + // Unfortunately, this means only trivial programs can be + // interpreted by ssadump. + if prog.ImportedPackage("runtime") != nil { + return fmt.Errorf("-run: program depends on runtime package (interpreter can run only trivial programs)") } if runtime.GOARCH != build.Default.GOARCH { diff --git a/cmd/stringer/endtoend_test.go b/cmd/stringer/endtoend_test.go index 5b969a52e36..29eb91860ff 100644 --- a/cmd/stringer/endtoend_test.go +++ b/cmd/stringer/endtoend_test.go @@ -14,15 +14,14 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" + "sync" "testing" - "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/typeparams" ) @@ -31,9 +30,22 @@ import ( // we run stringer -type X and then compile and run the program. The resulting // binary panics if the String method for X is not correct, including for error cases. +func TestMain(m *testing.M) { + if os.Getenv("STRINGER_TEST_IS_STRINGER") != "" { + main() + os.Exit(0) + } + + // Inform subprocesses that they should run the cmd/stringer main instead of + // running tests. It's a close approximation to building and running the real + // command, and much less complicated and expensive to build and clean up. + os.Setenv("STRINGER_TEST_IS_STRINGER", "1") + + os.Exit(m.Run()) +} + func TestEndToEnd(t *testing.T) { - dir, stringer := buildStringer(t) - defer os.RemoveAll(dir) + stringer := stringerPath(t) // Read the testdata directory. fd, err := os.Open("testdata") if err != nil { @@ -65,7 +77,7 @@ func TestEndToEnd(t *testing.T) { t.Logf("cgo is not enabled for %s", name) continue } - stringerCompileAndRun(t, dir, stringer, typeName(name), name) + stringerCompileAndRun(t, t.TempDir(), stringer, typeName(name), name) } } @@ -92,8 +104,8 @@ func moreTests(t *testing.T, dirname, prefix string) []string { // TestTags verifies that the -tags flag works as advertised. func TestTags(t *testing.T) { - dir, stringer := buildStringer(t) - defer os.RemoveAll(dir) + stringer := stringerPath(t) + dir := t.TempDir() var ( protectedConst = []byte("TagProtected") output = filepath.Join(dir, "const_string.go") @@ -113,7 +125,7 @@ func TestTags(t *testing.T) { if err != nil { t.Fatal(err) } - result, err := ioutil.ReadFile(output) + result, err := os.ReadFile(output) if err != nil { t.Fatal(err) } @@ -128,7 +140,7 @@ func TestTags(t *testing.T) { if err != nil { t.Fatal(err) } - result, err = ioutil.ReadFile(output) + result, err = os.ReadFile(output) if err != nil { t.Fatal(err) } @@ -140,8 +152,8 @@ func TestTags(t *testing.T) { // TestConstValueChange verifies that if a constant value changes and // the stringer code is not regenerated, we'll get a compiler error. func TestConstValueChange(t *testing.T) { - dir, stringer := buildStringer(t) - defer os.RemoveAll(dir) + stringer := stringerPath(t) + dir := t.TempDir() source := filepath.Join(dir, "day.go") err := copy(source, filepath.Join("testdata", "day.go")) if err != nil { @@ -179,21 +191,20 @@ func TestConstValueChange(t *testing.T) { } } -// buildStringer creates a temporary directory and installs stringer there. -func buildStringer(t *testing.T) (dir string, stringer string) { - t.Helper() - testenv.NeedsTool(t, "go") +var exe struct { + path string + err error + once sync.Once +} - dir, err := ioutil.TempDir("", "stringer") - if err != nil { - t.Fatal(err) - } - stringer = filepath.Join(dir, "stringer.exe") - err = run("go", "build", "-o", stringer) - if err != nil { - t.Fatalf("building stringer: %s", err) +func stringerPath(t *testing.T) string { + exe.once.Do(func() { + exe.path, exe.err = os.Executable() + }) + if exe.err != nil { + t.Fatal(exe.err) } - return dir, stringer + return exe.path } // stringerCompileAndRun runs stringer for the named file and compiles and diff --git a/cmd/stringer/golden_test.go b/cmd/stringer/golden_test.go index b29763174b3..250af05f903 100644 --- a/cmd/stringer/golden_test.go +++ b/cmd/stringer/golden_test.go @@ -10,7 +10,6 @@ package main import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -452,12 +451,7 @@ func (i Token) String() string { func TestGolden(t *testing.T) { testenv.NeedsTool(t, "go") - dir, err := ioutil.TempDir("", "stringer") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) - + dir := t.TempDir() for _, test := range golden { g := Generator{ trimPrefix: test.trimPrefix, @@ -466,7 +460,7 @@ func TestGolden(t *testing.T) { input := "package test\n" + test.input file := test.name + ".go" absFile := filepath.Join(dir, file) - err := ioutil.WriteFile(absFile, []byte(input), 0644) + err := os.WriteFile(absFile, []byte(input), 0644) if err != nil { t.Error(err) } diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go index 9f9c85a0370..998d1a51bfd 100644 --- a/cmd/stringer/stringer.go +++ b/cmd/stringer/stringer.go @@ -76,7 +76,6 @@ import ( "go/format" "go/token" "go/types" - "io/ioutil" "log" "os" "path/filepath" @@ -166,7 +165,7 @@ func main() { baseName := fmt.Sprintf("%s_string.go", types[0]) outputName = filepath.Join(dir, strings.ToLower(baseName)) } - err := ioutil.WriteFile(outputName, src, 0644) + err := os.WriteFile(outputName, src, 0644) if err != nil { log.Fatalf("writing output: %s", err) } @@ -217,7 +216,7 @@ type Package struct { // parsePackage exits if there is an error. func (g *Generator) parsePackage(patterns []string, tags []string) { cfg := &packages.Config{ - Mode: packages.LoadSyntax, + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, // TODO: Need to think about constants in test files. Maybe write type_string_test.go // in a separate pass? For later. Tests: false, diff --git a/cmd/toolstash/buildall b/cmd/toolstash/buildall index 0c6492c9efa..4fc22f7f8fc 100755 --- a/cmd/toolstash/buildall +++ b/cmd/toolstash/buildall @@ -38,10 +38,10 @@ if [ "$pattern" = "" ]; then fi targets="$(go tool dist list; echo linux/386/softfloat)" -targets="$(echo "$targets" | tr '/' '-' | sort | egrep "$pattern" | egrep -v 'android-arm|darwin-arm')" +targets="$(echo "$targets" | tr '/' '-' | sort | grep -E "$pattern" | grep -E -v 'android-arm|darwin-arm')" # put linux first in the target list to get all the architectures up front. -targets="$(echo "$targets" | egrep 'linux') $(echo "$targets" | egrep -v 'linux')" +targets="$(echo "$targets" | grep -E 'linux') $(echo "$targets" | grep -E -v 'linux')" if [ "$sete" = true ]; then set -e diff --git a/container/intsets/sparse.go b/container/intsets/sparse.go index c06aec80b0d..d5fe156ed36 100644 --- a/container/intsets/sparse.go +++ b/container/intsets/sparse.go @@ -190,7 +190,7 @@ func (b *block) min(take bool) int { if take { b.bits[i] = w &^ (1 << uint(tz)) } - return b.offset + int(i*bitsPerWord) + tz + return b.offset + i*bitsPerWord + tz } } panic("BUG: empty block") diff --git a/copyright/copyright.go b/copyright/copyright.go index eb56ef28b22..db63c59922e 100644 --- a/copyright/copyright.go +++ b/copyright/copyright.go @@ -94,7 +94,7 @@ func checkFile(toolsDir, filename string) (bool, error) { return shouldAddCopyright, nil } -// Copied from golang.org/x/tools/internal/lsp/source/util.go. +// Copied from golang.org/x/tools/gopls/internal/lsp/source/util.go. // Matches cgo generated comment as well as the proposed standard: // // https://golang.org/s/generatedcode diff --git a/go.mod b/go.mod index 985b9cc120c..b8944bbd385 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module golang.org/x/tools -go 1.17 +go 1.18 // tagx:compat 1.16 require ( - github.com/yuin/goldmark v1.4.1 - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 - golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 - golang.org/x/text v0.3.7 + github.com/yuin/goldmark v1.4.13 + golang.org/x/mod v0.8.0 + golang.org/x/net v0.6.0 + golang.org/x/sys v0.5.0 ) + +require golang.org/x/sync v0.1.0 diff --git a/go.sum b/go.sum index 85cf00cab79..044d1bf1316 100644 --- a/go.sum +++ b/go.sum @@ -1,30 +1,34 @@ -github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/go/analysis/analysis.go b/go/analysis/analysis.go index d11505a165c..44ada22a03a 100644 --- a/go/analysis/analysis.go +++ b/go/analysis/analysis.go @@ -11,8 +11,6 @@ import ( "go/token" "go/types" "reflect" - - "golang.org/x/tools/internal/analysisinternal" ) // An Analyzer describes an analysis function and its options. @@ -48,6 +46,7 @@ type Analyzer struct { // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a // package that contains parse or type errors. + // The Pass.TypeErrors field may consequently be non-empty. RunDespiteErrors bool // Requires is a set of analyzers that must run successfully @@ -75,17 +74,6 @@ type Analyzer struct { func (a *Analyzer) String() string { return a.Name } -func init() { - // Set the analysisinternal functions to be able to pass type errors - // to the Pass type without modifying the go/analysis API. - analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) { - p.(*Pass).typeErrors = errors - } - analysisinternal.GetTypeErrors = func(p interface{}) []types.Error { - return p.(*Pass).typeErrors - } -} - // A Pass provides information to the Run function that // applies a specific analyzer to a single Go package. // @@ -106,6 +94,7 @@ type Pass struct { Pkg *types.Package // type information about the package TypesInfo *types.Info // type information about the syntax trees TypesSizes types.Sizes // function for computing sizes of types + TypeErrors []types.Error // type errors (only if Analyzer.RunDespiteErrors) // Report reports a Diagnostic, a finding about a specific location // in the analyzed source code such as a potential mistake. diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go index 6ef2e7984fa..a3a53ba9f20 100644 --- a/go/analysis/analysistest/analysistest.go +++ b/go/analysis/analysistest/analysistest.go @@ -19,14 +19,13 @@ import ( "sort" "strconv" "strings" + "testing" "text/scanner" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/checker" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/diff" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/txtar" ) @@ -114,7 +113,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns // should match up. for _, act := range r { // file -> message -> edits - fileEdits := make(map[*token.File]map[string][]diff.TextEdit) + fileEdits := make(map[*token.File]map[string][]diff.Edit) fileContents := make(map[*token.File][]byte) // Validate edits, prepare the fileEdits map and read the file contents. @@ -142,17 +141,13 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns } fileContents[file] = contents } - spn, err := span.NewRange(act.Pass.Fset, edit.Pos, edit.End).Span() - if err != nil { - t.Errorf("error converting edit to span %s: %v", file.Name(), err) - } - if _, ok := fileEdits[file]; !ok { - fileEdits[file] = make(map[string][]diff.TextEdit) + fileEdits[file] = make(map[string][]diff.Edit) } - fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.TextEdit{ - Span: spn, - NewText: string(edit.NewText), + fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.Edit{ + Start: file.Offset(edit.Pos), + End: file.Offset(edit.End), + New: string(edit.NewText), }) } } @@ -189,7 +184,11 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns for _, vf := range ar.Files { if vf.Name == sf { found = true - out := diff.ApplyEdits(string(orig), edits) + out, err := diff.Apply(string(orig), edits) + if err != nil { + t.Errorf("%s: error applying fixes: %v", file.Name(), err) + continue + } // the file may contain multiple trailing // newlines if the user places empty lines // between files in the archive. normalize @@ -200,12 +199,9 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns t.Errorf("%s: error formatting edited source: %v\n%s", file.Name(), err, out) continue } - if want != string(formatted) { - d, err := myers.ComputeEdits("", want, string(formatted)) - if err != nil { - t.Errorf("failed to compute suggested fix diff: %v", err) - } - t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, d)) + if got := string(formatted); got != want { + unified := diff.Unified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, got) + t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified) } break } @@ -217,12 +213,16 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns } else { // all suggested fixes are represented by a single file - var catchallEdits []diff.TextEdit + var catchallEdits []diff.Edit for _, edits := range fixes { catchallEdits = append(catchallEdits, edits...) } - out := diff.ApplyEdits(string(orig), catchallEdits) + out, err := diff.Apply(string(orig), catchallEdits) + if err != nil { + t.Errorf("%s: error applying fixes: %v", file.Name(), err) + continue + } want := string(ar.Comment) formatted, err := format.Source([]byte(out)) @@ -230,12 +230,9 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns t.Errorf("%s: error formatting resulting source: %v\n%s", file.Name(), err, out) continue } - if want != string(formatted) { - d, err := myers.ComputeEdits("", want, string(formatted)) - if err != nil { - t.Errorf("%s: failed to compute suggested fix diff: %s", file.Name(), err) - } - t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(file.Name()+".golden", "actual", want, d)) + if got := string(formatted); got != want { + unified := diff.Unified(file.Name()+".golden", "actual", want, got) + t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified) } } } @@ -282,7 +279,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns // attempted, even if unsuccessful. It is safe for a test to ignore all // the results, but a test may use it to perform additional checks. func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { - if t, ok := t.(testenv.Testing); ok { + if t, ok := t.(testing.TB); ok { testenv.NeedsGoPackages(t) } diff --git a/go/analysis/diagnostic.go b/go/analysis/diagnostic.go index cd462a0cb55..5cdcf46d2a1 100644 --- a/go/analysis/diagnostic.go +++ b/go/analysis/diagnostic.go @@ -37,7 +37,7 @@ type Diagnostic struct { // declaration. type RelatedInformation struct { Pos token.Pos - End token.Pos + End token.Pos // optional Message string } diff --git a/go/analysis/doc.go b/go/analysis/doc.go index 03c31525e36..c5429c9e239 100644 --- a/go/analysis/doc.go +++ b/go/analysis/doc.go @@ -177,14 +177,14 @@ Diagnostic is defined as: The optional Category field is a short identifier that classifies the kind of message when an analysis produces several kinds of diagnostic. -Many analyses want to associate diagnostics with a severity level. -Because Diagnostic does not have a severity level field, an Analyzer's -diagnostics effectively all have the same severity level. To separate which -diagnostics are high severity and which are low severity, expose multiple -Analyzers instead. Analyzers should also be separated when their -diagnostics belong in different groups, or could be tagged differently -before being shown to the end user. Analyzers should document their severity -level to help downstream tools surface diagnostics properly. +The Diagnostic struct does not have a field to indicate its severity +because opinions about the relative importance of Analyzers and their +diagnostics vary widely among users. The design of this framework does +not hold each Analyzer responsible for identifying the severity of its +diagnostics. Instead, we expect that drivers will allow the user to +customize the filtering and prioritization of diagnostics based on the +producing Analyzer and optional Category, according to the user's +preferences. Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl and buildtag, inspect the raw text of Go source files or even non-Go @@ -244,6 +244,9 @@ if the default encoding is unsuitable. Facts should be stateless. Because serialized facts may appear within build outputs, the gob encoding of a fact must be deterministic, to avoid spurious cache misses in build systems that use content-addressable caches. +The driver makes a single call to the gob encoder for all facts +exported by a given analysis pass, so that the topology of +shared data structures referenced by multiple facts is preserved. The Pass type has functions to import and export facts, associated either with an object or with a package: @@ -297,7 +300,7 @@ singlechecker and multichecker subpackages. The singlechecker package provides the main function for a command that runs one analyzer. By convention, each analyzer such as -go/passes/findcall should be accompanied by a singlechecker-based +go/analysis/passes/findcall should be accompanied by a singlechecker-based command such as go/analysis/passes/findcall/cmd/findcall, defined in its entirety as: diff --git a/go/analysis/internal/analysisflags/flags.go b/go/analysis/internal/analysisflags/flags.go index 4b7be2d1f5f..e127a42b97a 100644 --- a/go/analysis/internal/analysisflags/flags.go +++ b/go/analysis/internal/analysisflags/flags.go @@ -206,7 +206,7 @@ func (versionFlag) Get() interface{} { return nil } func (versionFlag) String() string { return "" } func (versionFlag) Set(s string) error { if s != "full" { - log.Fatalf("unsupported flag value: -V=%s", s) + log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s) } // This replicates the minimal subset of @@ -218,7 +218,10 @@ func (versionFlag) Set(s string) error { // Formats: // $progname version devel ... buildID=... // $progname version go1.9.1 - progname := os.Args[0] + progname, err := os.Executable() + if err != nil { + return err + } f, err := os.Open(progname) if err != nil { log.Fatal(err) @@ -339,9 +342,38 @@ func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) { } // A JSONTree is a mapping from package ID to analysis name to result. -// Each result is either a jsonError or a list of jsonDiagnostic. +// Each result is either a jsonError or a list of JSONDiagnostic. type JSONTree map[string]map[string]interface{} +// A TextEdit describes the replacement of a portion of a file. +// Start and End are zero-based half-open indices into the original byte +// sequence of the file, and New is the new text. +type JSONTextEdit struct { + Filename string `json:"filename"` + Start int `json:"start"` + End int `json:"end"` + New string `json:"new"` +} + +// A JSONSuggestedFix describes an edit that should be applied as a whole or not +// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix +// consists of multiple non-contiguous edits. +type JSONSuggestedFix struct { + Message string `json:"message"` + Edits []JSONTextEdit `json:"edits"` +} + +// A JSONDiagnostic can be used to encode and decode analysis.Diagnostics to and +// from JSON. +// TODO(matloob): Should the JSON diagnostics contain ranges? +// If so, how should they be formatted? +type JSONDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` + Message string `json:"message"` + SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"` +} + // Add adds the result of analysis 'name' on package 'id'. // The result is either a list of diagnostics or an error. func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { @@ -352,20 +384,31 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis. } v = jsonError{err.Error()} } else if len(diags) > 0 { - type jsonDiagnostic struct { - Category string `json:"category,omitempty"` - Posn string `json:"posn"` - Message string `json:"message"` - } - var diagnostics []jsonDiagnostic - // TODO(matloob): Should the JSON diagnostics contain ranges? - // If so, how should they be formatted? + diagnostics := make([]JSONDiagnostic, 0, len(diags)) for _, f := range diags { - diagnostics = append(diagnostics, jsonDiagnostic{ - Category: f.Category, - Posn: fset.Position(f.Pos).String(), - Message: f.Message, - }) + var fixes []JSONSuggestedFix + for _, fix := range f.SuggestedFixes { + var edits []JSONTextEdit + for _, edit := range fix.TextEdits { + edits = append(edits, JSONTextEdit{ + Filename: fset.Position(edit.Pos).Filename, + Start: fset.Position(edit.Pos).Offset, + End: fset.Position(edit.End).Offset, + New: string(edit.NewText), + }) + } + fixes = append(fixes, JSONSuggestedFix{ + Message: fix.Message, + Edits: edits, + }) + } + jdiag := JSONDiagnostic{ + Category: f.Category, + Posn: fset.Position(f.Pos).String(), + Message: f.Message, + SuggestedFixes: fixes, + } + diagnostics = append(diagnostics, jdiag) } v = diagnostics } diff --git a/go/analysis/internal/analysisflags/flags_test.go b/go/analysis/internal/analysisflags/flags_test.go index 1f055dde72d..b5cfb3d4430 100644 --- a/go/analysis/internal/analysisflags/flags_test.go +++ b/go/analysis/internal/analysisflags/flags_test.go @@ -42,7 +42,7 @@ func TestExec(t *testing.T) { for _, test := range []struct { flags string - want string + want string // output should contain want }{ {"", "[a1 a2 a3]"}, {"-a1=0", "[a2 a3]"}, @@ -50,6 +50,7 @@ func TestExec(t *testing.T) { {"-a1", "[a1]"}, {"-a1=1 -a3=1", "[a1 a3]"}, {"-a1=1 -a3=0", "[a1]"}, + {"-V=full", "analysisflags.test version devel"}, } { cmd := exec.Command(progname, "-test.run=TestExec") cmd.Env = append(os.Environ(), "ANALYSISFLAGS_CHILD=1", "FLAGS="+test.flags) @@ -60,8 +61,8 @@ func TestExec(t *testing.T) { } got := strings.TrimSpace(string(output)) - if got != test.want { - t.Errorf("got %s, want %s", got, test.want) + if !strings.Contains(got, test.want) { + t.Errorf("got %q, does not contain %q", got, test.want) } } } diff --git a/go/analysis/internal/checker/checker.go b/go/analysis/internal/checker/checker.go index 51cbf689ac0..8f49e8fc76b 100644 --- a/go/analysis/internal/checker/checker.go +++ b/go/analysis/internal/checker/checker.go @@ -15,7 +15,6 @@ import ( "flag" "fmt" "go/format" - "go/parser" "go/token" "go/types" "io/ioutil" @@ -33,8 +32,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/robustio" ) var ( @@ -147,7 +146,11 @@ func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { roots := analyze(initial, analyzers) if Fix { - applyFixes(roots) + if err := applyFixes(roots); err != nil { + // Fail when applying fixes failed. + log.Print(err) + return 1 + } } return printDiagnostics(roots) } @@ -305,7 +308,10 @@ func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action return roots } -func applyFixes(roots []*action) { +func applyFixes(roots []*action) error { + // visit all of the actions and accumulate the suggested edits. + paths := make(map[robustio.FileID]string) + editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit) visited := make(map[*action]bool) var apply func(*action) error var visitAll func(actions []*action) error @@ -313,7 +319,9 @@ func applyFixes(roots []*action) { for _, act := range actions { if !visited[act] { visited[act] = true - visitAll(act.deps) + if err := visitAll(act.deps); err != nil { + return err + } if err := apply(act); err != nil { return err } @@ -322,116 +330,168 @@ func applyFixes(roots []*action) { return nil } - // TODO(matloob): Is this tree business too complicated? (After all this is Go!) - // Just create a set (map) of edits, sort by pos and call it a day? - type offsetedit struct { - start, end int - newText []byte - } // TextEdit using byteOffsets instead of pos - type node struct { - edit offsetedit - left, right *node - } - - var insert func(tree **node, edit offsetedit) error - insert = func(treeptr **node, edit offsetedit) error { - if *treeptr == nil { - *treeptr = &node{edit, nil, nil} - return nil - } - tree := *treeptr - if edit.end <= tree.edit.start { - return insert(&tree.left, edit) - } else if edit.start >= tree.edit.end { - return insert(&tree.right, edit) - } - - // Overlapping text edit. - return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)", - edit.start, edit.end, tree.edit.start, tree.edit.end) - - } - - editsForFile := make(map[*token.File]*node) - apply = func(act *action) error { + editsForTokenFile := make(map[*token.File][]diff.Edit) for _, diag := range act.diagnostics { for _, sf := range diag.SuggestedFixes { for _, edit := range sf.TextEdits { // Validate the edit. + // Any error here indicates a bug in the analyzer. + file := act.pkg.Fset.File(edit.Pos) + if file == nil { + return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)", + act.a.Name, edit.Pos) + } if edit.Pos > edit.End { - return fmt.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)", + return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)", act.a.Name, edit.Pos, edit.End) } - file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End) - if file == nil || endfile == nil || file != endfile { - return (fmt.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v", - act.a.Name, file.Name(), endfile.Name())) - } - start, end := file.Offset(edit.Pos), file.Offset(edit.End) - - // TODO(matloob): Validate that edits do not affect other packages. - root := editsForFile[file] - if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil { - return err + if eof := token.Pos(file.Base() + file.Size()); edit.End > eof { + return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)", + act.a.Name, edit.End, eof) } - editsForFile[file] = root // In case the root changed + edit := diff.Edit{Start: file.Offset(edit.Pos), End: file.Offset(edit.End), New: string(edit.NewText)} + editsForTokenFile[file] = append(editsForTokenFile[file], edit) } } } + + for f, edits := range editsForTokenFile { + id, _, err := robustio.GetFileID(f.Name()) + if err != nil { + return err + } + if _, hasId := paths[id]; !hasId { + paths[id] = f.Name() + editsByAction[id] = make(map[*action][]diff.Edit) + } + editsByAction[id][act] = edits + } return nil } - visitAll(roots) + if err := visitAll(roots); err != nil { + return err + } - fset := token.NewFileSet() // Shared by parse calls below - // Now we've got a set of valid edits for each file. Get the new file contents. - for f, tree := range editsForFile { - contents, err := ioutil.ReadFile(f.Name()) - if err != nil { - log.Fatal(err) + // Validate and group the edits to each actual file. + editsByPath := make(map[string][]diff.Edit) + for id, actToEdits := range editsByAction { + path := paths[id] + actions := make([]*action, 0, len(actToEdits)) + for act := range actToEdits { + actions = append(actions, act) } - cur := 0 // current position in the file - - var out bytes.Buffer - - var recurse func(*node) - recurse = func(node *node) { - if node.left != nil { - recurse(node.left) + // Does any action create conflicting edits? + for _, act := range actions { + edits := actToEdits[act] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := act.a.Name, edits[invalid-1], edits[invalid] + return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) } + } - edit := node.edit - if edit.start > cur { - out.Write(contents[cur:edit.start]) - out.Write(edit.newText) + // Does any pair of different actions create edits that conflict? + for j := range actions { + for k := range actions[:j] { + x, y := actions[j], actions[k] + if x.a.Name > y.a.Name { + x, y = y, x + } + xedits, yedits := actToEdits[x], actToEdits[y] + combined := append(xedits, yedits...) + if _, invalid := validateEdits(combined); invalid > 0 { + // TODO: consider applying each action's consistent list of edits entirely, + // and then using a three-way merge (such as GNU diff3) on the resulting + // files to report more precisely the parts that actually conflict. + return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits) + } } - cur = edit.end + } - if node.right != nil { - recurse(node.right) - } + var edits []diff.Edit + for act := range actToEdits { + edits = append(edits, actToEdits[act]...) } - recurse(tree) - // Write out the rest of the file. - if cur < len(contents) { - out.Write(contents[cur:]) + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. + } + + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + applied, err := diff.Apply(string(contents), edits) + if err != nil { + return err } + out := []byte(applied) // Try to format the file. - ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments) - if err == nil { - var buf bytes.Buffer - if err = format.Node(&buf, fset, ff); err == nil { - out = buf + if formatted, err := format.Source(out); err == nil { + out = formatted + } + + if err := ioutil.WriteFile(path, out, 0644); err != nil { + return err + } + } + return nil +} + +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } + diff.SortEdits(edits) + unique := []diff.Edit{edits[0]} + invalid := -1 + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i } } + } + return unique, invalid +} + +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. +func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + oldlabel, old := "base", string(contents) - ioutil.WriteFile(f.Name(), out.Bytes(), 0644) + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits) + if err != nil { + return err } + + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) } // printDiagnostics prints the diagnostics for the root packages in either @@ -578,7 +638,6 @@ type action struct { deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - inputs map[*analysis.Analyzer]interface{} result interface{} diagnostics []analysis.Diagnostic err error @@ -676,14 +735,16 @@ func (act *action) execOnce() { // Run the analysis. pass := &analysis.Pass{ - Analyzer: act.a, - Fset: act.pkg.Fset, - Files: act.pkg.Syntax, - OtherFiles: act.pkg.OtherFiles, - IgnoredFiles: act.pkg.IgnoredFiles, - Pkg: act.pkg.Types, - TypesInfo: act.pkg.TypesInfo, - TypesSizes: act.pkg.TypesSizes, + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + OtherFiles: act.pkg.OtherFiles, + IgnoredFiles: act.pkg.IgnoredFiles, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + TypesSizes: act.pkg.TypesSizes, + TypeErrors: act.pkg.TypeErrors, + ResultOf: inputs, Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, ImportObjectFact: act.importObjectFact, @@ -695,36 +756,6 @@ func (act *action) execOnce() { } act.pass = pass - var errors []types.Error - // Get any type errors that are attributed to the pkg. - // This is necessary to test analyzers that provide - // suggested fixes for compiler/type errors. - for _, err := range act.pkg.Errors { - if err.Kind != packages.TypeError { - continue - } - // err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-" - spn := span.Parse(err.Pos) - // Extract the token positions from the error string. - line, col, offset := spn.Start().Line(), spn.Start().Column(), -1 - act.pkg.Fset.Iterate(func(f *token.File) bool { - if f.Name() != spn.URI().Filename() { - return true - } - offset = int(f.LineStart(line)) + col - 1 - return false - }) - if offset == -1 { - continue - } - errors = append(errors, types.Error{ - Fset: act.pkg.Fset, - Msg: err.Msg, - Pos: token.Pos(offset), - }) - } - analysisinternal.SetTypeErrors(pass, errors) - var err error if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { err = fmt.Errorf("analysis skipped due to errors in package") @@ -766,7 +797,7 @@ func inheritFacts(act, dep *action) { if serialize { encodedFact, err := codeFact(fact) if err != nil { - log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) } fact = encodedFact } @@ -894,7 +925,7 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { func (act *action) allObjectFacts() []analysis.ObjectFact { facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) for k := range act.objectFacts { - facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]}) + facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) } return facts } @@ -936,11 +967,11 @@ func factType(fact analysis.Fact) reflect.Type { return t } -// allObjectFacts implements Pass.AllObjectFacts. +// allPackageFacts implements Pass.AllPackageFacts. func (act *action) allPackageFacts() []analysis.PackageFact { facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]}) + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) } return facts } diff --git a/go/analysis/internal/checker/checker_test.go b/go/analysis/internal/checker/checker_test.go index eee211c21a4..34acae81e16 100644 --- a/go/analysis/internal/checker/checker_test.go +++ b/go/analysis/internal/checker/checker_test.go @@ -19,14 +19,9 @@ import ( "golang.org/x/tools/internal/testenv" ) -var from, to string - func TestApplyFixes(t *testing.T) { testenv.NeedsGoPackages(t) - from = "bar" - to = "baz" - files := map[string]string{ "rename/test.go": `package rename @@ -74,26 +69,55 @@ var analyzer = &analysis.Analyzer{ Run: run, } +var other = &analysis.Analyzer{ // like analyzer but with a different Name. + Name: "other", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + func run(pass *analysis.Pass) (interface{}, error) { + const ( + from = "bar" + to = "baz" + conflict = "conflict" // add conflicting edits to package conflict. + duplicate = "duplicate" // add duplicate edits to package conflict. + other = "other" // add conflicting edits to package other from different analyzers. + ) + + if pass.Analyzer.Name == other { + if pass.Pkg.Name() != other { + return nil, nil // only apply Analyzer other to packages named other + } + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{(*ast.Ident)(nil)} inspect.Preorder(nodeFilter, func(n ast.Node) { ident := n.(*ast.Ident) if ident.Name == from { msg := fmt.Sprintf("renaming %q to %q", from, to) + edits := []analysis.TextEdit{ + {Pos: ident.Pos(), End: ident.End(), NewText: []byte(to)}, + } + switch pass.Pkg.Name() { + case conflict: + edits = append(edits, []analysis.TextEdit{ + {Pos: ident.Pos() - 1, End: ident.End(), NewText: []byte(to)}, + {Pos: ident.Pos(), End: ident.End() - 1, NewText: []byte(to)}, + {Pos: ident.Pos(), End: ident.End(), NewText: []byte("lorem ipsum")}, + }...) + case duplicate: + edits = append(edits, edits...) + case other: + if pass.Analyzer.Name == other { + edits[0].Pos = edits[0].Pos + 1 // shift by one to mismatch analyzer and other + } + } pass.Report(analysis.Diagnostic{ - Pos: ident.Pos(), - End: ident.End(), - Message: msg, - SuggestedFixes: []analysis.SuggestedFix{{ - Message: msg, - TextEdits: []analysis.TextEdit{{ - Pos: ident.Pos(), - End: ident.End(), - NewText: []byte(to), - }}, - }}, - }) + Pos: ident.Pos(), + End: ident.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{{Message: msg, TextEdits: edits}}}) } }) @@ -129,6 +153,18 @@ func Foo(s string) int { RunDespiteErrors: true, } + // A no-op analyzer that should finish regardless of + // parse or type errors in the code. + noopWithFact := &analysis.Analyzer{ + Name: "noopfact", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (interface{}, error) { + return nil, nil + }, + RunDespiteErrors: true, + FactTypes: []analysis.Fact{&EmptyFact{}}, + } + for _, test := range []struct { name string pattern []string @@ -137,7 +173,17 @@ func Foo(s string) int { }{ // parse/type errors {name: "skip-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{analyzer}, code: 1}, - {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 0}, + // RunDespiteErrors allows a driver to run an Analyzer even after parse/type errors. + // + // The noop analyzer doesn't use facts, so the driver loads only the root + // package from source. For the rest, it asks 'go list' for export data, + // which fails because the compiler encounters the type error. Since the + // errors come from 'go list', the driver doesn't run the analyzer. + {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 1}, + // The noopfact analyzer does use facts, so the driver loads source for + // all dependencies, does type checking itself, recognizes the error as a + // type error, and runs the analyzer. + {name: "despite-error-fact", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noopWithFact}, code: 0}, // combination of parse/type errors and no errors {name: "despite-error-and-no-error", pattern: []string{"file=" + path, "sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 1}, // non-existing package error @@ -151,6 +197,10 @@ func Foo(s string) int { // no errors {name: "no-errors", pattern: []string{"sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 0}, } { + if test.name == "despite-error" && testenv.Go1Point() < 20 { + // The behavior in the comment on the despite-error test only occurs for Go 1.20+. + continue + } if got := checker.Run(test.pattern, test.analyzers); got != test.code { t.Errorf("got incorrect exit code %d for test %s; want %d", got, test.name, test.code) } @@ -158,3 +208,7 @@ func Foo(s string) int { defer cleanup() } + +type EmptyFact struct{} + +func (f *EmptyFact) AFact() {} diff --git a/go/analysis/internal/checker/fix_test.go b/go/analysis/internal/checker/fix_test.go new file mode 100644 index 00000000000..3ea92b38cc1 --- /dev/null +++ b/go/analysis/internal/checker/fix_test.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checker_test + +import ( + "flag" + "io/ioutil" + "os" + "os/exec" + "path" + "regexp" + "runtime" + "testing" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/internal/testenv" +) + +func main() { + checker.Fix = true + patterns := flag.Args() + + code := checker.Run(patterns, []*analysis.Analyzer{analyzer, other}) + os.Exit(code) +} + +// TestFixes ensures that checker.Run applies fixes correctly. +// This test fork/execs the main function above. +func TestFixes(t *testing.T) { + oses := map[string]bool{"darwin": true, "linux": true} + if !oses[runtime.GOOS] { + t.Skipf("skipping fork/exec test on this platform") + } + + if os.Getenv("TESTFIXES_CHILD") == "1" { + // child process + + // replace [progname -test.run=TestFixes -- ...] + // by [progname ...] + os.Args = os.Args[2:] + os.Args[0] = "vet" + main() + panic("unreachable") + } + + testenv.NeedsTool(t, "go") + + files := map[string]string{ + "rename/foo.go": `package rename + +func Foo() { + bar := 12 + _ = bar +} + +// the end +`, + "rename/intestfile_test.go": `package rename + +func InTestFile() { + bar := 13 + _ = bar +} + +// the end +`, + "rename/foo_test.go": `package rename_test + +func Foo() { + bar := 14 + _ = bar +} + +// the end +`, + "duplicate/dup.go": `package duplicate + +func Foo() { + bar := 14 + _ = bar +} + +// the end +`, + } + fixed := map[string]string{ + "rename/foo.go": `package rename + +func Foo() { + baz := 12 + _ = baz +} + +// the end +`, + "rename/intestfile_test.go": `package rename + +func InTestFile() { + baz := 13 + _ = baz +} + +// the end +`, + "rename/foo_test.go": `package rename_test + +func Foo() { + baz := 14 + _ = baz +} + +// the end +`, + "duplicate/dup.go": `package duplicate + +func Foo() { + baz := 14 + _ = baz +} + +// the end +`, + } + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatalf("Creating test files failed with %s", err) + } + defer cleanup() + + args := []string{"-test.run=TestFixes", "--", "rename", "duplicate"} + cmd := exec.Command(os.Args[0], args...) + cmd.Env = append(os.Environ(), "TESTFIXES_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off") + + out, err := cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s: out=<<%s>>", args, out) + } + var exitcode int + if err, ok := err.(*exec.ExitError); ok { + exitcode = err.ExitCode() // requires go1.12 + } + + const diagnosticsExitCode = 3 + if exitcode != diagnosticsExitCode { + t.Errorf("%s: exited %d, want %d", args, exitcode, diagnosticsExitCode) + } + + for name, want := range fixed { + path := path.Join(dir, "src", name) + contents, err := ioutil.ReadFile(path) + if err != nil { + t.Errorf("error reading %s: %v", path, err) + } + if got := string(contents); got != want { + t.Errorf("contents of %s file did not match expectations. got=%s, want=%s", path, got, want) + } + } +} + +// TestConflict ensures that checker.Run detects conflicts correctly. +// This test fork/execs the main function above. +func TestConflict(t *testing.T) { + oses := map[string]bool{"darwin": true, "linux": true} + if !oses[runtime.GOOS] { + t.Skipf("skipping fork/exec test on this platform") + } + + if os.Getenv("TESTCONFLICT_CHILD") == "1" { + // child process + + // replace [progname -test.run=TestConflict -- ...] + // by [progname ...] + os.Args = os.Args[2:] + os.Args[0] = "vet" + main() + panic("unreachable") + } + + testenv.NeedsTool(t, "go") + + files := map[string]string{ + "conflict/foo.go": `package conflict + +func Foo() { + bar := 12 + _ = bar +} + +// the end +`, + } + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatalf("Creating test files failed with %s", err) + } + defer cleanup() + + args := []string{"-test.run=TestConflict", "--", "conflict"} + cmd := exec.Command(os.Args[0], args...) + cmd.Env = append(os.Environ(), "TESTCONFLICT_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off") + + out, err := cmd.CombinedOutput() + var exitcode int + if err, ok := err.(*exec.ExitError); ok { + exitcode = err.ExitCode() // requires go1.12 + } + const errExitCode = 1 + if exitcode != errExitCode { + t.Errorf("%s: exited %d, want %d", args, exitcode, errExitCode) + } + + pattern := `conflicting edits from rename and rename on /.*/conflict/foo.go` + matched, err := regexp.Match(pattern, out) + if err != nil { + t.Errorf("error matching pattern %s: %v", pattern, err) + } else if !matched { + t.Errorf("%s: output was=<<%s>>. Expected it to match <<%s>>", args, out, pattern) + } + + // No files updated + for name, want := range files { + path := path.Join(dir, "src", name) + contents, err := ioutil.ReadFile(path) + if err != nil { + t.Errorf("error reading %s: %v", path, err) + } + if got := string(contents); got != want { + t.Errorf("contents of %s file updated. got=%s, want=%s", path, got, want) + } + } +} + +// TestOther ensures that checker.Run reports conflicts from +// distinct actions correctly. +// This test fork/execs the main function above. +func TestOther(t *testing.T) { + oses := map[string]bool{"darwin": true, "linux": true} + if !oses[runtime.GOOS] { + t.Skipf("skipping fork/exec test on this platform") + } + + if os.Getenv("TESTOTHER_CHILD") == "1" { + // child process + + // replace [progname -test.run=TestOther -- ...] + // by [progname ...] + os.Args = os.Args[2:] + os.Args[0] = "vet" + main() + panic("unreachable") + } + + testenv.NeedsTool(t, "go") + + files := map[string]string{ + "other/foo.go": `package other + +func Foo() { + bar := 12 + _ = bar +} + +// the end +`, + } + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatalf("Creating test files failed with %s", err) + } + defer cleanup() + + args := []string{"-test.run=TestOther", "--", "other"} + cmd := exec.Command(os.Args[0], args...) + cmd.Env = append(os.Environ(), "TESTOTHER_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off") + + out, err := cmd.CombinedOutput() + var exitcode int + if err, ok := err.(*exec.ExitError); ok { + exitcode = err.ExitCode() // requires go1.12 + } + const errExitCode = 1 + if exitcode != errExitCode { + t.Errorf("%s: exited %d, want %d", args, exitcode, errExitCode) + } + + pattern := `conflicting edits from other and rename on /.*/other/foo.go` + matched, err := regexp.Match(pattern, out) + if err != nil { + t.Errorf("error matching pattern %s: %v", pattern, err) + } else if !matched { + t.Errorf("%s: output was=<<%s>>. Expected it to match <<%s>>", args, out, pattern) + } + + // No files updated + for name, want := range files { + path := path.Join(dir, "src", name) + contents, err := ioutil.ReadFile(path) + if err != nil { + t.Errorf("error reading %s: %v", path, err) + } + if got := string(contents); got != want { + t.Errorf("contents of %s file updated. got=%s, want=%s", path, got, want) + } + } +} diff --git a/go/analysis/internal/checker/start_test.go b/go/analysis/internal/checker/start_test.go new file mode 100644 index 00000000000..ede21159bc8 --- /dev/null +++ b/go/analysis/internal/checker/start_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checker_test + +import ( + "go/ast" + "io/ioutil" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/testenv" +) + +// TestStartFixes make sure modifying the first character +// of the file takes effect. +func TestStartFixes(t *testing.T) { + testenv.NeedsGoPackages(t) + + files := map[string]string{ + "comment/doc.go": `/* Package comment */ +package comment +`} + + want := `// Package comment +package comment +` + + testdata, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatal(err) + } + path := filepath.Join(testdata, "src/comment/doc.go") + checker.Fix = true + checker.Run([]string{"file=" + path}, []*analysis.Analyzer{commentAnalyzer}) + + contents, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + got := string(contents) + if got != want { + t.Errorf("contents of rewritten file\ngot: %s\nwant: %s", got, want) + } + + defer cleanup() +} + +var commentAnalyzer = &analysis.Analyzer{ + Name: "comment", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: commentRun, +} + +func commentRun(pass *analysis.Pass) (interface{}, error) { + const ( + from = "/* Package comment */" + to = "// Package comment" + ) + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + inspect.Preorder(nil, func(n ast.Node) { + if n, ok := n.(*ast.Comment); ok && n.Text == from { + pass.Report(analysis.Diagnostic{ + Pos: n.Pos(), + End: n.End(), + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: n.Pos(), + End: n.End(), + NewText: []byte(to), + }}, + }}, + }) + } + }) + + return nil, nil +} diff --git a/go/analysis/internal/facts/facts_test.go b/go/analysis/internal/facts/facts_test.go deleted file mode 100644 index a55e30d7a31..00000000000 --- a/go/analysis/internal/facts/facts_test.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package facts_test - -import ( - "encoding/gob" - "fmt" - "go/token" - "go/types" - "os" - "reflect" - "testing" - - "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/go/analysis/internal/facts" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/testenv" - "golang.org/x/tools/internal/typeparams" -) - -type myFact struct { - S string -} - -func (f *myFact) String() string { return fmt.Sprintf("myFact(%s)", f.S) } -func (f *myFact) AFact() {} - -func init() { - gob.Register(new(myFact)) -} - -func TestEncodeDecode(t *testing.T) { - tests := []struct { - name string - typeparams bool // requires typeparams to be enabled - files map[string]string - plookups []pkgLookups // see testEncodeDecode for details - }{ - { - name: "loading-order", - // c -> b -> a, a2 - // c does not directly depend on a, but it indirectly uses a.T. - // - // Package a2 is never loaded directly so it is incomplete. - // - // We use only types in this example because we rely on - // types.Eval to resolve the lookup expressions, and it only - // works for types. This is a definite gap in the typechecker API. - files: map[string]string{ - "a/a.go": `package a; type A int; type T int`, - "a2/a.go": `package a2; type A2 int; type Unneeded int`, - "b/b.go": `package b; import ("a"; "a2"); type B chan a2.A2; type F func() a.T`, - "c/c.go": `package c; import "b"; type C []b.B`, - }, - // In the following table, we analyze packages (a, b, c) in order, - // look up various objects accessible within each package, - // and see if they have a fact. The "analysis" exports a fact - // for every object at package level. - // - // Note: Loop iterations are not independent test cases; - // order matters, as we populate factmap. - plookups: []pkgLookups{ - {"a", []lookup{ - {"A", "myFact(a.A)"}, - }}, - {"b", []lookup{ - {"a.A", "myFact(a.A)"}, - {"a.T", "myFact(a.T)"}, - {"B", "myFact(b.B)"}, - {"F", "myFact(b.F)"}, - {"F(nil)()", "myFact(a.T)"}, // (result type of b.F) - }}, - {"c", []lookup{ - {"b.B", "myFact(b.B)"}, - {"b.F", "myFact(b.F)"}, - //{"b.F(nil)()", "myFact(a.T)"}, // no fact; TODO(adonovan): investigate - {"C", "myFact(c.C)"}, - {"C{}[0]", "myFact(b.B)"}, - {"<-(C{}[0])", "no fact"}, // object but no fact (we never "analyze" a2) - }}, - }, - }, - { - name: "globals", - files: map[string]string{ - "a/a.go": `package a; - type T1 int - type T2 int - type T3 int - type T4 int - type T5 int - type K int; type V string - `, - "b/b.go": `package b - import "a" - var ( - G1 []a.T1 - G2 [7]a.T2 - G3 chan a.T3 - G4 *a.T4 - G5 struct{ F a.T5 } - G6 map[a.K]a.V - ) - `, - "c/c.go": `package c; import "b"; - var ( - v1 = b.G1 - v2 = b.G2 - v3 = b.G3 - v4 = b.G4 - v5 = b.G5 - v6 = b.G6 - ) - `, - }, - plookups: []pkgLookups{ - {"a", []lookup{}}, - {"b", []lookup{}}, - {"c", []lookup{ - {"v1[0]", "myFact(a.T1)"}, - {"v2[0]", "myFact(a.T2)"}, - {"<-v3", "myFact(a.T3)"}, - {"*v4", "myFact(a.T4)"}, - {"v5.F", "myFact(a.T5)"}, - {"v6[0]", "myFact(a.V)"}, - }}, - }, - }, - { - name: "typeparams", - typeparams: true, - files: map[string]string{ - "a/a.go": `package a - type T1 int - type T2 int - type T3 interface{Foo()} - type T4 int - type T5 int - type T6 interface{Foo()} - `, - "b/b.go": `package b - import "a" - type N1[T a.T1|int8] func() T - type N2[T any] struct{ F T } - type N3[T a.T3] func() T - type N4[T a.T4|int8] func() T - type N5[T interface{Bar() a.T5} ] func() T - - type t5 struct{}; func (t5) Bar() a.T5 - - var G1 N1[a.T1] - var G2 func() N2[a.T2] - var G3 N3[a.T3] - var G4 N4[a.T4] - var G5 N5[t5] - - func F6[T a.T6]() T { var x T; return x } - `, - "c/c.go": `package c; import "b"; - var ( - v1 = b.G1 - v2 = b.G2 - v3 = b.G3 - v4 = b.G4 - v5 = b.G5 - v6 = b.F6[t6] - ) - - type t6 struct{}; func (t6) Foo() {} - `, - }, - plookups: []pkgLookups{ - {"a", []lookup{}}, - {"b", []lookup{}}, - {"c", []lookup{ - {"v1", "myFact(b.N1)"}, - {"v1()", "myFact(a.T1)"}, - {"v2()", "myFact(b.N2)"}, - {"v2().F", "myFact(a.T2)"}, - {"v3", "myFact(b.N3)"}, - {"v4", "myFact(b.N4)"}, - {"v4()", "myFact(a.T4)"}, - {"v5", "myFact(b.N5)"}, - {"v5()", "myFact(b.t5)"}, - {"v6()", "myFact(c.t6)"}, - }}, - }, - }, - } - - for i := range tests { - test := tests[i] - t.Run(test.name, func(t *testing.T) { - t.Parallel() - if test.typeparams && !typeparams.Enabled { - t.Skip("type parameters are not enabled") - } - testEncodeDecode(t, test.files, test.plookups) - }) - } -} - -type lookup struct { - objexpr string - want string -} - -type pkgLookups struct { - path string - lookups []lookup -} - -// testEncodeDecode tests fact encoding and decoding and simulates how package facts -// are passed during analysis. It operates on a group of Go file contents. Then -// for each in tests it does the following: -// 1. loads and type checks the package, -// 2. calls facts.Decode to loads the facts exported by its imports, -// 3. exports a myFact Fact for all of package level objects, -// 4. For each lookup for the current package: -// 4.a) lookup the types.Object for an Go source expression in the curent package -// (or confirms one is not expected want=="no object"), -// 4.b) finds a Fact for the object (or confirms one is not expected want=="no fact"), -// 4.c) compares the content of the Fact to want. -// 5. encodes the Facts of the package. -// -// Note: tests are not independent test cases; order matters (as does a package being -// skipped). It changes what Facts can be imported. -// -// Failures are reported on t. -func testEncodeDecode(t *testing.T, files map[string]string, tests []pkgLookups) { - dir, cleanup, err := analysistest.WriteFiles(files) - if err != nil { - t.Fatal(err) - } - defer cleanup() - - // factmap represents the passing of encoded facts from one - // package to another. In practice one would use the file system. - factmap := make(map[string][]byte) - read := func(path string) ([]byte, error) { return factmap[path], nil } - - // Analyze packages in order, look up various objects accessible within - // each package, and see if they have a fact. The "analysis" exports a - // fact for every object at package level. - // - // Note: Loop iterations are not independent test cases; - // order matters, as we populate factmap. - for _, test := range tests { - // load package - pkg, err := load(t, dir, test.path) - if err != nil { - t.Fatal(err) - } - - // decode - facts, err := facts.Decode(pkg, read) - if err != nil { - t.Fatalf("Decode failed: %v", err) - } - t.Logf("decode %s facts = %v", pkg.Path(), facts) // show all facts - - // export - // (one fact for each package-level object) - for _, name := range pkg.Scope().Names() { - obj := pkg.Scope().Lookup(name) - fact := &myFact{obj.Pkg().Name() + "." + obj.Name()} - facts.ExportObjectFact(obj, fact) - } - t.Logf("exported %s facts = %v", pkg.Path(), facts) // show all facts - - // import - // (after export, because an analyzer may import its own facts) - for _, lookup := range test.lookups { - fact := new(myFact) - var got string - if obj := find(pkg, lookup.objexpr); obj == nil { - got = "no object" - } else if facts.ImportObjectFact(obj, fact) { - got = fact.String() - } else { - got = "no fact" - } - if got != lookup.want { - t.Errorf("in %s, ImportObjectFact(%s, %T) = %s, want %s", - pkg.Path(), lookup.objexpr, fact, got, lookup.want) - } - } - - // encode - factmap[pkg.Path()] = facts.Encode() - } -} - -func find(p *types.Package, expr string) types.Object { - // types.Eval only allows us to compute a TypeName object for an expression. - // TODO(adonovan): support other expressions that denote an object: - // - an identifier (or qualified ident) for a func, const, or var - // - new(T).f for a field or method - // I've added CheckExpr in https://go-review.googlesource.com/c/go/+/144677. - // If that becomes available, use it. - - // Choose an arbitrary position within the (single-file) package - // so that we are within the scope of its import declarations. - somepos := p.Scope().Lookup(p.Scope().Names()[0]).Pos() - tv, err := types.Eval(token.NewFileSet(), p, somepos, expr) - if err != nil { - return nil - } - if n, ok := tv.Type.(*types.Named); ok { - return n.Obj() - } - return nil -} - -func load(t *testing.T, dir string, path string) (*types.Package, error) { - cfg := &packages.Config{ - Mode: packages.LoadSyntax, - Dir: dir, - Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"), - } - testenv.NeedsGoPackagesEnv(t, cfg.Env) - pkgs, err := packages.Load(cfg, path) - if err != nil { - return nil, err - } - if packages.PrintErrors(pkgs) > 0 { - return nil, fmt.Errorf("packages had errors") - } - if len(pkgs) == 0 { - return nil, fmt.Errorf("no package matched %s", path) - } - return pkgs[0].Types, nil -} - -type otherFact struct { - S string -} - -func (f *otherFact) String() string { return fmt.Sprintf("otherFact(%s)", f.S) } -func (f *otherFact) AFact() {} - -func TestFactFilter(t *testing.T) { - files := map[string]string{ - "a/a.go": `package a; type A int`, - } - dir, cleanup, err := analysistest.WriteFiles(files) - if err != nil { - t.Fatal(err) - } - defer cleanup() - - pkg, err := load(t, dir, "a") - if err != nil { - t.Fatal(err) - } - - obj := pkg.Scope().Lookup("A") - s, err := facts.Decode(pkg, func(string) ([]byte, error) { return nil, nil }) - if err != nil { - t.Fatal(err) - } - s.ExportObjectFact(obj, &myFact{"good object fact"}) - s.ExportPackageFact(&myFact{"good package fact"}) - s.ExportObjectFact(obj, &otherFact{"bad object fact"}) - s.ExportPackageFact(&otherFact{"bad package fact"}) - - filter := map[reflect.Type]bool{ - reflect.TypeOf(&myFact{}): true, - } - - pkgFacts := s.AllPackageFacts(filter) - wantPkgFacts := `[{package a ("a") myFact(good package fact)}]` - if got := fmt.Sprintf("%v", pkgFacts); got != wantPkgFacts { - t.Errorf("AllPackageFacts: got %v, want %v", got, wantPkgFacts) - } - - objFacts := s.AllObjectFacts(filter) - wantObjFacts := "[{type a.A int myFact(good object fact)}]" - if got := fmt.Sprintf("%v", objFacts); got != wantObjFacts { - t.Errorf("AllObjectFacts: got %v, want %v", got, wantObjFacts) - } -} diff --git a/go/analysis/internal/facts/imports.go b/go/analysis/internal/facts/imports.go deleted file mode 100644 index 8a5553e2e9b..00000000000 --- a/go/analysis/internal/facts/imports.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package facts - -import ( - "go/types" - - "golang.org/x/tools/internal/typeparams" -) - -// importMap computes the import map for a package by traversing the -// entire exported API each of its imports. -// -// This is a workaround for the fact that we cannot access the map used -// internally by the types.Importer returned by go/importer. The entries -// in this map are the packages and objects that may be relevant to the -// current analysis unit. -// -// Packages in the map that are only indirectly imported may be -// incomplete (!pkg.Complete()). -func importMap(imports []*types.Package) map[string]*types.Package { - objects := make(map[types.Object]bool) - packages := make(map[string]*types.Package) - - var addObj func(obj types.Object) bool - var addType func(T types.Type) - - addObj = func(obj types.Object) bool { - if !objects[obj] { - objects[obj] = true - addType(obj.Type()) - if pkg := obj.Pkg(); pkg != nil { - packages[pkg.Path()] = pkg - } - return true - } - return false - } - - addType = func(T types.Type) { - switch T := T.(type) { - case *types.Basic: - // nop - case *types.Named: - if addObj(T.Obj()) { - // TODO(taking): Investigate why the Underlying type is not added here. - for i := 0; i < T.NumMethods(); i++ { - addObj(T.Method(i)) - } - if tparams := typeparams.ForNamed(T); tparams != nil { - for i := 0; i < tparams.Len(); i++ { - addType(tparams.At(i)) - } - } - if targs := typeparams.NamedTypeArgs(T); targs != nil { - for i := 0; i < targs.Len(); i++ { - addType(targs.At(i)) - } - } - } - case *types.Pointer: - addType(T.Elem()) - case *types.Slice: - addType(T.Elem()) - case *types.Array: - addType(T.Elem()) - case *types.Chan: - addType(T.Elem()) - case *types.Map: - addType(T.Key()) - addType(T.Elem()) - case *types.Signature: - addType(T.Params()) - addType(T.Results()) - if tparams := typeparams.ForSignature(T); tparams != nil { - for i := 0; i < tparams.Len(); i++ { - addType(tparams.At(i)) - } - } - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - addObj(T.Field(i)) - } - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - addObj(T.At(i)) - } - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - addObj(T.Method(i)) - } - for i := 0; i < T.NumEmbeddeds(); i++ { - addType(T.EmbeddedType(i)) // walk Embedded for implicits - } - case *typeparams.Union: - for i := 0; i < T.Len(); i++ { - addType(T.Term(i).Type()) - } - case *typeparams.TypeParam: - if addObj(T.Obj()) { - addType(T.Constraint()) - } - } - } - - for _, imp := range imports { - packages[imp.Path()] = imp - - scope := imp.Scope() - for _, name := range scope.Names() { - addObj(scope.Lookup(name)) - } - } - - return packages -} diff --git a/go/analysis/passes/asmdecl/asmdecl.go b/go/analysis/passes/asmdecl/asmdecl.go index 6fbfe7e181c..7288559fc0e 100644 --- a/go/analysis/passes/asmdecl/asmdecl.go +++ b/go/analysis/passes/asmdecl/asmdecl.go @@ -92,7 +92,7 @@ var ( asmArchMips64LE = asmArch{name: "mips64le", bigEndian: false, stack: "R29", lr: true} asmArchPpc64 = asmArch{name: "ppc64", bigEndian: true, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}} asmArchPpc64LE = asmArch{name: "ppc64le", bigEndian: false, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}} - asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true} + asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true, retRegs: []string{"X10", "F10"}} asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true} asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false} diff --git a/go/analysis/passes/asmdecl/asmdecl_test.go b/go/analysis/passes/asmdecl/asmdecl_test.go index f6b01a9c308..50938a07571 100644 --- a/go/analysis/passes/asmdecl/asmdecl_test.go +++ b/go/analysis/passes/asmdecl/asmdecl_test.go @@ -19,11 +19,12 @@ var goosarches = []string{ "linux/arm", // asm3.s // TODO: skip test on loong64 until go toolchain supported loong64. // "linux/loong64", // asm10.s - "linux/mips64", // asm5.s - "linux/s390x", // asm6.s - "linux/ppc64", // asm7.s - "linux/mips", // asm8.s, - "js/wasm", // asm9.s + "linux/mips64", // asm5.s + "linux/s390x", // asm6.s + "linux/ppc64", // asm7.s + "linux/mips", // asm8.s, + "js/wasm", // asm9.s + "linux/riscv64", // asm11.s } func Test(t *testing.T) { diff --git a/go/analysis/passes/asmdecl/testdata/src/a/asm11.s b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s new file mode 100644 index 00000000000..e81e8ee179f --- /dev/null +++ b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +// writing to result in ABIInternal function +TEXT Ā·returnABIInternal(SB), NOSPLIT, $8 + MOV $123, X10 + RET +TEXT Ā·returnmissingABIInternal(SB), NOSPLIT, $8 + MOV $123, X20 + RET // want `RET without writing to result register` diff --git a/go/analysis/passes/assign/assign.go b/go/analysis/passes/assign/assign.go index 3586638efc0..89146b73346 100644 --- a/go/analysis/passes/assign/assign.go +++ b/go/analysis/passes/assign/assign.go @@ -12,6 +12,7 @@ import ( "fmt" "go/ast" "go/token" + "go/types" "reflect" "golang.org/x/tools/go/analysis" @@ -51,7 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) { for i, lhs := range stmt.Lhs { rhs := stmt.Rhs[i] if analysisutil.HasSideEffects(pass.TypesInfo, lhs) || - analysisutil.HasSideEffects(pass.TypesInfo, rhs) { + analysisutil.HasSideEffects(pass.TypesInfo, rhs) || + isMapIndex(pass.TypesInfo, lhs) { continue // expressions may not be equal } if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) { @@ -74,3 +76,14 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } + +// isMapIndex returns true if e is a map index expression. +func isMapIndex(info *types.Info, e ast.Expr) bool { + if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok { + if typ := info.Types[idx.X].Type; typ != nil { + _, ok := typ.Underlying().(*types.Map) + return ok + } + } + return false +} diff --git a/go/analysis/passes/assign/testdata/src/a/a.go b/go/analysis/passes/assign/testdata/src/a/a.go index eaec634d181..f9663120b4a 100644 --- a/go/analysis/passes/assign/testdata/src/a/a.go +++ b/go/analysis/passes/assign/testdata/src/a/a.go @@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) { } func num() int { return 2 } + +func Index() { + s := []int{1} + s[0] = s[0] // want "self-assignment" + + var a [5]int + a[0] = a[0] // want "self-assignment" + + pa := &[2]int{1, 2} + pa[1] = pa[1] // want "self-assignment" + + var pss *struct { // report self assignment despite nil dereference + s []int + } + pss.s[0] = pss.s[0] // want "self-assignment" + + m := map[int]string{1: "a"} + m[0] = m[0] // bail on map self-assignments due to side effects + m[1] = m[1] // not modeling what elements must be in the map + (m[2]) = (m[2]) // even with parens + type Map map[string]bool + named := make(Map) + named["s"] = named["s"] // even on named maps. + var psm *struct { + m map[string]int + } + psm.m["key"] = psm.m["key"] // handles dereferences +} diff --git a/go/analysis/passes/assign/testdata/src/a/a.go.golden b/go/analysis/passes/assign/testdata/src/a/a.go.golden index 6c91d3666cc..f45b7f208e2 100644 --- a/go/analysis/passes/assign/testdata/src/a/a.go.golden +++ b/go/analysis/passes/assign/testdata/src/a/a.go.golden @@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) { } func num() int { return 2 } + +func Index() { + s := []int{1} + // want "self-assignment" + + var a [5]int + // want "self-assignment" + + pa := &[2]int{1, 2} + // want "self-assignment" + + var pss *struct { // report self assignment despite nil dereference + s []int + } + // want "self-assignment" + + m := map[int]string{1: "a"} + m[0] = m[0] // bail on map self-assignments due to side effects + m[1] = m[1] // not modeling what elements must be in the map + (m[2]) = (m[2]) // even with parens + type Map map[string]bool + named := make(Map) + named["s"] = named["s"] // even on named maps. + var psm *struct { + m map[string]int + } + psm.m["key"] = psm.m["key"] // handles dereferences +} diff --git a/go/analysis/passes/buildssa/buildssa.go b/go/analysis/passes/buildssa/buildssa.go index 4ec0e73ff2c..02b7b18b3f5 100644 --- a/go/analysis/passes/buildssa/buildssa.go +++ b/go/analysis/passes/buildssa/buildssa.go @@ -48,8 +48,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // Some Analyzers may need GlobalDebug, in which case we'll have // to set it globally, but let's wait till we need it. - // Monomorphize at least until type parameters are available. - mode := ssa.InstantiateGenerics + mode := ssa.BuilderMode(0) prog := ssa.NewProgram(pass.Fset, mode) diff --git a/go/analysis/passes/buildtag/buildtag.go b/go/analysis/passes/buildtag/buildtag.go index c4407ad91fe..775e507a346 100644 --- a/go/analysis/passes/buildtag/buildtag.go +++ b/go/analysis/passes/buildtag/buildtag.go @@ -20,7 +20,7 @@ import ( "golang.org/x/tools/go/analysis/passes/internal/analysisutil" ) -const Doc = "check that +build tags are well-formed and correctly located" +const Doc = "check //go:build and // +build directives" var Analyzer = &analysis.Analyzer{ Name: "buildtag", diff --git a/go/analysis/passes/buildtag/buildtag_old.go b/go/analysis/passes/buildtag/buildtag_old.go index e9234925f9c..0001ba53639 100644 --- a/go/analysis/passes/buildtag/buildtag_old.go +++ b/go/analysis/passes/buildtag/buildtag_old.go @@ -22,7 +22,7 @@ import ( "golang.org/x/tools/go/analysis/passes/internal/analysisutil" ) -const Doc = "check that +build tags are well-formed and correctly located" +const Doc = "check // +build directives" var Analyzer = &analysis.Analyzer{ Name: "buildtag", diff --git a/go/analysis/passes/composite/composite.go b/go/analysis/passes/composite/composite.go index d3670aca97a..64e184d3439 100644 --- a/go/analysis/passes/composite/composite.go +++ b/go/analysis/passes/composite/composite.go @@ -7,6 +7,7 @@ package composite import ( + "fmt" "go/ast" "go/types" "strings" @@ -83,7 +84,8 @@ func run(pass *analysis.Pass) (interface{}, error) { } for _, typ := range structuralTypes { under := deref(typ.Underlying()) - if _, ok := under.(*types.Struct); !ok { + strct, ok := under.(*types.Struct) + if !ok { // skip non-struct composite literals continue } @@ -92,20 +94,47 @@ func run(pass *analysis.Pass) (interface{}, error) { continue } - // check if the CompositeLit contains an unkeyed field + // check if the struct contains an unkeyed field allKeyValue := true - for _, e := range cl.Elts { + var suggestedFixAvailable = len(cl.Elts) == strct.NumFields() + var missingKeys []analysis.TextEdit + for i, e := range cl.Elts { if _, ok := e.(*ast.KeyValueExpr); !ok { allKeyValue = false - break + if i >= strct.NumFields() { + break + } + field := strct.Field(i) + if !field.Exported() { + // Adding unexported field names for structs not defined + // locally will not work. + suggestedFixAvailable = false + break + } + missingKeys = append(missingKeys, analysis.TextEdit{ + Pos: e.Pos(), + End: e.Pos(), + NewText: []byte(fmt.Sprintf("%s: ", field.Name())), + }) } } if allKeyValue { - // all the composite literal fields are keyed + // all the struct fields are keyed continue } - pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName) + diag := analysis.Diagnostic{ + Pos: cl.Pos(), + End: cl.End(), + Message: fmt.Sprintf("%s struct literal uses unkeyed fields", typeName), + } + if suggestedFixAvailable { + diag.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Add field names to struct literal", + TextEdits: missingKeys, + }} + } + pass.Report(diag) return } }) diff --git a/go/analysis/passes/composite/composite_test.go b/go/analysis/passes/composite/composite_test.go index 952de8bfdad..7afaaa7ffd4 100644 --- a/go/analysis/passes/composite/composite_test.go +++ b/go/analysis/passes/composite/composite_test.go @@ -18,5 +18,5 @@ func Test(t *testing.T) { if typeparams.Enabled { pkgs = append(pkgs, "typeparams") } - analysistest.Run(t, testdata, composite.Analyzer, pkgs...) + analysistest.RunWithSuggestedFixes(t, testdata, composite.Analyzer, pkgs...) } diff --git a/go/analysis/passes/composite/testdata/src/a/a.go b/go/analysis/passes/composite/testdata/src/a/a.go index 3a5bc203b03..cd69d395173 100644 --- a/go/analysis/passes/composite/testdata/src/a/a.go +++ b/go/analysis/passes/composite/testdata/src/a/a.go @@ -11,6 +11,7 @@ import ( "go/scanner" "go/token" "image" + "sync" "unicode" ) @@ -79,6 +80,18 @@ var badStructLiteral = flag.Flag{ // want "unkeyed fields" nil, // Value "DefValue", } +var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value + "DefValue", + "Extra Field", +} +var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value +} var delta [3]rune @@ -100,6 +113,10 @@ var badScannerErrorList = scanner.ErrorList{ &scanner.Error{token.Position{}, "foobar"}, // want "unkeyed fields" } +// sync.Mutex has unexported fields. We expect a diagnostic but no +// suggested fix. +var mu = sync.Mutex{0, 0} // want "unkeyed fields" + // Check whitelisted structs: if vet is run with --compositewhitelist=false, // this line triggers an error. var whitelistedPoint = image.Point{1, 2} diff --git a/go/analysis/passes/composite/testdata/src/a/a.go.golden b/go/analysis/passes/composite/testdata/src/a/a.go.golden new file mode 100644 index 00000000000..fe73a2e0a1d --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/a/a.go.golden @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the test for untagged struct literals. + +package a + +import ( + "flag" + "go/scanner" + "go/token" + "image" + "sync" + "unicode" +) + +var Okay1 = []string{ + "Name", + "Usage", + "DefValue", +} + +var Okay2 = map[string]bool{ + "Name": true, + "Usage": true, + "DefValue": true, +} + +var Okay3 = struct { + X string + Y string + Z string +}{ + "Name", + "Usage", + "DefValue", +} + +var Okay4 = []struct { + A int + B int +}{ + {1, 2}, + {3, 4}, +} + +type MyStruct struct { + X string + Y string + Z string +} + +var Okay5 = &MyStruct{ + "Name", + "Usage", + "DefValue", +} + +var Okay6 = []MyStruct{ + {"foo", "bar", "baz"}, + {"aa", "bb", "cc"}, +} + +var Okay7 = []*MyStruct{ + {"foo", "bar", "baz"}, + {"aa", "bb", "cc"}, +} + +// Testing is awkward because we need to reference things from a separate package +// to trigger the warnings. + +var goodStructLiteral = flag.Flag{ + Name: "Name", + Usage: "Usage", +} +var badStructLiteral = flag.Flag{ // want "unkeyed fields" + Name: "Name", + Usage: "Usage", + Value: nil, // Value + DefValue: "DefValue", +} +var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value + "DefValue", + "Extra Field", +} +var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value +} + +var delta [3]rune + +// SpecialCase is a named slice of CaseRange to test issue 9171. +var goodNamedSliceLiteral = unicode.SpecialCase{ + {Lo: 1, Hi: 2, Delta: delta}, + unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, +} +var badNamedSliceLiteral = unicode.SpecialCase{ + {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" + unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" +} + +// ErrorList is a named slice, so no warnings should be emitted. +var goodScannerErrorList = scanner.ErrorList{ + &scanner.Error{Msg: "foobar"}, +} +var badScannerErrorList = scanner.ErrorList{ + &scanner.Error{Pos: token.Position{}, Msg: "foobar"}, // want "unkeyed fields" +} + +// sync.Mutex has unexported fields. We expect a diagnostic but no +// suggested fix. +var mu = sync.Mutex{0, 0} // want "unkeyed fields" + +// Check whitelisted structs: if vet is run with --compositewhitelist=false, +// this line triggers an error. +var whitelistedPoint = image.Point{1, 2} + +// Do not check type from unknown package. +// See issue 15408. +var unknownPkgVar = unicode.NoSuchType{"foo", "bar"} + +// A named pointer slice of CaseRange to test issue 23539. In +// particular, we're interested in how some slice elements omit their +// type. +var goodNamedPointerSliceLiteral = []*unicode.CaseRange{ + {Lo: 1, Hi: 2}, + &unicode.CaseRange{Lo: 1, Hi: 2}, +} +var badNamedPointerSliceLiteral = []*unicode.CaseRange{ + {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" + &unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" +} + +// unicode.Range16 is whitelisted, so there'll be no vet error +var range16 = unicode.Range16{0xfdd0, 0xfdef, 1} + +// unicode.Range32 is whitelisted, so there'll be no vet error +var range32 = unicode.Range32{0x1fffe, 0x1ffff, 1} diff --git a/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden new file mode 100644 index 00000000000..20b652e88dd --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package a + +import "testing" + +var fuzzTargets = []testing.InternalFuzzTarget{ + {"Fuzz", Fuzz}, +} + +func Fuzz(f *testing.F) {} diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go index dd5d57efed4..f9a5e1fb105 100644 --- a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go +++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go @@ -6,7 +6,7 @@ package typeparams import "typeparams/lib" -type localStruct struct { F int } +type localStruct struct{ F int } func F[ T1 ~struct{ f int }, @@ -20,8 +20,8 @@ func F[ _ = T1{2} _ = T2a{2} _ = T2b{2} // want "unkeyed fields" - _ = T3{1,2} - _ = T4{1,2} - _ = T5{1:2} - _ = T6{1:2} + _ = T3{1, 2} + _ = T4{1, 2} + _ = T5{1: 2} + _ = T6{1: 2} } diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden new file mode 100644 index 00000000000..66cd9158cb6 --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import "typeparams/lib" + +type localStruct struct{ F int } + +func F[ + T1 ~struct{ f int }, + T2a localStruct, + T2b lib.Struct, + T3 ~[]int, + T4 lib.Slice, + T5 ~map[int]int, + T6 lib.Map, +]() { + _ = T1{2} + _ = T2a{2} + _ = T2b{F: 2} // want "unkeyed fields" + _ = T3{1, 2} + _ = T4{1, 2} + _ = T5{1: 2} + _ = T6{1: 2} +} diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock.go b/go/analysis/passes/copylock/testdata/src/a/copylock.go index 7704b3a42b2..4ab66dca1f6 100644 --- a/go/analysis/passes/copylock/testdata/src/a/copylock.go +++ b/go/analysis/passes/copylock/testdata/src/a/copylock.go @@ -50,27 +50,27 @@ func BadFunc() { var t Tlock var tp *Tlock tp = &t - *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync.Mutex` - t = *tp // want "assignment copies lock value to t: a.Tlock contains sync.Once contains sync.Mutex" + *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync\b.*` + t = *tp // want `assignment copies lock value to t: a.Tlock contains sync.Once contains sync\b.*` y := *x // want "assignment copies lock value to y: sync.Mutex" - var z = t // want "variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync.Mutex" + var z = t // want `variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync\b.*` w := struct{ L sync.Mutex }{ L: *x, // want `literal copies lock value from \*x: sync.Mutex` } var q = map[int]Tlock{ - 1: t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex" - 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex` + 1: t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*` + 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*` } yy := []Tlock{ - t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex" - *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex` + t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*` + *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*` } // override 'new' keyword new := func(interface{}) {} - new(t) // want "call of new copies lock value: a.Tlock contains sync.Once contains sync.Mutex" + new(t) // want `call of new copies lock value: a.Tlock contains sync.Once contains sync\b.*` // copy of array of locks var muA [5]sync.Mutex @@ -193,9 +193,9 @@ func SyncTypesCheck() { var onceX sync.Once var onceXX = sync.Once{} onceX1 := new(sync.Once) - onceY := onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex" - onceY = onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex" - var onceYY = onceX // want "variable declaration copies lock value to onceYY: sync.Once contains sync.Mutex" + onceY := onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*` + onceY = onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*` + var onceYY = onceX // want `variable declaration copies lock value to onceYY: sync.Once contains sync\b.*` onceP := &onceX onceZ := &sync.Once{} } diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go index 801bc6f24f1..0d3168f1ef1 100644 --- a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go +++ b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go @@ -126,7 +126,7 @@ func AcceptedCases() { // sync.Mutex gets called out, but without any reference to the sync.Once. type LocalOnce sync.Once -func (LocalOnce) Bad() {} // want "Bad passes lock by value: a.LocalOnce contains sync.Mutex" +func (LocalOnce) Bad() {} // want `Bad passes lock by value: a.LocalOnce contains sync.\b.*` // False negative: // LocalMutex doesn't have a Lock method. diff --git a/go/analysis/passes/directive/directive.go b/go/analysis/passes/directive/directive.go new file mode 100644 index 00000000000..76d852cd0fe --- /dev/null +++ b/go/analysis/passes/directive/directive.go @@ -0,0 +1,216 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package directive defines an Analyzer that checks known Go toolchain directives. +package directive + +import ( + "go/ast" + "go/parser" + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = `check Go toolchain directives such as //go:debug + +This analyzer checks for problems with known Go toolchain directives +in all Go source files in a package directory, even those excluded by +//go:build constraints, and all non-Go source files too. + +For //go:debug (see https://go.dev/doc/godebug), the analyzer checks +that the directives are placed only in Go source files, only above the +package comment, and only in package main or *_test.go files. + +Support for other known directives may be added in the future. + +This analyzer does not check //go:build, which is handled by the +buildtag analyzer. +` + +var Analyzer = &analysis.Analyzer{ + Name: "directive", + Doc: Doc, + Run: runDirective, +} + +func runDirective(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + checkGoFile(pass, f) + } + for _, name := range pass.OtherFiles { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + if err != nil { + // Not valid Go source code - not our job to diagnose, so ignore. + continue + } + checkGoFile(pass, f) + } else { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + } + return nil, nil +} + +func checkGoFile(pass *analysis.Pass, f *ast.File) { + check := newChecker(pass, pass.Fset.File(f.Package).Name(), f) + + for _, group := range f.Comments { + // A +build comment is ignored after or adjoining the package declaration. + if group.End()+1 >= f.Package { + check.inHeader = false + } + // A //go:build comment is ignored after the package declaration + // (but adjoining it is OK, in contrast to +build comments). + if group.Pos() >= f.Package { + check.inHeader = false + } + + // Check each line of a //-comment. + for _, c := range group.List { + check.comment(c.Slash, c.Text) + } + } +} + +func checkOtherFile(pass *analysis.Pass, filename string) error { + // We cannot use the Go parser, since is not a Go source file. + // Read the raw bytes instead. + content, tf, err := analysisutil.ReadFile(pass.Fset, filename) + if err != nil { + return err + } + + check := newChecker(pass, filename, nil) + check.nonGoFile(token.Pos(tf.Base()), string(content)) + return nil +} + +type checker struct { + pass *analysis.Pass + filename string + file *ast.File // nil for non-Go file + inHeader bool // in file header (before package declaration) + inStar bool // currently in a /* */ comment +} + +func newChecker(pass *analysis.Pass, filename string, file *ast.File) *checker { + return &checker{ + pass: pass, + filename: filename, + file: file, + inHeader: true, + } +} + +func (check *checker) nonGoFile(pos token.Pos, fullText string) { + // Process each line. + text := fullText + inStar := false + for text != "" { + offset := len(fullText) - len(text) + var line string + line, text, _ = stringsCut(text, "\n") + + if !inStar && strings.HasPrefix(line, "//") { + check.comment(pos+token.Pos(offset), line) + continue + } + + // Skip over, cut out any /* */ comments, + // to avoid being confused by a commented-out // comment. + for { + line = strings.TrimSpace(line) + if inStar { + var ok bool + _, line, ok = stringsCut(line, "*/") + if !ok { + break + } + inStar = false + continue + } + line, inStar = stringsCutPrefix(line, "/*") + if !inStar { + break + } + } + if line != "" { + // Found non-comment non-blank line. + // Ends space for valid //go:build comments, + // but also ends the fraction of the file we can + // reliably parse. From this point on we might + // incorrectly flag "comments" inside multiline + // string constants or anything else (this might + // not even be a Go program). So stop. + break + } + } +} + +func (check *checker) comment(pos token.Pos, line string) { + if !strings.HasPrefix(line, "//go:") { + return + } + // testing hack: stop at // ERROR + if i := strings.Index(line, " // ERROR "); i >= 0 { + line = line[:i] + } + + verb := line + if i := strings.IndexFunc(verb, unicode.IsSpace); i >= 0 { + verb = verb[:i] + if line[i] != ' ' && line[i] != '\t' && line[i] != '\n' { + r, _ := utf8.DecodeRuneInString(line[i:]) + check.pass.Reportf(pos, "invalid space %#q in %s directive", r, verb) + } + } + + switch verb { + default: + // TODO: Use the go language version for the file. + // If that version is not newer than us, then we can + // report unknown directives. + + case "//go:build": + // Ignore. The buildtag analyzer reports misplaced comments. + + case "//go:debug": + if check.file == nil { + check.pass.Reportf(pos, "//go:debug directive only valid in Go source files") + } else if check.file.Name.Name != "main" && !strings.HasSuffix(check.filename, "_test.go") { + check.pass.Reportf(pos, "//go:debug directive only valid in package main or test") + } else if !check.inHeader { + check.pass.Reportf(pos, "//go:debug directive only valid before package declaration") + } + } +} + +// Go 1.18 strings.Cut. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +// Go 1.20 strings.CutPrefix. +func stringsCutPrefix(s, prefix string) (after string, found bool) { + if !strings.HasPrefix(s, prefix) { + return s, false + } + return s[len(prefix):], true +} diff --git a/go/analysis/passes/directive/directive_test.go b/go/analysis/passes/directive/directive_test.go new file mode 100644 index 00000000000..a526c0d740d --- /dev/null +++ b/go/analysis/passes/directive/directive_test.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package directive_test + +import ( + "runtime" + "strings" + "testing" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/directive" +) + +func Test(t *testing.T) { + if strings.HasPrefix(runtime.Version(), "go1.") && runtime.Version() < "go1.16" { + t.Skipf("skipping on %v", runtime.Version()) + } + analyzer := *directive.Analyzer + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + defer func() { + // The directive pass is unusual in that it checks the IgnoredFiles. + // After analysis, add IgnoredFiles to OtherFiles so that + // the test harness checks for expected diagnostics in those. + // (The test harness shouldn't do this by default because most + // passes can't do anything with the IgnoredFiles without type + // information, which is unavailable because they are ignored.) + var files []string + files = append(files, pass.OtherFiles...) + files = append(files, pass.IgnoredFiles...) + pass.OtherFiles = files + }() + + return directive.Analyzer.Run(pass) + } + analysistest.Run(t, analysistest.TestData(), &analyzer, "a") +} diff --git a/go/analysis/passes/directive/testdata/src/a/badspace.go b/go/analysis/passes/directive/testdata/src/a/badspace.go new file mode 100644 index 00000000000..11313996046 --- /dev/null +++ b/go/analysis/passes/directive/testdata/src/a/badspace.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// want +1 `invalid space '\\u00a0' in //go:debug directive` +//go:debugĀ 00a0 + +package main + diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced.go b/go/analysis/passes/directive/testdata/src/a/misplaced.go new file mode 100644 index 00000000000..db30ceb476e --- /dev/null +++ b/go/analysis/passes/directive/testdata/src/a/misplaced.go @@ -0,0 +1,10 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// want +1 `//go:debug directive only valid before package declaration` +//go:debug panicnil=1 diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced.s b/go/analysis/passes/directive/testdata/src/a/misplaced.s new file mode 100644 index 00000000000..9e26dbc5241 --- /dev/null +++ b/go/analysis/passes/directive/testdata/src/a/misplaced.s @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// want +1 `//go:debug directive only valid in Go source files` +//go:debug panicnil=1 + +/* +can skip over comments +//go:debug doesn't matter here +*/ + +// want +1 `//go:debug directive only valid in Go source files` +//go:debug panicnil=1 + +package a + +// no error here because we can't parse this far +//go:debug panicnil=1 diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced_test.go b/go/analysis/passes/directive/testdata/src/a/misplaced_test.go new file mode 100644 index 00000000000..6b4527a3589 --- /dev/null +++ b/go/analysis/passes/directive/testdata/src/a/misplaced_test.go @@ -0,0 +1,10 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:debug panicnil=1 + +package p_test + +// want +1 `//go:debug directive only valid before package declaration` +//go:debug panicnil=1 diff --git a/go/analysis/passes/directive/testdata/src/a/p.go b/go/analysis/passes/directive/testdata/src/a/p.go new file mode 100644 index 00000000000..e1e3e65520f --- /dev/null +++ b/go/analysis/passes/directive/testdata/src/a/p.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// want +1 `//go:debug directive only valid in package main or test` +//go:debug panicnil=1 + +package p + +// want +1 `//go:debug directive only valid in package main or test` +//go:debug panicnil=1 diff --git a/go/analysis/passes/fieldalignment/fieldalignment.go b/go/analysis/passes/fieldalignment/fieldalignment.go index 78afe94ab30..aff663046a3 100644 --- a/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/go/analysis/passes/fieldalignment/fieldalignment.go @@ -23,7 +23,7 @@ import ( const Doc = `find structs that would use less memory if their fields were sorted This analyzer find structs that can be rearranged to use less memory, and provides -a suggested edit with the optimal order. +a suggested edit with the most compact order. Note that there are two different diagnostics reported. One checks struct size, and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the @@ -41,6 +41,11 @@ has 24 pointer bytes because it has to scan further through the *uint32. struct { string; uint32 } has 8 because it can stop immediately after the string pointer. + +Be aware that the most compact order is not always the most efficient. +In rare cases it may cause two variables each updated by its own goroutine +to occupy the same CPU cache line, inducing a form of memory contention +known as "false sharing" that slows down both goroutines. ` var Analyzer = &analysis.Analyzer{ diff --git a/go/analysis/passes/ifaceassert/parameterized.go b/go/analysis/passes/ifaceassert/parameterized.go index 1285ecf1367..b35f62dc730 100644 --- a/go/analysis/passes/ifaceassert/parameterized.go +++ b/go/analysis/passes/ifaceassert/parameterized.go @@ -1,6 +1,7 @@ // Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package ifaceassert import ( diff --git a/go/analysis/passes/inspect/inspect.go b/go/analysis/passes/inspect/inspect.go index c1c1127d089..165c70cbd36 100644 --- a/go/analysis/passes/inspect/inspect.go +++ b/go/analysis/passes/inspect/inspect.go @@ -24,7 +24,7 @@ // inspect.Preorder(nil, func(n ast.Node) { // ... // }) -// return nil +// return nil, nil // } package inspect diff --git a/go/analysis/passes/loopclosure/loopclosure.go b/go/analysis/passes/loopclosure/loopclosure.go index 98de9a9bacd..ae5b4151dbe 100644 --- a/go/analysis/passes/loopclosure/loopclosure.go +++ b/go/analysis/passes/loopclosure/loopclosure.go @@ -18,19 +18,60 @@ import ( const Doc = `check references to loop variables from within nested functions -This analyzer checks for references to loop variables from within a -function literal inside the loop body. It checks only instances where -the function literal is called in a defer or go statement that is the -last statement in the loop body, as otherwise we would need whole -program analysis. +This analyzer reports places where a function literal references the +iteration variable of an enclosing loop, and the loop calls the function +in such a way (e.g. with go or defer) that it may outlive the loop +iteration and possibly observe the wrong value of the variable. -For example: +In this example, all the deferred functions run after the loop has +completed, so all observe the final value of v. - for i, v := range s { - go func() { - println(i, v) // not what you might expect - }() - } + for _, v := range list { + defer func() { + use(v) // incorrect + }() + } + +One fix is to create a new variable for each iteration of the loop: + + for _, v := range list { + v := v // new var per iteration + defer func() { + use(v) // ok + }() + } + +The next example uses a go statement and has a similar problem. +In addition, it has a data race because the loop updates v +concurrent with the goroutines accessing it. + + for _, v := range elem { + go func() { + use(v) // incorrect, and a data race + }() + } + +A fix is the same as before. The checker also reports problems +in goroutines started by golang.org/x/sync/errgroup.Group. +A hard-to-spot variant of this form is common in parallel tests: + + func Test(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + use(test) // incorrect, and a data race + }) + } + } + +The t.Parallel() call causes the rest of the function to execute +concurrent with the loop. + +The analyzer reports references only in the last statement, +as it is not deep enough to understand the effects of subsequent +statements that might render the reference benign. +("Last statement" is defined recursively in compound +statements such as if, switch, and select.) See: https://golang.org/doc/go_faq.html#closures_and_goroutines` @@ -50,10 +91,12 @@ func run(pass *analysis.Pass) (interface{}, error) { } inspect.Preorder(nodeFilter, func(n ast.Node) { // Find the variables updated by the loop statement. - var vars []*ast.Ident + var vars []types.Object addVar := func(expr ast.Expr) { - if id, ok := expr.(*ast.Ident); ok { - vars = append(vars, id) + if id, _ := expr.(*ast.Ident); id != nil { + if obj := pass.TypesInfo.ObjectOf(id); obj != nil { + vars = append(vars, obj) + } } } var body *ast.BlockStmt @@ -79,52 +122,141 @@ func run(pass *analysis.Pass) (interface{}, error) { return } - // Inspect a go or defer statement - // if it's the last one in the loop body. - // (We give up if there are following statements, - // because it's hard to prove go isn't followed by wait, - // or defer by return.) - if len(body.List) == 0 { - return - } - // The function invoked in the last return statement. - var fun ast.Expr - switch s := body.List[len(body.List)-1].(type) { - case *ast.GoStmt: - fun = s.Call.Fun - case *ast.DeferStmt: - fun = s.Call.Fun - case *ast.ExprStmt: // check for errgroup.Group.Go() - if call, ok := s.X.(*ast.CallExpr); ok { - fun = goInvokes(pass.TypesInfo, call) - } - } - lit, ok := fun.(*ast.FuncLit) - if !ok { - return - } - ast.Inspect(lit.Body, func(n ast.Node) bool { - id, ok := n.(*ast.Ident) - if !ok || id.Obj == nil { - return true + // Inspect statements to find function literals that may be run outside of + // the current loop iteration. + // + // For go, defer, and errgroup.Group.Go, we ignore all but the last + // statement, because it's hard to prove go isn't followed by wait, or + // defer by return. "Last" is defined recursively. + // + // TODO: consider allowing the "last" go/defer/Go statement to be followed by + // N "trivial" statements, possibly under a recursive definition of "trivial" + // so that that checker could, for example, conclude that a go statement is + // followed by an if statement made of only trivial statements and trivial expressions, + // and hence the go statement could still be checked. + forEachLastStmt(body.List, func(last ast.Stmt) { + var stmts []ast.Stmt + switch s := last.(type) { + case *ast.GoStmt: + stmts = litStmts(s.Call.Fun) + case *ast.DeferStmt: + stmts = litStmts(s.Call.Fun) + case *ast.ExprStmt: // check for errgroup.Group.Go + if call, ok := s.X.(*ast.CallExpr); ok { + stmts = litStmts(goInvoke(pass.TypesInfo, call)) + } } - if pass.TypesInfo.Types[id].Type == nil { - // Not referring to a variable (e.g. struct field name) - return true + for _, stmt := range stmts { + reportCaptured(pass, vars, stmt) } - for _, v := range vars { - if v.Obj == id.Obj { - pass.ReportRangef(id, "loop variable %s captured by func literal", - id.Name) + }) + + // Also check for testing.T.Run (with T.Parallel). + // We consider every t.Run statement in the loop body, because there is + // no commonly used mechanism for synchronizing parallel subtests. + // It is of course theoretically possible to synchronize parallel subtests, + // though such a pattern is likely to be exceedingly rare as it would be + // fighting against the test runner. + for _, s := range body.List { + switch s := s.(type) { + case *ast.ExprStmt: + if call, ok := s.X.(*ast.CallExpr); ok { + for _, stmt := range parallelSubtest(pass.TypesInfo, call) { + reportCaptured(pass, vars, stmt) + } + } } - return true - }) + } }) return nil, nil } -// goInvokes returns a function expression that would be called asynchronously +// reportCaptured reports a diagnostic stating a loop variable +// has been captured by a func literal if checkStmt has escaping +// references to vars. vars is expected to be variables updated by a loop statement, +// and checkStmt is expected to be a statements from the body of a func literal in the loop. +func reportCaptured(pass *analysis.Pass, vars []types.Object, checkStmt ast.Stmt) { + ast.Inspect(checkStmt, func(n ast.Node) bool { + id, ok := n.(*ast.Ident) + if !ok { + return true + } + obj := pass.TypesInfo.Uses[id] + if obj == nil { + return true + } + for _, v := range vars { + if v == obj { + pass.ReportRangef(id, "loop variable %s captured by func literal", id.Name) + } + } + return true + }) +} + +// forEachLastStmt calls onLast on each "last" statement in a list of statements. +// "Last" is defined recursively so, for example, if the last statement is +// a switch statement, then each switch case is also visited to examine +// its last statements. +func forEachLastStmt(stmts []ast.Stmt, onLast func(last ast.Stmt)) { + if len(stmts) == 0 { + return + } + + s := stmts[len(stmts)-1] + switch s := s.(type) { + case *ast.IfStmt: + loop: + for { + forEachLastStmt(s.Body.List, onLast) + switch e := s.Else.(type) { + case *ast.BlockStmt: + forEachLastStmt(e.List, onLast) + break loop + case *ast.IfStmt: + s = e + case nil: + break loop + } + } + case *ast.ForStmt: + forEachLastStmt(s.Body.List, onLast) + case *ast.RangeStmt: + forEachLastStmt(s.Body.List, onLast) + case *ast.SwitchStmt: + for _, c := range s.Body.List { + cc := c.(*ast.CaseClause) + forEachLastStmt(cc.Body, onLast) + } + case *ast.TypeSwitchStmt: + for _, c := range s.Body.List { + cc := c.(*ast.CaseClause) + forEachLastStmt(cc.Body, onLast) + } + case *ast.SelectStmt: + for _, c := range s.Body.List { + cc := c.(*ast.CommClause) + forEachLastStmt(cc.Body, onLast) + } + default: + onLast(s) + } +} + +// litStmts returns all statements from the function body of a function +// literal. +// +// If fun is not a function literal, it returns nil. +func litStmts(fun ast.Expr) []ast.Stmt { + lit, _ := fun.(*ast.FuncLit) + if lit == nil { + return nil + } + return lit.Body.List +} + +// goInvoke returns a function expression that would be called asynchronously // (but not awaited) in another goroutine as a consequence of the call. // For example, given the g.Go call below, it returns the function literal expression. // @@ -133,33 +265,169 @@ func run(pass *analysis.Pass) (interface{}, error) { // g.Go(func() error { ... }) // // Currently only "golang.org/x/sync/errgroup.Group()" is considered. -func goInvokes(info *types.Info, call *ast.CallExpr) ast.Expr { - f := typeutil.StaticCallee(info, call) - // Note: Currently only supports: golang.org/x/sync/errgroup.Go. - if f == nil || f.Name() != "Go" { +func goInvoke(info *types.Info, call *ast.CallExpr) ast.Expr { + if !isMethodCall(info, call, "golang.org/x/sync/errgroup", "Group", "Go") { return nil } - recv := f.Type().(*types.Signature).Recv() - if recv == nil { + return call.Args[0] +} + +// parallelSubtest returns statements that can be easily proven to execute +// concurrently via the go test runner, as t.Run has been invoked with a +// function literal that calls t.Parallel. +// +// In practice, users rely on the fact that statements before the call to +// t.Parallel are synchronous. For example by declaring test := test inside the +// function literal, but before the call to t.Parallel. +// +// Therefore, we only flag references in statements that are obviously +// dominated by a call to t.Parallel. As a simple heuristic, we only consider +// statements following the final labeled statement in the function body, to +// avoid scenarios where a jump would cause either the call to t.Parallel or +// the problematic reference to be skipped. +// +// import "testing" +// +// func TestFoo(t *testing.T) { +// tests := []int{0, 1, 2} +// for i, test := range tests { +// t.Run("subtest", func(t *testing.T) { +// println(i, test) // OK +// t.Parallel() +// println(i, test) // Not OK +// }) +// } +// } +func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt { + if !isMethodCall(info, call, "testing", "T", "Run") { return nil } - rtype, ok := recv.Type().(*types.Pointer) - if !ok { + + if len(call.Args) != 2 { + // Ignore calls such as t.Run(fn()). return nil } - named, ok := rtype.Elem().(*types.Named) - if !ok { + + lit, _ := call.Args[1].(*ast.FuncLit) + if lit == nil { return nil } - if named.Obj().Name() != "Group" { + + // Capture the *testing.T object for the first argument to the function + // literal. + if len(lit.Type.Params.List[0].Names) == 0 { + return nil + } + + tObj := info.Defs[lit.Type.Params.List[0].Names[0]] + if tObj == nil { return nil } + + // Match statements that occur after a call to t.Parallel following the final + // labeled statement in the function body. + // + // We iterate over lit.Body.List to have a simple, fast and "frequent enough" + // dominance relationship for t.Parallel(): lit.Body.List[i] dominates + // lit.Body.List[j] for i < j unless there is a jump. + var stmts []ast.Stmt + afterParallel := false + for _, stmt := range lit.Body.List { + stmt, labeled := unlabel(stmt) + if labeled { + // Reset: naively we don't know if a jump could have caused the + // previously considered statements to be skipped. + stmts = nil + afterParallel = false + } + + if afterParallel { + stmts = append(stmts, stmt) + continue + } + + // Check if stmt is a call to t.Parallel(), for the correct t. + exprStmt, ok := stmt.(*ast.ExprStmt) + if !ok { + continue + } + expr := exprStmt.X + if isMethodCall(info, expr, "testing", "T", "Parallel") { + call, _ := expr.(*ast.CallExpr) + if call == nil { + continue + } + x, _ := call.Fun.(*ast.SelectorExpr) + if x == nil { + continue + } + id, _ := x.X.(*ast.Ident) + if id == nil { + continue + } + if info.Uses[id] == tObj { + afterParallel = true + } + } + } + + return stmts +} + +// unlabel returns the inner statement for the possibly labeled statement stmt, +// stripping any (possibly nested) *ast.LabeledStmt wrapper. +// +// The second result reports whether stmt was an *ast.LabeledStmt. +func unlabel(stmt ast.Stmt) (ast.Stmt, bool) { + labeled := false + for { + labelStmt, ok := stmt.(*ast.LabeledStmt) + if !ok { + return stmt, labeled + } + labeled = true + stmt = labelStmt.Stmt + } +} + +// isMethodCall reports whether expr is a method call of +// ... +func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool { + call, ok := expr.(*ast.CallExpr) + if !ok { + return false + } + + // Check that we are calling a method + f := typeutil.StaticCallee(info, call) + if f == nil || f.Name() != method { + return false + } + recv := f.Type().(*types.Signature).Recv() + if recv == nil { + return false + } + + // Check that the receiver is a . or + // *.. + rtype := recv.Type() + if ptr, ok := recv.Type().(*types.Pointer); ok { + rtype = ptr.Elem() + } + named, ok := rtype.(*types.Named) + if !ok { + return false + } + if named.Obj().Name() != typeName { + return false + } pkg := f.Pkg() if pkg == nil { - return nil + return false } - if pkg.Path() != "golang.org/x/sync/errgroup" { - return nil + if pkg.Path() != pkgPath { + return false } - return call.Args[0] + + return true } diff --git a/go/analysis/passes/loopclosure/loopclosure_test.go b/go/analysis/passes/loopclosure/loopclosure_test.go index 1498838d7ff..55fb2a4a3d6 100644 --- a/go/analysis/passes/loopclosure/loopclosure_test.go +++ b/go/analysis/passes/loopclosure/loopclosure_test.go @@ -5,16 +5,16 @@ package loopclosure_test import ( - "golang.org/x/tools/internal/typeparams" "testing" "golang.org/x/tools/go/analysis/analysistest" "golang.org/x/tools/go/analysis/passes/loopclosure" + "golang.org/x/tools/internal/typeparams" ) func Test(t *testing.T) { testdata := analysistest.TestData() - tests := []string{"a", "golang.org/..."} + tests := []string{"a", "golang.org/...", "subtests"} if typeparams.Enabled { tests = append(tests, "typeparams") } diff --git a/go/analysis/passes/loopclosure/testdata/src/a/a.go b/go/analysis/passes/loopclosure/testdata/src/a/a.go index 2c8e2e6c411..7a7f05f663f 100644 --- a/go/analysis/passes/loopclosure/testdata/src/a/a.go +++ b/go/analysis/passes/loopclosure/testdata/src/a/a.go @@ -6,7 +6,13 @@ package testdata -import "golang.org/x/sync/errgroup" +import ( + "sync" + + "golang.org/x/sync/errgroup" +) + +var A int func _() { var s []int @@ -49,6 +55,19 @@ func _() { println(i, v) }() } + + // iteration variable declared outside the loop + for A = range s { + go func() { + println(A) // want "loop variable A captured by func literal" + }() + } + // iteration variable declared in a different file + for B = range s { + go func() { + println(B) // want "loop variable B captured by func literal" + }() + } // If the key of the range statement is not an identifier // the code should not panic (it used to). var x [2]int @@ -91,9 +110,73 @@ func _() { } } -// Group is used to test that loopclosure does not match on any type named "Group". -// The checker only matches on methods "(*...errgroup.Group).Go". -type Group struct{}; +// Cases that rely on recursively checking for last statements. +func _() { + + for i := range "outer" { + for j := range "inner" { + if j < 1 { + defer func() { + print(i) // want "loop variable i captured by func literal" + }() + } else if j < 2 { + go func() { + print(i) // want "loop variable i captured by func literal" + }() + } else { + go func() { + print(i) + }() + println("we don't catch the error above because of this statement") + } + } + } + + for i := 0; i < 10; i++ { + for j := 0; j < 10; j++ { + if j < 1 { + switch j { + case 0: + defer func() { + print(i) // want "loop variable i captured by func literal" + }() + default: + go func() { + print(i) // want "loop variable i captured by func literal" + }() + } + } else if j < 2 { + var a interface{} = j + switch a.(type) { + case int: + defer func() { + print(i) // want "loop variable i captured by func literal" + }() + default: + go func() { + print(i) // want "loop variable i captured by func literal" + }() + } + } else { + ch := make(chan string) + select { + case <-ch: + defer func() { + print(i) // want "loop variable i captured by func literal" + }() + default: + go func() { + print(i) // want "loop variable i captured by func literal" + }() + } + } + } + } +} + +// Group is used to test that loopclosure only matches Group.Go when Group is +// from the golang.org/x/sync/errgroup package. +type Group struct{} func (g *Group) Go(func() error) {} @@ -108,6 +191,21 @@ func _() { return nil }) } + + for i, v := range s { + if i > 0 { + g.Go(func() error { + print(i) // want "loop variable i captured by func literal" + return nil + }) + } else { + g.Go(func() error { + print(v) // want "loop variable v captured by func literal" + return nil + }) + } + } + // Do not match other Group.Go cases g1 := new(Group) for i, v := range s { @@ -118,3 +216,28 @@ func _() { }) } } + +// Real-world example from #16520, slightly simplified +func _() { + var nodes []interface{} + + critical := new(errgroup.Group) + others := sync.WaitGroup{} + + isCritical := func(node interface{}) bool { return false } + run := func(node interface{}) error { return nil } + + for _, node := range nodes { + if isCritical(node) { + critical.Go(func() error { + return run(node) // want "loop variable node captured by func literal" + }) + } else { + others.Add(1) + go func() { + _ = run(node) // want "loop variable node captured by func literal" + others.Done() + }() + } + } +} diff --git a/go/analysis/passes/loopclosure/testdata/src/a/b.go b/go/analysis/passes/loopclosure/testdata/src/a/b.go new file mode 100644 index 00000000000..d4e5da418e5 --- /dev/null +++ b/go/analysis/passes/loopclosure/testdata/src/a/b.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testdata + +// B is declared in a separate file to test that object resolution spans the +// entire package. +var B int diff --git a/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go new file mode 100644 index 00000000000..50283ec6152 --- /dev/null +++ b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go @@ -0,0 +1,202 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests that the loopclosure analyzer detects leaked +// references via parallel subtests. + +package subtests + +import ( + "testing" +) + +// T is used to test that loopclosure only matches T.Run when T is from the +// testing package. +type T struct{} + +// Run should not match testing.T.Run. Note that the second argument is +// intentionally a *testing.T, not a *T, so that we can check both +// testing.T.Parallel inside a T.Run, and a T.Parallel inside a testing.T.Run. +func (t *T) Run(string, func(*testing.T)) { +} + +func (t *T) Parallel() {} + +func _(t *testing.T) { + for i, test := range []int{1, 2, 3} { + // Check that parallel subtests are identified. + t.Run("", func(t *testing.T) { + t.Parallel() + println(i) // want "loop variable i captured by func literal" + println(test) // want "loop variable test captured by func literal" + }) + + // Check that serial tests are OK. + t.Run("", func(t *testing.T) { + println(i) + println(test) + }) + + // Check that the location of t.Parallel matters. + t.Run("", func(t *testing.T) { + println(i) + println(test) + t.Parallel() + println(i) // want "loop variable i captured by func literal" + println(test) // want "loop variable test captured by func literal" + }) + + // Check that *testing.T value matters. + t.Run("", func(t *testing.T) { + var x testing.T + x.Parallel() + println(i) + println(test) + }) + + // Check that shadowing the loop variables within the test literal is OK if + // it occurs before t.Parallel(). + t.Run("", func(t *testing.T) { + i := i + test := test + t.Parallel() + println(i) + println(test) + }) + + // Check that shadowing the loop variables within the test literal is Not + // OK if it occurs after t.Parallel(). + t.Run("", func(t *testing.T) { + t.Parallel() + i := i // want "loop variable i captured by func literal" + test := test // want "loop variable test captured by func literal" + println(i) // OK + println(test) // OK + }) + + // Check uses in nested blocks. + t.Run("", func(t *testing.T) { + t.Parallel() + { + println(i) // want "loop variable i captured by func literal" + println(test) // want "loop variable test captured by func literal" + } + }) + + // Check that we catch uses in nested subtests. + t.Run("", func(t *testing.T) { + t.Parallel() + t.Run("", func(t *testing.T) { + println(i) // want "loop variable i captured by func literal" + println(test) // want "loop variable test captured by func literal" + }) + }) + + // Check that there is no diagnostic if t is not a *testing.T. + t.Run("", func(_ *testing.T) { + t := &T{} + t.Parallel() + println(i) + println(test) + }) + + // Check that there is no diagnostic when a jump to a label may have caused + // the call to t.Parallel to have been skipped. + t.Run("", func(t *testing.T) { + if true { + goto Test + } + t.Parallel() + Test: + println(i) + println(test) + }) + + // Check that there is no diagnostic when a jump to a label may have caused + // the loop variable reference to be skipped, but there is a diagnostic + // when both the call to t.Parallel and the loop variable reference occur + // after the final label in the block. + t.Run("", func(t *testing.T) { + if true { + goto Test + } + t.Parallel() + println(i) // maybe OK + Test: + t.Parallel() + println(test) // want "loop variable test captured by func literal" + }) + + // Check that multiple labels are handled. + t.Run("", func(t *testing.T) { + if true { + goto Test1 + } else { + goto Test2 + } + Test1: + Test2: + t.Parallel() + println(test) // want "loop variable test captured by func literal" + }) + + // Check that we do not have problems when t.Run has a single argument. + fn := func() (string, func(t *testing.T)) { return "", nil } + t.Run(fn()) + } +} + +// Check that there is no diagnostic when loop variables are shadowed within +// the loop body. +func _(t *testing.T) { + for i, test := range []int{1, 2, 3} { + i := i + test := test + t.Run("", func(t *testing.T) { + t.Parallel() + println(i) + println(test) + }) + } +} + +// Check that t.Run must be *testing.T.Run. +func _(t *T) { + for i, test := range []int{1, 2, 3} { + t.Run("", func(t *testing.T) { + t.Parallel() + println(i) + println(test) + }) + } +} + +// Check that the top-level must be parallel in order to cause a diagnostic. +// +// From https://pkg.go.dev/testing: +// +// "Run does not return until parallel subtests have completed, providing a +// way to clean up after a group of parallel tests" +func _(t *testing.T) { + for _, test := range []int{1, 2, 3} { + // In this subtest, a/b must complete before the synchronous subtest "a" + // completes, so the reference to test does not escape the current loop + // iteration. + t.Run("a", func(s *testing.T) { + s.Run("b", func(u *testing.T) { + u.Parallel() + println(test) + }) + }) + + // In this subtest, c executes concurrently, so the reference to test may + // escape the current loop iteration. + t.Run("c", func(s *testing.T) { + s.Parallel() + s.Run("d", func(u *testing.T) { + println(test) // want "loop variable test captured by func literal" + }) + }) + } +} diff --git a/go/analysis/passes/nilness/nilness.go b/go/analysis/passes/nilness/nilness.go index 8db18c73ade..6849c33ccef 100644 --- a/go/analysis/passes/nilness/nilness.go +++ b/go/analysis/passes/nilness/nilness.go @@ -15,6 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/typeparams" ) const Doc = `check for redundant or impossible nil comparisons @@ -62,7 +63,6 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (interface{}, error) { ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) - // TODO(48525): ssainput.SrcFuncs is missing fn._Instances(). runFunc will be skipped. for _, fn := range ssainput.SrcFuncs { runFunc(pass, fn) } @@ -103,8 +103,11 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { for _, instr := range b.Instrs { switch instr := instr.(type) { case ssa.CallInstruction: - notNil(stack, instr, instr.Common().Value, - instr.Common().Description()) + // A nil receiver may be okay for type params. + cc := instr.Common() + if !(cc.IsInvoke() && typeparams.IsTypeParam(cc.Value.Type())) { + notNil(stack, instr, cc.Value, cc.Description()) + } case *ssa.FieldAddr: notNil(stack, instr, instr.X, "field selection") case *ssa.IndexAddr: @@ -307,9 +310,9 @@ func nilnessOf(stack []fact, v ssa.Value) nilness { return isnonnil case *ssa.Const: if v.IsNil() { - return isnil + return isnil // nil or zero value of a pointer-like type } else { - return isnonnil + return unknown // non-pointer } } diff --git a/go/analysis/passes/nilness/nilness_test.go b/go/analysis/passes/nilness/nilness_test.go index 86c4a769da8..99c4dfbac1d 100644 --- a/go/analysis/passes/nilness/nilness_test.go +++ b/go/analysis/passes/nilness/nilness_test.go @@ -24,3 +24,11 @@ func TestInstantiated(t *testing.T) { testdata := analysistest.TestData() analysistest.Run(t, testdata, nilness.Analyzer, "c") } + +func TestTypeSet(t *testing.T) { + if !typeparams.Enabled { + t.Skip("TestTypeSet requires type parameters") + } + testdata := analysistest.TestData() + analysistest.Run(t, testdata, nilness.Analyzer, "d") +} diff --git a/go/analysis/passes/nilness/testdata/src/a/a.go b/go/analysis/passes/nilness/testdata/src/a/a.go index aa7f9a8f859..0629e08d89e 100644 --- a/go/analysis/passes/nilness/testdata/src/a/a.go +++ b/go/analysis/passes/nilness/testdata/src/a/a.go @@ -209,3 +209,10 @@ func f13() { var d *Y print(d.value) // want "nil dereference in field selection" } + +func f14() { + var x struct{ f string } + if x == struct{ f string }{} { // we don't catch this tautology as we restrict to reference types + print(x) + } +} diff --git a/go/analysis/passes/nilness/testdata/src/c/c.go b/go/analysis/passes/nilness/testdata/src/c/c.go index 2b2036595a6..c9a05a714ff 100644 --- a/go/analysis/passes/nilness/testdata/src/c/c.go +++ b/go/analysis/passes/nilness/testdata/src/c/c.go @@ -2,7 +2,7 @@ package c func instantiated[X any](x *X) int { if x == nil { - print(*x) // not reported until _Instances are added to SrcFuncs + print(*x) // want "nil dereference in load" } return 1 } diff --git a/go/analysis/passes/nilness/testdata/src/d/d.go b/go/analysis/passes/nilness/testdata/src/d/d.go new file mode 100644 index 00000000000..72bd1c87217 --- /dev/null +++ b/go/analysis/passes/nilness/testdata/src/d/d.go @@ -0,0 +1,55 @@ +package d + +type message interface{ PR() } + +func noparam() { + var messageT message + messageT.PR() // want "nil dereference in dynamic method call" +} + +func paramNonnil[T message]() { + var messageT T + messageT.PR() // cannot conclude messageT is nil. +} + +func instance() { + // buildssa.BuilderMode does not include InstantiateGenerics. + paramNonnil[message]() // no warning is expected as param[message] id not built. +} + +func param[T interface { + message + ~*int | ~chan int +}]() { + var messageT T // messageT is nil. + messageT.PR() // nil receiver may be okay. See param[nilMsg]. +} + +type nilMsg chan int + +func (m nilMsg) PR() { + if m == nil { + print("not an error") + } +} + +var G func() = param[nilMsg] // no warning + +func allNillable[T ~*int | ~chan int]() { + var x, y T // both are nillable and are nil. + if x != y { // want "impossible condition: nil != nil" + print("unreachable") + } +} + +func notAll[T ~*int | ~chan int | ~int]() { + var x, y T // neither are nillable due to ~int + if x != y { // no warning + print("unreachable") + } +} + +func noninvoke[T ~func()]() { + var x T + x() // want "nil dereference in dynamic function call" +} diff --git a/go/analysis/passes/printf/printf.go b/go/analysis/passes/printf/printf.go index c4ccc95b4fb..daaf709a449 100644 --- a/go/analysis/passes/printf/printf.go +++ b/go/analysis/passes/printf/printf.go @@ -583,7 +583,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F argNum := firstArg maxArgNum := firstArg anyIndex := false - anyW := false for i, w := 0, 0; i < len(format); i += w { w = 1 if format[i] != '%' { @@ -606,11 +605,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name) return } - if anyW { - pass.Reportf(call.Pos(), "%s call has more than one error-wrapping directive %%w", state.name) - return - } - anyW = true } if len(state.argNums) > 0 { // Continue with the next sequential argument. @@ -672,12 +666,13 @@ func (s *formatState) parseIndex() bool { s.scanNum() ok := true if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' { - ok = false - s.nbytes = strings.Index(s.format, "]") + ok = false // syntax error is either missing "]" or invalid index. + s.nbytes = strings.Index(s.format[start:], "]") if s.nbytes < 0 { s.pass.ReportRangef(s.call, "%s format %s is missing closing ]", s.name, s.format) return false } + s.nbytes = s.nbytes + start } arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32) if err != nil || !ok || arg32 <= 0 || arg32 > int64(len(s.call.Args)-s.firstArg) { @@ -950,11 +945,16 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) { return "", false } + // inScope returns true if e is in the scope of f. + inScope := func(e ast.Expr, f *types.Func) bool { + return f.Scope() != nil && f.Scope().Contains(e.Pos()) + } + // Is the expression e within the body of that String or Error method? var method *types.Func - if strOk && strMethod.Pkg() == pass.Pkg && strMethod.Scope().Contains(e.Pos()) { + if strOk && strMethod.Pkg() == pass.Pkg && inScope(e, strMethod) { method = strMethod - } else if errOk && errMethod.Pkg() == pass.Pkg && errMethod.Scope().Contains(e.Pos()) { + } else if errOk && errMethod.Pkg() == pass.Pkg && inScope(e, errMethod) { method = errMethod } else { return "", false diff --git a/go/analysis/passes/printf/testdata/src/a/a.go b/go/analysis/passes/printf/testdata/src/a/a.go index 5eca3172dec..0c4d11bf0c0 100644 --- a/go/analysis/passes/printf/testdata/src/a/a.go +++ b/go/analysis/passes/printf/testdata/src/a/a.go @@ -217,6 +217,7 @@ func PrintfTests() { Printf("%[2]*.[1]*[3]d x", 2, "hi", 4) // want `a.Printf format %\[2]\*\.\[1\]\*\[3\]d uses non-int \x22hi\x22 as argument of \*` Printf("%[0]s x", "arg1") // want `a.Printf format has invalid argument index \[0\]` Printf("%[0]d x", 1) // want `a.Printf format has invalid argument index \[0\]` + Printf("%[3]*.[2*[1]f", 1, 2, 3) // want `a.Printf format has invalid argument index \[2\*\[1\]` // Something that satisfies the error interface. var e error fmt.Println(e.Error()) // ok @@ -341,7 +342,7 @@ func PrintfTests() { _ = fmt.Errorf("%[2]w %[1]s", "x", err) // OK _ = fmt.Errorf("%[2]w %[1]s", e, "x") // want `fmt.Errorf format %\[2\]w has arg "x" of wrong type string` _ = fmt.Errorf("%w", "x") // want `fmt.Errorf format %w has arg "x" of wrong type string` - _ = fmt.Errorf("%w %w", err, err) // want `fmt.Errorf call has more than one error-wrapping directive %w` + _ = fmt.Errorf("%w %w", err, err) // OK _ = fmt.Errorf("%w", interface{}(nil)) // want `fmt.Errorf format %w has arg interface{}\(nil\) of wrong type interface{}` _ = fmt.Errorf("%w", errorTestOK(0)) // concrete value implements error _ = fmt.Errorf("%w", errSubset) // interface value implements error diff --git a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go index 76a9a205a70..c4d7e530d93 100644 --- a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go +++ b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go @@ -121,3 +121,25 @@ func TestTermReduction[T1 interface{ ~int | string }, T2 interface { fmt.Printf("%d", t2) fmt.Printf("%s", t2) // want "wrong type.*contains typeparams.myInt" } + +type U[T any] struct{} + +func (u U[T]) String() string { + fmt.Println(u) // want `fmt.Println arg u causes recursive call to \(typeparams.U\[T\]\).String method` + return "" +} + +type S[T comparable] struct { + t T +} + +func (s S[T]) String() T { + fmt.Println(s) // Not flagged. We currently do not consider String() T to implement fmt.Stringer (see #55928). + return s.t +} + +func TestInstanceStringer() { + // Tests String method with nil Scope (#55350) + fmt.Println(&S[string]{}) + fmt.Println(&U[string]{}) +} diff --git a/go/analysis/passes/printf/types.go b/go/analysis/passes/printf/types.go index 270e917c809..7cbb0bdbf5f 100644 --- a/go/analysis/passes/printf/types.go +++ b/go/analysis/passes/printf/types.go @@ -299,13 +299,3 @@ func isConvertibleToString(typ types.Type) bool { return false } - -// hasBasicType reports whether x's type is a types.Basic with the given kind. -func hasBasicType(pass *analysis.Pass, x ast.Expr, kind types.BasicKind) bool { - t := pass.TypesInfo.Types[x].Type - if t != nil { - t = t.Underlying() - } - b, ok := t.(*types.Basic) - return ok && b.Kind() == kind -} diff --git a/go/analysis/passes/sortslice/analyzer.go b/go/analysis/passes/sortslice/analyzer.go index 5eb957a1883..f85837d66bf 100644 --- a/go/analysis/passes/sortslice/analyzer.go +++ b/go/analysis/passes/sortslice/analyzer.go @@ -52,11 +52,20 @@ func run(pass *analysis.Pass) (interface{}, error) { arg := call.Args[0] typ := pass.TypesInfo.Types[arg].Type + + if tuple, ok := typ.(*types.Tuple); ok { + typ = tuple.At(0).Type() // special case for Slice(f(...)) + } + switch typ.Underlying().(type) { case *types.Slice, *types.Interface: return } + // Restore typ to the original type, we may unwrap the tuple above, + // typ might not be the type of arg. + typ = pass.TypesInfo.Types[arg].Type + var fixes []analysis.SuggestedFix switch v := typ.Underlying().(type) { case *types.Array: diff --git a/go/analysis/passes/sortslice/testdata/src/a/a.go b/go/analysis/passes/sortslice/testdata/src/a/a.go index bc6cc16e9f1..c6aca8df13b 100644 --- a/go/analysis/passes/sortslice/testdata/src/a/a.go +++ b/go/analysis/passes/sortslice/testdata/src/a/a.go @@ -6,8 +6,8 @@ import "sort" func IncorrectSort() { i := 5 sortFn := func(i, j int) bool { return false } - sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int" - sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int" + sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int" + sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int" sort.SliceIsSorted(i, sortFn) // want "sort.SliceIsSorted's argument must be a slice; is called with int" } @@ -62,3 +62,23 @@ func UnderlyingSlice() { sort.SliceStable(s, sortFn) sort.SliceIsSorted(s, sortFn) } + +// FunctionResultsAsArguments passes a function which returns two values +// that satisfy sort.Slice signature. It should not produce a diagnostic. +func FunctionResultsAsArguments() { + s := []string{"a", "z", "ooo"} + sort.Slice(less(s)) + sort.Slice(lessPtr(s)) // want `sort.Slice's argument must be a slice; is called with \(\*\[\]string,.*` +} + +func less(s []string) ([]string, func(i, j int) bool) { + return s, func(i, j int) bool { + return s[i] < s[j] + } +} + +func lessPtr(s []string) (*[]string, func(i, j int) bool) { + return &s, func(i, j int) bool { + return s[i] < s[j] + } +} diff --git a/go/analysis/passes/stdmethods/stdmethods.go b/go/analysis/passes/stdmethods/stdmethods.go index cc9497179da..41f455d1003 100644 --- a/go/analysis/passes/stdmethods/stdmethods.go +++ b/go/analysis/passes/stdmethods/stdmethods.go @@ -134,6 +134,19 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { } } + // Special case: Unwrap has two possible signatures. + // Check for Unwrap() []error here. + if id.Name == "Unwrap" { + if args.Len() == 0 && results.Len() == 1 { + t := typeString(results.At(0).Type()) + if t == "error" || t == "[]error" { + return + } + } + pass.ReportRangef(id, "method Unwrap() should have signature Unwrap() error or Unwrap() []error") + return + } + // Do the =s (if any) all match? if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { return diff --git a/go/analysis/passes/stdmethods/testdata/src/a/a.go b/go/analysis/passes/stdmethods/testdata/src/a/a.go index c95cf5d2b76..2b01f46932f 100644 --- a/go/analysis/passes/stdmethods/testdata/src/a/a.go +++ b/go/analysis/passes/stdmethods/testdata/src/a/a.go @@ -49,7 +49,7 @@ func (E) Error() string { return "" } // E implements error. func (E) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool` func (E) Is() {} // want `method Is\(\) should have signature Is\(error\) bool` -func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error` +func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error` type F int @@ -57,8 +57,18 @@ func (F) Error() string { return "" } // Both F and *F implement error. func (*F) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool` func (*F) Is() {} // want `method Is\(\) should have signature Is\(error\) bool` -func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error` +func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error` type G int func (G) As(interface{}) bool // ok + +type W int + +func (W) Error() string { return "" } +func (W) Unwrap() error { return nil } // ok + +type M int + +func (M) Error() string { return "" } +func (M) Unwrap() []error { return nil } // ok diff --git a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go index 72df30d4960..3d4146e9b2c 100644 --- a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go +++ b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go @@ -30,7 +30,7 @@ func (E[_]) Error() string { return "" } // E implements error. func (E[P]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool` func (E[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool` -func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error` +func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error` type F[P any] int @@ -38,4 +38,4 @@ func (F[_]) Error() string { return "" } // Both F and *F implement error. func (*F[_]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool` func (*F[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool` -func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error` +func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error` diff --git a/go/analysis/passes/tests/testdata/src/a/go118_test.go b/go/analysis/passes/tests/testdata/src/a/go118_test.go index dc898daca0b..e2bc3f3a0bd 100644 --- a/go/analysis/passes/tests/testdata/src/a/go118_test.go +++ b/go/analysis/passes/tests/testdata/src/a/go118_test.go @@ -94,3 +94,8 @@ func FuzzObjectMethod(f *testing.F) { } f.Fuzz(obj.myVar) // ok } + +// Test for golang/go#56505: checking fuzz arguments should not panic on *error. +func FuzzIssue56505(f *testing.F) { + f.Fuzz(func(e *error) {}) // want "the first parameter of a fuzz target must be \\*testing.T" +} diff --git a/go/analysis/passes/tests/tests.go b/go/analysis/passes/tests/tests.go index 56b20ebd519..935aad00c98 100644 --- a/go/analysis/passes/tests/tests.go +++ b/go/analysis/passes/tests/tests.go @@ -269,7 +269,9 @@ func isTestingType(typ types.Type, testingType string) bool { if !ok { return false } - return named.Obj().Pkg().Path() == "testing" && named.Obj().Name() == testingType + obj := named.Obj() + // obj.Pkg is nil for the error type. + return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == testingType } // Validate that fuzz target function's arguments are of accepted types. @@ -475,10 +477,12 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs. + // TODO(adonovan): use ReportRangef(tparams). pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix) } if !isTestSuffix(fn.Name.Name[len(prefix):]) { + // TODO(adonovan): use ReportRangef(fn.Name). pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix) } } diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go b/go/analysis/passes/timeformat/testdata/src/a/a.go new file mode 100644 index 00000000000..98481446e55 --- /dev/null +++ b/go/analysis/passes/timeformat/testdata/src/a/a.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests for the timeformat checker. + +package a + +import ( + "time" + + "b" +) + +func hasError() { + a, _ := time.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02` + a.Format(`2006-02-01`) // want `2006-02-01 should be 2006-01-02` + a.Format("2006-02-01 15:04:05") // want `2006-02-01 should be 2006-01-02` + + const c = "2006-02-01" + a.Format(c) // want `2006-02-01 should be 2006-01-02` +} + +func notHasError() { + a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") + a.Format("2006-01-02") + + const c = "2006-01-02" + a.Format(c) + + v := "2006-02-01" + a.Format(v) // Allowed though variables. + + m := map[string]string{ + "y": "2006-02-01", + } + a.Format(m["y"]) + + s := []string{"2006-02-01"} + a.Format(s[0]) + + a.Format(badFormat()) + + o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") + o.Format("2006-02-01") +} + +func badFormat() string { + return "2006-02-01" +} diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go.golden b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden new file mode 100644 index 00000000000..9eccded63b4 --- /dev/null +++ b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests for the timeformat checker. + +package a + +import ( + "time" + + "b" +) + +func hasError() { + a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02` + a.Format(`2006-01-02`) // want `2006-02-01 should be 2006-01-02` + a.Format("2006-01-02 15:04:05") // want `2006-02-01 should be 2006-01-02` + + const c = "2006-02-01" + a.Format(c) // want `2006-02-01 should be 2006-01-02` +} + +func notHasError() { + a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") + a.Format("2006-01-02") + + const c = "2006-01-02" + a.Format(c) + + v := "2006-02-01" + a.Format(v) // Allowed though variables. + + m := map[string]string{ + "y": "2006-02-01", + } + a.Format(m["y"]) + + s := []string{"2006-02-01"} + a.Format(s[0]) + + a.Format(badFormat()) + + o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") + o.Format("2006-02-01") +} + +func badFormat() string { + return "2006-02-01" +} diff --git a/go/analysis/passes/timeformat/testdata/src/b/b.go b/go/analysis/passes/timeformat/testdata/src/b/b.go new file mode 100644 index 00000000000..de5690863c9 --- /dev/null +++ b/go/analysis/passes/timeformat/testdata/src/b/b.go @@ -0,0 +1,11 @@ +package b + +type B struct { +} + +func Parse(string, string) B { + return B{} +} + +func (b B) Format(string) { +} diff --git a/go/analysis/passes/timeformat/timeformat.go b/go/analysis/passes/timeformat/timeformat.go new file mode 100644 index 00000000000..acb198f95c4 --- /dev/null +++ b/go/analysis/passes/timeformat/timeformat.go @@ -0,0 +1,129 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeformat defines an Analyzer that checks for the use +// of time.Format or time.Parse calls with a bad format. +package timeformat + +import ( + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const badFormat = "2006-02-01" +const goodFormat = "2006-01-02" + +const Doc = `check for calls of (time.Time).Format or time.Parse with 2006-02-01 + +The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm) +format. Internationally, "yyyy-dd-mm" does not occur in common calendar date +standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended. +` + +var Analyzer = &analysis.Analyzer{ + Name: "timeformat", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if !ok { + return + } + if !isTimeDotFormat(fn) && !isTimeDotParse(fn) { + return + } + if len(call.Args) > 0 { + arg := call.Args[0] + badAt := badFormatAt(pass.TypesInfo, arg) + + if badAt > -1 { + // Check if it's a literal string, otherwise we can't suggest a fix. + if _, ok := arg.(*ast.BasicLit); ok { + pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or ` + end := pos + len(badFormat) + + pass.Report(analysis.Diagnostic{ + Pos: token.Pos(pos), + End: token.Pos(end), + Message: badFormat + " should be " + goodFormat, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace " + badFormat + " with " + goodFormat, + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos), + End: token.Pos(end), + NewText: []byte(goodFormat), + }}, + }}, + }) + } else { + pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat) + } + } + } + }) + return nil, nil +} + +func isTimeDotFormat(f *types.Func) bool { + if f.Name() != "Format" || f.Pkg().Path() != "time" { + return false + } + sig, ok := f.Type().(*types.Signature) + if !ok { + return false + } + // Verify that the receiver is time.Time. + recv := sig.Recv() + if recv == nil { + return false + } + named, ok := recv.Type().(*types.Named) + return ok && named.Obj().Name() == "Time" +} + +func isTimeDotParse(f *types.Func) bool { + if f.Name() != "Parse" || f.Pkg().Path() != "time" { + return false + } + // Verify that there is no receiver. + sig, ok := f.Type().(*types.Signature) + return ok && sig.Recv() == nil +} + +// badFormatAt return the start of a bad format in e or -1 if no bad format is found. +func badFormatAt(info *types.Info, e ast.Expr) int { + tv, ok := info.Types[e] + if !ok { // no type info, assume good + return -1 + } + + t, ok := tv.Type.(*types.Basic) + if !ok || t.Info()&types.IsString == 0 { + return -1 + } + + if tv.Value == nil { + return -1 + } + + return strings.Index(constant.StringVal(tv.Value), badFormat) +} diff --git a/go/analysis/passes/timeformat/timeformat_test.go b/go/analysis/passes/timeformat/timeformat_test.go new file mode 100644 index 00000000000..86bbe1bb3fb --- /dev/null +++ b/go/analysis/passes/timeformat/timeformat_test.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package timeformat_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/timeformat" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + analysistest.RunWithSuggestedFixes(t, testdata, timeformat.Analyzer, "a") +} diff --git a/go/analysis/unitchecker/unitchecker.go b/go/analysis/unitchecker/unitchecker.go index 9827b57f529..37693564e5b 100644 --- a/go/analysis/unitchecker/unitchecker.go +++ b/go/analysis/unitchecker/unitchecker.go @@ -50,7 +50,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/analysisflags" - "golang.org/x/tools/go/analysis/internal/facts" + "golang.org/x/tools/internal/facts" "golang.org/x/tools/internal/typeparams" ) @@ -249,6 +249,10 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re // In VetxOnly mode, analyzers are only for their facts, // so we can skip any analysis that neither produces facts // nor depends on any analysis that produces facts. + // + // TODO(adonovan): fix: the command (and logic!) here are backwards. + // It should say "...nor is required by any...". (Issue 443099) + // // Also build a map to hold working state and result. type action struct { once sync.Once @@ -287,13 +291,13 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re analyzers = filtered // Read facts from imported packages. - read := func(path string) ([]byte, error) { - if vetx, ok := cfg.PackageVetx[path]; ok { + read := func(imp *types.Package) ([]byte, error) { + if vetx, ok := cfg.PackageVetx[imp.Path()]; ok { return ioutil.ReadFile(vetx) } return nil, nil // no .vetx file, no facts } - facts, err := facts.Decode(pkg, read) + facts, err := facts.NewDecoder(pkg).Decode(read) if err != nil { return nil, err } @@ -340,6 +344,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re Pkg: pkg, TypesInfo: info, TypesSizes: tc.Sizes, + TypeErrors: nil, // unitchecker doesn't RunDespiteErrors ResultOf: inputs, Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, ImportObjectFact: facts.ImportObjectFact, diff --git a/go/analysis/unitchecker/unitchecker_test.go b/go/analysis/unitchecker/unitchecker_test.go index 7e5b848de86..197abd9a168 100644 --- a/go/analysis/unitchecker/unitchecker_test.go +++ b/go/analysis/unitchecker/unitchecker_test.go @@ -20,6 +20,7 @@ import ( "strings" "testing" + "golang.org/x/tools/go/analysis/passes/assign" "golang.org/x/tools/go/analysis/passes/findcall" "golang.org/x/tools/go/analysis/passes/printf" "golang.org/x/tools/go/analysis/unitchecker" @@ -41,6 +42,7 @@ func main() { unitchecker.Main( findcall.Analyzer, printf.Analyzer, + assign.Analyzer, ) } @@ -74,6 +76,13 @@ func _() { } func MyFunc123() {} +`, + "c/c.go": `package c + +func _() { + i := 5 + i = i +} `, }}}) defer exported.Cleanup() @@ -84,6 +93,9 @@ func MyFunc123() {} const wantB = `# golang.org/fake/b ([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:6:13: call of MyFunc123\(...\) ([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:7:11: call of MyFunc123\(...\) +` + const wantC = `# golang.org/fake/c +([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5: self-assignment of i to i ` const wantAJSON = `# golang.org/fake/a \{ @@ -91,23 +103,62 @@ func MyFunc123() {} "findcall": \[ \{ "posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go:4:11", - "message": "call of MyFunc123\(...\)" + "message": "call of MyFunc123\(...\)", + "suggested_fixes": \[ + \{ + "message": "Add '_TEST_'", + "edits": \[ + \{ + "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go", + "start": 32, + "end": 32, + "new": "_TEST_" + \} + \] + \} + \] + \} + \] + \} +\} +` + const wantCJSON = `# golang.org/fake/c +\{ + "golang.org/fake/c": \{ + "assign": \[ + \{ + "posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5", + "message": "self-assignment of i to i", + "suggested_fixes": \[ + \{ + "message": "Remove", + "edits": \[ + \{ + "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go", + "start": 37, + "end": 42, + "new": "" + \} + \] + \} + \] \} \] \} \} ` - for _, test := range []struct { - args string - wantOut string - wantExit int + args string + wantOut string + wantExitError bool }{ - {args: "golang.org/fake/a", wantOut: wantA, wantExit: 2}, - {args: "golang.org/fake/b", wantOut: wantB, wantExit: 2}, - {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExit: 2}, - {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExit: 0}, - {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExit: 2}, + {args: "golang.org/fake/a", wantOut: wantA, wantExitError: true}, + {args: "golang.org/fake/b", wantOut: wantB, wantExitError: true}, + {args: "golang.org/fake/c", wantOut: wantC, wantExitError: true}, + {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExitError: true}, + {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExitError: false}, + {args: "-json golang.org/fake/c", wantOut: wantCJSON, wantExitError: false}, + {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExitError: true}, } { cmd := exec.Command("go", "vet", "-vettool="+os.Args[0], "-findcall.name=MyFunc123") cmd.Args = append(cmd.Args, strings.Fields(test.args)...) @@ -119,13 +170,17 @@ func MyFunc123() {} if exitErr, ok := err.(*exec.ExitError); ok { exitcode = exitErr.ExitCode() } - if exitcode != test.wantExit { - t.Errorf("%s: got exit code %d, want %d", test.args, exitcode, test.wantExit) + if (exitcode != 0) != test.wantExitError { + want := "zero" + if test.wantExitError { + want = "nonzero" + } + t.Errorf("%s: got exit code %d, want %s", test.args, exitcode, want) } matched, err := regexp.Match(test.wantOut, out) if err != nil { - t.Fatal(err) + t.Fatalf("regexp.Match(<<%s>>): %v", test.wantOut, err) } if !matched { t.Errorf("%s: got <<%s>>, want match of regexp <<%s>>", test.args, out, test.wantOut) diff --git a/go/ast/inspector/inspector.go b/go/ast/inspector/inspector.go index af5e17feeea..3fbfebf3693 100644 --- a/go/ast/inspector/inspector.go +++ b/go/ast/inspector/inspector.go @@ -53,10 +53,13 @@ func New(files []*ast.File) *Inspector { // of an ast.Node during a traversal. type event struct { node ast.Node - typ uint64 // typeOf(node) - index int // 1 + index of corresponding pop event, or 0 if this is a pop + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event } +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. + // Preorder visits all the nodes of the files supplied to New in // depth-first order. It calls f(n) for each node n before it visits // n's children. @@ -72,10 +75,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { + if ev.index > i { + // push + if ev.typ&mask != 0 { f(ev.node) } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } } i++ } @@ -94,15 +104,24 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { - // push + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { if !f(ev.node, true) { - i = ev.index // jump to corresponding pop + 1 + i = pop + 1 // jump to corresponding pop + 1 continue } - } else { - // pop + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { f(ev.node, false) } } @@ -119,19 +138,26 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s var stack []ast.Node for i := 0; i < len(in.events); { ev := in.events[i] - if ev.index > 0 { + if ev.index > i { // push + pop := ev.index stack = append(stack, ev.node) if ev.typ&mask != 0 { if !f(ev.node, true, stack) { - i = ev.index + i = pop + 1 stack = stack[:len(stack)-1] continue } } + if in.events[pop].typ&mask == 0 { + // Subtrees does not contain types: skip them. + i = pop + continue + } } else { // pop - if ev.typ&mask != 0 { + push := ev.index + if in.events[push].typ&mask != 0 { f(ev.node, false, stack) } stack = stack[:len(stack)-1] @@ -157,25 +183,31 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event + stack = append(stack, event{}) // include an extra event so file nodes have a parent for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ node: n, - typ: typeOf(n), + typ: 0, // temporarily used to accumulate type bits of subtree index: len(events), // push event temporarily holds own index } stack = append(stack, ev) events = append(events, ev) } else { // pop - ev := stack[len(stack)-1] - stack = stack[:len(stack)-1] + top := len(stack) - 1 + ev := stack[top] + typ := typeOf(ev.node) + push := ev.index + parent := top - 1 - events[ev.index].index = len(events) + 1 // make push refer to pop + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = len(events) // make push refer to pop - ev.index = 0 // turn ev into a pop event + stack = stack[:top] events = append(events, ev) } return true diff --git a/go/ast/inspector/inspector_test.go b/go/ast/inspector/inspector_test.go index 9e539189680..e88d584b5c0 100644 --- a/go/ast/inspector/inspector_test.go +++ b/go/ast/inspector/inspector_test.go @@ -244,9 +244,11 @@ func typeOf(n ast.Node) string { // but a break-even point (NewInspector/(ASTInspect-Inspect)) of about 5 // traversals. // -// BenchmarkNewInspector 4.5 ms -// BenchmarkNewInspect 0.33ms -// BenchmarkASTInspect 1.2 ms +// BenchmarkASTInspect 1.0 ms +// BenchmarkNewInspector 2.2 ms +// BenchmarkInspect 0.39ms +// BenchmarkInspectFilter 0.01ms +// BenchmarkInspectCalls 0.14ms func BenchmarkNewInspector(b *testing.B) { // Measure one-time construction overhead. @@ -274,6 +276,42 @@ func BenchmarkInspect(b *testing.B) { } } +func BenchmarkInspectFilter(b *testing.B) { + b.StopTimer() + inspect := inspector.New(netFiles) + b.StartTimer() + + // Measure marginal cost of traversal. + nodeFilter := []ast.Node{(*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)} + var ndecls, nlits int + for i := 0; i < b.N; i++ { + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n.(type) { + case *ast.FuncDecl: + ndecls++ + case *ast.FuncLit: + nlits++ + } + }) + } +} + +func BenchmarkInspectCalls(b *testing.B) { + b.StopTimer() + inspect := inspector.New(netFiles) + b.StartTimer() + + // Measure marginal cost of traversal. + nodeFilter := []ast.Node{(*ast.CallExpr)(nil)} + var ncalls int + for i := 0; i < b.N; i++ { + inspect.Preorder(nodeFilter, func(n ast.Node) { + _ = n.(*ast.CallExpr) + ncalls++ + }) + } +} + func BenchmarkASTInspect(b *testing.B) { var ndecls, nlits int for i := 0; i < b.N; i++ { diff --git a/go/ast/inspector/typeof.go b/go/ast/inspector/typeof.go index 11ab2bc85aa..703c8139544 100644 --- a/go/ast/inspector/typeof.go +++ b/go/ast/inspector/typeof.go @@ -11,6 +11,7 @@ package inspector import ( "go/ast" + "math" "golang.org/x/tools/internal/typeparams" ) @@ -218,7 +219,7 @@ func typeOf(n ast.Node) uint64 { func maskOf(nodes []ast.Node) uint64 { if nodes == nil { - return 1<<64 - 1 // match all node types + return math.MaxUint64 // match all node types } var mask uint64 for _, n := range nodes { diff --git a/go/buildutil/util.go b/go/buildutil/util.go index d771b18e32d..bee6390de4c 100644 --- a/go/buildutil/util.go +++ b/go/buildutil/util.go @@ -80,7 +80,7 @@ func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Packag // (go/build.Context defines these as methods, but does not export them.) -// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses // the local file system to answer the question. func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { if f := ctxt.HasSubdir; f != nil { diff --git a/go/callgraph/callgraph.go b/go/callgraph/callgraph.go index 352ce0c76ed..905623753d6 100644 --- a/go/callgraph/callgraph.go +++ b/go/callgraph/callgraph.go @@ -37,6 +37,8 @@ package callgraph // import "golang.org/x/tools/go/callgraph" // More generally, we could eliminate "uninteresting" nodes such as // nodes from packages we don't care about. +// TODO(zpavlinovic): decide how callgraphs handle calls to and from generic function bodies. + import ( "fmt" "go/token" diff --git a/go/callgraph/callgraph_test.go b/go/callgraph/callgraph_test.go new file mode 100644 index 00000000000..dd6baafa5ec --- /dev/null +++ b/go/callgraph/callgraph_test.go @@ -0,0 +1,253 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package callgraph_test + +import ( + "log" + "sync" + "testing" + + "golang.org/x/tools/go/callgraph" + "golang.org/x/tools/go/callgraph/cha" + "golang.org/x/tools/go/callgraph/rta" + "golang.org/x/tools/go/callgraph/static" + "golang.org/x/tools/go/callgraph/vta" + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/pointer" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/go/ssa/ssautil" +) + +// Benchmarks comparing different callgraph algorithms implemented in +// x/tools/go/callgraph. Comparison is on both speed, memory and precision. +// Fewer edges and fewer reachable nodes implies a more precise result. +// Comparison is done on a hello world http server using net/http. +// +// Current results were on an i7 macbook on go version devel go1.20-2730. +// Number of nodes, edges, and reachable function are expected to vary between +// go versions. Timing results are expected to vary between machines. +// BenchmarkStatic-12 53 ms/op 6 MB/op 12113 nodes 37355 edges 1522 reachable +// BenchmarkCHA-12 86 ms/op 16 MB/op 12113 nodes 131717 edges 7640 reachable +// BenchmarkRTA-12 110 ms/op 12 MB/op 6566 nodes 42291 edges 5099 reachable +// BenchmarkPTA-12 1427 ms/op 600 MB/op 8714 nodes 28244 edges 4184 reachable +// BenchmarkVTA-12 600 ms/op 78 MB/op 12114 nodes 44861 edges 4919 reachable +// BenchmarkVTA2-12 793 ms/op 104 MB/op 5450 nodes 22208 edges 4042 reachable +// BenchmarkVTA3-12 977 ms/op 124 MB/op 4621 nodes 19331 edges 3700 reachable +// BenchmarkVTAAlt-12 372 ms/op 57 MB/op 7763 nodes 29912 edges 4258 reachable +// BenchmarkVTAAlt2-12 570 ms/op 78 MB/op 4838 nodes 20169 edges 3737 reachable +// +// Note: +// * Static is unsound and may miss real edges. +// * RTA starts from a main function and only includes reachable functions. +// * CHA starts from all functions. +// * VTA, VTA2, and VTA3 are starting from all functions and the CHA callgraph. +// VTA2 and VTA3 are the result of re-applying VTA to the functions reachable +// from main() via the callgraph of the previous stage. +// * VTAAlt, and VTAAlt2 start from the functions reachable from main via the +// CHA callgraph. +// * All algorithms are unsound w.r.t. reflection. + +const httpEx = `package main + +import ( + "fmt" + "net/http" +) + +func hello(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "hello world\n") +} + +func main() { + http.HandleFunc("/hello", hello) + http.ListenAndServe(":8090", nil) +} +` + +var ( + once sync.Once + prog *ssa.Program + main *ssa.Function +) + +func example() (*ssa.Program, *ssa.Function) { + once.Do(func() { + var conf loader.Config + f, err := conf.ParseFile("", httpEx) + if err != nil { + log.Fatal(err) + } + conf.CreateFromFiles(f.Name.Name, f) + + lprog, err := conf.Load() + if err != nil { + log.Fatalf("test 'package %s': Load: %s", f.Name.Name, err) + } + prog = ssautil.CreateProgram(lprog, ssa.InstantiateGenerics) + prog.Build() + + main = prog.Package(lprog.Created[0].Pkg).Members["main"].(*ssa.Function) + }) + return prog, main +} + +var stats bool = false // print stats? + +func logStats(b *testing.B, cnd bool, name string, cg *callgraph.Graph, main *ssa.Function) { + if cnd && stats { + e := 0 + for _, n := range cg.Nodes { + e += len(n.Out) + } + r := len(reaches(main, cg, false)) + b.Logf("%s:\t%d nodes\t%d edges\t%d reachable", name, len(cg.Nodes), e, r) + } +} + +func BenchmarkStatic(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + cg := static.CallGraph(prog) + logStats(b, i == 0, "static", cg, main) + } +} + +func BenchmarkCHA(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + cg := cha.CallGraph(prog) + logStats(b, i == 0, "cha", cg, main) + } +} + +func BenchmarkRTA(b *testing.B) { + b.StopTimer() + _, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + res := rta.Analyze([]*ssa.Function{main}, true) + cg := res.CallGraph + logStats(b, i == 0, "rta", cg, main) + } +} + +func BenchmarkPTA(b *testing.B) { + b.StopTimer() + _, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + config := &pointer.Config{Mains: []*ssa.Package{main.Pkg}, BuildCallGraph: true} + res, err := pointer.Analyze(config) + if err != nil { + b.Fatal(err) + } + logStats(b, i == 0, "pta", res.CallGraph, main) + } +} + +func BenchmarkVTA(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + cg := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog)) + logStats(b, i == 0, "vta", cg, main) + } +} + +func BenchmarkVTA2(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog)) + cg := vta.CallGraph(reaches(main, vta1, true), vta1) + logStats(b, i == 0, "vta2", cg, main) + } +} + +func BenchmarkVTA3(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog)) + vta2 := vta.CallGraph(reaches(main, vta1, true), vta1) + cg := vta.CallGraph(reaches(main, vta2, true), vta2) + logStats(b, i == 0, "vta3", cg, main) + } +} + +func BenchmarkVTAAlt(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + cha := cha.CallGraph(prog) + cg := vta.CallGraph(reaches(main, cha, true), cha) // start from only functions reachable by CHA. + logStats(b, i == 0, "vta-alt", cg, main) + } +} + +func BenchmarkVTAAlt2(b *testing.B) { + b.StopTimer() + prog, main := example() + b.StartTimer() + + for i := 0; i < b.N; i++ { + cha := cha.CallGraph(prog) + vta1 := vta.CallGraph(reaches(main, cha, true), cha) + cg := vta.CallGraph(reaches(main, vta1, true), vta1) + logStats(b, i == 0, "vta-alt2", cg, main) + } +} + +// reaches computes the transitive closure of functions forward reachable +// via calls in cg starting from `sources`. If refs is true, include +// functions referred to in an instruction. +func reaches(source *ssa.Function, cg *callgraph.Graph, refs bool) map[*ssa.Function]bool { + seen := make(map[*ssa.Function]bool) + var visit func(f *ssa.Function) + visit = func(f *ssa.Function) { + if seen[f] { + return + } + seen[f] = true + + if n := cg.Nodes[f]; n != nil { + for _, e := range n.Out { + if e.Site != nil { + visit(e.Callee.Func) + } + } + } + + if refs { + var buf [10]*ssa.Value // avoid alloc in common case + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ssa.Function); ok { + visit(fn) + } + } + } + } + } + } + visit(source) + return seen +} diff --git a/go/callgraph/cha/cha.go b/go/callgraph/cha/cha.go index 170040426b8..6296d48d91a 100644 --- a/go/callgraph/cha/cha.go +++ b/go/callgraph/cha/cha.go @@ -22,6 +22,8 @@ // partial programs, such as libraries without a main or test function. package cha // import "golang.org/x/tools/go/callgraph/cha" +// TODO(zpavlinovic): update CHA for how it handles generic function bodies. + import ( "go/types" @@ -38,6 +40,54 @@ func CallGraph(prog *ssa.Program) *callgraph.Graph { allFuncs := ssautil.AllFunctions(prog) + calleesOf := lazyCallees(allFuncs) + + addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) { + gnode := cg.CreateNode(g) + callgraph.AddEdge(fnode, site, gnode) + } + + addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) { + // Because every call to a highly polymorphic and + // frequently used abstract method such as + // (io.Writer).Write is assumed to call every concrete + // Write method in the program, the call graph can + // contain a lot of duplication. + // + // TODO(taking): opt: consider making lazyCallees public. + // Using the same benchmarks as callgraph_test.go, removing just + // the explicit callgraph.Graph construction is 4x less memory + // and is 37% faster. + // CHA 86 ms/op 16 MB/op + // lazyCallees 63 ms/op 4 MB/op + for _, g := range callees { + addEdge(fnode, site, g) + } + } + + for f := range allFuncs { + fnode := cg.CreateNode(f) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if site, ok := instr.(ssa.CallInstruction); ok { + if g := site.Common().StaticCallee(); g != nil { + addEdge(fnode, site, g) + } else { + addEdges(fnode, site, calleesOf(site)) + } + } + } + } + } + + return cg +} + +// lazyCallees returns a function that maps a call site (in a function in fns) +// to its callees within fns. +// +// The resulting function is not concurrency safe. +func lazyCallees(fns map[*ssa.Function]bool) func(site ssa.CallInstruction) []*ssa.Function { // funcsBySig contains all functions, keyed by signature. It is // the effective set of address-taken functions used to resolve // a dynamic call of a particular signature. @@ -79,7 +129,7 @@ func CallGraph(prog *ssa.Program) *callgraph.Graph { return methods } - for f := range allFuncs { + for f := range fns { if f.Signature.Recv() == nil { // Package initializers can never be address-taken. if f.Name() == "init" && f.Synthetic == "package initializer" { @@ -93,45 +143,17 @@ func CallGraph(prog *ssa.Program) *callgraph.Graph { } } - addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) { - gnode := cg.CreateNode(g) - callgraph.AddEdge(fnode, site, gnode) - } - - addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) { - // Because every call to a highly polymorphic and - // frequently used abstract method such as - // (io.Writer).Write is assumed to call every concrete - // Write method in the program, the call graph can - // contain a lot of duplication. - // - // TODO(adonovan): opt: consider factoring the callgraph - // API so that the Callers component of each edge is a - // slice of nodes, not a singleton. - for _, g := range callees { - addEdge(fnode, site, g) - } - } - - for f := range allFuncs { - fnode := cg.CreateNode(f) - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if site, ok := instr.(ssa.CallInstruction); ok { - call := site.Common() - if call.IsInvoke() { - tiface := call.Value.Type().Underlying().(*types.Interface) - addEdges(fnode, site, lookupMethods(tiface, call.Method)) - } else if g := call.StaticCallee(); g != nil { - addEdge(fnode, site, g) - } else if _, ok := call.Value.(*ssa.Builtin); !ok { - callees, _ := funcsBySig.At(call.Signature()).([]*ssa.Function) - addEdges(fnode, site, callees) - } - } - } + return func(site ssa.CallInstruction) []*ssa.Function { + call := site.Common() + if call.IsInvoke() { + tiface := call.Value.Type().Underlying().(*types.Interface) + return lookupMethods(tiface, call.Method) + } else if g := call.StaticCallee(); g != nil { + return []*ssa.Function{g} + } else if _, ok := call.Value.(*ssa.Builtin); !ok { + fns, _ := funcsBySig.At(call.Signature()).([]*ssa.Function) + return fns } + return nil } - - return cg } diff --git a/go/callgraph/cha/testdata/generics.go b/go/callgraph/cha/testdata/generics.go index 79250a56ca1..0323c7582b6 100644 --- a/go/callgraph/cha/testdata/generics.go +++ b/go/callgraph/cha/testdata/generics.go @@ -41,5 +41,9 @@ func f(h func(), g func(I), k func(A), a A, b B) { // f --> instantiated[main.A] // f --> instantiated[main.A] // f --> instantiated[main.B] +// instantiated --> (*A).Foo +// instantiated --> (*B).Foo +// instantiated --> (A).Foo +// instantiated --> (B).Foo // instantiated[main.A] --> (A).Foo // instantiated[main.B] --> (B).Foo diff --git a/go/callgraph/static/static.go b/go/callgraph/static/static.go index c7fae75bbde..62d2364bf2c 100644 --- a/go/callgraph/static/static.go +++ b/go/callgraph/static/static.go @@ -6,6 +6,8 @@ // only static call edges. package static // import "golang.org/x/tools/go/callgraph/static" +// TODO(zpavlinovic): update static for how it handles generic function bodies. + import ( "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/ssa" diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go index 365d7a5b0f7..2537123f4c4 100644 --- a/go/callgraph/vta/graph.go +++ b/go/callgraph/vta/graph.go @@ -12,6 +12,7 @@ import ( "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" ) // node interface for VTA nodes. @@ -327,14 +328,16 @@ func (b *builder) instr(instr ssa.Instruction) { // change type command a := A(b) results in a and b being the // same value. For concrete type A, there is no interesting flow. // - // Note: When A is an interface, most interface casts are handled + // When A is an interface, most interface casts are handled // by the ChangeInterface instruction. The relevant case here is // when converting a pointer to an interface type. This can happen // when the underlying interfaces have the same method set. - // type I interface{ foo() } - // type J interface{ foo() } - // var b *I - // a := (*J)(b) + // + // type I interface{ foo() } + // type J interface{ foo() } + // var b *I + // a := (*J)(b) + // // When this happens we add flows between a <--> b. b.addInFlowAliasEdges(b.nodeFromVal(i), b.nodeFromVal(i.X)) case *ssa.TypeAssert: @@ -373,6 +376,8 @@ func (b *builder) instr(instr ssa.Instruction) { // SliceToArrayPointer: t1 = slice to array pointer *[4]T <- []T (t0) // No interesting flow as sliceArrayElem(t1) == sliceArrayElem(t0). return + case *ssa.MultiConvert: + b.multiconvert(i) default: panic(fmt.Sprintf("unsupported instruction %v\n", instr)) } @@ -568,7 +573,9 @@ func (b *builder) panic(p *ssa.Panic) { func (b *builder) call(c ssa.CallInstruction) { // When c is r := recover() call register instruction, we add Recover -> r. if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" { - b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(c.(*ssa.Call))) + if v, ok := c.(ssa.Value); ok { + b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(v)) + } return } @@ -586,10 +593,18 @@ func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) { return } cc := c.Common() - // When c is an unresolved method call (cc.Method != nil), cc.Value contains - // the receiver object rather than cc.Args[0]. if cc.Method != nil { - b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value)) + // In principle we don't add interprocedural flows for receiver + // objects. At a call site, the receiver object is interface + // while the callee object is concrete. The flow from interface + // to concrete type in general does not make sense. The exception + // is when the concrete type is a named function type (see #57756). + // + // The flow other way around would bake in information from the + // initial call graph. + if isFunction(f.Params[0].Type()) { + b.addInFlowEdge(b.nodeFromVal(cc.Value), b.nodeFromVal(f.Params[0])) + } } offset := 0 @@ -643,6 +658,71 @@ func addReturnFlows(b *builder, r *ssa.Return, site ssa.Value) { } } +func (b *builder) multiconvert(c *ssa.MultiConvert) { + // TODO(zpavlinovic): decide what to do on MultiConvert long term. + // TODO(zpavlinovic): add unit tests. + typeSetOf := func(typ types.Type) []*typeparams.Term { + // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on. + var terms []*typeparams.Term + var err error + switch typ := typ.(type) { + case *typeparams.TypeParam: + terms, err = typeparams.StructuralTerms(typ) + case *typeparams.Union: + terms, err = typeparams.UnionTermSet(typ) + case *types.Interface: + terms, err = typeparams.InterfaceTermSet(typ) + default: + // Common case. + // Specializing the len=1 case to avoid a slice + // had no measurable space/time benefit. + terms = []*typeparams.Term{typeparams.NewTerm(false, typ)} + } + + if err != nil { + return nil + } + return terms + } + // isValuePreserving returns true if a conversion from ut_src to + // ut_dst is value-preserving, i.e. just a change of type. + // Precondition: neither argument is a named type. + isValuePreserving := func(ut_src, ut_dst types.Type) bool { + // Identical underlying types? + if types.IdenticalIgnoreTags(ut_dst, ut_src) { + return true + } + + switch ut_dst.(type) { + case *types.Chan: + // Conversion between channel types? + _, ok := ut_src.(*types.Chan) + return ok + + case *types.Pointer: + // Conversion between pointers with identical base types? + _, ok := ut_src.(*types.Pointer) + return ok + } + return false + } + dst_terms := typeSetOf(c.Type()) + src_terms := typeSetOf(c.X.Type()) + for _, s := range src_terms { + us := s.Type().Underlying() + for _, d := range dst_terms { + ud := d.Type().Underlying() + if isValuePreserving(us, ud) { + // This is equivalent to a ChangeType. + b.addInFlowAliasEdges(b.nodeFromVal(c), b.nodeFromVal(c.X)) + return + } + // This is equivalent to either: SliceToArrayPointer,, + // SliceToArrayPointer+Deref, Size 0 Array constant, or a Convert. + } + } +} + // addInFlowEdge adds s -> d to g if d is node that can have an inflow, i.e., a node // that represents an interface or an unresolved function value. Otherwise, there // is no interesting type flow so the edge is omitted. @@ -654,7 +734,7 @@ func (b *builder) addInFlowEdge(s, d node) { // Creates const, pointer, global, func, and local nodes based on register instructions. func (b *builder) nodeFromVal(val ssa.Value) node { - if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) && !isFunction(p.Elem()) { + if p, ok := val.Type().(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) { // Nested pointer to interfaces are modeled as a special // nestedPtrInterface node. if i := interfaceUnderPtr(p.Elem()); i != nil { @@ -687,7 +767,9 @@ func (b *builder) nodeFromVal(val ssa.Value) node { // semantically equivalent types can have different implementations, // this method guarantees the same implementation is always used. func (b *builder) representative(n node) node { - if !hasInitialTypes(n) { + if n.Type() == nil { + // panicArg and recoverReturn do not have + // types and are unique by definition. return n } t := canonicalize(n.Type(), &b.canon) diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go index 5934ebc2167..5817e89380f 100644 --- a/go/callgraph/vta/propagation.go +++ b/go/callgraph/vta/propagation.go @@ -20,53 +20,52 @@ import ( // with ids X and Y s.t. X < Y, Y comes before X in the topological order. func scc(g vtaGraph) (map[node]int, int) { // standard data structures used by Tarjan's algorithm. - var index uint64 + type state struct { + index int + lowLink int + onStack bool + } + states := make(map[node]*state, len(g)) var stack []node - indexMap := make(map[node]uint64) - lowLink := make(map[node]uint64) - onStack := make(map[node]bool) - nodeToSccID := make(map[node]int) + nodeToSccID := make(map[node]int, len(g)) sccID := 0 var doSCC func(node) doSCC = func(n node) { - indexMap[n] = index - lowLink[n] = index - index = index + 1 - onStack[n] = true + index := len(states) + ns := &state{index: index, lowLink: index, onStack: true} + states[n] = ns stack = append(stack, n) for s := range g[n] { - if _, ok := indexMap[s]; !ok { + if ss, visited := states[s]; !visited { // Analyze successor s that has not been visited yet. doSCC(s) - lowLink[n] = min(lowLink[n], lowLink[s]) - } else if onStack[s] { + ss = states[s] + ns.lowLink = min(ns.lowLink, ss.lowLink) + } else if ss.onStack { // The successor is on the stack, meaning it has to be // in the current SCC. - lowLink[n] = min(lowLink[n], indexMap[s]) + ns.lowLink = min(ns.lowLink, ss.index) } } // if n is a root node, pop the stack and generate a new SCC. - if lowLink[n] == indexMap[n] { - for { - w := stack[len(stack)-1] + if ns.lowLink == index { + var w node + for w != n { + w = stack[len(stack)-1] stack = stack[:len(stack)-1] - onStack[w] = false + states[w].onStack = false nodeToSccID[w] = sccID - if w == n { - break - } } sccID++ } } - index = 0 for n := range g { - if _, ok := indexMap[n]; !ok { + if _, visited := states[n]; !visited { doSCC(n) } } @@ -74,7 +73,7 @@ func scc(g vtaGraph) (map[node]int, int) { return nodeToSccID, sccID } -func min(x, y uint64) uint64 { +func min(x, y int) int { if x < y { return x } @@ -175,6 +174,18 @@ func nodeTypes(nodes []node, builder *trie.Builder, propTypeId func(p propType) return &typeSet } +// hasInitialTypes check if a node can have initial types. +// Returns true iff `n` is not a panic, recover, nestedPtr* +// node, nor a node whose type is an interface. +func hasInitialTypes(n node) bool { + switch n.(type) { + case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface: + return false + default: + return !types.IsInterface(n.Type()) + } +} + // getPropType creates a propType for `node` based on its type. // propType.typ is always node.Type(). If node is function, then // propType.val is the underlying function; nil otherwise. diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go index 00b21277f22..f4a754f9663 100644 --- a/go/callgraph/vta/propagation_test.go +++ b/go/callgraph/vta/propagation_test.go @@ -58,7 +58,7 @@ func newLocal(name string, t types.Type) local { // newNamedType creates a bogus type named `name`. func newNamedType(name string) *types.Named { - return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil) + return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), types.Universe.Lookup("int").Type(), nil) } // sccString is a utility for stringifying `nodeToScc`. Every diff --git a/go/callgraph/vta/testdata/src/callgraph_issue_57756.go b/go/callgraph/vta/testdata/src/callgraph_issue_57756.go new file mode 100644 index 00000000000..e18f16eba01 --- /dev/null +++ b/go/callgraph/vta/testdata/src/callgraph_issue_57756.go @@ -0,0 +1,67 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// go:build ignore + +package testdata + +// Test that the values of a named function type are correctly +// flowing from interface objects i in i.Foo() to the receiver +// parameters of callees of i.Foo(). + +type H func() + +func (h H) Do() { + h() +} + +type I interface { + Do() +} + +func Bar() I { + return H(func() {}) +} + +func For(g G) { + b := Bar() + b.Do() + + g[0] = b + g.Goo() +} + +type G []I + +func (g G) Goo() { + g[0].Do() +} + +// Relevant SSA: +// func Bar$1(): +// return +// +// func Bar() I: +// t0 = changetype H <- func() (Bar$1) +// t1 = make I <- H (t0) +// +// func For(): +// t0 = Bar() +// t1 = invoke t0.Do() +// t2 = &g[0:int] +// *t2 = t0 +// t3 = (G).Goo(g) +// +// func (h H) Do(): +// t0 = h() +// +// func (g G) Goo(): +// t0 = &g[0:int] +// t1 = *t0 +// t2 = invoke t1.Do() + +// WANT: +// For: (G).Goo(g) -> G.Goo; Bar() -> Bar; invoke t0.Do() -> H.Do +// H.Do: h() -> Bar$1 +// G.Goo: invoke t1.Do() -> H.Do diff --git a/go/callgraph/vta/testdata/src/function_alias.go b/go/callgraph/vta/testdata/src/function_alias.go index b38e0e00d69..0a8dffe79d4 100644 --- a/go/callgraph/vta/testdata/src/function_alias.go +++ b/go/callgraph/vta/testdata/src/function_alias.go @@ -33,42 +33,42 @@ func Baz(f func()) { // t2 = *t1 // *t2 = Baz$1 // t3 = local A (a) -// t4 = &t3.foo [#0] -// t5 = *t1 -// t6 = *t5 -// *t4 = t6 +// t4 = *t1 +// t5 = *t4 +// t6 = &t3.foo [#0] +// *t6 = t5 // t7 = &t3.foo [#0] // t8 = *t7 // t9 = t8() -// t10 = &t3.do [#1] *Doer -// t11 = &t3.foo [#0] *func() -// t12 = *t11 func() -// t13 = changetype Doer <- func() (t12) Doer -// *t10 = t13 +// t10 = &t3.foo [#0] *func() +// t11 = *t10 func() +// t12 = &t3.do [#1] *Doer +// t13 = changetype Doer <- func() (t11) Doer +// *t12 = t13 // t14 = &t3.do [#1] *Doer // t15 = *t14 Doer // t16 = t15() () // Flow chain showing that Baz$1 reaches t8(): -// Baz$1 -> t2 <-> PtrFunction(func()) <-> t5 -> t6 -> t4 <-> Field(testdata.A:foo) <-> t7 -> t8 +// Baz$1 -> t2 <-> PtrFunction(func()) <-> t4 -> t5 -> t6 <-> Field(testdata.A:foo) <-> t7 -> t8 // Flow chain showing that Baz$1 reaches t15(): -// Field(testdata.A:foo) <-> t11 -> t12 -> t13 -> t10 <-> Field(testdata.A:do) <-> t14 -> t15 +// Field(testdata.A:foo) <-> t10 -> t11 -> t13 -> t12 <-> Field(testdata.A:do) <-> t14 -> t15 // WANT: // Local(f) -> Local(t0) // Local(t0) -> PtrFunction(func()) // Function(Baz$1) -> Local(t2) -// PtrFunction(func()) -> Local(t0), Local(t2), Local(t5) +// PtrFunction(func()) -> Local(t0), Local(t2), Local(t4) // Local(t2) -> PtrFunction(func()) -// Local(t4) -> Field(testdata.A:foo) -// Local(t5) -> Local(t6), PtrFunction(func()) -// Local(t6) -> Local(t4) +// Local(t6) -> Field(testdata.A:foo) +// Local(t4) -> Local(t5), PtrFunction(func()) +// Local(t5) -> Local(t6) // Local(t7) -> Field(testdata.A:foo), Local(t8) -// Field(testdata.A:foo) -> Local(t11), Local(t4), Local(t7) -// Local(t4) -> Field(testdata.A:foo) -// Field(testdata.A:do) -> Local(t10), Local(t14) -// Local(t10) -> Field(testdata.A:do) -// Local(t11) -> Field(testdata.A:foo), Local(t12) -// Local(t12) -> Local(t13) -// Local(t13) -> Local(t10) +// Field(testdata.A:foo) -> Local(t10), Local(t6), Local(t7) +// Local(t6) -> Field(testdata.A:foo) +// Field(testdata.A:do) -> Local(t12), Local(t14) +// Local(t12) -> Field(testdata.A:do) +// Local(t10) -> Field(testdata.A:foo), Local(t11) +// Local(t11) -> Local(t13) +// Local(t13) -> Local(t12) // Local(t14) -> Field(testdata.A:do), Local(t15) diff --git a/go/callgraph/vta/testdata/src/panic.go b/go/callgraph/vta/testdata/src/panic.go index 2d39c70ea89..5ef3548577b 100644 --- a/go/callgraph/vta/testdata/src/panic.go +++ b/go/callgraph/vta/testdata/src/panic.go @@ -27,12 +27,12 @@ func recover2() { func Baz(a A) { defer recover1() + defer recover() panic(a) } // Relevant SSA: // func recover1(): -// 0: // t0 = print("only this recover...":string) // t1 = recover() // t2 = typeassert,ok t1.(I) @@ -53,6 +53,7 @@ func Baz(a A) { // t0 = local A (a) // *t0 = a // defer recover1() +// defer recover() // t1 = *t0 // t2 = make interface{} <- A (t1) // panic t2 diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go index 0531a227f6c..d1831983ad6 100644 --- a/go/callgraph/vta/utils.go +++ b/go/callgraph/vta/utils.go @@ -9,6 +9,7 @@ import ( "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/typeparams" ) func canAlias(n1, n2 node) bool { @@ -56,24 +57,7 @@ func hasInFlow(n node) bool { return true } - return isInterface(t) || isFunction(t) -} - -// hasInitialTypes check if a node can have initial types. -// Returns true iff `n` is not a panic or recover node as -// those are artificial. -func hasInitialTypes(n node) bool { - switch n.(type) { - case panicArg, recoverReturn: - return false - default: - return true - } -} - -func isInterface(t types.Type) bool { - _, ok := t.Underlying().(*types.Interface) - return ok + return types.IsInterface(t) || isFunction(t) } func isFunction(t types.Type) bool { @@ -98,7 +82,7 @@ func interfaceUnderPtr(t types.Type) types.Type { return nil } - if isInterface(p.Elem()) { + if types.IsInterface(p.Elem()) { return p.Elem() } @@ -134,19 +118,27 @@ func functionUnderPtr(t types.Type) types.Type { } // sliceArrayElem returns the element type of type `t` that is -// expected to be a (pointer to) array or slice, consistent with +// expected to be a (pointer to) array, slice or string, consistent with // the ssa.Index and ssa.IndexAddr instructions. Panics otherwise. func sliceArrayElem(t types.Type) types.Type { - u := t.Underlying() - - if p, ok := u.(*types.Pointer); ok { - u = p.Elem().Underlying() - } - - if a, ok := u.(*types.Array); ok { - return a.Elem() + switch u := t.Underlying().(type) { + case *types.Pointer: + return u.Elem().Underlying().(*types.Array).Elem() + case *types.Array: + return u.Elem() + case *types.Slice: + return u.Elem() + case *types.Basic: + return types.Typ[types.Byte] + case *types.Interface: // type param. + terms, err := typeparams.InterfaceTermSet(u) + if err != nil || len(terms) == 0 { + panic(t) + } + return sliceArrayElem(terms[0].Type()) // Element types must match. + default: + panic(t) } - return u.(*types.Slice).Elem() } // siteCallees computes a set of callees for call site `c` given program `callgraph`. diff --git a/go/callgraph/vta/vta.go b/go/callgraph/vta/vta.go index 9839bd3f3cd..58393600337 100644 --- a/go/callgraph/vta/vta.go +++ b/go/callgraph/vta/vta.go @@ -54,6 +54,8 @@ // reaching the node representing the call site to create a set of callees. package vta +// TODO(zpavlinovic): update VTA for how it handles generic function bodies and instantiation wrappers. + import ( "go/types" diff --git a/go/callgraph/vta/vta_test.go b/go/callgraph/vta/vta_test.go index 4cd26a54aba..549c4af4529 100644 --- a/go/callgraph/vta/vta_test.go +++ b/go/callgraph/vta/vta_test.go @@ -26,6 +26,7 @@ func TestVTACallGraph(t *testing.T) { "testdata/src/callgraph_fields.go", "testdata/src/callgraph_field_funcs.go", "testdata/src/callgraph_recursive_types.go", + "testdata/src/callgraph_issue_57756.go", } { t.Run(file, func(t *testing.T) { prog, want, err := testProg(file, ssa.BuilderMode(0)) diff --git a/go/expect/expect_test.go b/go/expect/expect_test.go index bd25ef831e2..e9ae40f7e09 100644 --- a/go/expect/expect_test.go +++ b/go/expect/expect_test.go @@ -43,7 +43,7 @@ func TestMarker(t *testing.T) { }, }, { - filename: "testdata/go.mod", + filename: "testdata/go.fake.mod", expectNotes: 2, expectMarkers: map[string]string{ "αMarker": "αfake1α", diff --git a/go/expect/testdata/go.fake.mod b/go/expect/testdata/go.fake.mod new file mode 100644 index 00000000000..ca84fcee9f3 --- /dev/null +++ b/go/expect/testdata/go.fake.mod @@ -0,0 +1,9 @@ +// This file is named go.fake.mod so it does not define a real module, which +// would make the contents of this directory unavailable to the test when run +// from outside the repository. + +module αfake1α //@mark(αMarker, "αfake1α") + +go 1.14 + +require golang.org/modfile v0.0.0 //@mark(βMarker, "require golang.org/modfile v0.0.0") diff --git a/go/expect/testdata/go.mod b/go/expect/testdata/go.mod deleted file mode 100644 index d0323eae6a1..00000000000 --- a/go/expect/testdata/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module αfake1α //@mark(αMarker, "αfake1α") - -go 1.14 - -require golang.org/modfile v0.0.0 //@mark(βMarker, "require golang.org/modfile v0.0.0") diff --git a/go/gcexportdata/example_test.go b/go/gcexportdata/example_test.go index e81e705b1c4..7371d31d430 100644 --- a/go/gcexportdata/example_test.go +++ b/go/gcexportdata/example_test.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && gc -// +build go1.7,gc +//go:build go1.7 && gc && !android && !ios && !js +// +build go1.7,gc,!android,!ios,!js package gcexportdata_test @@ -30,7 +30,6 @@ func ExampleRead() { log.Fatalf("can't find export data for fmt") } fmt.Printf("Package path: %s\n", path) - fmt.Printf("Export data: %s\n", filepath.Base(filename)) // Open and read the file. f, err := os.Open(filename) @@ -80,7 +79,6 @@ func ExampleRead() { // Output: // // Package path: fmt - // Export data: fmt.a // Package members: Println found // Println type: func(a ...any) (n int, err error) // Println location: $GOROOT/src/fmt/print.go:123:1 diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go index d50826dbf7e..165ede0f8f3 100644 --- a/go/gcexportdata/gcexportdata.go +++ b/go/gcexportdata/gcexportdata.go @@ -22,26 +22,41 @@ package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" "bytes" + "encoding/json" "fmt" "go/token" "go/types" "io" - "io/ioutil" + "os/exec" - "golang.org/x/tools/go/internal/gcimporter" + "golang.org/x/tools/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file // containing type information for the specified import path, -// using the workspace layout conventions of go/build. +// using the go command. // If no file was found, an empty filename is returned. // // A relative srcDir is interpreted relative to the current working directory. // // Find also returns the package's resolved (canonical) import path, // reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath } // NewReader returns a reader for the export data section of an object @@ -69,9 +84,26 @@ func NewReader(r io.Reader) (io.Reader, error) { } } +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + // Read reads export data from in, decodes it, and returns type // information for the package. -// The package name is specified by path. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// // File position information is added to fset. // // Read may inspect and add to the imports map to ensure that references @@ -82,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) { // // On return, the state of the reader is undefined. func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := ioutil.ReadAll(in) + data, err := readAll(in) if err != nil { return nil, fmt.Errorf("reading export data for %q: %v", path, err) } @@ -91,22 +123,32 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) } - // The App Engine Go runtime v1.6 uses the old export data format. - // TODO(adonovan): delete once v1.7 has been around for a while. - if bytes.HasPrefix(data, []byte("package ")) { - return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) - } - // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err + + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) } // Write writes encoded type information for the specified package to out. @@ -129,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { // // Experimental: This API is experimental and may change in the future. func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { - data, err := ioutil.ReadAll(in) + data, err := readAll(in) if err != nil { return nil, fmt.Errorf("reading export bundle: %v", err) } diff --git a/go/gcexportdata/gcexportdata_test.go b/go/gcexportdata/gcexportdata_test.go deleted file mode 100644 index a0006c02d5a..00000000000 --- a/go/gcexportdata/gcexportdata_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcexportdata_test - -import ( - "go/token" - "go/types" - "log" - "os" - "testing" - - "golang.org/x/tools/go/gcexportdata" -) - -// Test to ensure that gcexportdata can read files produced by App -// Engine Go runtime v1.6. -func TestAppEngine16(t *testing.T) { - // Open and read the file. - f, err := os.Open("testdata/errors-ae16.a") - if err != nil { - t.Fatal(err) - } - defer f.Close() - r, err := gcexportdata.NewReader(f) - if err != nil { - log.Fatalf("reading export data: %v", err) - } - - // Decode the export data. - fset := token.NewFileSet() - imports := make(map[string]*types.Package) - pkg, err := gcexportdata.Read(r, fset, imports, "errors") - if err != nil { - log.Fatal(err) - } - - // Print package information. - got := pkg.Scope().Lookup("New").Type().String() - want := "func(text string) error" - if got != want { - t.Errorf("New.Type = %s, want %s", got, want) - } -} diff --git a/go/gcexportdata/importer.go b/go/gcexportdata/importer.go index fe6ed93215c..37a7247e268 100644 --- a/go/gcexportdata/importer.go +++ b/go/gcexportdata/importer.go @@ -22,6 +22,9 @@ import ( // version-skew problems described in the documentation of this package, // or to control the FileSet or access the imports map populated during // package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { return importer{fset, imports} } diff --git a/go/gcexportdata/testdata/errors-ae16.a b/go/gcexportdata/testdata/errors-ae16.a deleted file mode 100644 index 3f1dad54f07..00000000000 Binary files a/go/gcexportdata/testdata/errors-ae16.a and /dev/null differ diff --git a/go/internal/gccgoimporter/parser.go b/go/internal/gccgoimporter/parser.go index 5c24845831a..9fdb6f8b059 100644 --- a/go/internal/gccgoimporter/parser.go +++ b/go/internal/gccgoimporter/parser.go @@ -1167,7 +1167,7 @@ func (p *parser) maybeCreatePackage() { } } -// parseInitDateDirective parses an InitDataDirective: +// parseInitDataDirective parses an InitDataDirective: // // InitDataDirective = ( "v1" | "v2" | "v3" ) ";" | // "priority" int ";" | diff --git a/go/internal/gcimporter/gcimporter.go b/go/internal/gcimporter/gcimporter.go deleted file mode 100644 index 493bfa03b0f..00000000000 --- a/go/internal/gcimporter/gcimporter.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, -// but it also contains the original source-based importer code for Go1.6. -// Once we stop supporting 1.6, we can remove that code. - -// Package gcimporter provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" - -import ( - "bufio" - "errors" - "fmt" - "go/build" - "go/constant" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "text/scanner" -) - -const ( - // Enable debug during development: it adds some additional checks, and - // prevents errors from being recovered. - debug = false - - // If trace is set, debugging output is printed to std out. - trace = false -) - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. - if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - id = path // make sure we have an id to print in error message - return - } - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// ImportData imports a package by reading the gc-generated export data, -// adds the corresponding package object to the packages map indexed by id, -// and returns the object. -// -// The packages map must contains all packages already imported. The data -// reader position must be the beginning of the export data section. The -// filename is only used in error messages. -// -// If packages[id] contains the completely imported package, that package -// can be used directly, and there is no need to call this function (but -// there is also no harm but for extra time used). -func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { - // support for parser error handling - defer func() { - switch r := recover().(type) { - case nil: - // nothing to do - case importError: - err = r - default: - panic(r) // internal error - } - }() - - var p parser - p.init(filename, id, data, packages) - pkg = p.parseExport() - - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string - if lookup != nil { - // With custom lookup specified, assume that caller has - // converted path to a canonical import path for use in the map. - if path == "unsafe" { - return types.Unsafe, nil - } - id = path - - // No need to re-import if the package was imported completely before. - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - f, err := lookup(path) - if err != nil { - return nil, err - } - rc = f - } else { - filename, id = FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %q", id) - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // add file name to error - err = fmt.Errorf("%s: %v", filename, err) - } - }() - rc = f - } - defer rc.Close() - - var hdr string - buf := bufio.NewReader(rc) - if hdr, _, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$\n": - // Work-around if we don't have a filename; happens only if lookup != nil. - // Either way, the filename is only needed for importer error messages, so - // this is fine. - if filename == "" { - filename = path - } - return ImportData(packages, filename, id, buf) - - case "$$B\n": - var data []byte - data, err = ioutil.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -// ---------------------------------------------------------------------------- -// Parser - -// TODO(gri) Imported objects don't have position information. -// Ideally use the debug table line info; alternatively -// create some fake position (or the position of the -// import). That way error messages referring to imported -// objects can print meaningful information. - -// parser parses the exports inside a gc compiler-produced -// object/archive file and populates its scope with the results. -type parser struct { - scanner scanner.Scanner - tok rune // current token - lit string // literal string; only valid for Ident, Int, String tokens - id string // package id of imported package - sharedPkgs map[string]*types.Package // package id -> package object (across importer) - localPkgs map[string]*types.Package // package id -> package object (just this package) -} - -func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { - p.scanner.Init(src) - p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<' ' - p.scanner.Filename = filename // for good error messages - p.next() - p.id = id - p.sharedPkgs = packages - if debug { - // check consistency of packages map - for _, pkg := range packages { - if pkg.Name() == "" { - fmt.Printf("no package name for %s\n", pkg.Path()) - } - } - } -} - -func (p *parser) next() { - p.tok = p.scanner.Scan() - switch p.tok { - case scanner.Ident, scanner.Int, scanner.Char, scanner.String, 'Ā·': - p.lit = p.scanner.TokenText() - default: - p.lit = "" - } - if debug { - fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) - } -} - -func declTypeName(pkg *types.Package, name string) *types.TypeName { - scope := pkg.Scope() - if obj := scope.Lookup(name); obj != nil { - return obj.(*types.TypeName) - } - obj := types.NewTypeName(token.NoPos, pkg, name, nil) - // a named type may be referred to before the underlying type - // is known - set it up - types.NewNamed(obj, nil, nil) - scope.Insert(obj) - return obj -} - -// ---------------------------------------------------------------------------- -// Error handling - -// Internal errors are boxed as importErrors. -type importError struct { - pos scanner.Position - err error -} - -func (e importError) Error() string { - return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) -} - -func (p *parser) error(err interface{}) { - if s, ok := err.(string); ok { - err = errors.New(s) - } - // panic with a runtime.Error if err is not an error - panic(importError{p.scanner.Pos(), err.(error)}) -} - -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Sprintf(format, args...)) -} - -func (p *parser) expect(tok rune) string { - lit := p.lit - if p.tok != tok { - p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) - } - p.next() - return lit -} - -func (p *parser) expectSpecial(tok string) { - sep := 'x' // not white space - i := 0 - for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - i++ - } - if i < len(tok) { - p.errorf("expected %q, got %q", tok, tok[0:i]) - } -} - -func (p *parser) expectKeyword(keyword string) { - lit := p.expect(scanner.Ident) - if lit != keyword { - p.errorf("expected keyword %s, got %q", keyword, lit) - } -} - -// ---------------------------------------------------------------------------- -// Qualified and unqualified names - -// parsePackageID parses a PackageId: -// -// PackageId = string_lit . -func (p *parser) parsePackageID() string { - id, err := strconv.Unquote(p.expect(scanner.String)) - if err != nil { - p.error(err) - } - // id == "" stands for the imported package id - // (only known at time of package installation) - if id == "" { - id = p.id - } - return id -} - -// parsePackageName parse a PackageName: -// -// PackageName = ident . -func (p *parser) parsePackageName() string { - return p.expect(scanner.Ident) -} - -// parseDotIdent parses a dotIdentifier: -// -// dotIdentifier = ( ident | 'Ā·' ) { ident | int | 'Ā·' } . -func (p *parser) parseDotIdent() string { - ident := "" - if p.tok != scanner.Int { - sep := 'x' // not white space - for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == 'Ā·') && sep > ' ' { - ident += p.lit - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - } - } - if ident == "" { - p.expect(scanner.Ident) // use expect() for error handling - } - return ident -} - -// parseQualifiedName parses a QualifiedName: -// -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . -func (p *parser) parseQualifiedName() (id, name string) { - p.expect('@') - id = p.parsePackageID() - p.expect('.') - // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. - if p.tok == '?' { - p.next() - } else { - name = p.parseDotIdent() - } - return -} - -// getPkg returns the package for a given id. If the package is -// not found, create the package and add it to the p.localPkgs -// and p.sharedPkgs maps. name is the (expected) name of the -// package. If name == "", the package name is expected to be -// set later via an import clause in the export data. -// -// id identifies a package, usually by a canonical package path like -// "encoding/json" but possibly by a non-canonical import path like -// "./json". -func (p *parser) getPkg(id, name string) *types.Package { - // package unsafe is not in the packages maps - handle explicitly - if id == "unsafe" { - return types.Unsafe - } - - pkg := p.localPkgs[id] - if pkg == nil { - // first import of id from this package - pkg = p.sharedPkgs[id] - if pkg == nil { - // first import of id by this importer; - // add (possibly unnamed) pkg to shared packages - pkg = types.NewPackage(id, name) - p.sharedPkgs[id] = pkg - } - // add (possibly unnamed) pkg to local packages - if p.localPkgs == nil { - p.localPkgs = make(map[string]*types.Package) - } - p.localPkgs[id] = pkg - } else if name != "" { - // package exists already and we have an expected package name; - // make sure names match or set package name if necessary - if pname := pkg.Name(); pname == "" { - pkg.SetName(name) - } else if pname != name { - p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) - } - } - return pkg -} - -// parseExportedName is like parseQualifiedName, but -// the package id is resolved to an imported *types.Package. -func (p *parser) parseExportedName() (pkg *types.Package, name string) { - id, name := p.parseQualifiedName() - pkg = p.getPkg(id, "") - return -} - -// ---------------------------------------------------------------------------- -// Types - -// parseBasicType parses a BasicType: -// -// BasicType = identifier . -func (p *parser) parseBasicType() types.Type { - id := p.expect(scanner.Ident) - obj := types.Universe.Lookup(id) - if obj, ok := obj.(*types.TypeName); ok { - return obj.Type() - } - p.errorf("not a basic type: %s", id) - return nil -} - -// parseArrayType parses an ArrayType: -// -// ArrayType = "[" int_lit "]" Type . -func (p *parser) parseArrayType(parent *types.Package) types.Type { - // "[" already consumed and lookahead known not to be "]" - lit := p.expect(scanner.Int) - p.expect(']') - elem := p.parseType(parent) - n, err := strconv.ParseInt(lit, 10, 64) - if err != nil { - p.error(err) - } - return types.NewArray(elem, n) -} - -// parseMapType parses a MapType: -// -// MapType = "map" "[" Type "]" Type . -func (p *parser) parseMapType(parent *types.Package) types.Type { - p.expectKeyword("map") - p.expect('[') - key := p.parseType(parent) - p.expect(']') - elem := p.parseType(parent) - return types.NewMap(key, elem) -} - -// parseName parses a Name: -// -// Name = identifier | "?" | QualifiedName . -// -// For unqualified and anonymous names, the returned package is the parent -// package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the name -// is an unqualified struct field or interface method name belonging to a -// type declared in another package.) -// -// For qualified names, the returned package is nil (and not created if -// it doesn't exist yet) unless materializePkg is set (which creates an -// unnamed package with valid package path). In the latter case, a -// subsequent import clause is expected to provide a name for the package. -func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { - pkg = parent - if pkg == nil { - pkg = p.sharedPkgs[p.id] - } - switch p.tok { - case scanner.Ident: - name = p.lit - p.next() - case '?': - // anonymous - p.next() - case '@': - // exported name prefixed with package path - pkg = nil - var id string - id, name = p.parseQualifiedName() - if materializePkg { - pkg = p.getPkg(id, "") - } - default: - p.error("name expected") - } - return -} - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - -// parseField parses a Field: -// -// Field = Name Type [ string_lit ] . -func (p *parser) parseField(parent *types.Package) (*types.Var, string) { - pkg, name := p.parseName(parent, true) - - if name == "_" { - // Blank fields should be package-qualified because they - // are unexported identifiers, but gc does not qualify them. - // Assuming that the ident belongs to the current package - // causes types to change during re-exporting, leading - // to spurious "can't assign A to B" errors from go/types. - // As a workaround, pretend all blank fields belong - // to the same unique dummy package. - const blankpkg = "<_>" - pkg = p.getPkg(blankpkg, blankpkg) - } - - typ := p.parseType(parent) - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - p.errorf("anonymous field expected") - } - anonymous = true - } - tag := "" - if p.tok == scanner.String { - s := p.expect(scanner.String) - var err error - tag, err = strconv.Unquote(s) - if err != nil { - p.errorf("invalid struct tag %s: %s", s, err) - } - } - return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag -} - -// parseStructType parses a StructType: -// -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . -func (p *parser) parseStructType(parent *types.Package) types.Type { - var fields []*types.Var - var tags []string - - p.expectKeyword("struct") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - fld, tag := p.parseField(parent) - if tag != "" && tags == nil { - tags = make([]string, i) - } - if tags != nil { - tags = append(tags, tag) - } - fields = append(fields, fld) - } - p.expect('}') - - return types.NewStruct(fields, tags) -} - -// parseParameter parses a Parameter: -// -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . -func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { - _, name := p.parseName(nil, false) - // remove gc-specific parameter numbering - if i := strings.Index(name, "Ā·"); i >= 0 { - name = name[:i] - } - if p.tok == '.' { - p.expectSpecial("...") - isVariadic = true - } - typ := p.parseType(nil) - if isVariadic { - typ = types.NewSlice(typ) - } - // ignore argument tag (e.g. "noescape") - if p.tok == scanner.String { - p.next() - } - // TODO(gri) should we provide a package? - par = types.NewVar(token.NoPos, nil, name, typ) - return -} - -// parseParameters parses a Parameters: -// -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . -func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { - p.expect('(') - for p.tok != ')' && p.tok != scanner.EOF { - if len(list) > 0 { - p.expect(',') - } - par, variadic := p.parseParameter() - list = append(list, par) - if variadic { - if isVariadic { - p.error("... not on final argument") - } - isVariadic = true - } - } - p.expect(')') - - return -} - -// parseSignature parses a Signature: -// -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . -func (p *parser) parseSignature(recv *types.Var) *types.Signature { - params, isVariadic := p.parseParameters() - - // optional result type - var results []*types.Var - if p.tok == '(' { - var variadic bool - results, variadic = p.parseParameters() - if variadic { - p.error("... not permitted on result type") - } - } - - return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) -} - -// parseInterfaceType parses an InterfaceType: -// -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . -// -// The methods of embedded interfaces are always "inlined" -// by the compiler and thus embedded interfaces are never -// visible in the export data. -func (p *parser) parseInterfaceType(parent *types.Package) types.Type { - var methods []*types.Func - - p.expectKeyword("interface") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - pkg, name := p.parseName(parent, true) - sig := p.parseSignature(nil) - methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) - } - p.expect('}') - - // Complete requires the type's embedded interfaces to be fully defined, - // but we do not define any - return newInterface(methods, nil).Complete() -} - -// parseChanType parses a ChanType: -// -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . -func (p *parser) parseChanType(parent *types.Package) types.Type { - dir := types.SendRecv - if p.tok == scanner.Ident { - p.expectKeyword("chan") - if p.tok == '<' { - p.expectSpecial("<-") - dir = types.SendOnly - } - } else { - p.expectSpecial("<-") - p.expectKeyword("chan") - dir = types.RecvOnly - } - elem := p.parseType(parent) - return types.NewChan(dir, elem) -} - -// parseType parses a Type: -// -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . -// -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . -func (p *parser) parseType(parent *types.Package) types.Type { - switch p.tok { - case scanner.Ident: - switch p.lit { - default: - return p.parseBasicType() - case "struct": - return p.parseStructType(parent) - case "func": - // FuncType - p.next() - return p.parseSignature(nil) - case "interface": - return p.parseInterfaceType(parent) - case "map": - return p.parseMapType(parent) - case "chan": - return p.parseChanType(parent) - } - case '@': - // TypeName - pkg, name := p.parseExportedName() - return declTypeName(pkg, name).Type() - case '[': - p.next() // look ahead - if p.tok == ']' { - // SliceType - p.next() - return types.NewSlice(p.parseType(parent)) - } - return p.parseArrayType(parent) - case '*': - // PointerType - p.next() - return types.NewPointer(p.parseType(parent)) - case '<': - return p.parseChanType(parent) - case '(': - // "(" Type ")" - p.next() - typ := p.parseType(parent) - p.expect(')') - return typ - } - p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) - return nil -} - -// ---------------------------------------------------------------------------- -// Declarations - -// parseImportDecl parses an ImportDecl: -// -// ImportDecl = "import" PackageName PackageId . -func (p *parser) parseImportDecl() { - p.expectKeyword("import") - name := p.parsePackageName() - p.getPkg(p.parsePackageID(), name) -} - -// parseInt parses an int_lit: -// -// int_lit = [ "+" | "-" ] { "0" ... "9" } . -func (p *parser) parseInt() string { - s := "" - switch p.tok { - case '-': - s = "-" - p.next() - case '+': - p.next() - } - return s + p.expect(scanner.Int) -} - -// parseNumber parses a number: -// -// number = int_lit [ "p" int_lit ] . -func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { - // mantissa - mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) - if mant == nil { - panic("invalid mantissa") - } - - if p.lit == "p" { - // exponent (base 2) - p.next() - exp, err := strconv.ParseInt(p.parseInt(), 10, 0) - if err != nil { - p.error(err) - } - if exp < 0 { - denom := constant.MakeInt64(1) - denom = constant.Shift(denom, token.SHL, uint(-exp)) - typ = types.Typ[types.UntypedFloat] - val = constant.BinaryOp(mant, token.QUO, denom) - return - } - if exp > 0 { - mant = constant.Shift(mant, token.SHL, uint(exp)) - } - typ = types.Typ[types.UntypedFloat] - val = mant - return - } - - typ = types.Typ[types.UntypedInt] - val = mant - return -} - -// parseConstDecl parses a ConstDecl: -// -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . -func (p *parser) parseConstDecl() { - p.expectKeyword("const") - pkg, name := p.parseExportedName() - - var typ0 types.Type - if p.tok != '=' { - // constant types are never structured - no need for parent type - typ0 = p.parseType(nil) - } - - p.expect('=') - var typ types.Type - var val constant.Value - switch p.tok { - case scanner.Ident: - // bool_lit - if p.lit != "true" && p.lit != "false" { - p.error("expected true or false") - } - typ = types.Typ[types.UntypedBool] - val = constant.MakeBool(p.lit == "true") - p.next() - - case '-', scanner.Int: - // int_lit - typ, val = p.parseNumber() - - case '(': - // complex_lit or rune_lit - p.next() - if p.tok == scanner.Char { - p.next() - p.expect('+') - typ = types.Typ[types.UntypedRune] - _, val = p.parseNumber() - p.expect(')') - break - } - _, re := p.parseNumber() - p.expect('+') - _, im := p.parseNumber() - p.expectKeyword("i") - p.expect(')') - typ = types.Typ[types.UntypedComplex] - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - case scanner.Char: - // rune_lit - typ = types.Typ[types.UntypedRune] - val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) - p.next() - - case scanner.String: - // string_lit - typ = types.Typ[types.UntypedString] - val = constant.MakeFromLiteral(p.lit, token.STRING, 0) - p.next() - - default: - p.errorf("expected literal got %s", scanner.TokenString(p.tok)) - } - - if typ0 == nil { - typ0 = typ - } - - pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) -} - -// parseTypeDecl parses a TypeDecl: -// -// TypeDecl = "type" ExportedName Type . -func (p *parser) parseTypeDecl() { - p.expectKeyword("type") - pkg, name := p.parseExportedName() - obj := declTypeName(pkg, name) - - // The type object may have been imported before and thus already - // have a type associated with it. We still need to parse the type - // structure, but throw it away if the object already has a type. - // This ensures that all imports refer to the same type object for - // a given type declaration. - typ := p.parseType(pkg) - - if name := obj.Type().(*types.Named); name.Underlying() == nil { - name.SetUnderlying(typ) - } -} - -// parseVarDecl parses a VarDecl: -// -// VarDecl = "var" ExportedName Type . -func (p *parser) parseVarDecl() { - p.expectKeyword("var") - pkg, name := p.parseExportedName() - typ := p.parseType(pkg) - pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) -} - -// parseFunc parses a Func: -// -// Func = Signature [ Body ] . -// Body = "{" ... "}" . -func (p *parser) parseFunc(recv *types.Var) *types.Signature { - sig := p.parseSignature(recv) - if p.tok == '{' { - p.next() - for i := 1; i > 0; p.next() { - switch p.tok { - case '{': - i++ - case '}': - i-- - } - } - } - return sig -} - -// parseMethodDecl parses a MethodDecl: -// -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . -func (p *parser) parseMethodDecl() { - // "func" already consumed - p.expect('(') - recv, _ := p.parseParameter() // receiver - p.expect(')') - - // determine receiver base type object - base := deref(recv.Type()).(*types.Named) - - // parse method name, signature, and possibly inlined body - _, name := p.parseName(nil, false) - sig := p.parseFunc(recv) - - // methods always belong to the same package as the base type object - pkg := base.Obj().Pkg() - - // add method to type unless type was imported before - // and method exists already - // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. - base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) -} - -// parseFuncDecl parses a FuncDecl: -// -// FuncDecl = "func" ExportedName Func . -func (p *parser) parseFuncDecl() { - // "func" already consumed - pkg, name := p.parseExportedName() - typ := p.parseFunc(nil) - pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) -} - -// parseDecl parses a Decl: -// -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . -func (p *parser) parseDecl() { - if p.tok == scanner.Ident { - switch p.lit { - case "import": - p.parseImportDecl() - case "const": - p.parseConstDecl() - case "type": - p.parseTypeDecl() - case "var": - p.parseVarDecl() - case "func": - p.next() // look ahead - if p.tok == '(' { - p.parseMethodDecl() - } else { - p.parseFuncDecl() - } - } - } - p.expect('\n') -} - -// ---------------------------------------------------------------------------- -// Export - -// parseExport parses an Export: -// -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . -func (p *parser) parseExport() *types.Package { - p.expectKeyword("package") - name := p.parsePackageName() - if p.tok == scanner.Ident && p.lit == "safe" { - // package was compiled with -u option - ignore - p.next() - } - p.expect('\n') - - pkg := p.getPkg(p.id, name) - - for p.tok != '$' && p.tok != scanner.EOF { - p.parseDecl() - } - - if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { - // don't call next()/expect() since reading past the - // export data may cause scanner errors (e.g. NUL chars) - p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) - } - - if n := p.scanner.ErrorCount; n != 0 { - p.errorf("expected no scanner errors, got %d", n) - } - - // Record all locally referenced packages as imports. - var imports []*types.Package - for id, pkg2 := range p.localPkgs { - if pkg2.Name() == "" { - p.errorf("%s package has no name", id) - } - if id == p.id { - continue // avoid self-edge - } - imports = append(imports, pkg2) - } - sort.Sort(byPath(imports)) - pkg.SetImports(imports) - - // package was imported completely and without errors - pkg.MarkComplete() - - return pkg -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/go/internal/gcimporter/gcimporter_test.go b/go/internal/gcimporter/gcimporter_test.go deleted file mode 100644 index 4e992af76b3..00000000000 --- a/go/internal/gcimporter/gcimporter_test.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go, -// adjusted to make it build with code from (std lib) internal/testenv copied. - -package gcimporter - -import ( - "bytes" - "fmt" - "go/build" - "go/constant" - "go/types" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "golang.org/x/tools/internal/testenv" -) - -func TestMain(m *testing.M) { - testenv.ExitIfSmallMachine() - os.Exit(m.Run()) -} - -// ---------------------------------------------------------------------------- - -func needsCompiler(t *testing.T, compiler string) { - if runtime.Compiler == compiler { - return - } - switch compiler { - case "gc": - t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) - } -} - -// compile runs the compiler on filename, with dirname as the working directory, -// and writes the output file to outdirname. -func compile(t *testing.T, dirname, filename, outdirname string) string { - testenv.NeedsGoBuild(t) - - // filename must end with ".go" - if !strings.HasSuffix(filename, ".go") { - t.Fatalf("filename doesn't end in .go: %s", filename) - } - basename := filepath.Base(filename) - outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o") - cmd := exec.Command("go", "tool", "compile", "-p=p", "-o", outname, filename) - cmd.Dir = dirname - out, err := cmd.CombinedOutput() - if err != nil { - t.Logf("%s", out) - t.Fatalf("go tool compile %s failed: %s", filename, err) - } - return outname -} - -func testPath(t *testing.T, path, srcDir string) *types.Package { - t0 := time.Now() - pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) - if err != nil { - t.Errorf("testPath(%s): %s", path, err) - return nil - } - t.Logf("testPath(%s): %v", path, time.Since(t0)) - return pkg -} - -const maxTime = 30 * time.Second - -func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) { - dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir) - list, err := ioutil.ReadDir(dirname) - if err != nil { - t.Fatalf("testDir(%s): %s", dirname, err) - } - for _, f := range list { - if time.Now().After(endTime) { - t.Log("testing time used up") - return - } - switch { - case !f.IsDir(): - // try extensions - for _, ext := range pkgExts { - if strings.HasSuffix(f.Name(), ext) { - name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension - if testPath(t, filepath.Join(dir, name), dir) != nil { - nimports++ - } - } - } - case f.IsDir(): - nimports += testDir(t, filepath.Join(dir, f.Name()), endTime) - } - } - return -} - -func mktmpdir(t *testing.T) string { - tmpdir, err := ioutil.TempDir("", "gcimporter_test") - if err != nil { - t.Fatal("mktmpdir:", err) - } - if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil { - os.RemoveAll(tmpdir) - t.Fatal("mktmpdir:", err) - } - return tmpdir -} - -const testfile = "exports.go" - -func TestImportTestdata(t *testing.T) { - needsCompiler(t, "gc") - - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - - compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata")) - - // filename should end with ".go" - filename := testfile[:len(testfile)-3] - if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil { - // The package's Imports list must include all packages - // explicitly imported by testfile, plus all packages - // referenced indirectly via exported objects in testfile. - // With the textual export format (when run against Go1.6), - // the list may also include additional packages that are - // not strictly required for import processing alone (they - // are exported to err "on the safe side"). - // For now, we just test the presence of a few packages - // that we know are there for sure. - got := fmt.Sprint(pkg.Imports()) - for _, want := range []string{"go/ast", "go/token"} { - if !strings.Contains(got, want) { - t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want) - } - } - } -} - -func TestVersionHandling(t *testing.T) { - if debug { - t.Skip("TestVersionHandling panics in debug mode") - } - - // This package only handles gc export data. - needsCompiler(t, "gc") - - const dir = "./testdata/versions" - list, err := ioutil.ReadDir(dir) - if err != nil { - t.Fatal(err) - } - - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - corruptdir := filepath.Join(tmpdir, "testdata", "versions") - if err := os.Mkdir(corruptdir, 0700); err != nil { - t.Fatal(err) - } - - for _, f := range list { - name := f.Name() - if !strings.HasSuffix(name, ".a") { - continue // not a package file - } - if strings.Contains(name, "corrupted") { - continue // don't process a leftover corrupted file - } - pkgpath := "./" + name[:len(name)-2] - - if testing.Verbose() { - t.Logf("importing %s", name) - } - - // test that export data can be imported - _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil) - if err != nil { - // ok to fail if it fails with a newer version error for select files - if strings.Contains(err.Error(), "newer version") { - switch name { - case "test_go1.11_999b.a", "test_go1.11_999i.a": - continue - } - // fall through - } - t.Errorf("import %q failed: %v", pkgpath, err) - continue - } - - // create file with corrupted export data - // 1) read file - data, err := ioutil.ReadFile(filepath.Join(dir, name)) - if err != nil { - t.Fatal(err) - } - // 2) find export data - i := bytes.Index(data, []byte("\n$$B\n")) + 5 - j := bytes.Index(data[i:], []byte("\n$$\n")) + i - if i < 0 || j < 0 || i > j { - t.Fatalf("export data section not found (i = %d, j = %d)", i, j) - } - // 3) corrupt the data (increment every 7th byte) - for k := j - 13; k >= i; k -= 7 { - data[k]++ - } - // 4) write the file - pkgpath += "_corrupted" - filename := filepath.Join(corruptdir, pkgpath) + ".a" - ioutil.WriteFile(filename, data, 0666) - - // test that importing the corrupted file results in an error - _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil) - if err == nil { - t.Errorf("import corrupted %q succeeded", pkgpath) - } else if msg := err.Error(); !strings.Contains(msg, "version skew") { - t.Errorf("import %q error incorrect (%s)", pkgpath, msg) - } - } -} - -func TestImportStdLib(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - dt := maxTime - if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { - dt = 10 * time.Millisecond - } - nimports := testDir(t, "", time.Now().Add(dt)) // installed packages - t.Logf("tested %d imports", nimports) -} - -var importedObjectTests = []struct { - name string - want string -}{ - // non-interfaces - {"crypto.Hash", "type Hash uint"}, - {"go/ast.ObjKind", "type ObjKind int"}, - {"go/types.Qualifier", "type Qualifier func(*Package) string"}, - {"go/types.Comparable", "func Comparable(T Type) bool"}, - {"math.Pi", "const Pi untyped float"}, - {"math.Sin", "func Sin(x float64) float64"}, - {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"}, - {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"}, - - // interfaces - {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"}, - {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"}, - {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"}, - {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"}, - {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"}, - {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"}, - {"go/types.Type", "type Type interface{String() string; Underlying() Type}"}, -} - -// TODO(rsc): Delete this init func after x/tools no longer needs to test successfully with Go 1.17. -func init() { - if build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] <= "go1.17" { - for i := range importedObjectTests { - if importedObjectTests[i].name == "context.Context" { - // Expand any to interface{}. - importedObjectTests[i].want = "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}" - } - } - } -} - -func TestImportedTypes(t *testing.T) { - testenv.NeedsGo1Point(t, 11) - // This package only handles gc export data. - needsCompiler(t, "gc") - - for _, test := range importedObjectTests { - obj := importObject(t, test.name) - if obj == nil { - continue // error reported elsewhere - } - got := types.ObjectString(obj, types.RelativeTo(obj.Pkg())) - - // TODO(rsc): Delete this block once go.dev/cl/368254 lands. - if got != test.want && test.want == strings.ReplaceAll(got, "interface{}", "any") { - got = test.want - } - - if got != test.want { - t.Errorf("%s: got %q; want %q", test.name, got, test.want) - } - - if named, _ := obj.Type().(*types.Named); named != nil { - verifyInterfaceMethodRecvs(t, named, 0) - } - } -} - -func TestImportedConsts(t *testing.T) { - testenv.NeedsGo1Point(t, 11) - tests := []struct { - name string - want constant.Kind - }{ - {"math.Pi", constant.Float}, - {"math.MaxFloat64", constant.Float}, - {"math.MaxInt64", constant.Int}, - } - - for _, test := range tests { - obj := importObject(t, test.name) - if got := obj.(*types.Const).Val().Kind(); got != test.want { - t.Errorf("%s: imported as constant.Kind(%v), want constant.Kind(%v)", test.name, got, test.want) - } - } -} - -// importObject imports the object specified by a name of the form -// ., e.g. go/types.Type. -// -// If any errors occur they are reported via t and the resulting object will -// be nil. -func importObject(t *testing.T, name string) types.Object { - s := strings.Split(name, ".") - if len(s) != 2 { - t.Fatal("inconsistent test data") - } - importPath := s[0] - objName := s[1] - - pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil) - if err != nil { - t.Error(err) - return nil - } - - obj := pkg.Scope().Lookup(objName) - if obj == nil { - t.Errorf("%s: object not found", name) - return nil - } - return obj -} - -// verifyInterfaceMethodRecvs verifies that method receiver types -// are named if the methods belong to a named interface type. -func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) { - // avoid endless recursion in case of an embedding bug that lead to a cycle - if level > 10 { - t.Errorf("%s: embeds itself", named) - return - } - - iface, _ := named.Underlying().(*types.Interface) - if iface == nil { - return // not an interface - } - - // check explicitly declared methods - for i := 0; i < iface.NumExplicitMethods(); i++ { - m := iface.ExplicitMethod(i) - recv := m.Type().(*types.Signature).Recv() - if recv == nil { - t.Errorf("%s: missing receiver type", m) - continue - } - if recv.Type() != named { - t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named) - } - } - - // check embedded interfaces (if they are named, too) - for i := 0; i < iface.NumEmbeddeds(); i++ { - // embedding of interfaces cannot have cycles; recursion will terminate - if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil { - verifyInterfaceMethodRecvs(t, etype, level+1) - } - } -} - -func TestIssue5815(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - pkg := importPkg(t, "strings", ".") - - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - if obj.Pkg() == nil { - t.Errorf("no pkg for %s", obj) - } - if tname, _ := obj.(*types.TypeName); tname != nil { - named := tname.Type().(*types.Named) - for i := 0; i < named.NumMethods(); i++ { - m := named.Method(i) - if m.Pkg() == nil { - t.Errorf("no pkg for %s", m) - } - } - } - } -} - -// Smoke test to ensure that imported methods get the correct package. -func TestCorrectMethodPackage(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - imports := make(map[string]*types.Package) - _, err := Import(imports, "net/http", ".", nil) - if err != nil { - t.Fatal(err) - } - - mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type() - mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex - sel := mset.Lookup(nil, "Lock") - lock := sel.Obj().(*types.Func) - if got, want := lock.Pkg().Path(), "sync"; got != want { - t.Errorf("got package path %q; want %q", got, want) - } -} - -func TestIssue13566(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - testoutdir := filepath.Join(tmpdir, "testdata") - - // b.go needs to be compiled from the output directory so that the compiler can - // find the compiled package a. We pass the full path to compile() so that we - // don't have to copy the file to that directory. - bpath, err := filepath.Abs(filepath.Join("testdata", "b.go")) - if err != nil { - t.Fatal(err) - } - compile(t, "testdata", "a.go", testoutdir) - compile(t, testoutdir, bpath, testoutdir) - - // import must succeed (test for issue at hand) - pkg := importPkg(t, "./testdata/b", tmpdir) - - // make sure all indirectly imported packages have names - for _, imp := range pkg.Imports() { - if imp.Name() == "" { - t.Errorf("no name for %s package", imp.Path()) - } - } -} - -func TestIssue13898(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - // import go/internal/gcimporter which imports go/types partially - imports := make(map[string]*types.Package) - _, err := Import(imports, "go/internal/gcimporter", ".", nil) - if err != nil { - t.Fatal(err) - } - - // look for go/types package - var goTypesPkg *types.Package - for path, pkg := range imports { - if path == "go/types" { - goTypesPkg = pkg - break - } - } - if goTypesPkg == nil { - t.Fatal("go/types not found") - } - - // look for go/types.Object type - obj := lookupObj(t, goTypesPkg.Scope(), "Object") - typ, ok := obj.Type().(*types.Named) - if !ok { - t.Fatalf("go/types.Object type is %v; wanted named type", typ) - } - - // lookup go/types.Object.Pkg method - m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg") - if m == nil { - t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect) - } - - // the method must belong to go/types - if m.Pkg().Path() != "go/types" { - t.Fatalf("found %v; want go/types", m.Pkg()) - } -} - -func TestIssue15517(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - - compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata")) - - // Multiple imports of p must succeed without redeclaration errors. - // We use an import path that's not cleaned up so that the eventual - // file path for the package is different from the package path; this - // will expose the error if it is present. - // - // (Issue: Both the textual and the binary importer used the file path - // of the package to be imported as key into the shared packages map. - // However, the binary importer then used the package path to identify - // the imported package to mark it as complete; effectively marking the - // wrong package as complete. By using an "unclean" package path, the - // file and package path are different, exposing the problem if present. - // The same issue occurs with vendoring.) - imports := make(map[string]*types.Package) - for i := 0; i < 3; i++ { - if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestIssue15920(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - compileAndImportPkg(t, "issue15920") -} - -func TestIssue20046(t *testing.T) { - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - // "./issue20046".V.M must exist - pkg := compileAndImportPkg(t, "issue20046") - obj := lookupObj(t, pkg.Scope(), "V") - if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil { - t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect) - } -} - -func TestIssue25301(t *testing.T) { - testenv.NeedsGo1Point(t, 11) - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - compileAndImportPkg(t, "issue25301") -} - -func TestIssue51836(t *testing.T) { - testenv.NeedsGo1Point(t, 18) // requires generics - - // This package only handles gc export data. - needsCompiler(t, "gc") - - // On windows, we have to set the -D option for the compiler to avoid having a drive - // letter and an illegal ':' in the import path - just skip it (see also issue #3483). - if runtime.GOOS == "windows" { - t.Skip("avoid dealing with relative paths/drive letters on windows") - } - - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - testoutdir := filepath.Join(tmpdir, "testdata") - - dir := filepath.Join("testdata", "issue51836") - // Following the pattern of TestIssue13898, aa.go needs to be compiled from - // the output directory. We pass the full path to compile() so that we don't - // have to copy the file to that directory. - bpath, err := filepath.Abs(filepath.Join(dir, "aa.go")) - if err != nil { - t.Fatal(err) - } - compile(t, dir, "a.go", testoutdir) - compile(t, testoutdir, bpath, testoutdir) - - // import must succeed (test for issue at hand) - _ = importPkg(t, "./testdata/aa", tmpdir) -} - -func importPkg(t *testing.T, path, srcDir string) *types.Package { - pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) - if err != nil { - t.Fatal(err) - } - return pkg -} - -func compileAndImportPkg(t *testing.T, name string) *types.Package { - tmpdir := mktmpdir(t) - defer os.RemoveAll(tmpdir) - compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata")) - return importPkg(t, "./testdata/"+name, tmpdir) -} - -func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object { - if obj := scope.Lookup(name); obj != nil { - return obj - } - t.Fatalf("%s not found", name) - return nil -} diff --git a/go/internal/gcimporter/support_go118.go b/go/internal/gcimporter/support_go118.go deleted file mode 100644 index a993843230c..00000000000 --- a/go/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} diff --git a/go/packages/golist.go b/go/packages/golist.go index 50533995a65..6bb7168d2e3 100644 --- a/go/packages/golist.go +++ b/go/packages/golist.go @@ -60,6 +60,7 @@ func (r *responseDeduper) addAll(dr *driverResponse) { for _, root := range dr.Roots { r.addRoot(root) } + r.dr.GoVersion = dr.GoVersion } func (r *responseDeduper) addPackage(p *Package) { @@ -302,11 +303,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries } dirResponse, err := state.createDriverResponse(pattern) - // If there was an error loading the package, or the package is returned - // with errors, try to load the file as an ad-hoc package. + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. // Usually the error will appear in a returned package, but may not if we're // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && len(dirResponse.Packages[0].Errors) == 1 { var queryErr error if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { @@ -453,11 +455,14 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse if err != nil { return nil, err } + seen := make(map[string]*jsonPackage) pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - var response driverResponse + response := &driverResponse{ + GoVersion: goVersion, + } for dec := json.NewDecoder(buf); dec.More(); { p := new(jsonPackage) if err := dec.Decode(p); err != nil { @@ -599,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Work around https://golang.org/issue/28749: // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. - // Filter out any elements of CompiledGoFiles that are also in OtherFiles. - // We have to keep this workaround in place until go1.12 is a distant memory. - if len(pkg.OtherFiles) > 0 { - other := make(map[string]bool, len(pkg.OtherFiles)) - for _, f := range pkg.OtherFiles { - other[f] = true - } - + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { out := pkg.CompiledGoFiles[:0] for _, f := range pkg.CompiledGoFiles { - if other[f] { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file continue } out = append(out, f) @@ -729,7 +729,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) - return &response, nil + return response, nil } func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { @@ -755,6 +755,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath } +// getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) diff --git a/go/packages/overlay_test.go b/go/packages/overlay_test.go index f2164c274e2..4318739eb79 100644 --- a/go/packages/overlay_test.go +++ b/go/packages/overlay_test.go @@ -109,8 +109,6 @@ func TestOverlayChangesTestPackageName(t *testing.T) { testAllOrModulesParallel(t, testOverlayChangesTestPackageName) } func testOverlayChangesTestPackageName(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 16) - exported := packagestest.Export(t, exporter, []packagestest.Module{{ Name: "fake", Files: map[string]interface{}{ @@ -717,8 +715,6 @@ func TestInvalidFilesBeforeOverlay(t *testing.T) { } func testInvalidFilesBeforeOverlay(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 15) - exported := packagestest.Export(t, exporter, []packagestest.Module{ { Name: "golang.org/fake", @@ -756,8 +752,6 @@ func TestInvalidFilesBeforeOverlayContains(t *testing.T) { testAllOrModulesParallel(t, testInvalidFilesBeforeOverlayContains) } func testInvalidFilesBeforeOverlayContains(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 15) - exported := packagestest.Export(t, exporter, []packagestest.Module{ { Name: "golang.org/fake", @@ -1046,6 +1040,7 @@ func Hi() { // This does not use go/packagestest because it needs to write a replace // directive with an absolute path in one of the module's go.mod files. func TestOverlaysInReplace(t *testing.T) { + testenv.NeedsGoPackages(t) t.Parallel() // Create module b.com in a temporary directory. Do not add any Go files diff --git a/go/packages/packages.go b/go/packages/packages.go index a93dc6add4d..0f1505b808a 100644 --- a/go/packages/packages.go +++ b/go/packages/packages.go @@ -15,10 +15,12 @@ import ( "go/scanner" "go/token" "go/types" + "io" "io/ioutil" "log" "os" "path/filepath" + "runtime" "strings" "sync" "time" @@ -233,6 +235,11 @@ type driverResponse struct { // Imports will be connected and then type and syntax information added in a // later pass (see refine). Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int } // Load loads and returns the Go packages named by the given patterns. @@ -256,7 +263,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { return nil, err } l.sizes = response.Sizes - return l.refine(response.Roots, response.Packages...) + return l.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. @@ -297,6 +304,9 @@ type Package struct { // of the package, or while parsing or type-checking its files. Errors []Error + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + // GoFiles lists the absolute file paths of the package's Go source files. GoFiles []string @@ -532,6 +542,7 @@ type loaderPackage struct { needsrc bool // load from source (Mode >= LoadTypes) needtypes bool // type information is either requested or depended on initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. @@ -618,7 +629,8 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type and // and syntax information as requested by the LoadMode. -func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { +func (ld *loader) refine(response *driverResponse) ([]*Package, error) { + roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { rootMap[root] = i @@ -626,7 +638,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs = make(map[string]*loaderPackage) // first pass, fixup and build the map and roots var initial = make([]*loaderPackage, len(roots)) - for _, pkg := range list { + for _, pkg := range response.Packages { rootIndex := -1 if i, found := rootMap[pkg.ID]; found { rootIndex = i @@ -648,6 +660,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { Package: pkg, needtypes: needtypes, needsrc: needsrc, + goVersion: response.GoVersion, } ld.pkgs[lpkg.ID] = lpkg if rootIndex >= 0 { @@ -865,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // never has to create a types.Package for an indirect dependency, // which would then require that such created packages be explicitly // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) // The Diamond test exercises this case. if !lpkg.needtypes && !lpkg.needsrc { return } if !lpkg.needsrc { - ld.loadFromExportData(lpkg) + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } return // not a source package, don't get syntax trees } @@ -902,6 +922,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { case types.Error: // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) errs = append(errs, Error{ Pos: err.Fset.Position(err.Pos).String(), Msg: err.Msg, @@ -923,11 +944,41 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Errors = append(lpkg.Errors, errs...) } + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { // The config requested loading sources and types, but sources are missing. // Add an error to the package and fall back to loading from export data. appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) - ld.loadFromExportData(lpkg) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + return // can't get syntax trees for this package } @@ -981,7 +1032,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { tc := &types.Config{ Importer: importer, - // Type-check bodies of functions only in non-initial packages. + // Type-check bodies of functions only in initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, @@ -1151,9 +1202,10 @@ func sameFile(x, y string) bool { return false } -// loadFromExportData returns type information for the specified +// loadFromExportData ensures that type information is present for the specified // package, loading it from an export data file on the first request. -func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { +// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { if lpkg.PkgPath == "" { log.Fatalf("internal error: Package %s has no PkgPath", lpkg) } @@ -1164,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // must be sequential. (Finer-grained locking would require // changes to the gcexportdata API.) // - // The exportMu lock guards the Package.Pkg field and the - // types.Package it points to, for each Package in the graph. + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. // // Not all accesses to Package.Pkg need to be protected by exportMu: // graph ordering ensures that direct dependencies of source @@ -1174,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error defer ld.exportMu.Unlock() if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { - return tpkg, nil // cache hit + return nil // cache hit } lpkg.IllTyped = true // fail safe if lpkg.ExportFile == "" { // Errors while building export data will have been printed to stderr. - return nil, fmt.Errorf("no export data file") + return fmt.Errorf("no export data file") } f, err := os.Open(lpkg.ExportFile) if err != nil { - return nil, err + return err } defer f.Close() @@ -1197,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // queries.) r, err := gcexportdata.NewReader(f) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } // Build the view. @@ -1241,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // (May modify incomplete packages in view but not create new ones.) tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } if _, ok := view["go.shape"]; ok { // Account for the pseudopackage "go.shape" that gets @@ -1254,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error lpkg.Types = tpkg lpkg.IllTyped = false - - return tpkg, nil + return nil } // impliedLoadMode returns loadMode with its dependencies. @@ -1271,3 +1322,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go index 796edb6b7b4..0da72851c76 100644 --- a/go/packages/packages_test.go +++ b/go/packages/packages_test.go @@ -2471,10 +2471,55 @@ func testIssue37098(t *testing.T, exporter packagestest.Exporter) { } } +// TestIssue56632 checks that CompiledGoFiles does not contain non-go files regardless of +// whether the NeedFiles mode bit is set. +func TestIssue56632(t *testing.T) { + t.Parallel() + testenv.NeedsGoBuild(t) + testenv.NeedsTool(t, "cgo") + + exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{ + Name: "golang.org/issue56632", + Files: map[string]interface{}{ + "a/a.go": `package a`, + "a/a_cgo.go": `package a + +import "C"`, + "a/a.s": ``, + "a/a.c": ``, + }}}) + defer exported.Cleanup() + + modes := []packages.LoadMode{packages.NeedCompiledGoFiles, packages.NeedCompiledGoFiles | packages.NeedFiles, packages.NeedImports | packages.NeedCompiledGoFiles, packages.NeedImports | packages.NeedFiles | packages.NeedCompiledGoFiles} + for _, mode := range modes { + exported.Config.Mode = mode + + initial, err := packages.Load(exported.Config, "golang.org/issue56632/a") + if err != nil { + t.Fatalf("failed to load package: %v", err) + } + + if len(initial) != 1 { + t.Errorf("expected 3 packages, got %d", len(initial)) + } + + p := initial[0] + + if len(p.Errors) != 0 { + t.Errorf("expected no errors, got %v", p.Errors) + } + + for _, f := range p.CompiledGoFiles { + if strings.HasSuffix(f, ".s") || strings.HasSuffix(f, ".c") { + t.Errorf("expected no non-Go CompiledGoFiles, got file %q in CompiledGoFiles", f) + } + } + } +} + // TestInvalidFilesInXTest checks the fix for golang/go#37971 in Go 1.15. func TestInvalidFilesInXTest(t *testing.T) { testAllOrModulesParallel(t, testInvalidFilesInXTest) } func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 15) exported := packagestest.Export(t, exporter, []packagestest.Module{ { Name: "golang.org/fake", @@ -2501,7 +2546,6 @@ func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) { func TestTypecheckCgo(t *testing.T) { testAllOrModulesParallel(t, testTypecheckCgo) } func testTypecheckCgo(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 15) testenv.NeedsTool(t, "cgo") const cgo = `package cgo @@ -2673,8 +2717,6 @@ func TestInvalidPackageName(t *testing.T) { } func testInvalidPackageName(t *testing.T, exporter packagestest.Exporter) { - testenv.NeedsGo1Point(t, 15) - exported := packagestest.Export(t, exporter, []packagestest.Module{{ Name: "golang.org/fake", Files: map[string]interface{}{ @@ -2709,6 +2751,31 @@ func TestEmptyEnvironment(t *testing.T) { } } +func TestPackageLoadSingleFile(t *testing.T) { + tmp, err := ioutil.TempDir("", "a") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + filename := filepath.Join(tmp, "a.go") + + if err := ioutil.WriteFile(filename, []byte(`package main; func main() { println("hello world") }`), 0775); err != nil { + t.Fatal(err) + } + + pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax, Dir: tmp}, "file="+filename) + if err != nil { + t.Fatalf("could not load package: %v", err) + } + if len(pkgs) != 1 { + t.Fatalf("expected one package to be loaded, got %d", len(pkgs)) + } + if len(pkgs[0].CompiledGoFiles) != 1 || pkgs[0].CompiledGoFiles[0] != filename { + t.Fatalf("expected one compiled go file (%q), got %v", filename, pkgs[0].CompiledGoFiles) + } +} + func errorMessages(errors []packages.Error) []string { var msgs []string for _, err := range errors { diff --git a/go/packages/packagestest/expect.go b/go/packages/packagestest/expect.go index 430258681f5..92c20a64a8d 100644 --- a/go/packages/packagestest/expect.go +++ b/go/packages/packagestest/expect.go @@ -16,7 +16,6 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/span" ) const ( @@ -124,14 +123,31 @@ func (e *Exported) Expect(methods map[string]interface{}) error { return nil } -// Range is a type alias for span.Range for backwards compatibility, prefer -// using span.Range directly. -type Range = span.Range +// A Range represents an interval within a source file in go/token notation. +type Range struct { + TokFile *token.File // non-nil + Start, End token.Pos // both valid and within range of TokFile +} + +// A rangeSetter abstracts a variable that can be set from a Range value. +// +// The parameter conversion machinery will automatically construct a +// variable of type T and call the SetRange method on its address if +// *T implements rangeSetter. This allows alternative notations of +// source ranges to interoperate transparently with this package. +// +// This type intentionally does not mention Range itself, to avoid a +// dependency from the application's range type upon this package. +// +// Currently this is a secret back door for use only by gopls. +type rangeSetter interface { + SetRange(file *token.File, start, end token.Pos) +} // Mark adds a new marker to the known set. func (e *Exported) Mark(name string, r Range) { if e.markers == nil { - e.markers = make(map[string]span.Range) + e.markers = make(map[string]Range) } e.markers[name] = r } @@ -221,22 +237,22 @@ func (e *Exported) getMarkers() error { return nil } // set markers early so that we don't call getMarkers again from Expect - e.markers = make(map[string]span.Range) + e.markers = make(map[string]Range) return e.Expect(map[string]interface{}{ markMethod: e.Mark, }) } var ( - noteType = reflect.TypeOf((*expect.Note)(nil)) - identifierType = reflect.TypeOf(expect.Identifier("")) - posType = reflect.TypeOf(token.Pos(0)) - positionType = reflect.TypeOf(token.Position{}) - rangeType = reflect.TypeOf(span.Range{}) - spanType = reflect.TypeOf(span.Span{}) - fsetType = reflect.TypeOf((*token.FileSet)(nil)) - regexType = reflect.TypeOf((*regexp.Regexp)(nil)) - exportedType = reflect.TypeOf((*Exported)(nil)) + noteType = reflect.TypeOf((*expect.Note)(nil)) + identifierType = reflect.TypeOf(expect.Identifier("")) + posType = reflect.TypeOf(token.Pos(0)) + positionType = reflect.TypeOf(token.Position{}) + rangeType = reflect.TypeOf(Range{}) + rangeSetterType = reflect.TypeOf((*rangeSetter)(nil)).Elem() + fsetType = reflect.TypeOf((*token.FileSet)(nil)) + regexType = reflect.TypeOf((*regexp.Regexp)(nil)) + exportedType = reflect.TypeOf((*Exported)(nil)) ) // converter converts from a marker's argument parsed from the comment to @@ -295,17 +311,16 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) { } return reflect.ValueOf(r), remains, nil }, nil - case pt == spanType: + case reflect.PtrTo(pt).AssignableTo(rangeSetterType): + // (*pt).SetRange method exists: call it. return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) { r, remains, err := e.rangeConverter(n, args) if err != nil { return reflect.Value{}, nil, err } - spn, err := r.Span() - if err != nil { - return reflect.Value{}, nil, err - } - return reflect.ValueOf(spn), remains, nil + v := reflect.New(pt) + v.Interface().(rangeSetter).SetRange(r.TokFile, r.Start, r.End) + return v.Elem(), remains, nil }, nil case pt == identifierType: return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) { @@ -408,9 +423,10 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) { } } -func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) { +func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (Range, []interface{}, error) { + tokFile := e.ExpectFileSet.File(n.Pos) if len(args) < 1 { - return span.Range{}, nil, fmt.Errorf("missing argument") + return Range{}, nil, fmt.Errorf("missing argument") } arg := args[0] args = args[1:] @@ -419,37 +435,62 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang // handle the special identifiers switch arg { case eofIdentifier: - // end of file identifier, look up the current file - f := e.ExpectFileSet.File(n.Pos) - eof := f.Pos(f.Size()) - return span.NewRange(e.ExpectFileSet, eof, token.NoPos), args, nil + // end of file identifier + eof := tokFile.Pos(tokFile.Size()) + return newRange(tokFile, eof, eof), args, nil default: // look up an marker by name mark, ok := e.markers[string(arg)] if !ok { - return span.Range{}, nil, fmt.Errorf("cannot find marker %v", arg) + return Range{}, nil, fmt.Errorf("cannot find marker %v", arg) } return mark, args, nil } case string: start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg) if err != nil { - return span.Range{}, nil, err + return Range{}, nil, err } - if start == token.NoPos { - return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) + if !start.IsValid() { + return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(e.ExpectFileSet, start, end), args, nil + return newRange(tokFile, start, end), args, nil case *regexp.Regexp: start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg) if err != nil { - return span.Range{}, nil, err + return Range{}, nil, err } - if start == token.NoPos { - return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) + if !start.IsValid() { + return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(e.ExpectFileSet, start, end), args, nil + return newRange(tokFile, start, end), args, nil default: - return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg) + return Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg) + } +} + +// newRange creates a new Range from a token.File and two valid positions within it. +func newRange(file *token.File, start, end token.Pos) Range { + fileBase := file.Base() + fileEnd := fileBase + file.Size() + if !start.IsValid() { + panic("invalid start token.Pos") + } + if !end.IsValid() { + panic("invalid end token.Pos") + } + if int(start) < fileBase || int(start) > fileEnd { + panic(fmt.Sprintf("invalid start: %d not in [%d, %d]", start, fileBase, fileEnd)) + } + if int(end) < fileBase || int(end) > fileEnd { + panic(fmt.Sprintf("invalid end: %d not in [%d, %d]", end, fileBase, fileEnd)) + } + if start > end { + panic("invalid start: greater than end") + } + return Range{ + TokFile: file, + Start: start, + End: end, } } diff --git a/go/packages/packagestest/expect_test.go b/go/packages/packagestest/expect_test.go index 2587f580b06..46d96d61fb9 100644 --- a/go/packages/packagestest/expect_test.go +++ b/go/packages/packagestest/expect_test.go @@ -10,7 +10,6 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages/packagestest" - "golang.org/x/tools/internal/span" ) func TestExpect(t *testing.T) { @@ -43,7 +42,7 @@ func TestExpect(t *testing.T) { } }, "directNote": func(n *expect.Note) {}, - "range": func(r span.Range) { + "range": func(r packagestest.Range) { if r.Start == token.NoPos || r.Start == 0 { t.Errorf("Range had no valid starting position") } diff --git a/go/packages/packagestest/export.go b/go/packages/packagestest/export.go index 894dcdd445d..b687a44fb4f 100644 --- a/go/packages/packagestest/export.go +++ b/go/packages/packagestest/export.go @@ -79,7 +79,6 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/testenv" ) @@ -129,7 +128,7 @@ type Exported struct { primary string // the first non GOROOT module that was exported written map[string]map[string]string // the full set of exported files notes []*expect.Note // The list of expectations extracted from go source files - markers map[string]span.Range // The set of markers extracted from go source files + markers map[string]Range // The set of markers extracted from go source files } // Exporter implementations are responsible for converting from the generic description of some diff --git a/go/packages/packagestest/modules_test.go b/go/packages/packagestest/modules_test.go index 6f627b1e5bd..de290ead94a 100644 --- a/go/packages/packagestest/modules_test.go +++ b/go/packages/packagestest/modules_test.go @@ -9,11 +9,9 @@ import ( "testing" "golang.org/x/tools/go/packages/packagestest" - "golang.org/x/tools/internal/testenv" ) func TestModulesExport(t *testing.T) { - testenv.NeedsGo1Point(t, 11) exported := packagestest.Export(t, packagestest.Modules, testdata) defer exported.Cleanup() // Check that the cfg contains all the right bits diff --git a/go/pointer/analysis.go b/go/pointer/analysis.go index 35ad8abdb12..e3c85ede4f7 100644 --- a/go/pointer/analysis.go +++ b/go/pointer/analysis.go @@ -16,6 +16,7 @@ import ( "runtime" "runtime/debug" "sort" + "strings" "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/ssa" @@ -377,12 +378,27 @@ func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) { fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee) } - // Warn about calls to non-intrinsic external functions. + // Warn about calls to functions that are handled unsoundly. // TODO(adonovan): de-dup these messages. - if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil { + fn := callee.fn + + // Warn about calls to non-intrinsic external functions. + if fn.Blocks == nil && a.findIntrinsic(fn) == nil { a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn) a.warnf(fn.Pos(), " (declared here)") } + + // Warn about calls to generic function bodies. + if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 { + a.warnf(site.pos(), "unsound call to generic function body: %s (build with ssa.InstantiateGenerics)", fn) + a.warnf(fn.Pos(), " (declared here)") + } + + // Warn about calls to instantiation wrappers of generics functions. + if fn.Origin() != nil && strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") { + a.warnf(site.pos(), "unsound call to instantiation wrapper of generic: %s (build with ssa.InstantiateGenerics)", fn) + a.warnf(fn.Pos(), " (declared here)") + } } // dumpSolution writes the PTS solution to the specified file. diff --git a/go/pointer/api.go b/go/pointer/api.go index 9a4cc0af4a2..8c9a8c7752b 100644 --- a/go/pointer/api.go +++ b/go/pointer/api.go @@ -28,7 +28,11 @@ type Config struct { // dependencies of any main package may still affect the // analysis result, because they contribute runtime types and // thus methods. + // // TODO(adonovan): investigate whether this is desirable. + // + // Calls to generic functions will be unsound unless packages + // are built using the ssa.InstantiateGenerics builder mode. Mains []*ssa.Package // Reflection determines whether to handle reflection @@ -93,7 +97,7 @@ func (c *Config) AddQuery(v ssa.Value) { c.Queries[v] = struct{}{} } -// AddQuery adds v to Config.IndirectQueries. +// AddIndirectQuery adds v to Config.IndirectQueries. // Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()). func (c *Config) AddIndirectQuery(v ssa.Value) { if c.IndirectQueries == nil { diff --git a/go/pointer/doc.go b/go/pointer/doc.go index d41346e699f..aca343b88e3 100644 --- a/go/pointer/doc.go +++ b/go/pointer/doc.go @@ -358,6 +358,14 @@ A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2}, type-unsafe combination (T1,V2). Treating the value and its concrete type as inseparable makes the analysis type-safe.) +Type parameters: + +Type parameters are not directly supported by the analysis. +Calls to generic functions will be left as if they had empty bodies. +Users of the package are expected to use the ssa.InstantiateGenerics +builder mode when building code that uses or depends on code +containing generics. + reflect.Value: A reflect.Value is modelled very similar to an interface{}, i.e. as diff --git a/go/pointer/gen.go b/go/pointer/gen.go index 09705948d9c..5e527f21ab2 100644 --- a/go/pointer/gen.go +++ b/go/pointer/gen.go @@ -14,9 +14,11 @@ import ( "fmt" "go/token" "go/types" + "strings" "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/typeparams" ) var ( @@ -204,7 +206,7 @@ func (a *analysis) makeRtype(T types.Type) nodeid { return id } -// rtypeValue returns the type of the *reflect.rtype-tagged object obj. +// rtypeTaggedValue returns the type of the *reflect.rtype-tagged object obj. func (a *analysis) rtypeTaggedValue(obj nodeid) types.Type { tDyn, t, _ := a.taggedValue(obj) if tDyn != a.reflectRtypePtr { @@ -978,7 +980,10 @@ func (a *analysis) genInstr(cgn *cgnode, instr ssa.Instruction) { a.sizeof(instr.Type())) case *ssa.Index: - a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type())) + _, isstring := typeparams.CoreType(instr.X.Type()).(*types.Basic) + if !isstring { + a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type())) + } case *ssa.Select: recv := a.valueOffsetNode(instr, 2) // instr : (index, recvOk, recv0, ... recv_n-1) @@ -1202,6 +1207,19 @@ func (a *analysis) genFunc(cgn *cgnode) { return } + if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 { + // Body of generic function. + // We'll warn about calls to such functions at the end. + return + } + + if strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") { + // instantiation wrapper of a generic function. + // These may contain type coercions which are not currently supported. + // We'll warn about calls to such functions at the end. + return + } + if a.log != nil { fmt.Fprintln(a.log, "; Creating nodes for local values") } diff --git a/go/pointer/pointer_race_test.go b/go/pointer/pointer_race_test.go new file mode 100644 index 00000000000..d3c9b475e25 --- /dev/null +++ b/go/pointer/pointer_race_test.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race +// +build race + +package pointer_test + +func init() { + raceEnabled = true +} diff --git a/go/pointer/pointer_test.go b/go/pointer/pointer_test.go index 47074f620f2..1fa54f6e8f7 100644 --- a/go/pointer/pointer_test.go +++ b/go/pointer/pointer_test.go @@ -66,6 +66,8 @@ var inputs = []string{ // "testdata/timer.go", // TODO(adonovan): fix broken assumptions about runtime timers } +var raceEnabled = false + // Expectation grammar: // // @calls f -> g @@ -238,9 +240,14 @@ func doOneInput(t *testing.T, input, fpath string) bool { // Find all calls to the built-in print(x). Analytically, // print is a no-op, but it's a convenient hook for testing // the PTS of an expression, so our tests use it. + // Exclude generic bodies as these should be dead code for pointer. + // Instance of generics are included. probes := make(map[*ssa.CallCommon]bool) for fn := range ssautil.AllFunctions(prog) { - // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if _Origin is exported. + if isGenericBody(fn) { + continue // skip generic bodies + } + // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if Origin is exported. if fn.Pkg == mainpkg || (fn.Pkg == nil && mainFiles[prog.Fset.File(fn.Pos())]) { for _, b := range fn.Blocks { for _, instr := range b.Instrs { @@ -609,10 +616,6 @@ func TestInput(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113") } - if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 { - t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113") - } - ok := true wd, err := os.Getwd() if err != nil { @@ -627,24 +630,44 @@ func TestInput(t *testing.T) { fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd) for _, filename := range inputs { - content, err := ioutil.ReadFile(filename) - if err != nil { - t.Errorf("couldn't read file '%s': %s", filename, err) - continue - } + filename := filename + t.Run(filename, func(t *testing.T) { + if filename == "testdata/a_test.go" { + // For some reason this particular file is way more expensive than the others. + if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 { + t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113") + } + if raceEnabled { + t.Skip("skipping memory-intensive test under race detector; https://golang.org/issue/14113") + } + } else { + t.Parallel() + } - fpath, err := filepath.Abs(filename) - if err != nil { - t.Errorf("couldn't get absolute path for '%s': %s", filename, err) - } + content, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("couldn't read file '%s': %s", filename, err) + } - if !doOneInput(t, string(content), fpath) { - ok = false - } + fpath, err := filepath.Abs(filename) + if err != nil { + t.Fatalf("couldn't get absolute path for '%s': %s", filename, err) + } + + if !doOneInput(t, string(content), fpath) { + t.Fail() + } + }) } - if !ok { - t.Fail() +} + +// isGenericBody returns true if fn is the body of a generic function. +func isGenericBody(fn *ssa.Function) bool { + sig := fn.Signature + if typeparams.ForSignature(sig).Len() > 0 || typeparams.RecvTypeParams(sig).Len() > 0 { + return fn.Synthetic == "" } + return false } // join joins the elements of multiset with " | "s. diff --git a/go/pointer/reflect.go b/go/pointer/reflect.go index efb11b00096..3762dd8d401 100644 --- a/go/pointer/reflect.go +++ b/go/pointer/reflect.go @@ -1024,7 +1024,7 @@ func extŪ°reflectŪ°ChanOf(a *analysis, cgn *cgnode) { var dir reflect.ChanDir // unknown if site := cgn.callersite; site != nil { if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - v, _ := constant.Int64Val(c.Value) + v := c.Int64() if 0 <= v && v <= int64(reflect.BothDir) { dir = reflect.ChanDir(v) } @@ -1751,8 +1751,7 @@ func extŪ°reflectŪ°rtypeŪ°InOut(a *analysis, cgn *cgnode, out bool) { index := -1 if site := cgn.callersite; site != nil { if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - v, _ := constant.Int64Val(c.Value) - index = int(v) + index = int(c.Int64()) } } a.addConstraint(&rtypeInOutConstraint{ diff --git a/go/pointer/util.go b/go/pointer/util.go index 5fec1fc4ed5..17728aa06ac 100644 --- a/go/pointer/util.go +++ b/go/pointer/util.go @@ -8,12 +8,13 @@ import ( "bytes" "fmt" "go/types" - exec "golang.org/x/sys/execabs" "log" "os" "runtime" "time" + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/container/intsets" ) @@ -125,7 +126,7 @@ func (a *analysis) flatten(t types.Type) []*fieldInfo { // Debuggability hack: don't remove // the named type from interfaces as // they're very verbose. - fl = append(fl, &fieldInfo{typ: t}) + fl = append(fl, &fieldInfo{typ: t}) // t may be a type param } else { fl = a.flatten(u) } diff --git a/go/ssa/TODO b/go/ssa/TODO new file mode 100644 index 00000000000..6c35253c73c --- /dev/null +++ b/go/ssa/TODO @@ -0,0 +1,16 @@ +-*- text -*- + +SSA Generics to-do list +=========================== + +DOCUMENTATION: +- Read me for internals + +TYPE PARAMETERIZED GENERIC FUNCTIONS: +- sanity.go updates. +- Check source functions going to generics. +- Tests, tests, tests... + +USAGE: +- Back fill users for handling ssa.InstantiateGenerics being off. + diff --git a/go/ssa/builder.go b/go/ssa/builder.go index b36775a4e34..be8d36a6eeb 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -101,6 +101,9 @@ package ssa // // This is a low level operation for creating functions that do not exist in // the source. Use with caution. +// +// TODO(taking): Use consistent terminology for "concrete". +// TODO(taking): Use consistent terminology for "monomorphization"/"instantiate"/"expand". import ( "fmt" @@ -272,7 +275,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { return fn.emit(&c) case *ast.IndexExpr: - mapt := fn.typeOf(e.X).Underlying().(*types.Map) + mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map. lookup := &Lookup{ X: b.expr(fn, e.X), Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), @@ -309,7 +312,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ typ = fn.typ(typ) switch obj.Name() { case "make": - switch typ.Underlying().(type) { + switch ct := typeparams.CoreType(typ).(type) { case *types.Slice: n := b.expr(fn, args[1]) m := n @@ -319,7 +322,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ if m, ok := m.(*Const); ok { // treat make([]T, n, m) as new([m]T)[:n] cap := m.Int64() - at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) + at := types.NewArray(ct.Elem(), cap) alloc := emitNew(fn, at, pos) alloc.Comment = "makeslice" v := &Slice{ @@ -370,6 +373,8 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // We must still evaluate the value, though. (If it // was side-effect free, the whole call would have // been constant-folded.) + // + // Type parameters are always non-constant so use Underlying. t := deref(fn.typeOf(args[0])).Underlying() if at, ok := t.(*types.Array); ok { b.expr(fn, args[0]) // for effects only @@ -453,47 +458,57 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { } wantAddr := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) - last := len(sel.index) - 1 - return &address{ - addr: emitFieldSelection(fn, v, sel.index[last], true, e.Sel), - pos: e.Sel.Pos(), - expr: e.Sel, + index := sel.index[len(sel.index)-1] + fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) + + // Due to the two phases of resolving AssignStmt, a panic from x.f = p() + // when x is nil is required to come after the side-effects of + // evaluating x and p(). + emit := func(fn *Function) Value { + return emitFieldSelection(fn, v, index, true, e.Sel) } + return &lazyAddress{addr: emit, t: fld.Type(), pos: e.Sel.Pos(), expr: e.Sel} case *ast.IndexExpr: + xt := fn.typeOf(e.X) + elem, mode := indexType(xt) var x Value var et types.Type - switch t := fn.typeOf(e.X).Underlying().(type) { - case *types.Array: + switch mode { + case ixArrVar: // array, array|slice, array|*array, or array|*array|slice. x = b.addr(fn, e.X, escaping).address(fn) - et = types.NewPointer(t.Elem()) - case *types.Pointer: // *array - x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) - case *types.Slice: + et = types.NewPointer(elem) + case ixVar: // *array, slice, *array|slice x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem()) - case *types.Map: + et = types.NewPointer(elem) + case ixMap: + mt := typeparams.CoreType(xt).(*types.Map) return &element{ m: b.expr(fn, e.X), - k: emitConv(fn, b.expr(fn, e.Index), t.Key()), - t: t.Elem(), + k: emitConv(fn, b.expr(fn, e.Index), mt.Key()), + t: mt.Elem(), pos: e.Lbrack, } default: - panic("unexpected container type in IndexExpr: " + t.String()) + panic("unexpected container type in IndexExpr: " + xt.String()) } index := b.expr(fn, e.Index) if isUntyped(index.Type()) { index = emitConv(fn, index, tInt) } - v := &IndexAddr{ - X: x, - Index: index, + // Due to the two phases of resolving AssignStmt, a panic from x[i] = p() + // when x is nil or i is out-of-bounds is required to come after the + // side-effects of evaluating x, i and p(). + emit := func(fn *Function) Value { + v := &IndexAddr{ + X: x, + Index: index, + } + v.setPos(e.Lbrack) + v.setType(et) + return fn.emit(v) } - v.setPos(e.Lbrack) - v.setType(et) - return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} + return &lazyAddress{addr: emit, t: deref(et), pos: e.Lbrack, expr: e} case *ast.StarExpr: return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} @@ -552,7 +567,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * } if _, ok := loc.(*address); ok { - if isInterface(loc.typ()) { + if isNonTypeParamInterface(loc.typ()) { // e.g. var x interface{} = T{...} // Can't in-place initialize an interface value. // Fall back to copying. @@ -622,18 +637,19 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *ast.FuncLit: fn2 := &Function{ - name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), - Signature: fn.typeOf(e.Type).Underlying().(*types.Signature), - pos: e.Type.Func, - parent: fn, - Pkg: fn.Pkg, - Prog: fn.Prog, - syntax: e, - _Origin: nil, // anon funcs do not have an origin. - _TypeParams: fn._TypeParams, // share the parent's type parameters. - _TypeArgs: fn._TypeArgs, // share the parent's type arguments. - info: fn.info, - subst: fn.subst, // share the parent's type substitutions. + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.typeOf(e.Type).(*types.Signature), + pos: e.Type.Func, + parent: fn, + anonIdx: int32(len(fn.AnonFuncs)), + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: e, + topLevelOrigin: nil, // use anonIdx to lookup an anon instance's origin. + typeparams: fn.typeparams, // share the parent's type parameters. + typeargs: fn.typeargs, // share the parent's type arguments. + info: fn.info, + subst: fn.subst, // share the parent's type substitutions. } fn.AnonFuncs = append(fn.AnonFuncs, fn2) b.created.Add(fn2) @@ -669,6 +685,8 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { y.pos = e.Lparen case *SliceToArrayPointer: y.pos = e.Lparen + case *UnOp: // conversion from slice to array. + y.pos = e.Lparen } } return y @@ -733,14 +751,20 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *ast.SliceExpr: var low, high, max Value var x Value - switch fn.typeOf(e.X).Underlying().(type) { + xtyp := fn.typeOf(e.X) + switch typeparams.CoreType(xtyp).(type) { case *types.Array: // Potentially escaping. x = b.addr(fn, e.X, true).address(fn) case *types.Basic, *types.Slice, *types.Pointer: // *array x = b.expr(fn, e.X) default: - panic("unreachable") + // core type exception? + if isBytestring(xtyp) { + x = b.expr(fn, e.X) // bytestring is handled as string and []byte. + } else { + panic("unexpected sequence type in SliceExpr") + } } if e.Low != nil { low = b.expr(fn, e.Low) @@ -768,7 +792,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *types.Builtin: return &Builtin{name: obj.Name(), sig: fn.instanceType(e).(*types.Signature)} case *types.Nil: - return nilConst(fn.instanceType(e)) + return zeroConst(fn.instanceType(e)) } // Package-level func or var? if v := fn.Prog.packageLevelMember(obj); v != nil { @@ -776,7 +800,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { return emitLoad(fn, g) // var (address) } callee := v.(*Function) // (func) - if len(callee._TypeParams) > 0 { + if callee.typeparams.Len() > 0 { targs := fn.subst.types(instanceArgs(fn.info, e)) callee = fn.Prog.needsInstance(callee, targs, b.created) } @@ -810,11 +834,32 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { wantAddr := isPointer(rt) escaping := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) - if isInterface(rt) { - // If v has interface type I, + + if types.IsInterface(rt) { + // If v may be an interface type I (after instantiating), // we must emit a check that v is non-nil. - // We use: typeassert v.(I). - emitTypeAssert(fn, v, rt, token.NoPos) + if recv, ok := sel.recv.(*typeparams.TypeParam); ok { + // Emit a nil check if any possible instantiation of the + // type parameter is an interface type. + if typeSetOf(recv).Len() > 0 { + // recv has a concrete term its typeset. + // So it cannot be instantiated as an interface. + // + // Example: + // func _[T interface{~int; Foo()}] () { + // var v T + // _ = v.Foo // <-- MethodVal + // } + } else { + // rt may be instantiated as an interface. + // Emit nil check: typeassert (any(v)).(any). + emitTypeAssert(fn, emitConv(fn, v, tEface), tEface, token.NoPos) + } + } else { + // non-type param interface + // Emit nil check: typeassert v.(I). + emitTypeAssert(fn, v, rt, token.NoPos) + } } if targs := receiverTypeArgs(obj); len(targs) > 0 { // obj is generic. @@ -851,9 +896,17 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases. } // not a generic instantiation. - switch t := fn.typeOf(e.X).Underlying().(type) { - case *types.Array: - // Non-addressable array (in a register). + xt := fn.typeOf(e.X) + switch et, mode := indexType(xt); mode { + case ixVar: + // Addressable slice/array; use IndexAddr and Load. + return b.addr(fn, e, false).load(fn) + + case ixArrVar, ixValue: + // An array in a register, a string or a combined type that contains + // either an [_]array (ixArrVar) or string (ixValue). + + // Note: for ixArrVar and CoreType(xt)==nil can be IndexAddr and Load. index := b.expr(fn, e.Index) if isUntyped(index.Type()) { index = emitConv(fn, index, tInt) @@ -863,38 +916,20 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { Index: index, } v.setPos(e.Lbrack) - v.setType(t.Elem()) + v.setType(et) return fn.emit(v) - case *types.Map: + case ixMap: + ct := typeparams.CoreType(xt).(*types.Map) v := &Lookup{ X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), t.Key()), + Index: emitConv(fn, b.expr(fn, e.Index), ct.Key()), } v.setPos(e.Lbrack) - v.setType(t.Elem()) + v.setType(ct.Elem()) return fn.emit(v) - - case *types.Basic: // => string - // Strings are not addressable. - index := b.expr(fn, e.Index) - if isUntyped(index.Type()) { - index = emitConv(fn, index, tInt) - } - v := &Lookup{ - X: b.expr(fn, e.X), - Index: index, - } - v.setPos(e.Lbrack) - v.setType(tByte) - return fn.emit(v) - - case *types.Slice, *types.Pointer: // *array - // Addressable slice/array; use IndexAddr and Load. - return b.addr(fn, e, false).load(fn) - default: - panic("unexpected container type in IndexExpr: " + t.String()) + panic("unexpected container type in IndexExpr: " + xt.String()) } case *ast.CompositeLit, *ast.StarExpr: @@ -955,14 +990,14 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { wantAddr := isPointer(recv) escaping := true v := b.receiver(fn, selector.X, wantAddr, escaping, sel) - if isInterface(recv) { + if types.IsInterface(recv) { // Invoke-mode call. - c.Value = v + c.Value = v // possibly type param c.Method = obj } else { // "Call"-mode call. callee := fn.Prog.originFunc(obj) - if len(callee._TypeParams) > 0 { + if callee.typeparams.Len() > 0 { callee = fn.Prog.needsInstance(callee, receiverTypeArgs(obj), b.created) } c.Value = callee @@ -1053,7 +1088,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx st := sig.Params().At(np).Type().(*types.Slice) vt := st.Elem() if len(varargs) == 0 { - args = append(args, nilConst(st)) + args = append(args, zeroConst(st)) } else { // Replace a suffix of args with a slice containing it. at := types.NewArray(vt, int64(len(varargs))) @@ -1085,7 +1120,7 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { b.setCallFunc(fn, e, c) // Then append the other actual parameters. - sig, _ := fn.typeOf(e.Fun).Underlying().(*types.Signature) + sig, _ := typeparams.CoreType(fn.typeOf(e.Fun)).(*types.Signature) if sig == nil { panic(fmt.Sprintf("no signature for call of %s", e.Fun)) } @@ -1218,8 +1253,32 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.typeOf(e)) - switch t := typ.Underlying().(type) { + typ := deref(fn.typeOf(e)) // type with name [may be type param] + t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case + // Computing typ and t is subtle as these handle pointer types. + // For example, &T{...} is valid even for maps and slices. + // Also typ should refer to T (not *T) while t should be the core type of T. + // + // To show the ordering to take into account, consider the composite literal + // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here: + // type N struct{f int} + // func _[T N, S *N]() { + // _ = &T{f: 1} + // _ = []S{{f: 1}} + // } + // For `&T{f: 1}`, we compute `typ` and `t` as: + // typeOf(&T{f: 1}) == *T + // deref(*T) == T (typ) + // CoreType(T) == N + // deref(N) == N + // N.Underlying() == struct{f int} (t) + // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as: + // typeOf({f: 1}) == S + // deref(S) == S (typ) + // CoreType(S) == *N + // deref(*N) == N + // N.Underlying() == struct{f int} (t) + switch t := t.(type) { case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear @@ -1247,6 +1306,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero X: addr, Field: fieldIndex, } + faddr.setPos(pos) faddr.setType(types.NewPointer(sf.Type())) fn.emit(faddr) b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) @@ -1517,7 +1577,7 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl casetype = fn.typeOf(cond) var condv Value if casetype == tUntypedNil { - condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), cond.Pos()) + condv = emitCompare(fn, token.EQL, x, zeroConst(x.Type()), cond.Pos()) ti = x } else { yok := emitTypeTest(fn, x, casetype, cc.Case) @@ -1600,7 +1660,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { case *ast.SendStmt: // ch<- i ch := b.expr(fn, comm.Chan) - chtyp := fn.typ(ch.Type()).Underlying().(*types.Chan) + chtyp := typeparams.CoreType(fn.typ(ch.Type())).(*types.Chan) st = &SelectState{ Dir: types.SendOnly, Chan: ch, @@ -1657,9 +1717,8 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { vars = append(vars, varIndex, varOk) for _, st := range states { if st.Dir == types.RecvOnly { - chtyp := fn.typ(st.Chan.Type()).Underlying().(*types.Chan) - tElem := chtyp.Elem() - vars = append(vars, anonVar(tElem)) + chtyp := typeparams.CoreType(fn.typ(st.Chan.Type())).(*types.Chan) + vars = append(vars, anonVar(chtyp.Elem())) } } sel.setType(types.NewTuple(vars...)) @@ -1823,6 +1882,8 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // elimination if x is pure, static unrolling, etc. // Ranging over a nil *array may have >0 iterations. // We still generate code for x, in case it has effects. + // + // TypeParams do not have constant length. Use underlying instead of core type. length = intConst(arr.Len()) } else { // length = len(x). @@ -1855,7 +1916,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P k = emitLoad(fn, index) if tv != nil { - switch t := x.Type().Underlying().(type) { + switch t := typeparams.CoreType(x.Type()).(type) { case *types.Array: instr := &Index{ X: x, @@ -1925,11 +1986,9 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. emitJump(fn, loop) fn.currentBlock = loop - _, isString := x.Type().Underlying().(*types.Basic) - okv := &Next{ Iter: it, - IsString: isString, + IsString: isBasic(typeparams.CoreType(x.Type())), } okv.setType(types.NewTuple( varOk, @@ -1979,7 +2038,7 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) } recv.setPos(pos) recv.setType(types.NewTuple( - newVar("k", x.Type().Underlying().(*types.Chan).Elem()), + newVar("k", typeparams.CoreType(x.Type()).(*types.Chan).Elem()), varOk, )) ko := fn.emit(recv) @@ -2023,7 +2082,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { var k, v Value var loop, done *BasicBlock - switch rt := x.Type().Underlying().(type) { + switch rt := typeparams.CoreType(x.Type()).(type) { case *types.Slice, *types.Array, *types.Pointer: // *array k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) @@ -2101,11 +2160,11 @@ start: b.expr(fn, s.X) case *ast.SendStmt: + chtyp := typeparams.CoreType(fn.typeOf(s.Chan)).(*types.Chan) fn.emit(&Send{ Chan: b.expr(fn, s.Chan), - X: emitConv(fn, b.expr(fn, s.Value), - fn.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), - pos: s.Arrow, + X: emitConv(fn, b.expr(fn, s.Value), chtyp.Elem()), + pos: s.Arrow, }) case *ast.IncDecStmt: @@ -2283,11 +2342,9 @@ func (b *builder) buildFunctionBody(fn *Function) { var functype *ast.FuncType switch n := fn.syntax.(type) { case nil: - // TODO(taking): Temporarily this can be the body of a generic function. if fn.Params != nil { return // not a Go source function. (Synthetic, or from object file.) } - // fn.Params == nil is handled within body == nil case. case *ast.FuncDecl: functype = n.Type recvField = n.Recv @@ -2319,6 +2376,13 @@ func (b *builder) buildFunctionBody(fn *Function) { } return } + + // Build instantiation wrapper around generic body? + if fn.topLevelOrigin != nil && fn.subst == nil { + buildInstantiationWrapper(fn) + return + } + if fn.Prog.mode&LogSource != 0 { defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() } @@ -2423,7 +2487,17 @@ func (p *Package) build() { // TODO(adonovan): ideally belongs in memberFromObject, but // that would require package creation in topological order. for name, mem := range p.Members { - if ast.IsExported(name) && !isGeneric(mem) { + isGround := func(m Member) bool { + switch m := m.(type) { + case *Type: + named, _ := m.Type().(*types.Named) + return named == nil || typeparams.ForNamed(named) == nil + case *Function: + return m.typeparams.Len() == 0 + } + return true // *NamedConst, *Global + } + if ast.IsExported(name) && isGround(mem) { p.Prog.needMethodsOf(mem.Type(), &p.created) } } @@ -2461,6 +2535,9 @@ func (p *Package) build() { } // Initialize package-level vars in correct order. + if len(p.info.InitOrder) > 0 && len(p.files) == 0 { + panic("no source files provided for package. cannot initialize globals") + } for _, varinit := range p.info.InitOrder { if init.Prog.mode&LogSource != 0 { fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go new file mode 100644 index 00000000000..2588f74c5f9 --- /dev/null +++ b/go/ssa/builder_generic_test.go @@ -0,0 +1,679 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + "fmt" + "go/parser" + "go/token" + "reflect" + "sort" + "testing" + + "golang.org/x/tools/go/expect" + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/typeparams" +) + +// TestGenericBodies tests that bodies of generic functions and methods containing +// different constructs can be built in BuilderMode(0). +// +// Each test specifies the contents of package containing a single go file. +// Each call print(arg0, arg1, ...) to the builtin print function +// in ssa is correlated a comment at the end of the line of the form: +// +// //@ types(a, b, c) +// +// where a, b and c are the types of the arguments to the print call +// serialized using go/types.Type.String(). +// See x/tools/go/expect for details on the syntax. +func TestGenericBodies(t *testing.T) { + if !typeparams.Enabled { + t.Skip("TestGenericBodies requires type parameters") + } + for _, test := range []struct { + pkg string // name of the package. + contents string // contents of the Go package. + }{ + { + pkg: "p", + contents: ` + package p + + func f(x int) { + var i interface{} + print(i, 0) //@ types("interface{}", int) + print() //@ types() + print(x) //@ types(int) + } + `, + }, + { + pkg: "q", + contents: ` + package q + + func f[T any](x T) { + print(x) //@ types(T) + } + `, + }, + { + pkg: "r", + contents: ` + package r + + func f[T ~int]() { + var x T + print(x) //@ types(T) + } + `, + }, + { + pkg: "s", + contents: ` + package s + + func a[T ~[4]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) + } + } + func b[T ~*[4]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) + } + } + func c[T ~[]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) + } + } + func d[T ~string](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, rune) + } + } + func e[T ~map[int]string](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, string) + } + } + func f[T ~chan string](x T) { + for v := range x { + print(x, v) //@ types(T, string) + } + } + + func From() { + type A [4]byte + print(a[A]) //@ types("func(x s.A)") + + type B *[4]byte + print(b[B]) //@ types("func(x s.B)") + + type C []byte + print(c[C]) //@ types("func(x s.C)") + + type D string + print(d[D]) //@ types("func(x s.D)") + + type E map[int]string + print(e[E]) //@ types("func(x s.E)") + + type F chan string + print(f[F]) //@ types("func(x s.F)") + } + `, + }, + { + pkg: "t", + contents: ` + package t + + func f[S any, T ~chan S](x T) { + for v := range x { + print(x, v) //@ types(T, S) + } + } + + func From() { + type F chan string + print(f[string, F]) //@ types("func(x t.F)") + } + `, + }, + { + pkg: "u", + contents: ` + package u + + func fibonacci[T ~chan int](c, quit T) { + x, y := 0, 1 + for { + select { + case c <- x: + x, y = y, x+y + case <-quit: + print(c, quit, x, y) //@ types(T, T, int, int) + return + } + } + } + func start[T ~chan int](c, quit T) { + go func() { + for i := 0; i < 10; i++ { + print(<-c) //@ types(int) + } + quit <- 0 + }() + } + func From() { + type F chan int + c := make(F) + quit := make(F) + print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") + print(fibonacci[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") + } + `, + }, + { + pkg: "v", + contents: ` + package v + + func f[T ~struct{ x int; y string }](i int) T { + u := []T{ T{0, "lorem"}, T{1, "ipsum"}} + return u[i] + } + func From() { + type S struct{ x int; y string } + print(f[S]) //@ types("func(i int) v.S") + } + `, + }, + { + pkg: "w", + contents: ` + package w + + func f[T ~[4]int8](x T, l, h int) []int8 { + return x[l:h] + } + func g[T ~*[4]int16](x T, l, h int) []int16 { + return x[l:h] + } + func h[T ~[]int32](x T, l, h int) T { + return x[l:h] + } + func From() { + type F [4]int8 + type G *[4]int16 + type H []int32 + print(f[F](F{}, 0, 0)) //@ types("[]int8") + print(g[G](nil, 0, 0)) //@ types("[]int16") + print(h[H](nil, 0, 0)) //@ types("w.H") + } + `, + }, + { + pkg: "x", + contents: ` + package x + + func h[E any, T ~[]E](x T, l, h int) []E { + s := x[l:h] + print(s) //@ types("T") + return s + } + func From() { + type H []int32 + print(h[int32, H](nil, 0, 0)) //@ types("[]int32") + } + `, + }, + { + pkg: "y", + contents: ` + package y + + // Test "make" builtin with different forms on core types and + // when capacities are constants or variable. + func h[E any, T ~[]E](m, n int) { + print(make(T, 3)) //@ types(T) + print(make(T, 3, 5)) //@ types(T) + print(make(T, m)) //@ types(T) + print(make(T, m, n)) //@ types(T) + } + func i[K comparable, E any, T ~map[K]E](m int) { + print(make(T)) //@ types(T) + print(make(T, 5)) //@ types(T) + print(make(T, m)) //@ types(T) + } + func j[E any, T ~chan E](m int) { + print(make(T)) //@ types(T) + print(make(T, 6)) //@ types(T) + print(make(T, m)) //@ types(T) + } + func From() { + type H []int32 + h[int32, H](3, 4) + type I map[int8]H + i[int8, H, I](5) + type J chan I + j[I, J](6) + } + `, + }, + { + pkg: "z", + contents: ` + package z + + func h[T ~[4]int](x T) { + print(len(x), cap(x)) //@ types(int, int) + } + func i[T ~[4]byte | []int | ~chan uint8](x T) { + print(len(x), cap(x)) //@ types(int, int) + } + func j[T ~[4]int | any | map[string]int]() { + print(new(T)) //@ types("*T") + } + func k[T ~[4]int | any | map[string]int](x T) { + print(x) //@ types(T) + panic(x) + } + `, + }, + { + pkg: "a", + contents: ` + package a + + func f[E any, F ~func() E](x F) { + print(x, x()) //@ types(F, E) + } + func From() { + type T func() int + f[int, T](func() int { return 0 }) + f[int, func() int](func() int { return 1 }) + } + `, + }, + { + pkg: "b", + contents: ` + package b + + func f[E any, M ~map[string]E](m M) { + y, ok := m["lorem"] + print(m, y, ok) //@ types(M, E, bool) + } + func From() { + type O map[string][]int + f(O{"lorem": []int{0, 1, 2, 3}}) + } + `, + }, + { + pkg: "c", + contents: ` + package c + + func a[T interface{ []int64 | [5]int64 }](x T) int64 { + print(x, x[2], x[3]) //@ types(T, int64, int64) + x[2] = 5 + return x[3] + } + func b[T interface{ []byte | string }](x T) byte { + print(x, x[3]) //@ types(T, byte) + return x[3] + } + func c[T interface{ []byte }](x T) byte { + print(x, x[2], x[3]) //@ types(T, byte, byte) + x[2] = 'b' + return x[3] + } + func d[T interface{ map[int]int64 }](x T) int64 { + print(x, x[2], x[3]) //@ types(T, int64, int64) + x[2] = 43 + return x[3] + } + func e[T ~string](t T) { + print(t, t[0]) //@ types(T, uint8) + } + func f[T ~string|[]byte](t T) { + print(t, t[0]) //@ types(T, uint8) + } + func g[T []byte](t T) { + print(t, t[0]) //@ types(T, byte) + } + func h[T ~[4]int|[]int](t T) { + print(t, t[0]) //@ types(T, int) + } + func i[T ~[4]int|*[4]int|[]int](t T) { + print(t, t[0]) //@ types(T, int) + } + func j[T ~[4]int|*[4]int|[]int](t T) { + print(t, &t[0]) //@ types(T, "*int") + } + `, + }, + { + pkg: "d", + contents: ` + package d + + type MyInt int + type Other int + type MyInterface interface{ foo() } + + // ChangeType tests + func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ } + func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ } + + // Convert tests + func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/} + func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ } + func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ } + + // MakeInterface tests + func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ } + + // NewConst tests + func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/} + + // SliceToArrayPointer + func sl0[T *[4]int | *[2]int](x []int) { v := T(x); print(x, v) /*@ types("[]int", T)*/ } + func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + `, + }, + { + pkg: "e", + contents: ` + package e + + func c[T interface{ foo() string }](x T) { + print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/ + } + `, + }, + { + pkg: "f", + contents: `package f + + func eq[T comparable](t T, i interface{}) bool { + return t == i + } + `, + }, + { + pkg: "g", + contents: `package g + type S struct{ f int } + func c[P *S]() []P { return []P{{f: 1}} } + `, + }, + { + pkg: "h", + contents: `package h + func sign[bytes []byte | string](s bytes) (bool, bool) { + neg := false + if len(s) > 0 && (s[0] == '-' || s[0] == '+') { + neg = s[0] == '-' + s = s[1:] + } + return !neg, len(s) > 0 + }`, + }, + { + pkg: "i", + contents: `package i + func digits[bytes []byte | string](s bytes) bool { + for _, c := range []byte(s) { + if c < '0' || '9' < c { + return false + } + } + return true + }`, + }, + { + pkg: "j", + contents: ` + package j + + type E interface{} + + func Foo[T E, PT interface{ *T }]() T { + pt := PT(new(T)) + x := *pt + print(x) /*@ types(T)*/ + return x + } + `, + }, + } { + test := test + t.Run(test.pkg, func(t *testing.T) { + // Parse + conf := loader.Config{ParserMode: parser.ParseComments} + fname := test.pkg + ".go" + f, err := conf.ParseFile(fname, test.contents) + if err != nil { + t.Fatalf("parse: %v", err) + } + conf.CreateFromFiles(test.pkg, f) + + // Load + lprog, err := conf.Load() + if err != nil { + t.Fatalf("Load: %v", err) + } + + // Create and build SSA + prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions) + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + p := prog.Package(lprog.Package(test.pkg).Pkg) + p.Build() + + // Collect calls to the builtin print function. + probes := make(map[*ssa.CallCommon]bool) + for _, mem := range p.Members { + if fn, ok := mem.(*ssa.Function); ok { + for _, bb := range fn.Blocks { + for _, i := range bb.Instrs { + if i, ok := i.(ssa.CallInstruction); ok { + call := i.Common() + if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" { + probes[i.Common()] = true + } + } + } + } + } + } + + // Collect all notes in f, i.e. comments starting with "//@ types". + notes, err := expect.ExtractGo(prog.Fset, f) + if err != nil { + t.Errorf("expect.ExtractGo: %v", err) + } + + // Matches each probe with a note that has the same line. + sameLine := func(x, y token.Pos) bool { + xp := prog.Fset.Position(x) + yp := prog.Fset.Position(y) + return xp.Filename == yp.Filename && xp.Line == yp.Line + } + expectations := make(map[*ssa.CallCommon]*expect.Note) + for call := range probes { + var match *expect.Note + for _, note := range notes { + if note.Name == "types" && sameLine(call.Pos(), note.Pos) { + match = note // first match is good enough. + break + } + } + if match != nil { + expectations[call] = match + } else { + t.Errorf("Unmatched probe: %v", call) + } + } + + // Check each expectation. + for call, note := range expectations { + var args []string + for _, a := range call.Args { + args = append(args, a.Type().String()) + } + if got, want := fmt.Sprint(args), fmt.Sprint(note.Args); got != want { + t.Errorf("Arguments to print() were expected to be %q. got %q", want, got) + } + } + }) + } +} + +// TestInstructionString tests serializing instructions via Instruction.String(). +func TestInstructionString(t *testing.T) { + if !typeparams.Enabled { + t.Skip("TestInstructionString requires type parameters") + } + // Tests (ssa.Instruction).String(). Instructions are from a single go file. + // The Instructions tested are those that match a comment of the form: + // + // //@ instrs(f, kind, strs...) + // + // where f is the name of the function, kind is the type of the instructions matched + // within the function, and tests that the String() value for all of the instructions + // matched of String() is strs (in some order). + // See x/tools/go/expect for details on the syntax. + + const contents = ` + package p + + //@ instrs("f", "*ssa.TypeAssert") + //@ instrs("f", "*ssa.Call", "print(nil:interface{}, 0:int)") + func f(x int) { // non-generic smoke test. + var i interface{} + print(i, 0) + } + + //@ instrs("h", "*ssa.Alloc", "local T (u)") + //@ instrs("h", "*ssa.FieldAddr", "&t0.x [#0]") + func h[T ~struct{ x string }]() T { + u := T{"lorem"} + return u + } + + //@ instrs("c", "*ssa.TypeAssert", "typeassert t0.(interface{})") + //@ instrs("c", "*ssa.Call", "invoke x.foo()") + func c[T interface{ foo() string }](x T) { + _ = x.foo + _ = x.foo() + } + + //@ instrs("d", "*ssa.TypeAssert", "typeassert t0.(interface{})") + //@ instrs("d", "*ssa.Call", "invoke x.foo()") + func d[T interface{ foo() string; comparable }](x T) { + _ = x.foo + _ = x.foo() + } + ` + + // Parse + conf := loader.Config{ParserMode: parser.ParseComments} + const fname = "p.go" + f, err := conf.ParseFile(fname, contents) + if err != nil { + t.Fatalf("parse: %v", err) + } + conf.CreateFromFiles("p", f) + + // Load + lprog, err := conf.Load() + if err != nil { + t.Fatalf("Load: %v", err) + } + + // Create and build SSA + prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions) + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + p := prog.Package(lprog.Package("p").Pkg) + p.Build() + + // Collect all notes in f, i.e. comments starting with "//@ instr". + notes, err := expect.ExtractGo(prog.Fset, f) + if err != nil { + t.Errorf("expect.ExtractGo: %v", err) + } + + // Expectation is a {function, type string} -> {want, matches} + // where matches is all Instructions.String() that match the key. + // Each expecation is that some permutation of matches is wants. + type expKey struct { + function string + kind string + } + type expValue struct { + wants []string + matches []string + } + expectations := make(map[expKey]*expValue) + for _, note := range notes { + if note.Name == "instrs" { + if len(note.Args) < 2 { + t.Error("Had @instrs annotation without at least 2 arguments") + continue + } + fn, kind := fmt.Sprint(note.Args[0]), fmt.Sprint(note.Args[1]) + var wants []string + for _, arg := range note.Args[2:] { + wants = append(wants, fmt.Sprint(arg)) + } + expectations[expKey{fn, kind}] = &expValue{wants, nil} + } + } + + // Collect all Instructions that match the expectations. + for _, mem := range p.Members { + if fn, ok := mem.(*ssa.Function); ok { + for _, bb := range fn.Blocks { + for _, i := range bb.Instrs { + kind := fmt.Sprintf("%T", i) + if e := expectations[expKey{fn.Name(), kind}]; e != nil { + e.matches = append(e.matches, i.String()) + } + } + } + } + } + + // Check each expectation. + for key, value := range expectations { + if _, ok := p.Members[key.function]; !ok { + t.Errorf("Expectation on %s does not match a member in %s", key.function, p.Pkg.Name()) + } + got, want := value.matches, value.wants + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(want, got) { + t.Errorf("Within %s wanted instructions of kind %s: %q. got %q", key.function, key.kind, want, got) + } + } +} diff --git a/go/ssa/builder_go117_test.go b/go/ssa/builder_go117_test.go index f6545e5e2cf..69985970596 100644 --- a/go/ssa/builder_go117_test.go +++ b/go/ssa/builder_go117_test.go @@ -57,7 +57,6 @@ func TestBuildPackageFailuresGo117(t *testing.T) { importer types.Importer }{ {"slice to array pointer - source is not a slice", "package p; var s [4]byte; var _ = (*[4]byte)(s)", nil}, - {"slice to array pointer - dest is not a pointer", "package p; var s []byte; var _ = ([4]byte)(s)", nil}, {"slice to array pointer - dest pointer elem is not an array", "package p; var s []byte; var _ = (*byte)(s)", nil}, } diff --git a/go/ssa/builder_go120_test.go b/go/ssa/builder_go120_test.go new file mode 100644 index 00000000000..a691f938c04 --- /dev/null +++ b/go/ssa/builder_go120_test.go @@ -0,0 +1,102 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package ssa_test + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "testing" + + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/go/ssa/ssautil" +) + +func TestBuildPackageGo120(t *testing.T) { + tests := []struct { + name string + src string + importer types.Importer + }{ + {"slice to array", "package p; var s []byte; var _ = ([4]byte)(s)", nil}, + {"slice to zero length array", "package p; var s []byte; var _ = ([0]byte)(s)", nil}, + {"slice to zero length array type parameter", "package p; var s []byte; func f[T ~[0]byte]() { tmp := (T)(s); var z T; _ = tmp == z}", nil}, + {"slice to non-zero length array type parameter", "package p; var s []byte; func h[T ~[1]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil}, + {"slice to maybe-zero length array type parameter", "package p; var s []byte; func g[T ~[0]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil}, + { + "rune sequence to sequence cast patterns", ` + package p + // Each of fXX functions describes a 1.20 legal cast between sequences of runes + // as []rune, pointers to rune arrays, rune arrays, or strings. + // + // Comments listed given the current emitted instructions [approximately]. + // If multiple conversions are needed, these are seperated by |. + // rune was selected as it leads to string casts (byte is similar). + // The length 2 is not significant. + // Multiple array lengths may occur in a cast in practice (including 0). + func f00[S string, D string](s S) { _ = D(s) } // ChangeType + func f01[S string, D []rune](s S) { _ = D(s) } // Convert + func f02[S string, D []rune | string](s S) { _ = D(s) } // ChangeType | Convert + func f03[S [2]rune, D [2]rune](s S) { _ = D(s) } // ChangeType + func f04[S *[2]rune, D *[2]rune](s S) { _ = D(s) } // ChangeType + func f05[S []rune, D string](s S) { _ = D(s) } // Convert + func f06[S []rune, D [2]rune](s S) { _ = D(s) } // SliceToArrayPointer; Deref + func f07[S []rune, D [2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer; Deref | Convert + func f08[S []rune, D *[2]rune](s S) { _ = D(s) } // SliceToArrayPointer + func f09[S []rune, D *[2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer; Deref | Convert + func f10[S []rune, D *[2]rune | [2]rune](s S) { _ = D(s) } // SliceToArrayPointer | SliceToArrayPointer; Deref + func f11[S []rune, D *[2]rune | [2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer | SliceToArrayPointer; Deref | Convert + func f12[S []rune, D []rune](s S) { _ = D(s) } // ChangeType + func f13[S []rune, D []rune | string](s S) { _ = D(s) } // Convert | ChangeType + func f14[S []rune, D []rune | [2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer; Deref + func f15[S []rune, D []rune | [2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer; Deref | Convert + func f16[S []rune, D []rune | *[2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer + func f17[S []rune, D []rune | *[2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | Convert + func f18[S []rune, D []rune | *[2]rune | [2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | SliceToArrayPointer; Deref + func f19[S []rune, D []rune | *[2]rune | [2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | SliceToArrayPointer; Deref | Convert + func f20[S []rune | string, D string](s S) { _ = D(s) } // Convert | ChangeType + func f21[S []rune | string, D []rune](s S) { _ = D(s) } // Convert | ChangeType + func f22[S []rune | string, D []rune | string](s S) { _ = D(s) } // ChangeType | Convert | Convert | ChangeType + func f23[S []rune | [2]rune, D [2]rune](s S) { _ = D(s) } // SliceToArrayPointer; Deref | ChangeType + func f24[S []rune | *[2]rune, D *[2]rune](s S) { _ = D(s) } // SliceToArrayPointer | ChangeType + `, nil, + }, + { + "matching named and underlying types", ` + package p + type a string + type b string + func g0[S []rune | a | b, D []rune | a | b](s S) { _ = D(s) } + func g1[S []rune | ~string, D []rune | a | b](s S) { _ = D(s) } + func g2[S []rune | a | b, D []rune | ~string](s S) { _ = D(s) } + func g3[S []rune | ~string, D []rune |~string](s S) { _ = D(s) } + `, nil, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "p.go", tc.src, parser.ParseComments) + if err != nil { + t.Error(err) + } + files := []*ast.File{f} + + pkg := types.NewPackage("p", "") + conf := &types.Config{Importer: tc.importer} + _, _, err = ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/go/ssa/builder_test.go b/go/ssa/builder_test.go index 1975e26e45c..a80d8d5ab73 100644 --- a/go/ssa/builder_test.go +++ b/go/ssa/builder_test.go @@ -24,6 +24,7 @@ import ( "golang.org/x/tools/go/loader" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" + "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/typeparams" ) @@ -32,6 +33,8 @@ func isEmpty(f *ssa.Function) bool { return f.Blocks == nil } // Tests that programs partially loaded from gc object files contain // functions with no code for the external portions, but are otherwise ok. func TestBuildPackage(t *testing.T) { + testenv.NeedsGoBuild(t) // for importer.Default() + input := ` package main @@ -164,6 +167,8 @@ func main() { // TestRuntimeTypes tests that (*Program).RuntimeTypes() includes all necessary types. func TestRuntimeTypes(t *testing.T) { + testenv.NeedsGoBuild(t) // for importer.Default() + tests := []struct { input string want []string @@ -221,6 +226,18 @@ func TestRuntimeTypes(t *testing.T) { nil, }, } + + if typeparams.Enabled { + tests = append(tests, []struct { + input string + want []string + }{ + // MakeInterface does not create runtime type for parameterized types. + {`package N; var g interface{}; func f[S any]() { var v []S; g = v }; `, + nil, + }, + }...) + } for _, test := range tests { // Parse the file. fset := token.NewFileSet() diff --git a/go/ssa/const.go b/go/ssa/const.go index dc182d9616c..4a51a2cb4bb 100644 --- a/go/ssa/const.go +++ b/go/ssa/const.go @@ -12,65 +12,73 @@ import ( "go/token" "go/types" "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" ) // NewConst returns a new constant of the specified value and type. // val must be valid according to the specification of Const.Value. func NewConst(val constant.Value, typ types.Type) *Const { + if val == nil { + switch soleTypeKind(typ) { + case types.IsBoolean: + val = constant.MakeBool(false) + case types.IsInteger: + val = constant.MakeInt64(0) + case types.IsString: + val = constant.MakeString("") + } + } return &Const{typ, val} } +// soleTypeKind returns a BasicInfo for which constant.Value can +// represent all zero values for the types in the type set. +// +// types.IsBoolean for false is a representative. +// types.IsInteger for 0 +// types.IsString for "" +// 0 otherwise. +func soleTypeKind(typ types.Type) types.BasicInfo { + // State records the set of possible zero values (false, 0, ""). + // Candidates (perhaps all) are eliminated during the type-set + // iteration, which executes at least once. + state := types.IsBoolean | types.IsInteger | types.IsString + underIs(typeSetOf(typ), func(t types.Type) bool { + var c types.BasicInfo + if t, ok := t.(*types.Basic); ok { + c = t.Info() + } + if c&types.IsNumeric != 0 { // int/float/complex + c = types.IsInteger + } + state = state & c + return state != 0 + }) + return state +} + // intConst returns an 'int' constant that evaluates to i. // (i is an int64 in case the host is narrower than the target.) func intConst(i int64) *Const { return NewConst(constant.MakeInt64(i), tInt) } -// nilConst returns a nil constant of the specified type, which may -// be any reference type, including interfaces. -func nilConst(typ types.Type) *Const { - return NewConst(nil, typ) -} - // stringConst returns a 'string' constant that evaluates to s. func stringConst(s string) *Const { return NewConst(constant.MakeString(s), tString) } -// zeroConst returns a new "zero" constant of the specified type, -// which must not be an array or struct type: the zero values of -// aggregates are well-defined but cannot be represented by Const. +// zeroConst returns a new "zero" constant of the specified type. func zeroConst(t types.Type) *Const { - switch t := t.(type) { - case *types.Basic: - switch { - case t.Info()&types.IsBoolean != 0: - return NewConst(constant.MakeBool(false), t) - case t.Info()&types.IsNumeric != 0: - return NewConst(constant.MakeInt64(0), t) - case t.Info()&types.IsString != 0: - return NewConst(constant.MakeString(""), t) - case t.Kind() == types.UnsafePointer: - fallthrough - case t.Kind() == types.UntypedNil: - return nilConst(t) - default: - panic(fmt.Sprint("zeroConst for unexpected type:", t)) - } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return nilConst(t) - case *types.Named: - return NewConst(zeroConst(t.Underlying()).Value, t) - case *types.Array, *types.Struct, *types.Tuple: - panic(fmt.Sprint("zeroConst applied to aggregate:", t)) - } - panic(fmt.Sprint("zeroConst: unexpected ", t)) + return NewConst(nil, t) } func (c *Const) RelString(from *types.Package) string { var s string if c.Value == nil { - s = "nil" + s = zeroString(c.typ, from) } else if c.Value.Kind() == constant.String { s = constant.StringVal(c.Value) const max = 20 @@ -85,6 +93,44 @@ func (c *Const) RelString(from *types.Package) string { return s + ":" + relType(c.Type(), from) } +// zeroString returns the string representation of the "zero" value of the type t. +func zeroString(t types.Type, from *types.Package) string { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false" + case t.Info()&types.IsNumeric != 0: + return "0" + case t.Info()&types.IsString != 0: + return `""` + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil" + default: + panic(fmt.Sprint("zeroString for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return "nil" + case *types.Named: + return zeroString(t.Underlying(), from) + case *types.Array, *types.Struct: + return relType(t, from) + "{}" + case *types.Tuple: + // Tuples are not normal values. + // We are currently format as "(t[0], ..., t[n])". Could be something else. + components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + components[i] = zeroString(t.At(i).Type(), from) + } + return "(" + strings.Join(components, ", ") + ")" + case *typeparams.TypeParam: + return "*new(" + relType(t, from) + ")" + } + panic(fmt.Sprint("zeroString: unexpected ", t)) +} + func (c *Const) Name() string { return c.RelString(nil) } @@ -107,9 +153,30 @@ func (c *Const) Pos() token.Pos { return token.NoPos } -// IsNil returns true if this constant represents a typed or untyped nil value. +// IsNil returns true if this constant is a nil value of +// a nillable reference type (pointer, slice, channel, map, or function), +// a basic interface type, or +// a type parameter all of whose possible instantiations are themselves nillable. func (c *Const) IsNil() bool { - return c.Value == nil + return c.Value == nil && nillable(c.typ) +} + +// nillable reports whether *new(T) == nil is legal for type T. +func nillable(t types.Type) bool { + if typeparams.IsTypeParam(t) { + return underIs(typeSetOf(t), func(u types.Type) bool { + // empty type set (u==nil) => any underlying types => not nillable + return u != nil && nillable(u) + }) + } + switch t.Underlying().(type) { + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return true + case *types.Interface: + return true // basic interface. + default: + return false + } } // TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp. @@ -149,14 +216,16 @@ func (c *Const) Uint64() uint64 { // Float64 returns the numeric value of this constant truncated to fit // a float64. func (c *Const) Float64() float64 { - f, _ := constant.Float64Val(c.Value) + x := constant.ToFloat(c.Value) // (c.Value == nil) => x.Kind() == Unknown + f, _ := constant.Float64Val(x) return f } // Complex128 returns the complex value of this constant truncated to // fit a complex128. func (c *Const) Complex128() complex128 { - re, _ := constant.Float64Val(constant.Real(c.Value)) - im, _ := constant.Float64Val(constant.Imag(c.Value)) + x := constant.ToComplex(c.Value) // (c.Value == nil) => x.Kind() == Unknown + re, _ := constant.Float64Val(constant.Real(x)) + im, _ := constant.Float64Val(constant.Imag(x)) return complex(re, im) } diff --git a/go/ssa/const_test.go b/go/ssa/const_test.go new file mode 100644 index 00000000000..131fe1aced2 --- /dev/null +++ b/go/ssa/const_test.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + "go/ast" + "go/constant" + "go/parser" + "go/token" + "go/types" + "math/big" + "strings" + "testing" + + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/typeparams" +) + +func TestConstString(t *testing.T) { + if !typeparams.Enabled { + t.Skip("TestConstString requires type parameters.") + } + + const source = ` + package P + + type Named string + + func fn() (int, bool, string) + func gen[T int]() {} + ` + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "p.go", source, 0) + if err != nil { + t.Fatal(err) + } + + var conf types.Config + pkg, err := conf.Check("P", fset, []*ast.File{f}, nil) + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + expr string // type expression + constant interface{} // constant value + want string // expected String() value + }{ + {"int", int64(0), "0:int"}, + {"int64", int64(0), "0:int64"}, + {"float32", int64(0), "0:float32"}, + {"float32", big.NewFloat(1.5), "1.5:float32"}, + {"bool", false, "false:bool"}, + {"string", "", `"":string`}, + {"Named", "", `"":P.Named`}, + {"struct{x string}", nil, "struct{x string}{}:struct{x string}"}, + {"[]int", nil, "nil:[]int"}, + {"[3]int", nil, "[3]int{}:[3]int"}, + {"*int", nil, "nil:*int"}, + {"interface{}", nil, "nil:interface{}"}, + {"interface{string}", nil, `"":interface{string}`}, + {"interface{int|int64}", nil, "0:interface{int|int64}"}, + {"interface{bool}", nil, "false:interface{bool}"}, + {"interface{bool|int}", nil, "nil:interface{bool|int}"}, + {"interface{int|string}", nil, "nil:interface{int|string}"}, + {"interface{bool|string}", nil, "nil:interface{bool|string}"}, + {"interface{struct{x string}}", nil, "nil:interface{struct{x string}}"}, + {"interface{int|int64}", int64(1), "1:interface{int|int64}"}, + {"interface{~bool}", true, "true:interface{~bool}"}, + {"interface{Named}", "lorem ipsum", `"lorem ipsum":interface{P.Named}`}, + {"func() (int, bool, string)", nil, "nil:func() (int, bool, string)"}, + } { + // Eval() expr for its type. + tv, err := types.Eval(fset, pkg, 0, test.expr) + if err != nil { + t.Fatalf("Eval(%s) failed: %v", test.expr, err) + } + var val constant.Value + if test.constant != nil { + val = constant.Make(test.constant) + } + c := ssa.NewConst(val, tv.Type) + got := strings.ReplaceAll(c.String(), " | ", "|") // Accept both interface{a | b} and interface{a|b}. + if got != test.want { + t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", val, tv.Type, got, test.want) + } + } + + // Test tuples + fn := pkg.Scope().Lookup("fn") + tup := fn.Type().(*types.Signature).Results() + if got, want := ssa.NewConst(nil, tup).String(), `(0, false, ""):(int, bool, string)`; got != want { + t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want) + } + + // Test type-param + gen := pkg.Scope().Lookup("gen") + tp := typeparams.ForSignature(gen.Type().(*types.Signature)).At(0) + if got, want := ssa.NewConst(nil, tp).String(), "0:T"; got != want { + t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want) + } +} diff --git a/go/ssa/coretype.go b/go/ssa/coretype.go new file mode 100644 index 00000000000..128d61e4267 --- /dev/null +++ b/go/ssa/coretype.go @@ -0,0 +1,159 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Utilities for dealing with core types. + +// isBytestring returns true if T has the same terms as interface{[]byte | string}. +// These act like a core type for some operations: slice expressions, append and copy. +// +// See https://go.dev/ref/spec#Core_types for the details on bytestring. +func isBytestring(T types.Type) bool { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return false + } + + tset := typeSetOf(U) + if tset.Len() != 2 { + return false + } + hasBytes, hasString := false, false + underIs(tset, func(t types.Type) bool { + switch { + case isString(t): + hasString = true + case isByteSlice(t): + hasBytes = true + } + return hasBytes || hasString + }) + return hasBytes && hasString +} + +// termList is a list of types. +type termList []*typeparams.Term // type terms of the type set +func (s termList) Len() int { return len(s) } +func (s termList) At(i int) types.Type { return s[i].Type() } + +// typeSetOf returns the type set of typ. Returns an empty typeset on an error. +func typeSetOf(typ types.Type) termList { + // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on. + var terms []*typeparams.Term + var err error + switch typ := typ.(type) { + case *typeparams.TypeParam: + terms, err = typeparams.StructuralTerms(typ) + case *typeparams.Union: + terms, err = typeparams.UnionTermSet(typ) + case *types.Interface: + terms, err = typeparams.InterfaceTermSet(typ) + default: + // Common case. + // Specializing the len=1 case to avoid a slice + // had no measurable space/time benefit. + terms = []*typeparams.Term{typeparams.NewTerm(false, typ)} + } + + if err != nil { + return termList(nil) + } + return termList(terms) +} + +// underIs calls f with the underlying types of the specific type terms +// of s and reports whether all calls to f returned true. If there are +// no specific terms, underIs returns the result of f(nil). +func underIs(s termList, f func(types.Type) bool) bool { + if s.Len() == 0 { + return f(nil) + } + for i := 0; i < s.Len(); i++ { + u := s.At(i).Underlying() + if !f(u) { + return false + } + } + return true +} + +// indexType returns the element type and index mode of a IndexExpr over a type. +// It returns (nil, invalid) if the type is not indexable; this should never occur in a well-typed program. +func indexType(typ types.Type) (types.Type, indexMode) { + switch U := typ.Underlying().(type) { + case *types.Array: + return U.Elem(), ixArrVar + case *types.Pointer: + if arr, ok := U.Elem().Underlying().(*types.Array); ok { + return arr.Elem(), ixVar + } + case *types.Slice: + return U.Elem(), ixVar + case *types.Map: + return U.Elem(), ixMap + case *types.Basic: + return tByte, ixValue // must be a string + case *types.Interface: + tset := typeSetOf(U) + if tset.Len() == 0 { + return nil, ixInvalid // no underlying terms or error is empty. + } + + elem, mode := indexType(tset.At(0)) + for i := 1; i < tset.Len() && mode != ixInvalid; i++ { + e, m := indexType(tset.At(i)) + if !types.Identical(elem, e) { // if type checked, just a sanity check + return nil, ixInvalid + } + // Update the mode to the most constrained address type. + mode = mode.meet(m) + } + if mode != ixInvalid { + return elem, mode + } + } + return nil, ixInvalid +} + +// An indexMode specifies the (addressing) mode of an index operand. +// +// Addressing mode of an index operation is based on the set of +// underlying types. +// Hasse diagram of the indexMode meet semi-lattice: +// +// ixVar ixMap +// | | +// ixArrVar | +// | | +// ixValue | +// \ / +// ixInvalid +type indexMode byte + +const ( + ixInvalid indexMode = iota // index is invalid + ixValue // index is a computed value (not addressable) + ixArrVar // like ixVar, but index operand contains an array + ixVar // index is an addressable variable + ixMap // index is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment) +) + +// meet is the address type that is constrained by both x and y. +func (x indexMode) meet(y indexMode) indexMode { + if (x == ixMap || y == ixMap) && x != y { + return ixInvalid + } + // Use int representation and return min. + if x < y { + return y + } + return x +} diff --git a/go/ssa/coretype_test.go b/go/ssa/coretype_test.go new file mode 100644 index 00000000000..74fe4db1667 --- /dev/null +++ b/go/ssa/coretype_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "testing" + + "golang.org/x/tools/internal/typeparams" +) + +func TestCoreType(t *testing.T) { + if !typeparams.Enabled { + t.Skip("TestCoreType requires type parameters.") + } + + const source = ` + package P + + type Named int + + type A any + type B interface{~int} + type C interface{int} + type D interface{Named} + type E interface{~int|interface{Named}} + type F interface{~int|~float32} + type G interface{chan int|interface{chan int}} + type H interface{chan int|chan float32} + type I interface{chan<- int|chan int} + type J interface{chan int|chan<- int} + type K interface{<-chan int|chan int} + type L interface{chan int|<-chan int} + type M interface{chan int|chan Named} + type N interface{<-chan int|chan<- int} + type O interface{chan int|bool} + type P struct{ Named } + type Q interface{ Foo() } + type R interface{ Foo() ; Named } + type S interface{ Foo() ; ~int } + + type T interface{chan int|interface{chan int}|<-chan int} +` + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "hello.go", source, 0) + if err != nil { + t.Fatal(err) + } + + var conf types.Config + pkg, err := conf.Check("P", fset, []*ast.File{f}, nil) + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + expr string // type expression of Named type + want string // expected core type (or "" if none) + }{ + {"Named", "int"}, // Underlying type is not interface. + {"A", ""}, // Interface has no terms. + {"B", "int"}, // Tilde term. + {"C", "int"}, // Non-tilde term. + {"D", "int"}, // Named term. + {"E", "int"}, // Identical underlying types. + {"F", ""}, // Differing underlying types. + {"G", "chan int"}, // Identical Element types. + {"H", ""}, // Element type int has differing underlying type to float32. + {"I", "chan<- int"}, // SendRecv followed by SendOnly + {"J", "chan<- int"}, // SendOnly followed by SendRecv + {"K", "<-chan int"}, // RecvOnly followed by SendRecv + {"L", "<-chan int"}, // SendRecv followed by RecvOnly + {"M", ""}, // Element type int is not *identical* to Named. + {"N", ""}, // Differing channel directions + {"O", ""}, // A channel followed by a non-channel. + {"P", "struct{P.Named}"}, // Embedded type. + {"Q", ""}, // interface type with no terms and functions + {"R", "int"}, // interface type with both terms and functions. + {"S", "int"}, // interface type with a tilde term + {"T", "<-chan int"}, // Prefix of 2 terms that are identical before switching to channel. + } { + // Eval() expr for its type. + tv, err := types.Eval(fset, pkg, 0, test.expr) + if err != nil { + t.Fatalf("Eval(%s) failed: %v", test.expr, err) + } + + ct := typeparams.CoreType(tv.Type) + var got string + if ct == nil { + got = "" + } else { + got = ct.String() + } + if got != test.want { + t.Errorf("CoreType(%s) = %v, want %v", test.expr, got, test.want) + } + } +} diff --git a/go/ssa/create.go b/go/ssa/create.go index 345d9acfbbd..ccb20e79683 100644 --- a/go/ssa/create.go +++ b/go/ssa/create.go @@ -91,37 +91,31 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { } // Collect type parameters if this is a generic function/method. - var tparams []*typeparams.TypeParam - for i, rtparams := 0, typeparams.RecvTypeParams(sig); i < rtparams.Len(); i++ { - tparams = append(tparams, rtparams.At(i)) - } - for i, sigparams := 0, typeparams.ForSignature(sig); i < sigparams.Len(); i++ { - tparams = append(tparams, sigparams.At(i)) + var tparams *typeparams.TypeParamList + if rtparams := typeparams.RecvTypeParams(sig); rtparams.Len() > 0 { + tparams = rtparams + } else if sigparams := typeparams.ForSignature(sig); sigparams.Len() > 0 { + tparams = sigparams } fn := &Function{ - name: name, - object: obj, - Signature: sig, - syntax: syntax, - pos: obj.Pos(), - Pkg: pkg, - Prog: pkg.Prog, - _TypeParams: tparams, - info: pkg.info, + name: name, + object: obj, + Signature: sig, + syntax: syntax, + pos: obj.Pos(), + Pkg: pkg, + Prog: pkg.Prog, + typeparams: tparams, + info: pkg.info, } pkg.created.Add(fn) if syntax == nil { fn.Synthetic = "loaded from gc object file" } - if len(tparams) > 0 { + if tparams.Len() > 0 { fn.Prog.createInstanceSet(fn) } - if len(tparams) > 0 && syntax != nil { - fn.Synthetic = "generic function" - // TODO(taking): Allow for the function to be built once type params are supported. - fn.syntax = nil // Treating as an external function temporarily. - } pkg.objects[obj] = fn if sig.Recv() == nil { diff --git a/go/ssa/doc.go b/go/ssa/doc.go index 13d02413a67..afda476b369 100644 --- a/go/ssa/doc.go +++ b/go/ssa/doc.go @@ -66,6 +66,7 @@ // *FieldAddr āœ” āœ” // *FreeVar āœ” // *Function āœ” āœ” (func) +// *GenericConvert āœ” āœ” // *Global āœ” āœ” (var) // *Go āœ” // *If āœ” diff --git a/go/ssa/dom.go b/go/ssa/dom.go index ce2473cafce..66a2f5e6ed3 100644 --- a/go/ssa/dom.go +++ b/go/ssa/dom.go @@ -303,7 +303,7 @@ func sanityCheckDomTree(f *Function) { // Printing functions ---------------------------------------- -// printDomTree prints the dominator tree as text, using indentation. +// printDomTreeText prints the dominator tree as text, using indentation. func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) for _, child := range v.dom.children { diff --git a/go/ssa/emit.go b/go/ssa/emit.go index fb11c3558d3..1731c797506 100644 --- a/go/ssa/emit.go +++ b/go/ssa/emit.go @@ -11,6 +11,8 @@ import ( "go/ast" "go/token" "go/types" + + "golang.org/x/tools/internal/typeparams" ) // emitNew emits to f a new (heap Alloc) instruction allocating an @@ -27,7 +29,7 @@ func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { // new temporary, and returns the value so defined. func emitLoad(f *Function, addr Value) *UnOp { v := &UnOp{Op: token.MUL, X: addr} - v.setType(deref(addr.Type())) + v.setType(deref(typeparams.CoreType(addr.Type()))) f.emit(v) return v } @@ -121,9 +123,9 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { if types.Identical(xt, yt) { // no conversion necessary - } else if _, ok := xt.(*types.Interface); ok { + } else if isNonTypeParamInterface(x.Type()) { y = emitConv(f, y, x.Type()) - } else if _, ok := yt.(*types.Interface); ok { + } else if isNonTypeParamInterface(y.Type()) { x = emitConv(f, x, y.Type()) } else if _, ok := x.(*Const); ok { x = emitConv(f, x, y.Type()) @@ -177,21 +179,20 @@ func emitConv(f *Function, val Value, typ types.Type) Value { if types.Identical(t_src, typ) { return val } - ut_dst := typ.Underlying() ut_src := t_src.Underlying() - // Just a change of type, but not value or representation? - if isValuePreserving(ut_src, ut_dst) { - c := &ChangeType{X: val} - c.setType(typ) - return f.emit(c) - } - // Conversion to, or construction of a value of, an interface type? - if _, ok := ut_dst.(*types.Interface); ok { + if isNonTypeParamInterface(typ) { + // Interface name change? + if isValuePreserving(ut_src, ut_dst) { + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + } + // Assignment from one interface type to another? - if _, ok := ut_src.(*types.Interface); ok { + if isNonTypeParamInterface(t_src) { c := &ChangeInterface{X: val} c.setType(typ) return f.emit(c) @@ -199,7 +200,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value { // Untyped nil constant? Return interface-typed nil constant. if ut_src == tUntypedNil { - return nilConst(typ) + return zeroConst(typ) } // Convert (non-nil) "untyped" literals to their default type. @@ -212,9 +213,83 @@ func emitConv(f *Function, val Value, typ types.Type) Value { return f.emit(mi) } + // In the common case, the typesets of src and dst are singletons + // and we emit an appropriate conversion. But if either contains + // a type parameter, the conversion may represent a cross product, + // in which case which we emit a MultiConvert. + dst_terms := typeSetOf(ut_dst) + src_terms := typeSetOf(ut_src) + + // conversionCase describes an instruction pattern that maybe emitted to + // model d <- s for d in dst_terms and s in src_terms. + // Multiple conversions can match the same pattern. + type conversionCase uint8 + const ( + changeType conversionCase = 1 << iota + sliceToArray + sliceToArrayPtr + sliceTo0Array + sliceTo0ArrayPtr + convert + ) + classify := func(s, d types.Type) conversionCase { + // Just a change of type, but not value or representation? + if isValuePreserving(s, d) { + return changeType + } + + // Conversion from slice to array or slice to array pointer? + if slice, ok := s.(*types.Slice); ok { + var arr *types.Array + var ptr bool + // Conversion from slice to array pointer? + switch d := d.(type) { + case *types.Array: + arr = d + case *types.Pointer: + arr, _ = d.Elem().Underlying().(*types.Array) + ptr = true + } + if arr != nil && types.Identical(slice.Elem(), arr.Elem()) { + if arr.Len() == 0 { + if ptr { + return sliceTo0ArrayPtr + } else { + return sliceTo0Array + } + } + if ptr { + return sliceToArrayPtr + } else { + return sliceToArray + } + } + } + + // The only remaining case in well-typed code is a representation- + // changing conversion of basic types (possibly with []byte/[]rune). + if !isBasic(s) && !isBasic(d) { + panic(fmt.Sprintf("in %s: cannot convert term %s (%s [within %s]) to type %s [within %s]", f, val, val.Type(), s, typ, d)) + } + return convert + } + + var classifications conversionCase + for _, s := range src_terms { + us := s.Type().Underlying() + for _, d := range dst_terms { + ud := d.Type().Underlying() + classifications |= classify(us, ud) + } + } + if classifications == 0 { + panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) + } + // Conversion of a compile-time constant value? if c, ok := val.(*Const); ok { - if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { + // Conversion to a basic type? + if isBasic(ut_dst) { // Conversion of a compile-time constant to // another constant type results in a new // constant of the destination type and @@ -222,33 +297,76 @@ func emitConv(f *Function, val Value, typ types.Type) Value { // We don't truncate the value yet. return NewConst(c.Value, typ) } + // Can we always convert from zero value without panicking? + const mayPanic = sliceToArray | sliceToArrayPtr + if c.Value == nil && classifications&mayPanic == 0 { + return NewConst(nil, typ) + } // We're converting from constant to non-constant type, // e.g. string -> []byte/[]rune. } - // Conversion from slice to array pointer? - if slice, ok := ut_src.(*types.Slice); ok { - if ptr, ok := ut_dst.(*types.Pointer); ok { - if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { - c := &SliceToArrayPointer{X: val} - c.setType(ut_dst) - return f.emit(c) - } - } - } - // A representation-changing conversion? - // At least one of {ut_src,ut_dst} must be *Basic. - // (The other may be []byte or []rune.) - _, ok1 := ut_src.(*types.Basic) - _, ok2 := ut_dst.(*types.Basic) - if ok1 || ok2 { + switch classifications { + case changeType: // representation-preserving change + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + + case sliceToArrayPtr, sliceTo0ArrayPtr: // slice to array pointer + c := &SliceToArrayPointer{X: val} + c.setType(typ) + return f.emit(c) + + case sliceToArray: // slice to arrays (not zero-length) + ptype := types.NewPointer(typ) + p := &SliceToArrayPointer{X: val} + p.setType(ptype) + x := f.emit(p) + unOp := &UnOp{Op: token.MUL, X: x} + unOp.setType(typ) + return f.emit(unOp) + + case sliceTo0Array: // slice to zero-length arrays (constant) + return zeroConst(typ) + + case convert: // representation-changing conversion c := &Convert{X: val} c.setType(typ) return f.emit(c) + + default: // multiple conversion + c := &MultiConvert{X: val, from: src_terms, to: dst_terms} + c.setType(typ) + return f.emit(c) } +} - panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) +// emitTypeCoercion emits to f code to coerce the type of a +// Value v to exactly type typ, and returns the coerced value. +// +// Requires that coercing v.Typ() to typ is a value preserving change. +// +// Currently used only when v.Type() is a type instance of typ or vice versa. +// A type v is a type instance of a type t if there exists a +// type parameter substitution σ s.t. σ(v) == t. Example: +// +// σ(func(T) T) == func(int) int for σ == [T ↦ int] +// +// This happens in instantiation wrappers for conversion +// from an instantiation to a parameterized type (and vice versa) +// with σ substituting f.typeparams by f.typeargs. +func emitTypeCoercion(f *Function, v Value, typ types.Type) Value { + if types.Identical(v.Type(), typ) { + return v // no coercion needed + } + // TODO(taking): for instances should we record which side is the instance? + c := &ChangeType{ + X: v, + } + c.setType(typ) + f.emit(c) + return c } // emitStore emits to f an instruction to store value val at location @@ -359,7 +477,7 @@ func emitTailCall(f *Function, call *Call) { // value of a field. func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) if isPointer(v.Type()) { instr := &FieldAddr{ @@ -393,7 +511,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) if isPointer(v.Type()) { instr := &FieldAddr{ X: v, diff --git a/go/ssa/example_test.go b/go/ssa/example_test.go index 492a02f766e..9a5fd436928 100644 --- a/go/ssa/example_test.go +++ b/go/ssa/example_test.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !android && !ios && !js +// +build !android,!ios,!js + package ssa_test import ( diff --git a/go/ssa/func.go b/go/ssa/func.go index c598ff836d3..57f5f718f73 100644 --- a/go/ssa/func.go +++ b/go/ssa/func.go @@ -251,7 +251,10 @@ func buildReferrers(f *Function) { } // mayNeedRuntimeTypes returns all of the types in the body of fn that might need runtime types. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) func mayNeedRuntimeTypes(fn *Function) []types.Type { + // Collect all types that may need rtypes, i.e. those that flow into an interface. var ts []types.Type for _, bb := range fn.Blocks { for _, instr := range bb.Instrs { @@ -260,7 +263,21 @@ func mayNeedRuntimeTypes(fn *Function) []types.Type { } } } - return ts + + // Types that contain a parameterized type are considered to not be runtime types. + if fn.typeparams.Len() == 0 { + return ts // No potentially parameterized types. + } + // Filter parameterized types, in place. + fn.Prog.methodsMu.Lock() + defer fn.Prog.methodsMu.Unlock() + filtered := ts[:0] + for _, t := range ts { + if !fn.Prog.parameterized.isParameterized(t) { + filtered = append(filtered, t) + } + } + return filtered } // finishBody() finalizes the contents of the function after SSA code generation of its body. @@ -518,8 +535,8 @@ func (fn *Function) declaredPackage() *Package { switch { case fn.Pkg != nil: return fn.Pkg // non-generic function - case fn._Origin != nil: - return fn._Origin.Pkg // instance of a named generic function + case fn.topLevelOrigin != nil: + return fn.topLevelOrigin.Pkg // instance of a named generic function case fn.parent != nil: return fn.parent.declaredPackage() // instance of an anonymous [generic] function default: diff --git a/go/ssa/instantiate.go b/go/ssa/instantiate.go index 049b53487d5..f6b2533f24b 100644 --- a/go/ssa/instantiate.go +++ b/go/ssa/instantiate.go @@ -12,13 +12,13 @@ import ( "golang.org/x/tools/internal/typeparams" ) -// Instances returns all of the instances generated by runtime types for this function in an unspecified order. +// _Instances returns all of the instances generated by runtime types for this function in an unspecified order. // // Thread-safe. // // This is an experimental interface! It may change without warning. func (prog *Program) _Instances(fn *Function) []*Function { - if len(fn._TypeParams) == 0 { + if fn.typeparams.Len() == 0 || len(fn.typeargs) > 0 { return nil } @@ -29,7 +29,7 @@ func (prog *Program) _Instances(fn *Function) []*Function { // A set of instantiations of a generic function fn. type instanceSet struct { - fn *Function // len(fn._TypeParams) > 0 and len(fn._TypeArgs) == 0. + fn *Function // fn.typeparams.Len() > 0 and len(fn.typeargs) == 0. instances map[*typeList]*Function // canonical type arguments to an instance. syntax *ast.FuncDecl // fn.syntax copy for instantiating after fn is done. nil on synthetic packages. info *types.Info // fn.pkg.info copy for building after fn is done.. nil on synthetic packages. @@ -56,7 +56,7 @@ func (insts *instanceSet) list() []*Function { // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu) func (prog *Program) createInstanceSet(fn *Function) { - assert(len(fn._TypeParams) > 0 && len(fn._TypeArgs) == 0, "Can only create instance sets for generic functions") + assert(fn.typeparams.Len() > 0 && len(fn.typeargs) == 0, "Can only create instance sets for generic functions") prog.methodsMu.Lock() defer prog.methodsMu.Unlock() @@ -73,7 +73,7 @@ func (prog *Program) createInstanceSet(fn *Function) { } } -// needsInstance returns an Function that that is the instantiation of fn with the type arguments targs. +// needsInstance returns a Function that is the instantiation of fn with the type arguments targs. // // Any CREATEd instance is added to cr. // @@ -82,41 +82,45 @@ func (prog *Program) needsInstance(fn *Function, targs []types.Type, cr *creator prog.methodsMu.Lock() defer prog.methodsMu.Unlock() - return prog.instances[fn].lookupOrCreate(targs, cr) + return prog.lookupOrCreateInstance(fn, targs, cr) +} + +// lookupOrCreateInstance returns a Function that is the instantiation of fn with the type arguments targs. +// +// Any CREATEd instance is added to cr. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodMu) +func (prog *Program) lookupOrCreateInstance(fn *Function, targs []types.Type, cr *creator) *Function { + return prog.instances[fn].lookupOrCreate(targs, &prog.parameterized, cr) } // lookupOrCreate returns the instantiation of insts.fn using targs. -// If the instantiation is reported, this is added to cr. -func (insts *instanceSet) lookupOrCreate(targs []types.Type, cr *creator) *Function { +// If the instantiation is created, this is added to cr. +func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWalker, cr *creator) *Function { if insts.instances == nil { insts.instances = make(map[*typeList]*Function) } + fn := insts.fn + prog := fn.Prog + // canonicalize on a tuple of targs. Sig is not unique. // // func A[T any]() { // var x T // fmt.Println("%T", x) // } - key := insts.fn.Prog.canon.List(targs) + key := prog.canon.List(targs) if inst, ok := insts.instances[key]; ok { return inst } + // CREATE instance/instantiation wrapper var syntax ast.Node if insts.syntax != nil { syntax = insts.syntax } - instance := createInstance(insts.fn, targs, insts.info, syntax, cr) - insts.instances[key] = instance - return instance -} -// createInstance returns an CREATEd instantiation of fn using targs. -// -// Function is added to cr. -func createInstance(fn *Function, targs []types.Type, info *types.Info, syntax ast.Node, cr *creator) *Function { - prog := fn.Prog var sig *types.Signature var obj *types.Func if recv := fn.Signature.Recv(); recv != nil { @@ -137,25 +141,36 @@ func createInstance(fn *Function, targs []types.Type, info *types.Info, syntax a sig = prog.canon.Type(instance).(*types.Signature) } + var synthetic string + var subst *subster + + concrete := !parameterized.anyParameterized(targs) + + if prog.mode&InstantiateGenerics != 0 && concrete { + synthetic = fmt.Sprintf("instance of %s", fn.Name()) + subst = makeSubster(prog.ctxt, fn.typeparams, targs, false) + } else { + synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name()) + } + name := fmt.Sprintf("%s%s", fn.Name(), targs) // may not be unique - synthetic := fmt.Sprintf("instantiation of %s", fn.Name()) instance := &Function{ - name: name, - object: obj, - Signature: sig, - Synthetic: synthetic, - _Origin: fn, - pos: obj.Pos(), - Pkg: nil, - Prog: fn.Prog, - _TypeParams: fn._TypeParams, - _TypeArgs: targs, - info: info, // on synthetic packages info is nil. - subst: makeSubster(prog.ctxt, fn._TypeParams, targs, false), - } - if prog.mode&InstantiateGenerics != 0 { - instance.syntax = syntax // otherwise treat instance as an external function. + name: name, + object: obj, + Signature: sig, + Synthetic: synthetic, + syntax: syntax, + topLevelOrigin: fn, + pos: obj.Pos(), + Pkg: nil, + Prog: fn.Prog, + typeparams: fn.typeparams, // share with origin + typeargs: targs, + info: insts.info, // on synthetic packages info is nil. + subst: subst, } + cr.Add(instance) + insts.instances[key] = instance return instance } diff --git a/go/ssa/instantiate_test.go b/go/ssa/instantiate_test.go index 0da8c63042e..cd33e7e659e 100644 --- a/go/ssa/instantiate_test.go +++ b/go/ssa/instantiate_test.go @@ -4,19 +4,52 @@ package ssa -// Note: Tests use unexported functions. +// Note: Tests use unexported method _Instances. import ( "bytes" + "fmt" "go/types" "reflect" "sort" + "strings" "testing" "golang.org/x/tools/go/loader" "golang.org/x/tools/internal/typeparams" ) +// loadProgram creates loader.Program out of p. +func loadProgram(p string) (*loader.Program, error) { + // Parse + var conf loader.Config + f, err := conf.ParseFile("", p) + if err != nil { + return nil, fmt.Errorf("parse: %v", err) + } + conf.CreateFromFiles("p", f) + + // Load + lprog, err := conf.Load() + if err != nil { + return nil, fmt.Errorf("Load: %v", err) + } + return lprog, nil +} + +// buildPackage builds and returns ssa representation of package pkg of lprog. +func buildPackage(lprog *loader.Program, pkg string, mode BuilderMode) *Package { + prog := NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + + p := prog.Package(lprog.Package(pkg).Pkg) + p.Build() + return p +} + // TestNeedsInstance ensures that new method instances can be created via needsInstance, // that TypeArgs are as expected, and can be accessed via _Instances. func TestNeedsInstance(t *testing.T) { @@ -45,30 +78,15 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) // func init func() // var init$guard bool - // Parse - var conf loader.Config - f, err := conf.ParseFile("", input) - if err != nil { - t.Fatalf("parse: %v", err) - } - conf.CreateFromFiles("p", f) - - // Load - lprog, err := conf.Load() - if err != nil { - t.Fatalf("Load: %v", err) + lprog, err := loadProgram(input) + if err != err { + t.Fatal(err) } for _, mode := range []BuilderMode{BuilderMode(0), InstantiateGenerics} { // Create and build SSA - prog := NewProgram(lprog.Fset, mode) - - for _, info := range lprog.AllPackages { - prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) - } - - p := prog.Package(lprog.Package("p").Pkg) - p.Build() + p := buildPackage(lprog, "p", mode) + prog := p.Prog ptr := p.Type("Pointer").Type().(*types.Named) if ptr.NumMethods() != 1 { @@ -88,11 +106,11 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) if len(cr) != 1 { t.Errorf("Expected first instance to create a function. got %d created functions", len(cr)) } - if instance._Origin != meth { - t.Errorf("Expected Origin of %s to be %s. got %s", instance, meth, instance._Origin) + if instance.Origin() != meth { + t.Errorf("Expected Origin of %s to be %s. got %s", instance, meth, instance.Origin()) } - if len(instance._TypeArgs) != 1 || !types.Identical(instance._TypeArgs[0], intSliceTyp) { - t.Errorf("Expected TypeArgs of %s to be %v. got %v", instance, []types.Type{intSliceTyp}, instance._TypeArgs) + if len(instance.TypeArgs()) != 1 || !types.Identical(instance.TypeArgs()[0], intSliceTyp) { + t.Errorf("Expected TypeArgs of %s to be %v. got %v", instance, []types.Type{intSliceTyp}, instance.typeargs) } instances := prog._Instances(meth) if want := []*Function{instance}; !reflect.DeepEqual(instances, want) { @@ -126,3 +144,218 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) } } } + +// TestCallsToInstances checks that calles of calls to generic functions, +// without monomorphization, are wrappers around the origin generic function. +func TestCallsToInstances(t *testing.T) { + if !typeparams.Enabled { + return + } + const input = ` +package p + +type I interface { + Foo() +} + +type A int +func (a A) Foo() {} + +type J[T any] interface{ Bar() T } +type K[T any] struct{ J[T] } + +func Id[T any] (t T) T { + return t +} + +func Lambda[T I]() func() func(T) { + return func() func(T) { + return T.Foo + } +} + +func NoOp[T any]() {} + +func Bar[T interface { Foo(); ~int | ~string }, U any] (t T, u U) { + Id[U](u) + Id[T](t) +} + +func Make[T any]() interface{} { + NoOp[K[T]]() + return nil +} + +func entry(i int, a A) int { + Lambda[A]()()(a) + + x := Make[int]() + if j, ok := x.(interface{ Bar() int }); ok { + print(j) + } + + Bar[A, int](a, i) + + return Id[int](i) +} +` + lprog, err := loadProgram(input) + if err != err { + t.Fatal(err) + } + + p := buildPackage(lprog, "p", SanityCheckFunctions) + prog := p.Prog + + for _, ti := range []struct { + orig string + instance string + tparams string + targs string + chTypeInstrs int // number of ChangeType instructions in f's body + }{ + {"Id", "Id[int]", "[T]", "[int]", 2}, + {"Lambda", "Lambda[p.A]", "[T]", "[p.A]", 1}, + {"Make", "Make[int]", "[T]", "[int]", 0}, + {"NoOp", "NoOp[p.K[T]]", "[T]", "[p.K[T]]", 0}, + } { + test := ti + t.Run(test.instance, func(t *testing.T) { + f := p.Members[test.orig].(*Function) + if f == nil { + t.Fatalf("origin function not found") + } + + i := instanceOf(f, test.instance, prog) + if i == nil { + t.Fatalf("instance not found") + } + + // for logging on failures + var body strings.Builder + i.WriteTo(&body) + t.Log(body.String()) + + if len(i.Blocks) != 1 { + t.Fatalf("body has more than 1 block") + } + + if instrs := changeTypeInstrs(i.Blocks[0]); instrs != test.chTypeInstrs { + t.Errorf("want %v instructions; got %v", test.chTypeInstrs, instrs) + } + + if test.tparams != tparams(i) { + t.Errorf("want %v type params; got %v", test.tparams, tparams(i)) + } + + if test.targs != targs(i) { + t.Errorf("want %v type arguments; got %v", test.targs, targs(i)) + } + }) + } +} + +func instanceOf(f *Function, name string, prog *Program) *Function { + for _, i := range prog._Instances(f) { + if i.Name() == name { + return i + } + } + return nil +} + +func tparams(f *Function) string { + tplist := f.TypeParams() + var tps []string + for i := 0; i < tplist.Len(); i++ { + tps = append(tps, tplist.At(i).String()) + } + return fmt.Sprint(tps) +} + +func targs(f *Function) string { + var tas []string + for _, ta := range f.TypeArgs() { + tas = append(tas, ta.String()) + } + return fmt.Sprint(tas) +} + +func changeTypeInstrs(b *BasicBlock) int { + cnt := 0 + for _, i := range b.Instrs { + if _, ok := i.(*ChangeType); ok { + cnt++ + } + } + return cnt +} + +func TestInstanceUniqueness(t *testing.T) { + if !typeparams.Enabled { + return + } + const input = ` +package p + +func H[T any](t T) { + print(t) +} + +func F[T any](t T) { + H[T](t) + H[T](t) + H[T](t) +} + +func G[T any](t T) { + H[T](t) + H[T](t) +} + +func Foo[T any, S any](t T, s S) { + Foo[S, T](s, t) + Foo[T, S](t, s) +} +` + lprog, err := loadProgram(input) + if err != err { + t.Fatal(err) + } + + p := buildPackage(lprog, "p", SanityCheckFunctions) + prog := p.Prog + + for _, test := range []struct { + orig string + instances string + }{ + {"H", "[p.H[T] p.H[T]]"}, + {"Foo", "[p.Foo[S T] p.Foo[T S]]"}, + } { + t.Run(test.orig, func(t *testing.T) { + f := p.Members[test.orig].(*Function) + if f == nil { + t.Fatalf("origin function not found") + } + + instances := prog._Instances(f) + sort.Slice(instances, func(i, j int) bool { return instances[i].Name() < instances[j].Name() }) + + if got := fmt.Sprintf("%v", instances); !reflect.DeepEqual(got, test.instances) { + t.Errorf("got %v instances, want %v", got, test.instances) + } + }) + } +} + +// instancesStr returns a sorted slice of string +// representation of instances. +func instancesStr(instances []*Function) []string { + var is []string + for _, i := range instances { + is = append(is, fmt.Sprintf("%v", i)) + } + sort.Strings(is) + return is +} diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go index 2b21aad708b..58cac464241 100644 --- a/go/ssa/interp/interp.go +++ b/go/ssa/interp/interp.go @@ -51,7 +51,6 @@ import ( "os" "reflect" "runtime" - "strings" "sync/atomic" "golang.org/x/tools/go/ssa" @@ -335,7 +334,17 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation { } case *ssa.Index: - fr.env[instr] = fr.get(instr.X).(array)[asInt64(fr.get(instr.Index))] + x := fr.get(instr.X) + idx := fr.get(instr.Index) + + switch x := x.(type) { + case array: + fr.env[instr] = x[asInt64(idx)] + case string: + fr.env[instr] = x[asInt64(idx)] + default: + panic(fmt.Sprintf("unexpected x type in Index: %T", x)) + } case *ssa.Lookup: fr.env[instr] = lookup(instr, fr.get(instr.X), fr.get(instr.Index)) @@ -506,13 +515,15 @@ func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function, return ext(fr, args) } if fn.Blocks == nil { - var reason string // empty by default - if strings.HasPrefix(fn.Synthetic, "instantiation") { - reason = " (interp requires ssa.BuilderMode to include InstantiateGenerics on generics)" - } - panic("no code for function: " + name + reason) + panic("no code for function: " + name) } } + + // generic function body? + if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 { + panic("interp requires ssa.BuilderMode to include InstantiateGenerics to execute generics") + } + fr.env = make(map[ssa.Value]value) fr.block = fn.Blocks[0] fr.locals = make([]value, len(fn.Locals)) diff --git a/go/ssa/interp/interp_go120_test.go b/go/ssa/interp/interp_go120_test.go new file mode 100644 index 00000000000..d8eb2c21341 --- /dev/null +++ b/go/ssa/interp/interp_go120_test.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package interp_test + +func init() { + testdataTests = append(testdataTests, "slice2array.go") +} diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go index a0acf2f968a..c893d83e753 100644 --- a/go/ssa/interp/interp_test.go +++ b/go/ssa/interp/interp_test.go @@ -132,6 +132,9 @@ var testdataTests = []string{ func init() { if typeparams.Enabled { testdataTests = append(testdataTests, "fixedbugs/issue52835.go") + testdataTests = append(testdataTests, "fixedbugs/issue55086.go") + testdataTests = append(testdataTests, "typeassert.go") + testdataTests = append(testdataTests, "zeros.go") } } diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go index 8f031384f03..39830bc8fcb 100644 --- a/go/ssa/interp/ops.go +++ b/go/ssa/interp/ops.go @@ -34,9 +34,10 @@ type exitPanic int // constValue returns the value of the constant with the // dynamic type tag appropriate for c.Type(). func constValue(c *ssa.Const) value { - if c.IsNil() { - return zero(c.Type()) // typed nil + if c.Value == nil { + return zero(c.Type()) // typed zero } + // c is not a type parameter so it's underlying type is basic. if t, ok := c.Type().Underlying().(*types.Basic); ok { // TODO(adonovan): eliminate untyped constants from SSA form. @@ -307,7 +308,7 @@ func slice(x, lo, hi, max value) value { panic(fmt.Sprintf("slice: unexpected X type: %T", x)) } -// lookup returns x[idx] where x is a map or string. +// lookup returns x[idx] where x is a map. func lookup(instr *ssa.Lookup, x, idx value) value { switch x := x.(type) { // map or string case map[value]value, *hashmap: @@ -327,8 +328,6 @@ func lookup(instr *ssa.Lookup, x, idx value) value { v = tuple{v, ok} } return v - case string: - return x[asInt64(idx)] } panic(fmt.Sprintf("unexpected x type in Lookup: %T", x)) } @@ -933,6 +932,8 @@ func typeAssert(i *interpreter, instr *ssa.TypeAssert, itf iface) value { } else { err = fmt.Sprintf("interface conversion: interface is %s, not %s", itf.t, instr.AssertedType) } + // Note: if instr.Underlying==true ever becomes reachable from interp check that + // types.Identical(itf.t.Underlying(), instr.AssertedType) if err != "" { if !instr.CommaOk { @@ -1396,18 +1397,15 @@ func conv(t_dst, t_src types.Type, x value) value { // sliceToArrayPointer converts the value x of type slice to type t_dst // a pointer to array and returns the result. func sliceToArrayPointer(t_dst, t_src types.Type, x value) value { - utSrc := t_src.Underlying() - utDst := t_dst.Underlying() - - if _, ok := utSrc.(*types.Slice); ok { - if utSrc, ok := utDst.(*types.Pointer); ok { - if arr, ok := utSrc.Elem().(*types.Array); ok { + if _, ok := t_src.Underlying().(*types.Slice); ok { + if ptr, ok := t_dst.Underlying().(*types.Pointer); ok { + if arr, ok := ptr.Elem().Underlying().(*types.Array); ok { x := x.([]value) if arr.Len() > int64(len(x)) { panic("array length is greater than slice length") } if x == nil { - return zero(utSrc) + return zero(t_dst) } v := value(array(x[:arr.Len()])) return &v diff --git a/go/ssa/interp/testdata/boundmeth.go b/go/ssa/interp/testdata/boundmeth.go index 69937f9d3c7..47b94068591 100644 --- a/go/ssa/interp/testdata/boundmeth.go +++ b/go/ssa/interp/testdata/boundmeth.go @@ -123,7 +123,8 @@ func nilInterfaceMethodValue() { r := fmt.Sprint(recover()) // runtime panic string varies across toolchains if r != "interface conversion: interface is nil, not error" && - r != "runtime error: invalid memory address or nil pointer dereference" { + r != "runtime error: invalid memory address or nil pointer dereference" && + r != "method value: interface is nil" { panic("want runtime panic from nil interface method value, got " + r) } }() diff --git a/go/ssa/interp/testdata/fixedbugs/issue55086.go b/go/ssa/interp/testdata/fixedbugs/issue55086.go new file mode 100644 index 00000000000..84c81e91a26 --- /dev/null +++ b/go/ssa/interp/testdata/fixedbugs/issue55086.go @@ -0,0 +1,132 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func a() (r string) { + s := "initial" + var p *struct{ i int } + defer func() { + recover() + r = s + }() + + s, p.i = "set", 2 // s must be set before p.i panics + return "unreachable" +} + +func b() (r string) { + s := "initial" + fn := func() []int { panic("") } + defer func() { + recover() + r = s + }() + + s, fn()[0] = "set", 2 // fn() panics before any assignment occurs + return "unreachable" +} + +func c() (r string) { + s := "initial" + var p map[int]int + defer func() { + recover() + r = s + }() + + s, p[0] = "set", 2 //s must be set before p[0] index panics" + return "unreachable" +} + +func d() (r string) { + s := "initial" + var p map[int]int + defer func() { + recover() + r = s + }() + fn := func() int { panic("") } + + s, p[0] = "set", fn() // fn() panics before s is set + return "unreachable" +} + +func e() (r string) { + s := "initial" + p := map[int]int{} + defer func() { + recover() + r = s + }() + fn := func() int { panic("") } + + s, p[fn()] = "set", 0 // fn() panics before any assignment occurs + return "unreachable" +} + +func f() (r string) { + s := "initial" + p := []int{} + defer func() { + recover() + r = s + }() + + s, p[1] = "set", 0 // p[1] panics after s is set + return "unreachable" +} + +func g() (r string) { + s := "initial" + p := map[any]any{} + defer func() { + recover() + r = s + }() + var i any = func() {} + s, p[i] = "set", 0 // p[i] panics after s is set + return "unreachable" +} + +func h() (r string) { + fail := false + defer func() { + recover() + if fail { + r = "fail" + } else { + r = "success" + } + }() + + type T struct{ f int } + var p *struct{ *T } + + // The implicit "p.T" operand should be evaluated in phase 1 (and panic), + // before the "fail = true" assignment in phase 2. + fail, p.f = true, 0 + return "unreachable" +} + +func main() { + for _, test := range []struct { + fn func() string + want string + desc string + }{ + {a, "set", "s must be set before p.i panics"}, + {b, "initial", "p() panics before s is set"}, + {c, "set", "s must be set before p[0] index panics"}, + {d, "initial", "fn() panics before s is set"}, + {e, "initial", "fn() panics before s is set"}, + {f, "set", "p[1] panics after s is set"}, + {g, "set", "p[i] panics after s is set"}, + {h, "success", "p.T panics before fail is set"}, + } { + if test.fn() != test.want { + panic(test.desc) + } + } +} diff --git a/go/ssa/interp/testdata/slice2array.go b/go/ssa/interp/testdata/slice2array.go new file mode 100644 index 00000000000..84e6b733008 --- /dev/null +++ b/go/ssa/interp/testdata/slice2array.go @@ -0,0 +1,92 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test for slice to array conversion introduced in go1.20 +// See: https://tip.golang.org/ref/spec#Conversions_from_slice_to_array_pointer + +package main + +func main() { + s := make([]byte, 3, 4) + s[0], s[1], s[2] = 2, 3, 5 + a := ([2]byte)(s) + s[0] = 7 + + if a != [2]byte{2, 3} { + panic("converted from non-nil slice to array") + } + + { + var s []int + a := ([0]int)(s) + if a != [0]int{} { + panic("zero len array is not equal") + } + } + + if emptyToEmptyDoesNotPanic() { + panic("no panic expected from emptyToEmptyDoesNotPanic()") + } + if !threeToFourDoesPanic() { + panic("panic expected from threeToFourDoesPanic()") + } + + if !fourPanicsWhileOneDoesNot[[4]int]() { + panic("panic expected from fourPanicsWhileOneDoesNot[[4]int]()") + } + if fourPanicsWhileOneDoesNot[[1]int]() { + panic("no panic expected from fourPanicsWhileOneDoesNot[[1]int]()") + } + + if !fourPanicsWhileZeroDoesNot[[4]int]() { + panic("panic expected from fourPanicsWhileZeroDoesNot[[4]int]()") + } + if fourPanicsWhileZeroDoesNot[[0]int]() { + panic("no panic expected from fourPanicsWhileZeroDoesNot[[0]int]()") + } +} + +func emptyToEmptyDoesNotPanic() (raised bool) { + defer func() { + if e := recover(); e != nil { + raised = true + } + }() + var s []int + _ = ([0]int)(s) + return false +} + +func threeToFourDoesPanic() (raised bool) { + defer func() { + if e := recover(); e != nil { + raised = true + } + }() + s := make([]int, 3, 5) + _ = ([4]int)(s) + return false +} + +func fourPanicsWhileOneDoesNot[T [1]int | [4]int]() (raised bool) { + defer func() { + if e := recover(); e != nil { + raised = true + } + }() + s := make([]int, 3, 5) + _ = T(s) + return false +} + +func fourPanicsWhileZeroDoesNot[T [0]int | [4]int]() (raised bool) { + defer func() { + if e := recover(); e != nil { + raised = true + } + }() + var s []int + _ = T(s) + return false +} diff --git a/go/ssa/interp/testdata/slice2arrayptr.go b/go/ssa/interp/testdata/slice2arrayptr.go index ff2d9b55ccd..d9d8804d36a 100644 --- a/go/ssa/interp/testdata/slice2arrayptr.go +++ b/go/ssa/interp/testdata/slice2arrayptr.go @@ -32,6 +32,8 @@ func main() { }, "runtime error: array length is greater than slice length", ) + + f() } type arr [2]int diff --git a/go/ssa/interp/testdata/typeassert.go b/go/ssa/interp/testdata/typeassert.go new file mode 100644 index 00000000000..792a7558f61 --- /dev/null +++ b/go/ssa/interp/testdata/typeassert.go @@ -0,0 +1,32 @@ +// Tests of type asserts. +// Requires type parameters. +package typeassert + +type fooer interface{ foo() string } + +type X int + +func (_ X) foo() string { return "x" } + +func f[T fooer](x T) func() string { + return x.foo +} + +func main() { + if f[X](0)() != "x" { + panic("f[X]() != 'x'") + } + + p := false + func() { + defer func() { + if recover() != nil { + p = true + } + }() + f[fooer](nil) // panics on x.foo when T is an interface and nil. + }() + if !p { + panic("f[fooer] did not panic") + } +} diff --git a/go/ssa/interp/testdata/zeros.go b/go/ssa/interp/testdata/zeros.go new file mode 100644 index 00000000000..509c78a36ec --- /dev/null +++ b/go/ssa/interp/testdata/zeros.go @@ -0,0 +1,45 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test interpretation on zero values with type params. +package zeros + +func assert(cond bool, msg string) { + if !cond { + panic(msg) + } +} + +func tp0[T int | string | float64]() T { return T(0) } + +func tpFalse[T ~bool]() T { return T(false) } + +func tpEmptyString[T string | []byte]() T { return T("") } + +func tpNil[T *int | []byte]() T { return T(nil) } + +func main() { + // zero values + var zi int + var zf float64 + var zs string + + assert(zi == int(0), "zero value of int is int(0)") + assert(zf == float64(0), "zero value of float64 is float64(0)") + assert(zs != string(0), "zero value of string is not string(0)") + + assert(zi == tp0[int](), "zero value of int is int(0)") + assert(zf == tp0[float64](), "zero value of float64 is float64(0)") + assert(zs != tp0[string](), "zero value of string is not string(0)") + + assert(zf == -0.0, "constant -0.0 is converted to 0.0") + + assert(!tpFalse[bool](), "zero value of bool is false") + + assert(tpEmptyString[string]() == zs, `zero value of string is string("")`) + assert(len(tpEmptyString[[]byte]()) == 0, `[]byte("") is empty`) + + assert(tpNil[*int]() == nil, "nil is nil") + assert(tpNil[[]byte]() == nil, "nil is nil") +} diff --git a/go/ssa/lift.go b/go/ssa/lift.go index c350481db76..945536bbbf4 100644 --- a/go/ssa/lift.go +++ b/go/ssa/lift.go @@ -44,6 +44,8 @@ import ( "go/types" "math/big" "os" + + "golang.org/x/tools/internal/typeparams" ) // If true, show diagnostic information at each step of lifting. @@ -381,10 +383,9 @@ type newPhiMap map[*BasicBlock][]newPhi // // fresh is a source of fresh ids for phi nodes. func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { - // Don't lift aggregates into registers, because we don't have - // a way to express their zero-constants. + // TODO(taking): zero constants of aggregated types can now be lifted. switch deref(alloc.Type()).Underlying().(type) { - case *types.Array, *types.Struct: + case *types.Array, *types.Struct, *typeparams.TypeParam: return false } diff --git a/go/ssa/lvalue.go b/go/ssa/lvalue.go index 64262def8b2..51122b8e85e 100644 --- a/go/ssa/lvalue.go +++ b/go/ssa/lvalue.go @@ -56,12 +56,12 @@ func (a *address) typ() types.Type { } // An element is an lvalue represented by m[k], the location of an -// element of a map or string. These locations are not addressable +// element of a map. These locations are not addressable // since pointers cannot be formed from them, but they do support -// load(), and in the case of maps, store(). +// load() and store(). type element struct { - m, k Value // map or string - t types.Type // map element type or string byte type + m, k Value // map + t types.Type // map element type pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) } @@ -86,13 +86,49 @@ func (e *element) store(fn *Function, v Value) { } func (e *element) address(fn *Function) Value { - panic("map/string elements are not addressable") + panic("map elements are not addressable") } func (e *element) typ() types.Type { return e.t } +// A lazyAddress is an lvalue whose address is the result of an instruction. +// These work like an *address except a new address.address() Value +// is created on each load, store and address call. +// A lazyAddress can be used to control when a side effect (nil pointer +// dereference, index out of bounds) of using a location happens. +type lazyAddress struct { + addr func(fn *Function) Value // emit to fn the computation of the address + t types.Type // type of the location + pos token.Pos // source position + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (l *lazyAddress) load(fn *Function) Value { + load := emitLoad(fn, l.addr(fn)) + load.pos = l.pos + return load +} + +func (l *lazyAddress) store(fn *Function, v Value) { + store := emitStore(fn, l.addr(fn), v, l.pos) + if l.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, l.expr, store.Val, false) + } +} + +func (l *lazyAddress) address(fn *Function) Value { + addr := l.addr(fn) + if l.expr != nil { + emitDebugRef(fn, l.expr, addr, true) + } + return addr +} + +func (l *lazyAddress) typ() types.Type { return l.t } + // A blank is a dummy variable whose name is "_". // It is not reified: loads are illegal and stores are ignored. type blank struct{} diff --git a/go/ssa/methods.go b/go/ssa/methods.go index 6954e17b772..4185618cdd6 100644 --- a/go/ssa/methods.go +++ b/go/ssa/methods.go @@ -27,8 +27,8 @@ func (prog *Program) MethodValue(sel *types.Selection) *Function { panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) } T := sel.Recv() - if isInterface(T) { - return nil // abstract method (interface) + if types.IsInterface(T) { + return nil // abstract method (interface, possibly type param) } if prog.mode&LogSource != 0 { defer logStack("MethodValue %s %v", T, sel)() @@ -76,7 +76,7 @@ type methodSet struct { // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) func (prog *Program) createMethodSet(T types.Type) *methodSet { if prog.mode&SanityCheckFunctions != 0 { - if isInterface(T) || prog.parameterized.isParameterized(T) { + if types.IsInterface(T) || prog.parameterized.isParameterized(T) { panic("type is interface or parameterized") } } @@ -107,9 +107,9 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creato fn = makeWrapper(prog, sel, cr) } else { fn = prog.originFunc(obj) - if len(fn._TypeParams) > 0 { // instantiate + if fn.typeparams.Len() > 0 { // instantiate targs := receiverTypeArgs(obj) - fn = prog.instances[fn].lookupOrCreate(targs, cr) + fn = prog.lookupOrCreateInstance(fn, targs, cr) } } if fn.Signature.Recv() == nil { @@ -190,7 +190,7 @@ func (prog *Program) needMethods(T types.Type, skip bool, cr *creator) { tmset := prog.MethodSets.MethodSet(T) - if !skip && !isInterface(T) && tmset.Len() > 0 { + if !skip && !types.IsInterface(T) && tmset.Len() > 0 { // Create methods of T. mset := prog.createMethodSet(T) if !mset.complete { diff --git a/go/ssa/parameterized.go b/go/ssa/parameterized.go index 956718cd723..b11413c8184 100644 --- a/go/ssa/parameterized.go +++ b/go/ssa/parameterized.go @@ -111,3 +111,12 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { return false } + +func (w *tpWalker) anyParameterized(ts []types.Type) bool { + for _, t := range ts { + if w.isParameterized(t) { + return true + } + } + return false +} diff --git a/go/ssa/print.go b/go/ssa/print.go index b8e53923a17..8b783196e49 100644 --- a/go/ssa/print.go +++ b/go/ssa/print.go @@ -17,6 +17,7 @@ import ( "strings" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" ) // relName returns the name of v relative to i. @@ -50,6 +51,14 @@ func relType(t types.Type, from *types.Package) string { return s } +func relTerm(term *typeparams.Term, from *types.Package) string { + s := relType(term.Type(), from) + if term.Tilde() { + return "~" + s + } + return s +} + func relString(m Member, from *types.Package) string { // NB: not all globals have an Object (e.g. init$guard), // so use Package().Object not Object.Package(). @@ -173,6 +182,24 @@ func (v *ChangeInterface) String() string { return printConv("change interfa func (v *SliceToArrayPointer) String() string { return printConv("slice to array pointer", v, v.X) } func (v *MakeInterface) String() string { return printConv("make", v, v.X) } +func (v *MultiConvert) String() string { + from := v.Parent().relPkg() + + var b strings.Builder + b.WriteString(printConv("multiconvert", v, v.X)) + b.WriteString(" [") + for i, s := range v.from { + for j, d := range v.to { + if i != 0 || j != 0 { + b.WriteString(" | ") + } + fmt.Fprintf(&b, "%s <- %s", relTerm(d, from), relTerm(s, from)) + } + } + b.WriteString("]") + return b.String() +} + func (v *MakeClosure) String() string { var b bytes.Buffer fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v)) @@ -232,7 +259,7 @@ func (v *MakeChan) String() string { } func (v *FieldAddr) String() string { - st := deref(v.X.Type()).Underlying().(*types.Struct) + st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct) // Be robust against a bad index. name := "?" if 0 <= v.Field && v.Field < st.NumFields() { @@ -242,7 +269,7 @@ func (v *FieldAddr) String() string { } func (v *Field) String() string { - st := v.X.Type().Underlying().(*types.Struct) + st := typeparams.CoreType(v.X.Type()).(*types.Struct) // Be robust against a bad index. name := "?" if 0 <= v.Field && v.Field < st.NumFields() { diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go index 7d71302756e..88ad374ded0 100644 --- a/go/ssa/sanity.go +++ b/go/ssa/sanity.go @@ -108,6 +108,9 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { for i, e := range instr.Edges { if e == nil { s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) + } else if !types.Identical(instr.typ, e.Type()) { + s.errorf("phi node '%s' has a different type (%s) for edge #%d from %s (%s)", + instr.Comment, instr.Type(), i, s.block.Preds[i], e.Type()) } } } @@ -132,12 +135,12 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *ChangeType: case *SliceToArrayPointer: case *Convert: - if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { - if _, ok := instr.Type().Underlying().(*types.Basic); !ok { - s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) + if from := instr.X.Type(); !isBasicConvTypes(typeSetOf(from)) { + if to := instr.Type(); !isBasicConvTypes(typeSetOf(to)) { + s.errorf("convert %s -> %s: at least one type must be basic (or all basic, []byte, or []rune)", from, to) } } - + case *MultiConvert: case *Defer: case *Extract: case *Field: @@ -403,7 +406,7 @@ func (s *sanity) checkFunction(fn *Function) bool { // - check transient fields are nil // - warn if any fn.Locals do not appear among block instructions. - // TODO(taking): Sanity check _Origin, _TypeParams, and _TypeArgs. + // TODO(taking): Sanity check origin, typeparams, and typeargs. s.fn = fn if fn.Prog == nil { s.errorf("nil Prog") @@ -420,16 +423,19 @@ func (s *sanity) checkFunction(fn *Function) bool { strings.HasPrefix(fn.Synthetic, "bound ") || strings.HasPrefix(fn.Synthetic, "thunk ") || strings.HasSuffix(fn.name, "Error") || - strings.HasPrefix(fn.Synthetic, "instantiation") || - (fn.parent != nil && len(fn._TypeArgs) > 0) /* anon fun in instance */ { + strings.HasPrefix(fn.Synthetic, "instance ") || + strings.HasPrefix(fn.Synthetic, "instantiation ") || + (fn.parent != nil && len(fn.typeargs) > 0) /* anon fun in instance */ { // ok } else { s.errorf("nil Pkg") } } if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { - if strings.HasPrefix(fn.Synthetic, "instantiation") && fn.Prog.mode&InstantiateGenerics != 0 { - // ok + if len(fn.typeargs) > 0 && fn.Prog.mode&InstantiateGenerics != 0 { + // ok (instantiation with InstantiateGenerics on) + } else if fn.topLevelOrigin != nil && len(fn.typeargs) > 0 { + // ok (we always have the syntax set for instantiation) } else { s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) } @@ -494,6 +500,9 @@ func (s *sanity) checkFunction(fn *Function) bool { if anon.Parent() != fn { s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) } + if i != int(anon.anonIdx) { + s.errorf("AnonFuncs[%d]=%s but %s.anonIdx=%d", i, anon, anon, anon.anonIdx) + } } s.fn = nil return !s.insane diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go index cbc638c81a8..5904b817b31 100644 --- a/go/ssa/ssa.go +++ b/go/ssa/ssa.go @@ -294,16 +294,15 @@ type Node interface { // // Type() returns the function's Signature. // -// A function is generic iff it has a non-empty TypeParams list and an -// empty TypeArgs list. TypeParams lists the type parameters of the -// function's Signature or the receiver's type parameters for a method. -// -// The instantiation of a generic function is a concrete function. These -// are a list of n>0 TypeParams and n TypeArgs. An instantiation will -// have a generic Origin function. There is at most one instantiation -// of each origin type per Identical() type list. Instantiations do not -// belong to any Pkg. The generic function and the instantiations will -// share the same source Pos for the functions and the instructions. +// A generic function is a function or method that has uninstantiated type +// parameters (TypeParams() != nil). Consider a hypothetical generic +// method, (*Map[K,V]).Get. It may be instantiated with all ground +// (non-parameterized) types as (*Map[string,int]).Get or with +// parameterized types as (*Map[string,U]).Get, where U is a type parameter. +// In both instantiations, Origin() refers to the instantiated generic +// method, (*Map[K,V]).Get, TypeParams() refers to the parameters [K,V] of +// the generic method. TypeArgs() refers to [string,U] or [string,int], +// respectively, and is nil in the generic method. type Function struct { name string object types.Object // a declared *types.Func or one of its wrappers @@ -324,10 +323,11 @@ type Function struct { AnonFuncs []*Function // anonymous functions directly beneath this one referrers []Instruction // referring instructions (iff Parent() != nil) built bool // function has completed both CREATE and BUILD phase. + anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFunc[fn.anonIdx] == fn. - _Origin *Function // the origin function if this the instantiation of a generic function. nil if Parent() != nil. - _TypeParams []*typeparams.TypeParam // the type paramaters of this function. len(TypeParams) == len(_TypeArgs) => runtime function - _TypeArgs []types.Type // type arguments for for an instantiation. len(_TypeArgs) != 0 => instantiation + typeparams *typeparams.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function + typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function + topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. // The following fields are set transiently during building, // then cleared. @@ -337,7 +337,7 @@ type Function struct { targets *targets // linked stack of branch targets lblocks map[types.Object]*lblock // labelled blocks info *types.Info // *types.Info to build from. nil for wrappers. - subst *subster // type substitution cache + subst *subster // non-nil => expand generic body using this type substitution of ground types } // BasicBlock represents an SSA basic block. @@ -409,26 +409,28 @@ type Parameter struct { referrers []Instruction } -// A Const represents the value of a constant expression. +// A Const represents a value known at build time. // -// The underlying type of a constant may be any boolean, numeric, or -// string type. In addition, a Const may represent the nil value of -// any reference type---interface, map, channel, pointer, slice, or -// function---but not "untyped nil". +// Consts include true constants of boolean, numeric, and string types, as +// defined by the Go spec; these are represented by a non-nil Value field. // -// All source-level constant expressions are represented by a Const -// of the same type and value. -// -// Value holds the value of the constant, independent of its Type(), -// using go/constant representation, or nil for a typed nil value. +// Consts also include the "zero" value of any type, of which the nil values +// of various pointer-like types are a special case; these are represented +// by a nil Value field. // // Pos() returns token.NoPos. // -// Example printed form: -// -// 42:int -// "hello":untyped string -// 3+4i:MyComplex +// Example printed forms: +// +// 42:int +// "hello":untyped string +// 3+4i:MyComplex +// nil:*int +// nil:[]string +// [3]int{}:[3]int +// struct{x string}{}:struct{x string} +// 0:interface{int|int64} +// nil:interface{bool|int} // no go/constant representation type Const struct { typ types.Type Value constant.Value @@ -603,9 +605,17 @@ type UnOp struct { // - between (possibly named) pointers to identical base types. // - from a bidirectional channel to a read- or write-channel, // optionally adding/removing a name. +// - between a type (t) and an instance of the type (tσ), i.e. +// Type() == σ(X.Type()) (or X.Type()== σ(Type())) where +// σ is the type substitution of Parent().TypeParams by +// Parent().TypeArgs. // // This operation cannot fail dynamically. // +// Type changes may to be to or from a type parameter (or both). All +// types in the type set of X.Type() have a value-preserving type +// change to all types in the type set of Type(). +// // Pos() returns the ast.CallExpr.Lparen, if the instruction arose // from an explicit conversion in the source. // @@ -631,6 +641,10 @@ type ChangeType struct { // // A conversion may imply a type name change also. // +// Conversions may to be to or from a type parameter. All types in +// the type set of X.Type() can be converted to all types in the type +// set of Type(). +// // This operation cannot fail dynamically. // // Conversions of untyped string/number/bool constants to a specific @@ -647,6 +661,30 @@ type Convert struct { X Value } +// The MultiConvert instruction yields the conversion of value X to type +// Type(). Either X.Type() or Type() must be a type parameter. Each +// type in the type set of X.Type() can be converted to each type in the +// type set of Type(). +// +// See the documentation for Convert, ChangeType, and SliceToArrayPointer +// for the conversions that are permitted. Additionally conversions of +// slices to arrays are permitted. +// +// This operation can fail dynamically (see SliceToArrayPointer). +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// +// t1 = multiconvert D <- S (t0) [*[2]rune <- []rune | string <- []rune] +type MultiConvert struct { + register + X Value + from []*typeparams.Term + to []*typeparams.Term +} + // ChangeInterface constructs a value of one interface type from a // value of another interface type known to be assignable to it. // This operation cannot fail. @@ -670,6 +708,14 @@ type ChangeInterface struct { // Pos() returns the ast.CallExpr.Lparen, if the instruction arose // from an explicit conversion in the source. // +// Conversion may to be to or from a type parameter. All types in +// the type set of X.Type() must be a slice types that can be converted to +// all types in the type set of Type() which must all be pointer to array +// types. +// +// This operation can fail dynamically if the length of the slice is less +// than the length of the array. +// // Example printed form: // // t1 = slice to array pointer *[4]byte <- []byte (t0) @@ -809,7 +855,9 @@ type Slice struct { // // Pos() returns the position of the ast.SelectorExpr.Sel for the // field, if explicit in the source. For implicit selections, returns -// the position of the inducing explicit selection. +// the position of the inducing explicit selection. If produced for a +// struct literal S{f: e}, it returns the position of the colon; for +// S{e} it returns the start of expression e. // // Example printed form: // @@ -817,7 +865,7 @@ type Slice struct { type FieldAddr struct { register X Value // *struct - Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field) + Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field) } // The Field instruction yields the Field of struct X. @@ -836,14 +884,14 @@ type FieldAddr struct { type Field struct { register X Value // struct - Field int // index into X.Type().(*types.Struct).Fields + Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields } // The IndexAddr instruction yields the address of the element at // index Index of collection X. Index is an integer expression. // -// The elements of maps and strings are not addressable; use Lookup or -// MapUpdate instead. +// The elements of maps and strings are not addressable; use Lookup (map), +// Index (string), or MapUpdate instead. // // Dynamically, this instruction panics if X evaluates to a nil *array // pointer. @@ -858,11 +906,13 @@ type Field struct { // t2 = &t0[t1] type IndexAddr struct { register - X Value // slice or *array, + X Value // *array, slice or type parameter with types array, *array, or slice. Index Value // numeric index } -// The Index instruction yields element Index of array X. +// The Index instruction yields element Index of collection X, an array, +// string or type parameter containing an array, a string, a pointer to an, +// array or a slice. // // Pos() returns the ast.IndexExpr.Lbrack for the index operation, if // explicit in the source. @@ -872,13 +922,12 @@ type IndexAddr struct { // t2 = t0[t1] type Index struct { register - X Value // array + X Value // array, string or type parameter with types array, *array, slice, or string. Index Value // integer index } -// The Lookup instruction yields element Index of collection X, a map -// or string. Index is an integer expression if X is a string or the -// appropriate key type if X is a map. +// The Lookup instruction yields element Index of collection map X. +// Index is the appropriate key type. // // If CommaOk, the result is a 2-tuple of the value above and a // boolean indicating the result of a map membership test for the key. @@ -892,8 +941,8 @@ type Index struct { // t5 = t3[t4],ok type Lookup struct { register - X Value // string or map - Index Value // numeric or key-typed index + X Value // map + Index Value // key-typed index CommaOk bool // return a value,ok pair } @@ -1002,6 +1051,9 @@ type Next struct { // is AssertedType's zero value. The components of the pair must be // accessed using the Extract instruction. // +// If Underlying: tests whether interface value X has the underlying +// type AssertedType. +// // If AssertedType is a concrete type, TypeAssert checks whether the // dynamic type in interface X is equal to it, and if so, the result // of the conversion is a copy of the value in the interface. @@ -1337,9 +1389,10 @@ type anInstruction struct { // 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon // represents a dynamically dispatched call to an interface method. // In this mode, Value is the interface value and Method is the -// interface's abstract method. Note: an abstract method may be -// shared by multiple interfaces due to embedding; Value.Type() -// provides the specific interface used for this call. +// interface's abstract method. The interface value may be a type +// parameter. Note: an abstract method may be shared by multiple +// interfaces due to embedding; Value.Type() provides the specific +// interface used for this call. // // Value is implicitly supplied to the concrete method implementation // as the receiver parameter; in other words, Args[0] holds not the @@ -1378,7 +1431,7 @@ func (c *CallCommon) Signature() *types.Signature { if c.Method != nil { return c.Method.Type().(*types.Signature) } - return c.Value.Type().Underlying().(*types.Signature) + return typeparams.CoreType(c.Value.Type()).(*types.Signature) } // StaticCallee returns the callee if this is a trivially static @@ -1469,6 +1522,29 @@ func (v *Function) Referrers() *[]Instruction { return nil } +// TypeParams are the function's type parameters if generic or the +// type parameters that were instantiated if fn is an instantiation. +// +// TODO(taking): declare result type as *types.TypeParamList +// after we drop support for go1.17. +func (fn *Function) TypeParams() *typeparams.TypeParamList { + return fn.typeparams +} + +// TypeArgs are the types that TypeParams() were instantiated by to create fn +// from fn.Origin(). +func (fn *Function) TypeArgs() []types.Type { return fn.typeargs } + +// Origin is the function fn is an instantiation of. Returns nil if fn is not +// an instantiation. +func (fn *Function) Origin() *Function { + if fn.parent != nil && len(fn.typeargs) > 0 { + // Nested functions are BUILT at a different time than there instances. + return fn.parent.Origin().AnonFuncs[fn.anonIdx] + } + return fn.topLevelOrigin +} + func (v *Parameter) Type() types.Type { return v.typ } func (v *Parameter) Name() string { return v.name } func (v *Parameter) Object() types.Object { return v.object } @@ -1596,6 +1672,10 @@ func (v *Convert) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *MultiConvert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value { return append(rands, &v.X) } diff --git a/go/ssa/ssautil/load.go b/go/ssa/ssautil/load.go index 58d185f6727..96d69a20a17 100644 --- a/go/ssa/ssautil/load.go +++ b/go/ssa/ssautil/load.go @@ -77,10 +77,12 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (* packages.Visit(initial, nil, func(p *packages.Package) { if p.Types != nil && !p.IllTyped { var files []*ast.File + var info *types.Info if deps || isInitial[p] { files = p.Syntax + info = p.TypesInfo } - ssamap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true) + ssamap[p] = prog.CreatePackage(p.Types, files, info, true) } }) diff --git a/go/ssa/ssautil/load_test.go b/go/ssa/ssautil/load_test.go index f769be273bb..efa2ba40a8b 100644 --- a/go/ssa/ssautil/load_test.go +++ b/go/ssa/ssautil/load_test.go @@ -12,10 +12,12 @@ import ( "go/token" "go/types" "os" + "path" "strings" "testing" "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/packages/packagestest" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" "golang.org/x/tools/internal/testenv" @@ -31,6 +33,8 @@ func main() { ` func TestBuildPackage(t *testing.T) { + testenv.NeedsGoBuild(t) // for importer.Default() + // There is a more substantial test of BuildPackage and the // SSA program it builds in ../ssa/builder_test.go. @@ -135,3 +139,57 @@ func TestIssue28106(t *testing.T) { prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0)) prog.Build() // no crash } + +func TestIssue53604(t *testing.T) { + // Tests that variable initializers are not added to init() when syntax + // is not present but types.Info is available. + // + // Packages x, y, z are loaded with mode `packages.LoadSyntax`. + // Package x imports y, and y imports z. + // Packages are built using ssautil.Packages() with x and z as roots. + // This setup creates y using CreatePackage(pkg, files, info, ...) + // where len(files) == 0 but info != nil. + // + // Tests that globals from y are not initialized. + e := packagestest.Export(t, packagestest.Modules, []packagestest.Module{ + { + Name: "golang.org/fake", + Files: map[string]interface{}{ + "x/x.go": `package x; import "golang.org/fake/y"; var V = y.F()`, + "y/y.go": `package y; import "golang.org/fake/z"; var F = func () *int { return &z.Z } `, + "z/z.go": `package z; var Z int`, + }, + }, + }) + defer e.Cleanup() + + // Load x and z as entry packages using packages.LoadSyntax + e.Config.Mode = packages.LoadSyntax + pkgs, err := packages.Load(e.Config, path.Join(e.Temp(), "fake/x"), path.Join(e.Temp(), "fake/z")) + if err != nil { + t.Fatal(err) + } + for _, p := range pkgs { + if len(p.Errors) > 0 { + t.Fatalf("%v", p.Errors) + } + } + + prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0)) + prog.Build() + + // y does not initialize F. + y := prog.ImportedPackage("golang.org/fake/y") + if y == nil { + t.Fatal("Failed to load intermediate package y") + } + yinit := y.Members["init"].(*ssa.Function) + for _, bb := range yinit.Blocks { + for _, i := range bb.Instrs { + if store, ok := i.(*ssa.Store); ok && store.Addr == y.Var("F") { + t.Errorf("y.init() stores to F %v", store) + } + } + } + +} diff --git a/go/ssa/stdlib_test.go b/go/ssa/stdlib_test.go index 7e02f97a7ed..8b9f4238da8 100644 --- a/go/ssa/stdlib_test.go +++ b/go/ssa/stdlib_test.go @@ -21,12 +21,10 @@ import ( "testing" "time" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" "golang.org/x/tools/internal/testenv" - "golang.org/x/tools/internal/typeparams/genericfeatures" ) func bytesAllocated() uint64 { @@ -51,22 +49,6 @@ func TestStdlib(t *testing.T) { if err != nil { t.Fatal(err) } - var nonGeneric int - for i := 0; i < len(pkgs); i++ { - pkg := pkgs[i] - inspect := inspector.New(pkg.Syntax) - features := genericfeatures.ForPackage(inspect, pkg.TypesInfo) - // Skip standard library packages that use generics. This won't be - // sufficient if any standard library packages start _importing_ packages - // that use generics. - if features != 0 { - t.Logf("skipping package %q which uses generics", pkg.PkgPath) - continue - } - pkgs[nonGeneric] = pkg - nonGeneric++ - } - pkgs = pkgs[:nonGeneric] t1 := time.Now() alloc1 := bytesAllocated() diff --git a/go/ssa/subst.go b/go/ssa/subst.go index b29130ea0cb..d7f8ae4a700 100644 --- a/go/ssa/subst.go +++ b/go/ssa/subst.go @@ -1,6 +1,7 @@ // Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package ssa import ( @@ -18,6 +19,8 @@ import ( // // Not concurrency-safe. type subster struct { + // TODO(zpavlinovic): replacements can contain type params + // when generating instances inside of a generic function body. replacements map[*typeparams.TypeParam]types.Type // values should contain no type params cache map[types.Type]types.Type // cache of subst results ctxt *typeparams.Context @@ -27,17 +30,17 @@ type subster struct { // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. -func makeSubster(ctxt *typeparams.Context, tparams []*typeparams.TypeParam, targs []types.Type, debug bool) *subster { - assert(len(tparams) == len(targs), "makeSubster argument count must match") +func makeSubster(ctxt *typeparams.Context, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster { + assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ - replacements: make(map[*typeparams.TypeParam]types.Type, len(tparams)), + replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()), cache: make(map[types.Type]types.Type), ctxt: ctxt, debug: debug, } - for i, tpar := range tparams { - subst.replacements[tpar] = targs[i] + for i := 0; i < tparams.Len(); i++ { + subst.replacements[tparams.At(i)] = targs[i] } if subst.debug { if err := subst.wellFormed(); err != nil { @@ -331,9 +334,9 @@ func (subst *subster) named(t *types.Named) types.Type { // type N[A any] func() A // func Foo[T](g N[T]) {} // To instantiate Foo[string], one goes through {T->string}. To get the type of g - // one subsitutes T with string in {N with TypeArgs == {T} and TypeParams == {A} } - // to get {N with TypeArgs == {string} and TypeParams == {A} }. - assert(targs.Len() == tparams.Len(), "TypeArgs().Len() must match TypeParams().Len() if present") + // one subsitutes T with string in {N with typeargs == {T} and typeparams == {A} } + // to get {N with TypeArgs == {string} and typeparams == {A} }. + assert(targs.Len() == tparams.Len(), "typeargs.Len() must match typeparams.Len() if present") for i, n := 0, targs.Len(); i < n; i++ { inst := subst.typ(targs.At(i)) // TODO(generic): Check with rfindley for mutual recursion insts[i] = inst diff --git a/go/ssa/subst_test.go b/go/ssa/subst_test.go index fe84adcc3da..5fa88270004 100644 --- a/go/ssa/subst_test.go +++ b/go/ssa/subst_test.go @@ -99,12 +99,8 @@ var _ L[int] = Fn0[L[int]](nil) } T := tv.Type.(*types.Named) - var tparams []*typeparams.TypeParam - for i, l := 0, typeparams.ForNamed(T); i < l.Len(); i++ { - tparams = append(tparams, l.At(i)) - } - subst := makeSubster(typeparams.NewContext(), tparams, targs, true) + subst := makeSubster(typeparams.NewContext(), typeparams.ForNamed(T), targs, true) sub := subst.typ(T.Underlying()) if got := sub.String(); got != test.want { t.Errorf("subst{%v->%v}.typ(%s) = %v, want %v", test.expr, test.args, T.Underlying(), got, test.want) diff --git a/go/ssa/testdata/valueforexpr.go b/go/ssa/testdata/valueforexpr.go index da76f13a392..243ec614f64 100644 --- a/go/ssa/testdata/valueforexpr.go +++ b/go/ssa/testdata/valueforexpr.go @@ -1,3 +1,4 @@ +//go:build ignore // +build ignore package main diff --git a/go/ssa/util.go b/go/ssa/util.go index 80c7d5cbec0..db53aebee43 100644 --- a/go/ssa/util.go +++ b/go/ssa/util.go @@ -49,7 +49,56 @@ func isPointer(typ types.Type) bool { return ok } -func isInterface(T types.Type) bool { return types.IsInterface(T) } +// isNonTypeParamInterface reports whether t is an interface type but not a type parameter. +func isNonTypeParamInterface(t types.Type) bool { + return !typeparams.IsTypeParam(t) && types.IsInterface(t) +} + +// isBasic reports whether t is a basic type. +func isBasic(t types.Type) bool { + _, ok := t.(*types.Basic) + return ok +} + +// isString reports whether t is exactly a string type. +func isString(t types.Type) bool { + return isBasic(t) && t.(*types.Basic).Info()&types.IsString != 0 +} + +// isByteSlice reports whether t is of the form []~bytes. +func isByteSlice(t types.Type) bool { + if b, ok := t.(*types.Slice); ok { + e, _ := b.Elem().Underlying().(*types.Basic) + return e != nil && e.Kind() == types.Byte + } + return false +} + +// isRuneSlice reports whether t is of the form []~runes. +func isRuneSlice(t types.Type) bool { + if b, ok := t.(*types.Slice); ok { + e, _ := b.Elem().Underlying().(*types.Basic) + return e != nil && e.Kind() == types.Rune + } + return false +} + +// isBasicConvTypes returns true when a type set can be +// one side of a Convert operation. This is when: +// - All are basic, []byte, or []rune. +// - At least 1 is basic. +// - At most 1 is []byte or []rune. +func isBasicConvTypes(tset termList) bool { + basics := 0 + all := underIs(tset, func(t types.Type) bool { + if isBasic(t) { + basics++ + return true + } + return isByteSlice(t) || isRuneSlice(t) + }) + return all && basics >= 1 && tset.Len()-basics <= 1 +} // deref returns a pointer's element type; otherwise it returns typ. func deref(typ types.Type) types.Type { @@ -113,7 +162,7 @@ func nonbasicTypes(ts []types.Type) []types.Type { added := make(map[types.Type]bool) // additionally filter duplicates var filtered []types.Type for _, T := range ts { - if _, basic := T.(*types.Basic); !basic { + if !isBasic(T) { if !added[T] { added[T] = true filtered = append(filtered, T) @@ -123,22 +172,6 @@ func nonbasicTypes(ts []types.Type) []types.Type { return filtered } -// isGeneric returns true if a package-level member is generic. -func isGeneric(m Member) bool { - switch m := m.(type) { - case *NamedConst, *Global: - return false - case *Type: - // lifted from types.isGeneric. - named, _ := m.Type().(*types.Named) - return named != nil && named.Obj() != nil && typeparams.NamedTypeArgs(named) == nil && typeparams.ForNamed(named) != nil - case *Function: - return len(m._TypeParams) != len(m._TypeArgs) - default: - panic("unreachable") - } -} - // receiverTypeArgs returns the type arguments to a function's reciever. // Returns an empty list if obj does not have a reciever or its reciever does not have type arguments. func receiverTypeArgs(obj *types.Func) []types.Type { diff --git a/go/ssa/wrappers.go b/go/ssa/wrappers.go index 3f2267c8a1b..228daf6158a 100644 --- a/go/ssa/wrappers.go +++ b/go/ssa/wrappers.go @@ -120,19 +120,19 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { // address of implicit C field. var c Call - if r := recvType(obj); !isInterface(r) { // concrete method + if r := recvType(obj); !types.IsInterface(r) { // concrete method if !isPointer(r) { v = emitLoad(fn, v) } callee := prog.originFunc(obj) - if len(callee._TypeParams) > 0 { - callee = prog.instances[callee].lookupOrCreate(receiverTypeArgs(obj), cr) + if callee.typeparams.Len() > 0 { + callee = prog.lookupOrCreateInstance(callee, receiverTypeArgs(obj), cr) } c.Call.Value = callee c.Call.Args = append(c.Call.Args, v) } else { c.Call.Method = obj - c.Call.Value = emitLoad(fn, v) + c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam) } for _, arg := range fn.Params[1:] { c.Call.Args = append(c.Call.Args, arg) @@ -208,16 +208,16 @@ func makeBound(prog *Program, obj *types.Func, cr *creator) *Function { createParams(fn, 0) var c Call - if !isInterface(recvType(obj)) { // concrete + if !types.IsInterface(recvType(obj)) { // concrete callee := prog.originFunc(obj) - if len(callee._TypeParams) > 0 { - callee = prog.instances[callee].lookupOrCreate(targs, cr) + if callee.typeparams.Len() > 0 { + callee = prog.lookupOrCreateInstance(callee, targs, cr) } c.Call.Value = callee c.Call.Args = []Value{fv} } else { - c.Call.Value = fv c.Call.Method = obj + c.Call.Value = fv // interface (possibly a typeparam) } for _, arg := range fn.Params { c.Call.Args = append(c.Call.Args, arg) @@ -324,3 +324,63 @@ func toSelection(sel *types.Selection) *selection { indirect: sel.Indirect(), } } + +// -- instantiations -------------------------------------------------- + +// buildInstantiationWrapper creates a body for an instantiation +// wrapper fn. The body calls the original generic function, +// bracketed by ChangeType conversions on its arguments and results. +func buildInstantiationWrapper(fn *Function) { + orig := fn.topLevelOrigin + sig := fn.Signature + + fn.startBody() + if sig.Recv() != nil { + fn.addParamObj(sig.Recv()) + } + createParams(fn, 0) + + // Create body. Add a call to origin generic function + // and make type changes between argument and parameters, + // as well as return values. + var c Call + c.Call.Value = orig + if res := orig.Signature.Results(); res.Len() == 1 { + c.typ = res.At(0).Type() + } else { + c.typ = res + } + + // parameter of instance becomes an argument to the call + // to the original generic function. + argOffset := 0 + for i, arg := range fn.Params { + var typ types.Type + if i == 0 && sig.Recv() != nil { + typ = orig.Signature.Recv().Type() + argOffset = 1 + } else { + typ = orig.Signature.Params().At(i - argOffset).Type() + } + c.Call.Args = append(c.Call.Args, emitTypeCoercion(fn, arg, typ)) + } + + results := fn.emit(&c) + var ret Return + switch res := sig.Results(); res.Len() { + case 0: + // no results, do nothing. + case 1: + ret.Results = []Value{emitTypeCoercion(fn, results, res.At(0).Type())} + default: + for i := 0; i < sig.Results().Len(); i++ { + v := emitExtract(fn, results, i) + ret.Results = append(ret.Results, emitTypeCoercion(fn, v, res.At(i).Type())) + } + } + + fn.emit(&ret) + fn.currentBlock = nil + + fn.finishBody() +} diff --git a/go/types/objectpath/objectpath_test.go b/go/types/objectpath/objectpath_test.go index 39e7b1bcdf6..adfad2cd2cd 100644 --- a/go/types/objectpath/objectpath_test.go +++ b/go/types/objectpath/objectpath_test.go @@ -182,7 +182,7 @@ func testPath(prog *loader.Program, test pathTest) error { return fmt.Errorf("Object(%s, %q) returned error %q, want %q", pkg.Path(), test.path, err, test.wantErr) } if test.wantErr != "" { - if got := stripSubscripts(err.Error()); got != test.wantErr { + if got := err.Error(); got != test.wantErr { return fmt.Errorf("Object(%s, %q) error was %q, want %q", pkg.Path(), test.path, got, test.wantErr) } @@ -190,7 +190,7 @@ func testPath(prog *loader.Program, test pathTest) error { } // Inv: err == nil - if objString := stripSubscripts(obj.String()); objString != test.wantobj { + if objString := obj.String(); objString != test.wantobj { return fmt.Errorf("Object(%s, %q) = %s, want %s", pkg.Path(), test.path, objString, test.wantobj) } if obj.Pkg() != pkg { @@ -215,25 +215,6 @@ func testPath(prog *loader.Program, test pathTest) error { return nil } -// stripSubscripts removes type parameter id subscripts. -// -// TODO(rfindley): remove this function once subscripts are removed from the -// type parameter type string. -func stripSubscripts(s string) string { - var runes []rune - for _, r := range s { - // For debugging/uniqueness purposes, TypeString on a type parameter adds a - // subscript corresponding to the type parameter's unique id. This is going - // to be removed, but in the meantime we skip the subscript runes to get a - // deterministic output. - if 'ā‚€' <= r && r < 'ā‚€'+10 { - continue // trim type parameter subscripts - } - runes = append(runes, r) - } - return string(runes) -} - // TestSourceAndExportData uses objectpath to compute a correspondence // of objects between two versions of the same package, one loaded from // source, the other from export data. diff --git a/go/types/typeutil/map.go b/go/types/typeutil/map.go index dcc029b8733..7bd2fdb38be 100644 --- a/go/types/typeutil/map.go +++ b/go/types/typeutil/map.go @@ -332,7 +332,9 @@ func (h Hasher) hashFor(t types.Type) uint32 { // Method order is not significant. // Ignore m.Pkg(). m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) } // Hash type restrictions. @@ -434,3 +436,76 @@ func (h Hasher) hashPtr(ptr interface{}) uint32 { h.ptrMap[ptr] = hash return hash } + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h Hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *typeparams.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *typeparams.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/go/types/typeutil/map_test.go b/go/types/typeutil/map_test.go index 8cd643e5b48..ee73ff9cfd5 100644 --- a/go/types/typeutil/map_test.go +++ b/go/types/typeutil/map_test.go @@ -244,6 +244,14 @@ func Bar[P Constraint[P]]() {} func Baz[Q any]() {} // The underlying type of Constraint[P] is any. // But Quux is not. func Quux[Q interface{ quux() }]() {} + + +type Issue56048_I interface{ m() interface { Issue56048_I } } +var Issue56048 = Issue56048_I.m + +type Issue56048_Ib interface{ m() chan []*interface { Issue56048_Ib } } +var Issue56048b = Issue56048_Ib.m + ` fset := token.NewFileSet() @@ -296,12 +304,14 @@ func Quux[Q interface{ quux() }]() {} ME1Type = scope.Lookup("ME1Type").Type() ME2 = scope.Lookup("ME2").Type() - Constraint = scope.Lookup("Constraint").Type() - Foo = scope.Lookup("Foo").Type() - Fn = scope.Lookup("Fn").Type() - Bar = scope.Lookup("Foo").Type() - Baz = scope.Lookup("Foo").Type() - Quux = scope.Lookup("Quux").Type() + Constraint = scope.Lookup("Constraint").Type() + Foo = scope.Lookup("Foo").Type() + Fn = scope.Lookup("Fn").Type() + Bar = scope.Lookup("Foo").Type() + Baz = scope.Lookup("Foo").Type() + Quux = scope.Lookup("Quux").Type() + Issue56048 = scope.Lookup("Issue56048").Type() + Issue56048b = scope.Lookup("Issue56048b").Type() ) tmap := new(typeutil.Map) @@ -371,6 +381,9 @@ func Quux[Q interface{ quux() }]() {} {Bar, "Bar", false}, {Baz, "Baz", false}, {Quux, "Quux", true}, + + {Issue56048, "Issue56048", true}, // (not actually about generics) + {Issue56048b, "Issue56048b", true}, // (not actually about generics) } for _, step := range steps { diff --git a/godoc/godoc.go b/godoc/godoc.go index 7ff2eab6239..dfac2111a67 100644 --- a/godoc/godoc.go +++ b/godoc/godoc.go @@ -345,11 +345,16 @@ func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) } -func comment_htmlFunc(comment string) string { +func comment_htmlFunc(info *PageInfo, comment string) string { var buf bytes.Buffer // TODO(gri) Provide list of words (e.g. function parameters) // to be emphasized by ToHTML. - doc.ToHTML(&buf, comment, nil) // does html-escaping + + // godocToHTML is: + // - buf.Write(info.PDoc.HTML(comment)) on go1.19 + // - go/doc.ToHTML(&buf, comment, nil) on other versions + godocToHTML(&buf, info.PDoc, comment) + return buf.String() } @@ -448,7 +453,7 @@ func srcToPkgLinkFunc(relpath string) string { return fmt.Sprintf(`%s`, relpath, relpath[len("pkg/"):]) } -// srcBreadcrumbFun converts each segment of relpath to a HTML . +// srcBreadcrumbFunc converts each segment of relpath to a HTML . // Each segment links to its corresponding src directories. func srcBreadcrumbFunc(relpath string) string { segments := strings.Split(relpath, "/") @@ -658,7 +663,7 @@ func (p *Presentation) example_suffixFunc(name string) string { return suffix } -// implements_html returns the "> Implements" toggle for a package-level named type. +// implements_htmlFunc returns the "> Implements" toggle for a package-level named type. // Its contents are populated from JSON data by client-side JS at load time. func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) string { if p.ImplementsHTML == nil { @@ -676,7 +681,7 @@ func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) stri return buf.String() } -// methodset_html returns the "> Method set" toggle for a package-level named type. +// methodset_htmlFunc returns the "> Method set" toggle for a package-level named type. // Its contents are populated from JSON data by client-side JS at load time. func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) string { if p.MethodSetHTML == nil { @@ -694,7 +699,7 @@ func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) strin return buf.String() } -// callgraph_html returns the "> Call graph" toggle for a package-level func. +// callgraph_htmlFunc returns the "> Call graph" toggle for a package-level func. // Its contents are populated from JSON data by client-side JS at load time. func (p *Presentation) callgraph_htmlFunc(info *PageInfo, recv, name string) string { if p.CallGraphHTML == nil { diff --git a/godoc/index.go b/godoc/index.go index d3f9f64fc5c..4471f59167a 100644 --- a/godoc/index.go +++ b/godoc/index.go @@ -50,6 +50,7 @@ import ( "index/suffixarray" "io" "log" + "math" "os" pathpkg "path" "path/filepath" @@ -161,7 +162,7 @@ func newKindRun(h RunList) interface{} { // bit is always the same for all infos in one // list we can simply compare the entire info. k := 0 - prev := SpotInfo(1<<32 - 1) // an unlikely value + prev := SpotInfo(math.MaxUint32) // an unlikely value for _, x := range run { if x != prev { run[k] = x @@ -1421,7 +1422,7 @@ func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileL return } -// InvalidateIndex should be called whenever any of the file systems +// invalidateIndex should be called whenever any of the file systems // under godoc's observation change so that the indexer is kicked on. func (c *Corpus) invalidateIndex() { c.fsModified.Set(nil) diff --git a/godoc/meta.go b/godoc/meta.go index 751b72e7161..76a27508b68 100644 --- a/godoc/meta.go +++ b/godoc/meta.go @@ -60,7 +60,7 @@ func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) { return } -// UpdateMetadata scans $GOROOT/doc for HTML and Markdown files, reads their metadata, +// updateMetadata scans $GOROOT/doc for HTML and Markdown files, reads their metadata, // and updates the DocMetadata map. func (c *Corpus) updateMetadata() { metadata := make(map[string]*Metadata) @@ -147,7 +147,7 @@ func (c *Corpus) refreshMetadata() { } } -// RefreshMetadataLoop runs forever, updating DocMetadata when the underlying +// refreshMetadataLoop runs forever, updating DocMetadata when the underlying // file system changes. It should be launched in a goroutine. func (c *Corpus) refreshMetadataLoop() { for { diff --git a/godoc/redirect/hash.go b/godoc/redirect/hash.go deleted file mode 100644 index d5a1e3eb67b..00000000000 --- a/godoc/redirect/hash.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides a compact encoding of -// a map of Mercurial hashes to Git hashes. - -package redirect - -import ( - "encoding/binary" - "fmt" - "io" - "os" - "sort" - "strconv" - "strings" -) - -// hashMap is a map of Mercurial hashes to Git hashes. -type hashMap struct { - file *os.File - entries int -} - -// newHashMap takes a file handle that contains a map of Mercurial to Git -// hashes. The file should be a sequence of pairs of little-endian encoded -// uint32s, representing a hgHash and a gitHash respectively. -// The sequence must be sorted by hgHash. -// The file must remain open for as long as the returned hashMap is used. -func newHashMap(f *os.File) (*hashMap, error) { - fi, err := f.Stat() - if err != nil { - return nil, err - } - return &hashMap{file: f, entries: int(fi.Size() / 8)}, nil -} - -// Lookup finds an hgHash in the map that matches the given prefix, and returns -// its corresponding gitHash. The prefix must be at least 8 characters long. -func (m *hashMap) Lookup(s string) gitHash { - if m == nil { - return 0 - } - hg, err := hgHashFromString(s) - if err != nil { - return 0 - } - var git gitHash - b := make([]byte, 8) - sort.Search(m.entries, func(i int) bool { - n, err := m.file.ReadAt(b, int64(i*8)) - if err != nil { - panic(err) - } - if n != 8 { - panic(io.ErrUnexpectedEOF) - } - v := hgHash(binary.LittleEndian.Uint32(b[:4])) - if v == hg { - git = gitHash(binary.LittleEndian.Uint32(b[4:])) - } - return v >= hg - }) - return git -} - -// hgHash represents the lower (leftmost) 32 bits of a Mercurial hash. -type hgHash uint32 - -func (h hgHash) String() string { - return intToHash(int64(h)) -} - -func hgHashFromString(s string) (hgHash, error) { - if len(s) < 8 { - return 0, fmt.Errorf("string too small: len(s) = %d", len(s)) - } - hash := s[:8] - i, err := strconv.ParseInt(hash, 16, 64) - if err != nil { - return 0, err - } - return hgHash(i), nil -} - -// gitHash represents the leftmost 28 bits of a Git hash in its upper 28 bits, -// and it encodes hash's repository in the lower 4 bits. -type gitHash uint32 - -func (h gitHash) Hash() string { - return intToHash(int64(h))[:7] -} - -func (h gitHash) Repo() string { - return repo(h & 0xF).String() -} - -func intToHash(i int64) string { - s := strconv.FormatInt(i, 16) - if len(s) < 8 { - s = strings.Repeat("0", 8-len(s)) + s - } - return s -} - -// repo represents a Go Git repository. -type repo byte - -const ( - repoGo repo = iota - repoBlog - repoCrypto - repoExp - repoImage - repoMobile - repoNet - repoSys - repoTalks - repoText - repoTools -) - -func (r repo) String() string { - return map[repo]string{ - repoGo: "go", - repoBlog: "blog", - repoCrypto: "crypto", - repoExp: "exp", - repoImage: "image", - repoMobile: "mobile", - repoNet: "net", - repoSys: "sys", - repoTalks: "talks", - repoText: "text", - repoTools: "tools", - }[r] -} diff --git a/godoc/redirect/redirect.go b/godoc/redirect/redirect.go index 57d779ccb41..d0145ee183b 100644 --- a/godoc/redirect/redirect.go +++ b/godoc/redirect/redirect.go @@ -3,147 +3,22 @@ // license that can be found in the LICENSE file. // Package redirect provides hooks to register HTTP handlers that redirect old -// godoc paths to their new equivalents and assist in accessing the issue -// tracker, wiki, code review system, etc. +// godoc paths to their new equivalents. package redirect // import "golang.org/x/tools/godoc/redirect" import ( - "context" - "fmt" - "html/template" "net/http" - "os" "regexp" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context/ctxhttp" ) -// Register registers HTTP handlers that redirect old godoc paths to their new -// equivalents and assist in accessing the issue tracker, wiki, code review -// system, etc. If mux is nil it uses http.DefaultServeMux. +// Register registers HTTP handlers that redirect old godoc paths to their new equivalents. +// If mux is nil it uses http.DefaultServeMux. func Register(mux *http.ServeMux) { if mux == nil { mux = http.DefaultServeMux } - handlePathRedirects(mux, pkgRedirects, "/pkg/") - handlePathRedirects(mux, cmdRedirects, "/cmd/") - for prefix, redirect := range prefixHelpers { - p := "/" + prefix + "/" - mux.Handle(p, PrefixHandler(p, redirect)) - } - for path, redirect := range redirects { - mux.Handle(path, Handler(redirect)) - } // NB: /src/pkg (sans trailing slash) is the index of packages. mux.HandleFunc("/src/pkg/", srcPkgHandler) - mux.HandleFunc("/cl/", clHandler) - mux.HandleFunc("/change/", changeHandler) - mux.HandleFunc("/design/", designHandler) -} - -func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) { - for source, target := range redirects { - h := Handler(prefix + target + "/") - p := prefix + source - mux.Handle(p, h) - mux.Handle(p+"/", h) - } -} - -// Packages that were renamed between r60 and go1. -var pkgRedirects = map[string]string{ - "asn1": "encoding/asn1", - "big": "math/big", - "cmath": "math/cmplx", - "csv": "encoding/csv", - "exec": "os/exec", - "exp/template/html": "html/template", - "gob": "encoding/gob", - "http": "net/http", - "http/cgi": "net/http/cgi", - "http/fcgi": "net/http/fcgi", - "http/httptest": "net/http/httptest", - "http/pprof": "net/http/pprof", - "json": "encoding/json", - "mail": "net/mail", - "rand": "math/rand", - "rpc": "net/rpc", - "rpc/jsonrpc": "net/rpc/jsonrpc", - "scanner": "text/scanner", - "smtp": "net/smtp", - "tabwriter": "text/tabwriter", - "template": "text/template", - "template/parse": "text/template/parse", - "url": "net/url", - "utf16": "unicode/utf16", - "utf8": "unicode/utf8", - "xml": "encoding/xml", -} - -// Commands that were renamed between r60 and go1. -var cmdRedirects = map[string]string{ - "gofix": "fix", - "goinstall": "go", - "gopack": "pack", - "gotest": "go", - "govet": "vet", - "goyacc": "yacc", -} - -var redirects = map[string]string{ - "/blog": "/blog/", - "/build": "http://build.golang.org", - "/change": "https://go.googlesource.com/go", - "/cl": "https://go-review.googlesource.com", - "/cmd/godoc/": "https://pkg.go.dev/golang.org/x/tools/cmd/godoc", - "/issue": "https://github.com/golang/go/issues", - "/issue/new": "https://github.com/golang/go/issues/new", - "/issues": "https://github.com/golang/go/issues", - "/issues/new": "https://github.com/golang/go/issues/new", - "/play": "http://play.golang.org", - "/design": "https://go.googlesource.com/proposal/+/master/design", - - // In Go 1.2 the references page is part of /doc/. - "/ref": "/doc/#references", - // This next rule clobbers /ref/spec and /ref/mem. - // TODO(adg): figure out what to do here, if anything. - // "/ref/": "/doc/#references", - - // Be nice to people who are looking in the wrong place. - "/doc/mem": "/ref/mem", - "/doc/spec": "/ref/spec", - - "/talks": "http://talks.golang.org", - "/tour": "http://tour.golang.org", - "/wiki": "https://github.com/golang/go/wiki", - - "/doc/articles/c_go_cgo.html": "/blog/c-go-cgo", - "/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and", - "/doc/articles/defer_panic_recover.html": "/blog/defer-panic-and-recover", - "/doc/articles/error_handling.html": "/blog/error-handling-and-go", - "/doc/articles/gobs_of_data.html": "/blog/gobs-of-data", - "/doc/articles/godoc_documenting_go_code.html": "/blog/godoc-documenting-go-code", - "/doc/articles/gos_declaration_syntax.html": "/blog/gos-declaration-syntax", - "/doc/articles/image_draw.html": "/blog/go-imagedraw-package", - "/doc/articles/image_package.html": "/blog/go-image-package", - "/doc/articles/json_and_go.html": "/blog/json-and-go", - "/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces", - "/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection", - "/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals", - "/doc/go_for_cpp_programmers.html": "/wiki/GoForCPPProgrammers", - "/doc/go_tutorial.html": "http://tour.golang.org/", -} - -var prefixHelpers = map[string]string{ - "issue": "https://github.com/golang/go/issues/", - "issues": "https://github.com/golang/go/issues/", - "play": "http://play.golang.org/", - "talks": "http://talks.golang.org/", - "wiki": "https://github.com/golang/go/wiki/", } func Handler(target string) http.Handler { @@ -181,144 +56,3 @@ func srcPkgHandler(w http.ResponseWriter, r *http.Request) { r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):] http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently) } - -func clHandler(w http.ResponseWriter, r *http.Request) { - const prefix = "/cl/" - if p := r.URL.Path; p == prefix { - // redirect /prefix/ to /prefix - http.Redirect(w, r, p[:len(p)-1], http.StatusFound) - return - } - id := r.URL.Path[len(prefix):] - // support /cl/152700045/, which is used in commit 0edafefc36. - id = strings.TrimSuffix(id, "/") - if !validID.MatchString(id) { - http.Error(w, "Not found", http.StatusNotFound) - return - } - target := "" - - if n, err := strconv.Atoi(id); err == nil && isRietveldCL(n) { - // Issue 28836: if this Rietveld CL happens to - // also be a Gerrit CL, render a disambiguation HTML - // page with two links instead. We need to make a - // Gerrit API call to figure that out, but we cache - // known Gerrit CLs so it's done at most once per CL. - if ok, err := isGerritCL(r.Context(), n); err == nil && ok { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - clDisambiguationHTML.Execute(w, n) - return - } - - target = "https://codereview.appspot.com/" + id - } else { - target = "https://go-review.googlesource.com/" + id - } - http.Redirect(w, r, target, http.StatusFound) -} - -var clDisambiguationHTML = template.Must(template.New("").Parse(` - - - Go CL {{.}} Disambiguation - - - - CL number {{.}} exists in both Gerrit (the current code review system) - and Rietveld (the previous code review system). Please make a choice: - - - -`)) - -// isGerritCL reports whether a Gerrit CL with the specified numeric change ID (e.g., "4247") -// is known to exist by querying the Gerrit API at https://go-review.googlesource.com. -// isGerritCL uses gerritCLCache as a cache of Gerrit CL IDs that exist. -func isGerritCL(ctx context.Context, id int) (bool, error) { - // Check cache first. - gerritCLCache.Lock() - ok := gerritCLCache.exist[id] - gerritCLCache.Unlock() - if ok { - return true, nil - } - - // Query the Gerrit API Get Change endpoint, as documented at - // https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-change. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - resp, err := ctxhttp.Get(ctx, nil, fmt.Sprintf("https://go-review.googlesource.com/changes/%d", id)) - if err != nil { - return false, err - } - resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - // A Gerrit CL with this ID exists. Add it to cache. - gerritCLCache.Lock() - gerritCLCache.exist[id] = true - gerritCLCache.Unlock() - return true, nil - case http.StatusNotFound: - // A Gerrit CL with this ID doesn't exist. It may get created in the future. - return false, nil - default: - return false, fmt.Errorf("unexpected status code: %v", resp.Status) - } -} - -var gerritCLCache = struct { - sync.Mutex - exist map[int]bool // exist is a set of Gerrit CL IDs that are known to exist. -}{exist: make(map[int]bool)} - -var changeMap *hashMap - -// LoadChangeMap loads the specified map of Mercurial to Git revisions, -// which is used by the /change/ handler to intelligently map old hg -// revisions to their new git equivalents. -// It should be called before calling Register. -// The file should remain open as long as the process is running. -// See the implementation of this package for details. -func LoadChangeMap(filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - m, err := newHashMap(f) - if err != nil { - return err - } - changeMap = m - return nil -} - -func changeHandler(w http.ResponseWriter, r *http.Request) { - const prefix = "/change/" - if p := r.URL.Path; p == prefix { - // redirect /prefix/ to /prefix - http.Redirect(w, r, p[:len(p)-1], http.StatusFound) - return - } - hash := r.URL.Path[len(prefix):] - target := "https://go.googlesource.com/go/+/" + hash - if git := changeMap.Lookup(hash); git > 0 { - target = fmt.Sprintf("https://go.googlesource.com/%v/+/%v", git.Repo(), git.Hash()) - } - http.Redirect(w, r, target, http.StatusFound) -} - -func designHandler(w http.ResponseWriter, r *http.Request) { - const prefix = "/design/" - if p := r.URL.Path; p == prefix { - // redirect /prefix/ to /prefix - http.Redirect(w, r, p[:len(p)-1], http.StatusFound) - return - } - name := r.URL.Path[len(prefix):] - target := "https://go.googlesource.com/proposal/+/master/design/" + name + ".md" - http.Redirect(w, r, target, http.StatusFound) -} diff --git a/godoc/redirect/redirect_test.go b/godoc/redirect/redirect_test.go index 1de3c6ca779..59677c435cc 100644 --- a/godoc/redirect/redirect_test.go +++ b/godoc/redirect/redirect_test.go @@ -21,56 +21,7 @@ func errorResult(status int) redirectResult { func TestRedirects(t *testing.T) { var tests = map[string]redirectResult{ - "/build": {301, "http://build.golang.org"}, - "/ref": {301, "/doc/#references"}, - "/doc/mem": {301, "/ref/mem"}, - "/doc/spec": {301, "/ref/spec"}, - "/tour": {301, "http://tour.golang.org"}, - "/foo": errorResult(404), - - "/pkg/asn1": {301, "/pkg/encoding/asn1/"}, - "/pkg/template/parse": {301, "/pkg/text/template/parse/"}, - - "/src/pkg/foo": {301, "/src/foo"}, - - "/cmd/gofix": {301, "/cmd/fix/"}, - - // git commits (/change) - // TODO: mercurial tags and LoadChangeMap. - "/change": {301, "https://go.googlesource.com/go"}, - "/change/a": {302, "https://go.googlesource.com/go/+/a"}, - - "/issue": {301, "https://github.com/golang/go/issues"}, - "/issue?": {301, "https://github.com/golang/go/issues"}, - "/issue/1": {302, "https://github.com/golang/go/issues/1"}, - "/issue/new": {301, "https://github.com/golang/go/issues/new"}, - "/issue/new?a=b&c=d%20&e=f": {301, "https://github.com/golang/go/issues/new?a=b&c=d%20&e=f"}, - "/issues": {301, "https://github.com/golang/go/issues"}, - "/issues/1": {302, "https://github.com/golang/go/issues/1"}, - "/issues/new": {301, "https://github.com/golang/go/issues/new"}, - "/issues/1/2/3": errorResult(404), - - "/wiki/foo": {302, "https://github.com/golang/go/wiki/foo"}, - "/wiki/foo/": {302, "https://github.com/golang/go/wiki/foo/"}, - - "/design": {301, "https://go.googlesource.com/proposal/+/master/design"}, - "/design/": {302, "/design"}, - "/design/123-foo": {302, "https://go.googlesource.com/proposal/+/master/design/123-foo.md"}, - "/design/text/123-foo": {302, "https://go.googlesource.com/proposal/+/master/design/text/123-foo.md"}, - - "/cl/1": {302, "https://go-review.googlesource.com/1"}, - "/cl/1/": {302, "https://go-review.googlesource.com/1"}, - "/cl/267120043": {302, "https://codereview.appspot.com/267120043"}, - "/cl/267120043/": {302, "https://codereview.appspot.com/267120043"}, - - // Verify that we're using the Rietveld CL table: - "/cl/152046": {302, "https://codereview.appspot.com/152046"}, - "/cl/152047": {302, "https://go-review.googlesource.com/152047"}, - "/cl/152048": {302, "https://codereview.appspot.com/152048"}, - - // And verify we're using the "bigEnoughAssumeRietveld" value: - "/cl/299999": {302, "https://go-review.googlesource.com/299999"}, - "/cl/300000": {302, "https://codereview.appspot.com/300000"}, + "/foo": errorResult(404), } mux := http.NewServeMux() diff --git a/godoc/redirect/rietveld.go b/godoc/redirect/rietveld.go deleted file mode 100644 index 81b1094db17..00000000000 --- a/godoc/redirect/rietveld.go +++ /dev/null @@ -1,1093 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package redirect - -// bigEnoughAssumeRietveld is the value where CLs equal or great are -// assumed to be on Rietveld. By including this threshold we shrink -// the size of the table below. When Go amasses 150,000 more CLs, we'll -// need to bump this number and regenerate the list below. -const bigEnoughAssumeRietveld = 300000 - -// isRietveldCL reports whether cl was a Rietveld CL number. -func isRietveldCL(cl int) bool { - return cl >= bigEnoughAssumeRietveld || lowRietveldCL[cl] -} - -// lowRietveldCLs are the old CL numbers assigned by Rietveld code -// review system as used by Go prior to Gerrit which are less than -// bigEnoughAssumeRietveld. -// -// This list of numbers is registered with the /cl/NNNN redirect -// handler to disambiguate which code review system a particular -// number corresponds to. In some rare cases there may be duplicates, -// in which case we might render an HTML choice for the user. -// -// To re-generate this list, run: -// -// $ cd $GOROOT -// $ git log 7d7c6a9..94151eb | grep "^ https://golang.org/cl/" | perl -ne 's,^\s+https://golang.org/cl/(\d+).*$,$1,; chomp; print "$_: true,\n" if $_ < 300000' | sort -n | uniq -// -// Note that we ignore the x/* repos because we didn't start using -// "subrepos" until the Rietveld CLs numbers were already 4,000,000+, -// well above bigEnoughAssumeRietveld. -var lowRietveldCL = map[int]bool{ - 152046: true, - 152048: true, - 152049: true, - 152050: true, - 152051: true, - 152052: true, - 152055: true, - 152056: true, - 152057: true, - 152072: true, - 152073: true, - 152075: true, - 152076: true, - 152077: true, - 152078: true, - 152079: true, - 152080: true, - 152082: true, - 152084: true, - 152085: true, - 152086: true, - 152088: true, - 152089: true, - 152091: true, - 152098: true, - 152101: true, - 152102: true, - 152105: true, - 152106: true, - 152107: true, - 152108: true, - 152109: true, - 152110: true, - 152114: true, - 152117: true, - 152118: true, - 152120: true, - 152123: true, - 152124: true, - 152128: true, - 152130: true, - 152131: true, - 152138: true, - 152141: true, - 152142: true, - 153048: true, - 153049: true, - 153050: true, - 153051: true, - 153055: true, - 153056: true, - 153057: true, - 154043: true, - 154044: true, - 154045: true, - 154049: true, - 154055: true, - 154057: true, - 154058: true, - 154059: true, - 154061: true, - 154064: true, - 154065: true, - 154067: true, - 154068: true, - 154069: true, - 154071: true, - 154072: true, - 154073: true, - 154076: true, - 154079: true, - 154096: true, - 154097: true, - 154099: true, - 154100: true, - 154101: true, - 154102: true, - 154108: true, - 154118: true, - 154121: true, - 154122: true, - 154123: true, - 154125: true, - 154126: true, - 154128: true, - 154136: true, - 154138: true, - 154139: true, - 154140: true, - 154141: true, - 154142: true, - 154143: true, - 154144: true, - 154145: true, - 154146: true, - 154152: true, - 154153: true, - 154156: true, - 154159: true, - 154161: true, - 154166: true, - 154167: true, - 154169: true, - 154171: true, - 154172: true, - 154173: true, - 154174: true, - 154175: true, - 154176: true, - 154177: true, - 154178: true, - 154179: true, - 154180: true, - 155041: true, - 155042: true, - 155045: true, - 155047: true, - 155048: true, - 155049: true, - 155050: true, - 155054: true, - 155055: true, - 155056: true, - 155057: true, - 155058: true, - 155059: true, - 155061: true, - 155062: true, - 155063: true, - 155065: true, - 155067: true, - 155069: true, - 155072: true, - 155074: true, - 155075: true, - 155077: true, - 155078: true, - 155079: true, - 156041: true, - 156044: true, - 156045: true, - 156046: true, - 156047: true, - 156051: true, - 156052: true, - 156054: true, - 156055: true, - 156056: true, - 156058: true, - 156059: true, - 156060: true, - 156061: true, - 156062: true, - 156063: true, - 156066: true, - 156067: true, - 156070: true, - 156071: true, - 156073: true, - 156075: true, - 156077: true, - 156079: true, - 156080: true, - 156081: true, - 156083: true, - 156084: true, - 156085: true, - 156086: true, - 156089: true, - 156091: true, - 156092: true, - 156093: true, - 156094: true, - 156097: true, - 156099: true, - 156100: true, - 156102: true, - 156103: true, - 156104: true, - 156106: true, - 156107: true, - 156108: true, - 156109: true, - 156110: true, - 156113: true, - 156115: true, - 156116: true, - 157041: true, - 157042: true, - 157043: true, - 157044: true, - 157046: true, - 157053: true, - 157055: true, - 157056: true, - 157058: true, - 157060: true, - 157061: true, - 157062: true, - 157065: true, - 157066: true, - 157067: true, - 157068: true, - 157069: true, - 157071: true, - 157072: true, - 157073: true, - 157074: true, - 157075: true, - 157076: true, - 157077: true, - 157082: true, - 157084: true, - 157085: true, - 157087: true, - 157088: true, - 157091: true, - 157095: true, - 157096: true, - 157099: true, - 157100: true, - 157101: true, - 157102: true, - 157103: true, - 157104: true, - 157106: true, - 157110: true, - 157111: true, - 157112: true, - 157114: true, - 157116: true, - 157119: true, - 157140: true, - 157142: true, - 157143: true, - 157144: true, - 157146: true, - 157147: true, - 157149: true, - 157151: true, - 157152: true, - 157153: true, - 157154: true, - 157156: true, - 157157: true, - 157158: true, - 157159: true, - 157160: true, - 157162: true, - 157166: true, - 157167: true, - 157168: true, - 157170: true, - 158041: true, - 159044: true, - 159049: true, - 159050: true, - 159051: true, - 160043: true, - 160044: true, - 160045: true, - 160046: true, - 160047: true, - 160054: true, - 160056: true, - 160057: true, - 160059: true, - 160060: true, - 160061: true, - 160064: true, - 160065: true, - 160069: true, - 160070: true, - 161049: true, - 161050: true, - 161056: true, - 161058: true, - 161060: true, - 161061: true, - 161069: true, - 161070: true, - 161073: true, - 161075: true, - 162041: true, - 162044: true, - 162046: true, - 162053: true, - 162054: true, - 162055: true, - 162056: true, - 162057: true, - 162058: true, - 162059: true, - 162061: true, - 162062: true, - 163042: true, - 163044: true, - 163049: true, - 163050: true, - 163051: true, - 163052: true, - 163053: true, - 163055: true, - 163058: true, - 163061: true, - 163062: true, - 163064: true, - 163067: true, - 163068: true, - 163069: true, - 163070: true, - 163071: true, - 163072: true, - 163082: true, - 163083: true, - 163085: true, - 163088: true, - 163091: true, - 163092: true, - 163097: true, - 163098: true, - 164043: true, - 164047: true, - 164049: true, - 164052: true, - 164053: true, - 164056: true, - 164059: true, - 164060: true, - 164062: true, - 164068: true, - 164069: true, - 164071: true, - 164073: true, - 164074: true, - 164075: true, - 164078: true, - 164079: true, - 164081: true, - 164082: true, - 164083: true, - 164085: true, - 164086: true, - 164088: true, - 164090: true, - 164091: true, - 164092: true, - 164093: true, - 164094: true, - 164095: true, - 165042: true, - 165044: true, - 165045: true, - 165048: true, - 165049: true, - 165050: true, - 165051: true, - 165055: true, - 165057: true, - 165058: true, - 165059: true, - 165061: true, - 165062: true, - 165063: true, - 165064: true, - 165065: true, - 165068: true, - 165070: true, - 165076: true, - 165078: true, - 165080: true, - 165083: true, - 165086: true, - 165097: true, - 165100: true, - 165101: true, - 166041: true, - 166043: true, - 166044: true, - 166047: true, - 166049: true, - 166052: true, - 166053: true, - 166055: true, - 166058: true, - 166059: true, - 166060: true, - 166064: true, - 166066: true, - 166067: true, - 166068: true, - 166070: true, - 166071: true, - 166072: true, - 166073: true, - 166074: true, - 166076: true, - 166077: true, - 166078: true, - 166080: true, - 167043: true, - 167044: true, - 167047: true, - 167050: true, - 167055: true, - 167057: true, - 167058: true, - 168041: true, - 168045: true, - 170042: true, - 170043: true, - 170044: true, - 170046: true, - 170047: true, - 170048: true, - 170049: true, - 171044: true, - 171046: true, - 171047: true, - 171048: true, - 171051: true, - 172041: true, - 172042: true, - 172043: true, - 172045: true, - 172049: true, - 173041: true, - 173044: true, - 173045: true, - 174042: true, - 174047: true, - 174048: true, - 174050: true, - 174051: true, - 174052: true, - 174053: true, - 174063: true, - 174064: true, - 174072: true, - 174076: true, - 174077: true, - 174078: true, - 174082: true, - 174083: true, - 174087: true, - 175045: true, - 175046: true, - 175047: true, - 175048: true, - 176056: true, - 176057: true, - 176058: true, - 176061: true, - 176062: true, - 176063: true, - 176064: true, - 176066: true, - 176067: true, - 176070: true, - 176071: true, - 176076: true, - 178043: true, - 178044: true, - 178046: true, - 178048: true, - 179047: true, - 179055: true, - 179061: true, - 179062: true, - 179063: true, - 179067: true, - 179069: true, - 179070: true, - 179072: true, - 179079: true, - 179088: true, - 179095: true, - 179096: true, - 179097: true, - 179099: true, - 179105: true, - 179106: true, - 179108: true, - 179118: true, - 179120: true, - 179125: true, - 179126: true, - 179128: true, - 179129: true, - 179130: true, - 180044: true, - 180045: true, - 180046: true, - 180047: true, - 180048: true, - 180049: true, - 180050: true, - 180052: true, - 180053: true, - 180054: true, - 180055: true, - 180056: true, - 180057: true, - 180059: true, - 180061: true, - 180064: true, - 180065: true, - 180068: true, - 180069: true, - 180070: true, - 180074: true, - 180075: true, - 180081: true, - 180082: true, - 180085: true, - 180092: true, - 180099: true, - 180105: true, - 180108: true, - 180112: true, - 180118: true, - 181041: true, - 181043: true, - 181044: true, - 181045: true, - 181049: true, - 181050: true, - 181055: true, - 181057: true, - 181058: true, - 181059: true, - 181063: true, - 181071: true, - 181073: true, - 181075: true, - 181077: true, - 181080: true, - 181083: true, - 181084: true, - 181085: true, - 181086: true, - 181087: true, - 181089: true, - 181097: true, - 181099: true, - 181102: true, - 181111: true, - 181130: true, - 181135: true, - 181137: true, - 181138: true, - 181139: true, - 181151: true, - 181152: true, - 181153: true, - 181155: true, - 181156: true, - 181157: true, - 181158: true, - 181160: true, - 181161: true, - 181163: true, - 181164: true, - 181171: true, - 181179: true, - 181183: true, - 181184: true, - 181186: true, - 182041: true, - 182043: true, - 182044: true, - 183042: true, - 183043: true, - 183044: true, - 183047: true, - 183049: true, - 183065: true, - 183066: true, - 183073: true, - 183074: true, - 183075: true, - 183083: true, - 183084: true, - 183087: true, - 183088: true, - 183090: true, - 183095: true, - 183104: true, - 183107: true, - 183109: true, - 183111: true, - 183112: true, - 183113: true, - 183116: true, - 183123: true, - 183124: true, - 183125: true, - 183126: true, - 183132: true, - 183133: true, - 183135: true, - 183136: true, - 183137: true, - 183138: true, - 183139: true, - 183140: true, - 183141: true, - 183142: true, - 183153: true, - 183155: true, - 183156: true, - 183157: true, - 183160: true, - 184043: true, - 184055: true, - 184058: true, - 184059: true, - 184068: true, - 184069: true, - 184079: true, - 184080: true, - 184081: true, - 185043: true, - 185045: true, - 186042: true, - 186043: true, - 186073: true, - 186076: true, - 186077: true, - 186078: true, - 186079: true, - 186081: true, - 186095: true, - 186108: true, - 186113: true, - 186115: true, - 186116: true, - 186118: true, - 186119: true, - 186132: true, - 186137: true, - 186138: true, - 186139: true, - 186143: true, - 186144: true, - 186145: true, - 186146: true, - 186147: true, - 186148: true, - 186159: true, - 186160: true, - 186161: true, - 186165: true, - 186169: true, - 186173: true, - 186180: true, - 186210: true, - 186211: true, - 186212: true, - 186213: true, - 186214: true, - 186215: true, - 186216: true, - 186228: true, - 186229: true, - 186230: true, - 186232: true, - 186234: true, - 186255: true, - 186263: true, - 186276: true, - 186279: true, - 186282: true, - 186283: true, - 188043: true, - 189042: true, - 189057: true, - 189059: true, - 189062: true, - 189078: true, - 189080: true, - 189083: true, - 189088: true, - 189093: true, - 189095: true, - 189096: true, - 189098: true, - 189100: true, - 190041: true, - 190042: true, - 190043: true, - 190044: true, - 190059: true, - 190062: true, - 190068: true, - 190074: true, - 190076: true, - 190077: true, - 190079: true, - 190085: true, - 190088: true, - 190103: true, - 190104: true, - 193055: true, - 193066: true, - 193067: true, - 193070: true, - 193075: true, - 193079: true, - 193080: true, - 193081: true, - 193091: true, - 193092: true, - 193095: true, - 193101: true, - 193104: true, - 194043: true, - 194045: true, - 194046: true, - 194050: true, - 194051: true, - 194052: true, - 194053: true, - 194064: true, - 194066: true, - 194069: true, - 194071: true, - 194072: true, - 194073: true, - 194074: true, - 194076: true, - 194077: true, - 194078: true, - 194082: true, - 194084: true, - 194085: true, - 194090: true, - 194091: true, - 194092: true, - 194094: true, - 194097: true, - 194098: true, - 194099: true, - 194100: true, - 194114: true, - 194116: true, - 194118: true, - 194119: true, - 194120: true, - 194121: true, - 194122: true, - 194126: true, - 194129: true, - 194131: true, - 194132: true, - 194133: true, - 194134: true, - 194146: true, - 194151: true, - 194156: true, - 194157: true, - 194159: true, - 194161: true, - 194165: true, - 195041: true, - 195044: true, - 195050: true, - 195051: true, - 195052: true, - 195068: true, - 195075: true, - 195076: true, - 195079: true, - 195080: true, - 195081: true, - 196042: true, - 196044: true, - 196050: true, - 196051: true, - 196055: true, - 196056: true, - 196061: true, - 196063: true, - 196065: true, - 196070: true, - 196071: true, - 196075: true, - 196077: true, - 196079: true, - 196087: true, - 196088: true, - 196090: true, - 196091: true, - 197041: true, - 197042: true, - 197043: true, - 197044: true, - 198044: true, - 198045: true, - 198046: true, - 198048: true, - 198049: true, - 198050: true, - 198053: true, - 198057: true, - 198058: true, - 198066: true, - 198071: true, - 198074: true, - 198081: true, - 198084: true, - 198085: true, - 198102: true, - 199042: true, - 199044: true, - 199045: true, - 199046: true, - 199047: true, - 199052: true, - 199054: true, - 199057: true, - 199066: true, - 199070: true, - 199082: true, - 199091: true, - 199094: true, - 199096: true, - 201041: true, - 201042: true, - 201043: true, - 201047: true, - 201048: true, - 201049: true, - 201058: true, - 201061: true, - 201064: true, - 201065: true, - 201068: true, - 202042: true, - 202043: true, - 202044: true, - 202051: true, - 202054: true, - 202055: true, - 203043: true, - 203050: true, - 203051: true, - 203053: true, - 203060: true, - 203062: true, - 204042: true, - 204044: true, - 204048: true, - 204052: true, - 204053: true, - 204061: true, - 204062: true, - 204064: true, - 204065: true, - 204067: true, - 204068: true, - 204069: true, - 205042: true, - 205044: true, - 206043: true, - 206044: true, - 206047: true, - 206050: true, - 206051: true, - 206052: true, - 206053: true, - 206054: true, - 206055: true, - 206058: true, - 206059: true, - 206060: true, - 206067: true, - 206069: true, - 206077: true, - 206078: true, - 206079: true, - 206084: true, - 206089: true, - 206101: true, - 206107: true, - 206109: true, - 207043: true, - 207044: true, - 207049: true, - 207050: true, - 207051: true, - 207052: true, - 207053: true, - 207054: true, - 207055: true, - 207061: true, - 207062: true, - 207069: true, - 207071: true, - 207085: true, - 207086: true, - 207087: true, - 207088: true, - 207095: true, - 207096: true, - 207102: true, - 207103: true, - 207106: true, - 207108: true, - 207110: true, - 207111: true, - 207112: true, - 209041: true, - 209042: true, - 209043: true, - 209044: true, - 210042: true, - 210043: true, - 210044: true, - 210047: true, - 211041: true, - 212041: true, - 212045: true, - 212046: true, - 212047: true, - 213041: true, - 213042: true, - 214042: true, - 214046: true, - 214049: true, - 214050: true, - 215042: true, - 215048: true, - 215050: true, - 216043: true, - 216046: true, - 216047: true, - 216052: true, - 216053: true, - 216054: true, - 216059: true, - 216068: true, - 217041: true, - 217044: true, - 217047: true, - 217048: true, - 217049: true, - 217056: true, - 217058: true, - 217059: true, - 217060: true, - 217061: true, - 217064: true, - 217066: true, - 217069: true, - 217071: true, - 217085: true, - 217086: true, - 217088: true, - 217093: true, - 217094: true, - 217108: true, - 217109: true, - 217111: true, - 217115: true, - 217116: true, - 218042: true, - 218044: true, - 218046: true, - 218050: true, - 218060: true, - 218061: true, - 218063: true, - 218064: true, - 218065: true, - 218070: true, - 218071: true, - 218072: true, - 218074: true, - 218076: true, - 222041: true, - 223041: true, - 223043: true, - 223044: true, - 223050: true, - 223052: true, - 223054: true, - 223058: true, - 223059: true, - 223061: true, - 223068: true, - 223069: true, - 223070: true, - 223071: true, - 223073: true, - 223075: true, - 223076: true, - 223083: true, - 223087: true, - 223094: true, - 223096: true, - 223101: true, - 223106: true, - 223108: true, - 224041: true, - 224042: true, - 224043: true, - 224045: true, - 224051: true, - 224053: true, - 224057: true, - 224060: true, - 224061: true, - 224062: true, - 224063: true, - 224068: true, - 224069: true, - 224081: true, - 224084: true, - 224087: true, - 224090: true, - 224096: true, - 224105: true, - 225042: true, - 227041: true, - 229045: true, - 229046: true, - 229048: true, - 229049: true, - 229050: true, - 231042: true, - 236041: true, - 237041: true, - 238041: true, - 238042: true, - 240041: true, - 240042: true, - 240043: true, - 241041: true, - 243041: true, - 244041: true, - 245041: true, - 247041: true, - 250041: true, - 252041: true, - 253041: true, - 253045: true, - 254043: true, - 255042: true, - 255043: true, - 257041: true, - 257042: true, - 258041: true, - 261041: true, - 264041: true, - 294042: true, - 296042: true, -} diff --git a/godoc/static/package.html b/godoc/static/package.html index 86445df4c08..a04b08b63f5 100644 --- a/godoc/static/package.html +++ b/godoc/static/package.html @@ -17,7 +17,7 @@ {{if $.IsMain}} {{/* command documentation */}} - {{comment_html .Doc}} + {{comment_html $ .Doc}} {{else}} {{/* package documentation */}}
@@ -42,7 +42,7 @@

Overview ā–¹

Overview ā–¾

- {{comment_html .Doc}} + {{comment_html $ .Doc}} {{example_html $ ""}}
@@ -154,14 +154,14 @@

Inter {{with .Consts}}

Constants

{{range .}} - {{comment_html .Doc}} + {{comment_html $ .Doc}}
{{node_html $ .Decl true}}
{{end}} {{end}} {{with .Vars}}

Variables

{{range .}} - {{comment_html .Doc}} + {{comment_html $ .Doc}}
{{node_html $ .Decl true}}
{{end}} {{end}} @@ -174,7 +174,7 @@

func {{$name_html}}{{$since}}{{end}}

{{node_html $ .Decl true}}
- {{comment_html .Doc}} + {{comment_html $ .Doc}} {{example_html $ .Name}} {{callgraph_html $ "" .Name}} @@ -187,16 +187,16 @@

type {{$tname_html}}< {{$since := since "type" "" .Name $.PDoc.ImportPath}} {{if $since}}{{$since}}{{end}}

- {{comment_html .Doc}} + {{comment_html $ .Doc}}
{{node_html $ .Decl true}}
{{range .Consts}} - {{comment_html .Doc}} + {{comment_html $ .Doc}}
{{node_html $ .Decl true}}
{{end}} {{range .Vars}} - {{comment_html .Doc}} + {{comment_html $ .Doc}}
{{node_html $ .Decl true}}
{{end}} @@ -212,7 +212,7 @@

func {{$name_html}}{{$since}}{{end}}

{{node_html $ .Decl true}}
- {{comment_html .Doc}} + {{comment_html $ .Doc}} {{example_html $ .Name}} {{callgraph_html $ "" .Name}} {{end}} @@ -225,7 +225,7 @@

func ({{html .Recv}}) {{$since}}{{end}}

{{node_html $ .Decl true}}
- {{comment_html .Doc}} + {{comment_html $ .Doc}} {{$name := printf "%s_%s" $tname .Name}} {{example_html $ $name}} {{callgraph_html $ .Recv .Name}} @@ -238,7 +238,7 @@

func ({{html .Recv}}) {{noteTitle $marker | html}}s

{{end}} diff --git a/godoc/static/searchdoc.html b/godoc/static/searchdoc.html index 679c02cf3a8..84dcb345270 100644 --- a/godoc/static/searchdoc.html +++ b/godoc/static/searchdoc.html @@ -15,7 +15,7 @@

{{$key.Name}}

{{html .Package}}.{{.Name}} {{end}} {{if .Doc}} -

{{comment_html .Doc}}

+

{{comment_html $ .Doc}}

{{else}}

No documentation available

{{end}} diff --git a/godoc/static/static.go b/godoc/static/static.go index ada60fab6c2..d6e5f2d2e0e 100644 --- a/godoc/static/static.go +++ b/godoc/static/static.go @@ -83,7 +83,7 @@ var Files = map[string]string{ "methodset.html": "\x0a\x09\x0a\x09\x09\xe2\x96\xb9\x20Method\x20set

\x0a\x09\x0a\x09\x0a\x09\x09\xe2\x96\xbe\x20Method\x20set

\x0a\x09\x09...\x0a\x09\x0a\x0a", - "package.html": "\x0a\x0a{{with\x20.PDoc}}\x0a\x09\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09\x0a\x09\x09\x09
\x0a\x09\x09\x09
import\x20\"{{html\x20.ImportPath}}\"
\x0a\x09\x09\x09
\x0a\x09\x09\x09
\x0a\x09\x09\x09
Overview
\x0a\x09\x09\x09
Index
\x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09
Examples
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09
Subdirectories
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Overview\x20\xe2\x96\xb9\x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Overview\x20\xe2\x96\xbe\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09\x0a\x09\x09\x0a\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Index\x20\xe2\x96\xb9\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Index\x20\xe2\x96\xbe\x0a\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09
\x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09
Constants
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09
Variables
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09
type\x20{{$tname_html}}
\x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09
 \x20 \x20{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09
 \x20 \x20{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09
{{noteTitle\x20$marker\x20|\x20html}}s
\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
\x0a\x09\x09\x09\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x0a\x09\x09\x09

Examples

\x0a\x09\x09\x09(Expand\x20All)\x0a\x09\x09\x09
\x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09
{{example_name\x20.Name}}
\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
\x0a\x09\x09\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09

Package\x20files

\x0a\x09\x09\x09

\x0a\x09\x09\x09\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{.|filename|html}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09

\x0a\x09\x09{{end}}\x0a\x09\x09\x0a\x09\x09\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Internal\x20call\x20graph\x20\xe2\x96\xb9\x0a\x09\x09\x20\x0a\x09\x09\x0a\x09\x09\x09Internal\x20call\x20graph\x20\xe2\x96\xbe\x0a\x09\x09\x09

\x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls—perhaps\x20dynamically.\x0a\x09\x09\x09

\x0a\x09\x09\x09

\x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09

\x0a\x09\x09\x09

\x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20func\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09

\x0a\x09\x09\x09

\x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09

\x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x0a\x09\x09\x20\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09Constants\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09Variables\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09func\x20{{$name_html}}\x0a\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09type\x20{{$tname_html}}\x0a\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Vars}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09func\x20{{$name_html}}\x0a\x09\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09func\x20({{html\x20.Recv}})\x20{{$name_html}}\x0a\x09\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09
{{node_html\x20$\x20.Decl\x20true}}
\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09{{noteTitle\x20$marker\x20|\x20html}}s\x0a\x09\x09\x09\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09
  • ☞\x20{{comment_html\x20.Body}}
  • \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09{{$filename|filename|html}}:
    {{node_html\x20$\x20$ast\x20false}}
    \x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09Subdirectories\x0a\x09{{end}}\x0a\x09\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Name\x0a\x09\x09\x09\x09Synopsis\x0a\x09\x09\x09\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09\x0a\x09\x09\x09\x09..\x0a\x09\x09\x09\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Name}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x0a\x09\x09\x09{{end}}\x0a\x09\x09
    \x0a\x09\x0a{{end}}\x0a", + "package.html": "\x0a\x0a{{with\x20.PDoc}}\x0a\x09\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09\x0a\x09\x09\x09
    \x0a\x09\x09\x09
    import\x20\"{{html\x20.ImportPath}}\"
    \x0a\x09\x09\x09
    \x0a\x09\x09\x09
    \x0a\x09\x09\x09
    Overview
    \x0a\x09\x09\x09
    Index
    \x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09
    Examples
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09
    Subdirectories
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
    \x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Overview\x20\xe2\x96\xb9\x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Overview\x20\xe2\x96\xbe\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09\x0a\x09\x09\x0a\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Index\x20\xe2\x96\xb9\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Index\x20\xe2\x96\xbe\x0a\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09
    \x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09
    Constants
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09
    Variables
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09
    type\x20{{$tname_html}}
    \x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09
     \x20 \x20{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
    \x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09
     \x20 \x20{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}
    \x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09
    {{noteTitle\x20$marker\x20|\x20html}}s
    \x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
    \x0a\x09\x09\x09\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x0a\x09\x09\x09

    Examples

    \x0a\x09\x09\x09(Expand\x20All)\x0a\x09\x09\x09
    \x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09
    {{example_name\x20.Name}}
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09
    \x0a\x09\x09\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09

    Package\x20files

    \x0a\x09\x09\x09

    \x0a\x09\x09\x09\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{.|filename|html}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09

    \x0a\x09\x09{{end}}\x0a\x09\x09\x0a\x09\x09\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09\x09Internal\x20call\x20graph\x20\xe2\x96\xb9\x0a\x09\x09\x20\x0a\x09\x09\x0a\x09\x09\x09Internal\x20call\x20graph\x20\xe2\x96\xbe\x0a\x09\x09\x09

    \x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls—perhaps\x20dynamically.\x0a\x09\x09\x09

    \x0a\x09\x09\x09

    \x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09

    \x0a\x09\x09\x09

    \x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20func\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09

    \x0a\x09\x09\x09

    \x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09

    \x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x0a\x09\x09\x20\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09Constants\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09Variables\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09func\x20{{$name_html}}\x0a\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09type\x20{{$tname_html}}\x0a\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Vars}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09func\x20{{$name_html}}\x0a\x09\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09func\x20({{html\x20.Recv}})\x20{{$name_html}}\x0a\x09\x09\x09\x09\x09¶\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}{{$since}}{{end}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09
    {{node_html\x20$\x20.Decl\x20true}}
    \x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09{{noteTitle\x20$marker\x20|\x20html}}s\x0a\x09\x09\x09\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09
  • ☞\x20{{comment_html\x20$\x20.Body}}
  • \x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09{{$filename|filename|html}}:
    {{node_html\x20$\x20$ast\x20false}}
    \x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09Subdirectories\x0a\x09{{end}}\x0a\x09\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Name\x0a\x09\x09\x09\x09Synopsis\x0a\x09\x09\x09\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09\x0a\x09\x09\x09\x09..\x0a\x09\x09\x09\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Name}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x0a\x09\x09\x09{{end}}\x0a\x09\x09
    \x0a\x09\x0a{{end}}\x0a", "packageroot.html": "\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09{{$filename|filename|html}}:
    {{node_html\x20$\x20$ast\x20false}}
    \x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09Subdirectories\x0a\x09{{end}}\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09
    \x0a\x09\x09\x09\x09
    Standard\x20library
    \x0a\x09\x09\x09\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09\x09\x09\x09
    Third\x20party
    \x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09
    Other\x20packages
    \x0a\x09\x09\x09\x09
    Sub-repositories
    \x0a\x09\x09\x09\x09
    Community
    \x0a\x09\x09\x09
    \x0a\x09\x09\x0a\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Standard\x20library\x20\xe2\x96\xb9\x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Standard\x20library\x20\xe2\x96\xbe\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09Name\x0a\x09\x09\x09\x09\x09\x09\x09Synopsis\x0a\x09\x09\x09\x09\x09\x09\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOROOT\"}}\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Name}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09
    \x0a\x09\x09\x09\x09\x20\x0a\x09\x09\x09\x20\x0a\x09\x09\x20\x0a\x0a\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Third\x20party\x20\xe2\x96\xb9\x0a\x09\x09\x09\x0a\x09\x09\x09\x0a\x09\x09\x09\x09Third\x20party\x20\xe2\x96\xbe\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09Name\x0a\x09\x09\x09\x09\x09\x09\x09Synopsis\x0a\x09\x09\x09\x09\x09\x09\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOPATH\"}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Name}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09
    \x0a\x09\x09\x09\x09\x20\x0a\x09\x09\x09\x20\x0a\x09\x09\x20\x0a\x09{{end}}\x0a\x0a\x09Other\x20packages\x0a\x09Sub-repositories\x0a\x09

    \x0a\x09These\x20packages\x20are\x20part\x20of\x20the\x20Go\x20Project\x20but\x20outside\x20the\x20main\x20Go\x20tree.\x0a\x09They\x20are\x20developed\x20under\x20looser\x20compatibility\x20requirements\x20than\x20the\x20Go\x20core.\x0a\x09Install\x20them\x20with\x20\"go\x20get\".\x0a\x09

    \x0a\x09
      \x0a\x09\x09
    • benchmarks\x20\xe2\x80\x94\x20benchmarks\x20to\x20measure\x20Go\x20as\x20it\x20is\x20developed.
    • \x0a\x09\x09
    • blog\x20\xe2\x80\x94\x20blog.golang.org's\x20implementation.
    • \x0a\x09\x09
    • build\x20\xe2\x80\x94\x20build.golang.org's\x20implementation.
    • \x0a\x09\x09
    • crypto\x20\xe2\x80\x94\x20additional\x20cryptography\x20packages.
    • \x0a\x09\x09
    • debug\x20\xe2\x80\x94\x20an\x20experimental\x20debugger\x20for\x20Go.
    • \x0a\x09\x09
    • image\x20\xe2\x80\x94\x20additional\x20imaging\x20packages.
    • \x0a\x09\x09
    • mobile\x20\xe2\x80\x94\x20experimental\x20support\x20for\x20Go\x20on\x20mobile\x20platforms.
    • \x0a\x09\x09
    • net\x20\xe2\x80\x94\x20additional\x20networking\x20packages.
    • \x0a\x09\x09
    • perf\x20\xe2\x80\x94\x20packages\x20and\x20tools\x20for\x20performance\x20measurement,\x20storage,\x20and\x20analysis.
    • \x0a\x09\x09
    • pkgsite\x20\xe2\x80\x94\x20home\x20of\x20the\x20pkg.go.dev\x20website.
    • \x0a\x09\x09
    • review\x20\xe2\x80\x94\x20a\x20tool\x20for\x20working\x20with\x20Gerrit\x20code\x20reviews.
    • \x0a\x09\x09
    • sync\x20\xe2\x80\x94\x20additional\x20concurrency\x20primitives.
    • \x0a\x09\x09
    • sys\x20\xe2\x80\x94\x20packages\x20for\x20making\x20system\x20calls.
    • \x0a\x09\x09
    • text\x20\xe2\x80\x94\x20packages\x20for\x20working\x20with\x20text.
    • \x0a\x09\x09
    • time\x20\xe2\x80\x94\x20additional\x20time\x20packages.
    • \x0a\x09\x09
    • tools\x20\xe2\x80\x94\x20godoc,\x20goimports,\x20gorename,\x20and\x20other\x20tools.
    • \x0a\x09\x09
    • tour\x20\xe2\x80\x94\x20tour.golang.org's\x20implementation.
    • \x0a\x09\x09
    • exp\x20\xe2\x80\x94\x20experimental\x20and\x20deprecated\x20packages\x20(handle\x20with\x20care;\x20may\x20change\x20without\x20warning).
    • \x0a\x09
    \x0a\x0a\x09Community\x0a\x09

    \x0a\x09These\x20services\x20can\x20help\x20you\x20find\x20Open\x20Source\x20packages\x20provided\x20by\x20the\x20community.\x0a\x09

    \x0a\x09
      \x0a\x09\x09
    • Pkg.go.dev\x20-\x20the\x20Go\x20package\x20discovery\x20site.
    • \x0a\x09\x09
    • Projects\x20at\x20the\x20Go\x20Wiki\x20-\x20a\x20curated\x20list\x20of\x20Go\x20projects.
    • \x0a\x09
    \x0a{{end}}\x0a", @@ -95,7 +95,7 @@ var Files = map[string]string{ "searchcode.html": "\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{if\x20not\x20.Idents}}\x0a\x09{{with\x20.Pak}}\x0a\x09\x09Package\x20{{html\x20$.Query}}\x0a\x09\x09

    \x0a\x09\x09\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09{{$pkg_html}}\x0a\x09\x09{{end}}\x0a\x09\x09\x0a\x09\x09

    \x0a\x09{{end}}\x0a{{end}}\x0a{{with\x20.Hit}}\x0a\x09{{with\x20.Decls}}\x0a\x09\x09Package-level\x20declarations\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09package\x20{{html\x20.Pak.Name}}\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.File.Path}}\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$file}}:{{$line}}\x0a\x09\x09\x09\x09\x09\x09{{infoSnippet_html\x20.}}\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x09{{with\x20.Others}}\x0a\x09\x09Local\x20declarations\x20and\x20uses\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09package\x20{{html\x20.Pak.Name}}\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.File.Path}}\x0a\x09\x09\x09\x09{{$file}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09{{index\x20.\x200\x20|\x20infoKind_html}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line}}\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09\x09\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a", - "searchdoc.html": "\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09{{$key.Name}}\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09{{html\x20.Package}}.{{.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09

    {{comment_html\x20.Doc}}

    \x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09

    No\x20documentation\x20available

    \x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a", + "searchdoc.html": "\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09{{$key.Name}}\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09{{html\x20.Path}}\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09{{html\x20.Package}}.{{.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09

    {{comment_html\x20$\x20.Doc}}

    \x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09

    No\x20documentation\x20available

    \x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a", "searchtxt.html": "\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{with\x20.Textual}}\x0a\x09{{if\x20$.Complete}}\x0a\x09\x09{{html\x20$.Found}}\x20textual\x20occurrences\x0a\x09{{else}}\x0a\x09\x09More\x20than\x20{{html\x20$.Found}}\x20textual\x20occurrences\x0a\x09\x09

    \x0a\x09\x09Not\x20all\x20files\x20or\x20lines\x20containing\x20\"{{html\x20$.Query}}\"\x20are\x20shown.\x0a\x09\x09

    \x0a\x09{{end}}\x0a\x09

    \x0a\x09\x0a\x09{{range\x20.}}\x0a\x09\x09{{$file\x20:=\x20.Filename}}\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09{{$file}}:\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09{{len\x20.Lines}}\x0a\x09\x09\x0a\x09\x09\x0a\x09\x09{{range\x20.Lines}}\x0a\x09\x09\x09{{html\x20.}}\x0a\x09\x09{{end}}\x0a\x09\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09\x09...\x0a\x09\x09{{end}}\x0a\x09\x09\x0a\x09\x09\x0a\x09{{end}}\x0a\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09...\x0a\x09{{end}}\x0a\x09\x0a\x09

    \x0a{{end}}\x0a", diff --git a/godoc/tohtml_go119.go b/godoc/tohtml_go119.go new file mode 100644 index 00000000000..6dbf7212b9a --- /dev/null +++ b/godoc/tohtml_go119.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package godoc + +import ( + "bytes" + "go/doc" +) + +func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) { + buf.Write(pkg.HTML(comment)) +} diff --git a/godoc/tohtml_other.go b/godoc/tohtml_other.go new file mode 100644 index 00000000000..a1dcf2e195b --- /dev/null +++ b/godoc/tohtml_other.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package godoc + +import ( + "bytes" + "go/doc" +) + +func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) { + doc.ToHTML(buf, comment, nil) +} diff --git a/gopls/README.md b/gopls/README.md index 9afc2e48c1e..56d15921a70 100644 --- a/gopls/README.md +++ b/gopls/README.md @@ -5,56 +5,57 @@ `gopls` (pronounced "Go please") is the official Go [language server] developed by the Go team. It provides IDE features to any [LSP]-compatible editor. - + You should not need to interact with `gopls` directly--it will be automatically integrated into your editor. The specific features and settings vary slightly -by editor, so we recommend that you proceed to the [documentation for your -editor](#editors) below. +by editor, so we recommend that you proceed to the +[documentation for your editor](#editors) below. ## Editors To get started with `gopls`, install an LSP plugin in your editor of choice. -* [VSCode](https://github.com/golang/vscode-go/blob/master/README.md) +* [VS Code](https://github.com/golang/vscode-go/blob/master/README.md) * [Vim / Neovim](doc/vim.md) * [Emacs](doc/emacs.md) * [Atom](https://github.com/MordFustang21/ide-gopls) * [Sublime Text](doc/subl.md) * [Acme](https://github.com/fhs/acme-lsp) +* [Lapce](https://github.com/lapce-community/lapce-go) -If you use `gopls` with an editor that is not on this list, please let us know -by [filing an issue](#new-issue) or [modifying this documentation](doc/contributing.md). +If you use `gopls` with an editor that is not on this list, please send us a CL +[updating this documentation](doc/contributing.md). ## Installation For the most part, you should not need to install or update `gopls`. Your editor should handle that step for you. -If you do want to get the latest stable version of `gopls`, change to any -directory that is both outside of your `GOPATH` and outside of a module (a temp -directory is fine), and run: +If you do want to get the latest stable version of `gopls`, run the following +command: ```sh go install golang.org/x/tools/gopls@latest ``` -Learn more in the [advanced installation -instructions](doc/advanced.md#installing-unreleased-versions). +Learn more in the +[advanced installation instructions](doc/advanced.md#installing-unreleased-versions). + +Learn more about gopls releases in the [release policy](doc/releases.md). ## Setting up your workspace -`gopls` supports both Go module and GOPATH modes, but if you are working with -multiple modules or uncommon project layouts, you will need to specifically -configure your workspace. See the [Workspace document](doc/workspace.md) for -information on supported workspace layouts. +`gopls` supports both Go module, multi-module and GOPATH modes. See the +[workspace documentation](doc/workspace.md) for information on supported +workspace layouts. ## Configuration You can configure `gopls` to change your editor experience or view additional debugging information. Configuration options will be made available by your editor, so see your [editor's instructions](#editors) for specific details. A -full list of `gopls` settings can be found in the [Settings documentation](doc/settings.md). +full list of `gopls` settings can be found in the [settings documentation](doc/settings.md). ### Environment variables @@ -62,27 +63,36 @@ full list of `gopls` settings can be found in the [Settings documentation](doc/s variables you configure. Some editors, such as VS Code, allow users to selectively override the values of some environment variables. -## Troubleshooting +## Support Policy -If you are having issues with `gopls`, please follow the steps described in the -[troubleshooting guide](doc/troubleshooting.md). +Gopls is maintained by engineers on the +[Go tools team](https://github.com/orgs/golang/teams/tools-team/members), +who actively monitor the +[Go](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+label%3Agopls) +and +[VS Code Go](https://github.com/golang/vscode-go/issues) issue trackers. -## Supported Go versions and build systems +### Supported Go versions `gopls` follows the [Go Release Policy](https://golang.org/doc/devel/release.html#policy), meaning that it officially supports the last 2 major Go releases. Per -[issue #39146](golang.org/issues/39146), we attempt to maintain best-effort +[issue #39146](https://go.dev/issues/39146), we attempt to maintain best-effort support for the last 4 major Go releases, but this support extends only to not breaking the build and avoiding easily fixable regressions. -The following table shows the final gopls version that supports being built at -a given Go Version. Any more recent Go versions missing from this table can -still be built with the latest version of gopls. +In the context of this discussion, gopls "supports" a Go version if it supports +being built with that Go version as well as integrating with the `go` command +of that Go version. -| Go Version | Final gopls Version With Support | -| ----------- | -------------------------------- | +The following table shows the final gopls version that supports a given Go +version. Go releases more recent than any in the table can be used with any +version of gopls. + +| Go Version | Final gopls version with support (without warnings) | +| ----------- | --------------------------------------------------- | | Go 1.12 | [gopls@v0.7.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.7.5) | +| Go 1.15 | [gopls@v0.9.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.9.5) | Our extended support is enforced via [continuous integration with older Go versions](doc/contributing.md#ci). This legacy Go CI may not block releases: @@ -90,13 +100,22 @@ test failures may be skipped rather than fixed. Furthermore, if a regression in an older Go version causes irreconcilable CI failures, we may drop support for that Go version in CI if it is 3 or 4 Go versions old. -`gopls` currently only supports the `go` command, so if you are using a -different build system, `gopls` will not work well. Bazel is not officially -supported, but Bazel support is in development (see -[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512)). +### Supported build systems + +`gopls` currently only supports the `go` command, so if you are using +a different build system, `gopls` will not work well. Bazel is not officially +supported, but may be made to work with an appropriately configured +`go/packages` driver. See +[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512) +for more information. You can follow [these instructions](https://github.com/bazelbuild/rules_go/wiki/Editor-setup) to configure your `gopls` to work with Bazel. +### Troubleshooting + +If you are having issues with `gopls`, please follow the steps described in the +[troubleshooting guide](doc/troubleshooting.md). + ## Additional information * [Features](doc/features.md) @@ -110,4 +129,3 @@ to configure your `gopls` to work with Bazel. [language server]: https://langserver.org [LSP]: https://microsoft.github.io/language-server-protocol/ -[Gophers Slack]: https://gophers.slack.com/ diff --git a/gopls/api-diff/api_diff.go b/gopls/api-diff/api_diff.go index 167bdbd1b9f..8bb54186bab 100644 --- a/gopls/api-diff/api_diff.go +++ b/gopls/api-diff/api_diff.go @@ -13,262 +13,77 @@ import ( "encoding/json" "flag" "fmt" - "io" - "io/ioutil" "log" "os" "os/exec" - "path/filepath" - "strings" - "golang.org/x/tools/internal/gocommand" - difflib "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/source" + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/source" ) -var ( - previousVersionFlag = flag.String("prev", "", "version to compare against") - versionFlag = flag.String("version", "", "version being tagged, or current version if omitted") -) +const usage = `api-diff [] + +Compare the API of two gopls versions. If the second argument is provided, it +will be used as the new version to compare against. Otherwise, compare against +the current API. +` func main() { flag.Parse() - apiDiff, err := diffAPI(*versionFlag, *previousVersionFlag) + if flag.NArg() < 1 || flag.NArg() > 2 { + fmt.Fprint(os.Stderr, usage) + os.Exit(2) + } + + oldVer := flag.Arg(0) + newVer := "" + if flag.NArg() == 2 { + newVer = flag.Arg(1) + } + + apiDiff, err := diffAPI(oldVer, newVer) if err != nil { log.Fatal(err) } - fmt.Printf(` -%s -`, apiDiff) -} - -type JSON interface { - String() string - Write(io.Writer) + fmt.Println("\n" + apiDiff) } -func diffAPI(version, prev string) (string, error) { +func diffAPI(oldVer, newVer string) (string, error) { ctx := context.Background() - previousApi, err := loadAPI(ctx, prev) + previousAPI, err := loadAPI(ctx, oldVer) if err != nil { - return "", fmt.Errorf("load previous API: %v", err) + return "", fmt.Errorf("loading %s: %v", oldVer, err) } - var currentApi *source.APIJSON - if version == "" { - currentApi = source.GeneratedAPIJSON + var currentAPI *source.APIJSON + if newVer == "" { + currentAPI = source.GeneratedAPIJSON } else { var err error - currentApi, err = loadAPI(ctx, version) + currentAPI, err = loadAPI(ctx, newVer) if err != nil { - return "", fmt.Errorf("load current API: %v", err) - } - } - - b := &strings.Builder{} - if err := diff(b, previousApi.Commands, currentApi.Commands, "command", func(c *source.CommandJSON) string { - return c.Command - }, diffCommands); err != nil { - return "", fmt.Errorf("diff commands: %v", err) - } - if diff(b, previousApi.Analyzers, currentApi.Analyzers, "analyzer", func(a *source.AnalyzerJSON) string { - return a.Name - }, diffAnalyzers); err != nil { - return "", fmt.Errorf("diff analyzers: %v", err) - } - if err := diff(b, previousApi.Lenses, currentApi.Lenses, "code lens", func(l *source.LensJSON) string { - return l.Lens - }, diffLenses); err != nil { - return "", fmt.Errorf("diff lenses: %v", err) - } - for key, prev := range previousApi.Options { - current, ok := currentApi.Options[key] - if !ok { - panic(fmt.Sprintf("unexpected option key: %s", key)) - } - if err := diff(b, prev, current, "option", func(o *source.OptionJSON) string { - return o.Name - }, diffOptions); err != nil { - return "", fmt.Errorf("diff options (%s): %v", key, err) + return "", fmt.Errorf("loading %s: %v", newVer, err) } } - return b.String(), nil + return cmp.Diff(previousAPI, currentAPI), nil } -func diff[T JSON](b *strings.Builder, previous, new []T, kind string, uniqueKey func(T) string, diffFunc func(*strings.Builder, T, T)) error { - prevJSON := collect(previous, uniqueKey) - newJSON := collect(new, uniqueKey) - for k := range newJSON { - delete(prevJSON, k) - } - for _, deleted := range prevJSON { - b.WriteString(fmt.Sprintf("%s %s was deleted.\n", kind, deleted)) - } - for _, prev := range previous { - delete(newJSON, uniqueKey(prev)) - } - if len(newJSON) > 0 { - b.WriteString("The following commands were added:\n") - for _, n := range newJSON { - n.Write(b) - b.WriteByte('\n') - } - } - previousMap := collect(previous, uniqueKey) - for _, current := range new { - prev, ok := previousMap[uniqueKey(current)] - if !ok { - continue - } - c, p := bytes.NewBuffer(nil), bytes.NewBuffer(nil) - prev.Write(p) - current.Write(c) - if diff, err := diffStr(p.String(), c.String()); err == nil && diff != "" { - diffFunc(b, prev, current) - b.WriteString("\n--\n") - } - } - return nil -} - -func collect[T JSON](args []T, uniqueKey func(T) string) map[string]T { - m := map[string]T{} - for _, arg := range args { - m[uniqueKey(arg)] = arg - } - return m -} - -var goCmdRunner = gocommand.Runner{} - func loadAPI(ctx context.Context, version string) (*source.APIJSON, error) { - tmpGopath, err := ioutil.TempDir("", "gopath*") - if err != nil { - return nil, fmt.Errorf("temp dir: %v", err) - } - defer os.RemoveAll(tmpGopath) + ver := fmt.Sprintf("golang.org/x/tools/gopls@%s", version) + cmd := exec.Command("go", "run", ver, "api-json") - exampleDir := fmt.Sprintf("%s/src/example.com", tmpGopath) - if err := os.MkdirAll(exampleDir, 0776); err != nil { - return nil, fmt.Errorf("mkdir: %v", err) - } + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + cmd.Stdout = stdout + cmd.Stderr = stderr - if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{ - Verb: "mod", - Args: []string{"init", "example.com"}, - WorkingDir: exampleDir, - Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)), - }); err != nil { - return nil, fmt.Errorf("go mod init failed: %v (stdout: %v)", err, stdout) - } - if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{ - Verb: "install", - Args: []string{fmt.Sprintf("golang.org/x/tools/gopls@%s", version)}, - WorkingDir: exampleDir, - Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)), - }); err != nil { - return nil, fmt.Errorf("go install failed: %v (stdout: %v)", err, stdout.String()) - } - cmd := exec.Cmd{ - Path: filepath.Join(tmpGopath, "bin", "gopls"), - Args: []string{"gopls", "api-json"}, - Dir: tmpGopath, - } - out, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("output: %v", err) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("go run failed: %v; stderr:\n%s", err, stderr) } apiJson := &source.APIJSON{} - if err := json.Unmarshal(out, apiJson); err != nil { + if err := json.Unmarshal(stdout.Bytes(), apiJson); err != nil { return nil, fmt.Errorf("unmarshal: %v", err) } return apiJson, nil } - -func diffCommands(b *strings.Builder, prev, current *source.CommandJSON) { - if prev.Title != current.Title { - b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", prev.Title, current.Title)) - } - if prev.Doc != current.Doc { - b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", prev.Doc, current.Doc)) - } - if prev.ArgDoc != current.ArgDoc { - b.WriteString("Arguments changed from " + formatBlock(prev.ArgDoc) + " to " + formatBlock(current.ArgDoc)) - } - if prev.ResultDoc != current.ResultDoc { - b.WriteString("Results changed from " + formatBlock(prev.ResultDoc) + " to " + formatBlock(current.ResultDoc)) - } -} - -func diffAnalyzers(b *strings.Builder, previous, current *source.AnalyzerJSON) { - b.WriteString(fmt.Sprintf("Changes to analyzer %s:\n\n", current.Name)) - if previous.Doc != current.Doc { - b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc)) - } - if previous.Default != current.Default { - b.WriteString(fmt.Sprintf("Default changed from %v to %v\n", previous.Default, current.Default)) - } -} - -func diffLenses(b *strings.Builder, previous, current *source.LensJSON) { - b.WriteString(fmt.Sprintf("Changes to code lens %s:\n\n", current.Title)) - if previous.Title != current.Title { - b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", previous.Title, current.Title)) - } - if previous.Doc != current.Doc { - b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc)) - } -} - -func diffOptions(b *strings.Builder, previous, current *source.OptionJSON) { - b.WriteString(fmt.Sprintf("Changes to option %s:\n\n", current.Name)) - if previous.Doc != current.Doc { - diff, err := diffStr(previous.Doc, current.Doc) - if err != nil { - panic(err) - } - b.WriteString(fmt.Sprintf("Documentation changed:\n%s\n", diff)) - } - if previous.Default != current.Default { - b.WriteString(fmt.Sprintf("Default changed from %q to %q\n", previous.Default, current.Default)) - } - if previous.Hierarchy != current.Hierarchy { - b.WriteString(fmt.Sprintf("Categorization changed from %q to %q\n", previous.Hierarchy, current.Hierarchy)) - } - if previous.Status != current.Status { - b.WriteString(fmt.Sprintf("Status changed from %q to %q\n", previous.Status, current.Status)) - } - if previous.Type != current.Type { - b.WriteString(fmt.Sprintf("Type changed from %q to %q\n", previous.Type, current.Type)) - } - // TODO(rstambler): Handle possibility of same number but different keys/values. - if len(previous.EnumKeys.Keys) != len(current.EnumKeys.Keys) { - b.WriteString(fmt.Sprintf("Enum keys changed from\n%s\n to \n%s\n", previous.EnumKeys, current.EnumKeys)) - } - if len(previous.EnumValues) != len(current.EnumValues) { - b.WriteString(fmt.Sprintf("Enum values changed from\n%s\n to \n%s\n", previous.EnumValues, current.EnumValues)) - } -} - -func formatBlock(str string) string { - if str == "" { - return `""` - } - return "\n```\n" + str + "\n```\n" -} - -func diffStr(before, after string) (string, error) { - // Add newlines to avoid newline messages in diff. - if before == after { - return "", nil - } - before += "\n" - after += "\n" - d, err := myers.ComputeEdits("", before, after) - if err != nil { - return "", err - } - return fmt.Sprintf("%q", difflib.ToUnified("previous", "current", before, d)), err -} diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md index f5c83d5771d..a1134bee3b3 100644 --- a/gopls/doc/analyzers.md +++ b/gopls/doc/analyzers.md @@ -48,7 +48,7 @@ check for common mistakes involving boolean operators ## **buildtag** -check that +build tags are well-formed and correctly located +check //go:build and // +build directives **Enabled by default.** @@ -106,6 +106,26 @@ The deepequalerrors checker looks for calls of the form: where err1 and err2 are errors. Using reflect.DeepEqual to compare errors is discouraged. +**Enabled by default.** + +## **directive** + +check Go toolchain directives such as //go:debug + +This analyzer checks for problems with known Go toolchain directives +in all Go source files in a package directory, even those excluded by +//go:build constraints, and all non-Go source files too. + +For //go:debug (see https://go.dev/doc/godebug), the analyzer checks +that the directives are placed only in Go source files, only above the +package comment, and only in package main or *_test.go files. + +Support for other known directives may be added in the future. + +This analyzer does not check //go:build, which is handled by the +buildtag analyzer. + + **Enabled by default.** ## **embed** @@ -131,7 +151,7 @@ of the second argument is not a pointer to a type implementing error. find structs that would use less memory if their fields were sorted This analyzer find structs that can be rearranged to use less memory, and provides -a suggested edit with the optimal order. +a suggested edit with the most compact order. Note that there are two different diagnostics reported. One checks struct size, and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the @@ -150,6 +170,11 @@ has 24 pointer bytes because it has to scan further through the *uint32. has 8 because it can stop immediately after the string pointer. +Be aware that the most compact order is not always the most efficient. +In rare cases it may cause two variables each updated by its own goroutine +to occupy the same CPU cache line, inducing a form of memory contention +known as "false sharing" that slows down both goroutines. + **Disabled by default. Enable it by setting `"analyses": {"fieldalignment": true}`.** @@ -213,19 +238,60 @@ inferred from function arguments, or from other type arguments: check references to loop variables from within nested functions -This analyzer checks for references to loop variables from within a -function literal inside the loop body. It checks only instances where -the function literal is called in a defer or go statement that is the -last statement in the loop body, as otherwise we would need whole -program analysis. - -For example: - - for i, v := range s { - go func() { - println(i, v) // not what you might expect - }() - } +This analyzer reports places where a function literal references the +iteration variable of an enclosing loop, and the loop calls the function +in such a way (e.g. with go or defer) that it may outlive the loop +iteration and possibly observe the wrong value of the variable. + +In this example, all the deferred functions run after the loop has +completed, so all observe the final value of v. + + for _, v := range list { + defer func() { + use(v) // incorrect + }() + } + +One fix is to create a new variable for each iteration of the loop: + + for _, v := range list { + v := v // new var per iteration + defer func() { + use(v) // ok + }() + } + +The next example uses a go statement and has a similar problem. +In addition, it has a data race because the loop updates v +concurrent with the goroutines accessing it. + + for _, v := range elem { + go func() { + use(v) // incorrect, and a data race + }() + } + +A fix is the same as before. The checker also reports problems +in goroutines started by golang.org/x/sync/errgroup.Group. +A hard-to-spot variant of this form is common in parallel tests: + + func Test(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + use(test) // incorrect, and a data race + }) + } + } + +The t.Parallel() call causes the rest of the function to execute +concurrent with the loop. + +The analyzer reports references only in the last statement, +as it is not deep enough to understand the effects of subsequent +statements that might render the reference benign. +("Last statement" is defined recursively in compound +statements such as if, switch, and select.) See: https://golang.org/doc/go_faq.html#closures_and_goroutines @@ -490,6 +556,17 @@ identifiers. Please see the documentation for package testing in golang.org/pkg/testing for the conventions that are enforced for Tests, Benchmarks, and Examples. +**Enabled by default.** + +## **timeformat** + +check for calls of (time.Time).Format or time.Parse with 2006-02-01 + +The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm) +format. Internationally, "yyyy-dd-mm" does not occur in common calendar date +standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended. + + **Enabled by default.** ## **unmarshal** @@ -652,6 +729,15 @@ func <>(inferred parameters) { **Enabled by default.** +## **unusedvariable** + +check for unused variables + +The unusedvariable analyzer suggests fixes for unused variables errors. + + +**Disabled by default. Enable it by setting `"analyses": {"unusedvariable": true}`.** + ## **fillstruct** note incomplete struct initializations diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md index f868a48936e..ac22c55113a 100644 --- a/gopls/doc/commands.md +++ b/gopls/doc/commands.md @@ -100,6 +100,26 @@ Args: } ``` +### **Get known vulncheck result** +Identifier: `gopls.fetch_vulncheck_result` + +Fetch the result of latest vulnerability check (`govulncheck`). + +Args: + +``` +{ + // The file URI. + "URI": string, +} +``` + +Result: + +``` +map[golang.org/x/tools/gopls/internal/lsp/protocol.DocumentURI]*golang.org/x/tools/gopls/internal/govulncheck.Result +``` + ### **Toggle gc_details** Identifier: `gopls.gc_details` @@ -127,20 +147,6 @@ Args: } ``` -### **Generate gopls.mod** -Identifier: `gopls.generate_gopls_mod` - -(Re)generate the gopls.mod file for a workspace. - -Args: - -``` -{ - // The file URI. - "URI": string, -} -``` - ### **go get a package** Identifier: `gopls.go_get_package` @@ -247,26 +253,26 @@ Args: } ``` -### **Run test(s)** -Identifier: `gopls.run_tests` +### **Reset go.mod diagnostics** +Identifier: `gopls.reset_go_mod_diagnostics` -Runs `go test` for a specific set of test or benchmark functions. +Reset diagnostics in the go.mod file of a module. Args: ``` { - // The test file containing the tests to run. - "URI": string, - // Specific test names to run, e.g. TestFoo. - "Tests": []string, - // Specific benchmarks to run, e.g. BenchmarkFoo. - "Benchmarks": []string, + "URIArg": { + "URI": string, + }, + // Optional: source of the diagnostics to reset. + // If not set, all resettable go.mod diagnostics will be cleared. + "DiagnosticSource": string, } ``` -### **Run vulncheck (experimental)** -Identifier: `gopls.run_vulncheck_exp` +### **Run govulncheck.** +Identifier: `gopls.run_govulncheck` Run vulnerability check (`govulncheck`). @@ -274,8 +280,8 @@ Args: ``` { - // Dir is the directory from which vulncheck will run from. - "Dir": string, + // Any document in the directory from which govulncheck will run. + "URI": string, // Package pattern. E.g. "", ".", "./...". "Pattern": string, } @@ -285,19 +291,27 @@ Result: ``` { - "Vuln": []{ - "ID": string, - "Details": string, - "Aliases": []string, - "Symbol": string, - "PkgPath": string, - "ModPath": string, - "URL": string, - "CurrentVersion": string, - "FixedVersion": string, - "CallStacks": [][]golang.org/x/tools/internal/lsp/command.StackEntry, - "CallStackSummaries": []string, - }, + // Token holds the progress token for LSP workDone reporting of the vulncheck + // invocation. + "Token": interface{}, +} +``` + +### **Run test(s)** +Identifier: `gopls.run_tests` + +Runs `go test` for a specific set of test or benchmark functions. + +Args: + +``` +{ + // The test file containing the tests to run. + "URI": string, + // Specific test names to run, e.g. TestFoo. + "Tests": []string, + // Specific benchmarks to run, e.g. BenchmarkFoo. + "Benchmarks": []string, } ``` diff --git a/gopls/doc/contributing.md b/gopls/doc/contributing.md index 99e45292296..367280f53e3 100644 --- a/gopls/doc/contributing.md +++ b/gopls/doc/contributing.md @@ -18,8 +18,8 @@ claiming it. ## Getting started -Most of the `gopls` logic is actually in the `golang.org/x/tools/internal/lsp` -directory, so you are most likely to develop in the golang.org/x/tools module. +Most of the `gopls` logic is in the `golang.org/x/tools/gopls/internal/lsp` +directory. ## Build diff --git a/gopls/doc/design/implementation.md b/gopls/doc/design/implementation.md index a8f7f0b0e01..859ec1c1219 100644 --- a/gopls/doc/design/implementation.md +++ b/gopls/doc/design/implementation.md @@ -29,7 +29,7 @@ Package | Description [internal/lsp/cache] | the cache layer [internal/lsp/cmd] | the gopls command line layer [internal/lsp/debug] | features to aid in debugging gopls -[internal/lsp/protocol] | the lsp protocol layer and wire format +[internal/lsp/protocol] | the types of LSP request and response messages [internal/lsp/source] | the core feature implementations [internal/span] | a package for dealing with source file locations [internal/memoize] | a function invocation cache used to reduce the work done diff --git a/gopls/doc/design/integrating.md b/gopls/doc/design/integrating.md index 845f9eb007f..ba2cc07aa71 100644 --- a/gopls/doc/design/integrating.md +++ b/gopls/doc/design/integrating.md @@ -20,7 +20,7 @@ Many LSP requests pass position or range information. This is described in the [ This means that integrators will need to calculate UTF-16 based column offsets. -[`golang.org/x/tools/internal/span`] has the code to do this in go. +[`golang.org/x/tools/gopls/internal/span`] has the code to do this in go. [#31080] tracks making `span` and other useful packages non-internal. ## Edits @@ -61,9 +61,9 @@ For instance, files that are needed to do correct type checking are modified by Monitoring files inside gopls directly has a lot of awkward problems, but the [LSP specification] has methods that allow gopls to request that the client notify it of file system changes, specifically [`workspace/didChangeWatchedFiles`]. This is currently being added to gopls by a community member, and tracked in [#31553] -[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#InitializeResult -[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#ServerCapabilities -[`golang.org/x/tools/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint +[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#InitializeResult +[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#ServerCapabilities +[`golang.org/x/tools/gopls/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint [LSP specification]: https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/ [lsp-response]: https://github.com/Microsoft/language-server-protocol/blob/gh-pages/_specifications/specification-3-14.md#response-message diff --git a/gopls/doc/generate.go b/gopls/doc/generate.go index e63653de6bc..d674bfce489 100644 --- a/gopls/doc/generate.go +++ b/gopls/doc/generate.go @@ -20,6 +20,7 @@ import ( "io" "io/ioutil" "os" + "os/exec" "path/filepath" "reflect" "regexp" @@ -32,47 +33,71 @@ import ( "github.com/jba/printsrc" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/command/commandmeta" - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/command/commandmeta" + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" ) func main() { - if _, err := doMain("..", true); err != nil { + if _, err := doMain(true); err != nil { fmt.Fprintf(os.Stderr, "Generation failed: %v\n", err) os.Exit(1) } } -func doMain(baseDir string, write bool) (bool, error) { +func doMain(write bool) (bool, error) { api, err := loadAPI() if err != nil { return false, err } - if ok, err := rewriteFile(filepath.Join(baseDir, "internal/lsp/source/api_json.go"), api, write, rewriteAPI); !ok || err != nil { + sourceDir, err := pkgDir("golang.org/x/tools/gopls/internal/lsp/source") + if err != nil { + return false, err + } + + if ok, err := rewriteFile(filepath.Join(sourceDir, "api_json.go"), api, write, rewriteAPI); !ok || err != nil { + return ok, err + } + + goplsDir, err := pkgDir("golang.org/x/tools/gopls") + if err != nil { + return false, err + } + + if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "settings.md"), api, write, rewriteSettings); !ok || err != nil { return ok, err } - if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/settings.md"), api, write, rewriteSettings); !ok || err != nil { + if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "commands.md"), api, write, rewriteCommands); !ok || err != nil { return ok, err } - if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/commands.md"), api, write, rewriteCommands); !ok || err != nil { + if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil { return ok, err } - if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil { + if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "inlayHints.md"), api, write, rewriteInlayHints); !ok || err != nil { return ok, err } return true, nil } +// pkgDir returns the directory corresponding to the import path pkgPath. +func pkgDir(pkgPath string) (string, error) { + out, err := exec.Command("go", "list", "-f", "{{.Dir}}", pkgPath).Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + func loadAPI() (*source.APIJSON, error) { pkgs, err := packages.Load( &packages.Config{ Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedDeps, }, - "golang.org/x/tools/internal/lsp/source", + "golang.org/x/tools/gopls/internal/lsp/source", ) if err != nil { return nil, err @@ -102,6 +127,7 @@ func loadAPI() (*source.APIJSON, error) { } { api.Analyzers = append(api.Analyzers, loadAnalyzers(m)...) } + api.Hints = loadHints(source.AllInlayHints) for _, category := range []reflect.Value{ reflect.ValueOf(defaults.UserOptions), } { @@ -146,6 +172,14 @@ func loadAPI() (*source.APIJSON, error) { Default: def, }) } + case "hints": + for _, a := range api.Hints { + opt.EnumKeys.Keys = append(opt.EnumKeys.Keys, source.EnumKey{ + Name: fmt.Sprintf("%q", a.Name), + Doc: a.Doc, + Default: strconv.FormatBool(a.Default), + }) + } } } } @@ -488,6 +522,23 @@ func loadAnalyzers(m map[string]*source.Analyzer) []*source.AnalyzerJSON { return json } +func loadHints(m map[string]*source.Hint) []*source.HintJSON { + var sorted []string + for _, h := range m { + sorted = append(sorted, h.Name) + } + sort.Strings(sorted) + var json []*source.HintJSON + for _, name := range sorted { + h := m[name] + json = append(json, &source.HintJSON{ + Name: h.Name, + Doc: h.Doc, + }) + } + return json +} + func lowerFirst(x string) string { if x == "" { return x @@ -505,7 +556,7 @@ func upperFirst(x string) string { func fileForPos(pkg *packages.Package, pos token.Pos) (*ast.File, error) { fset := pkg.Fset for _, f := range pkg.Syntax { - if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename { + if safetoken.StartPosition(fset, f.Pos()).Filename == safetoken.StartPosition(fset, pos).Filename { return f, nil } } @@ -537,7 +588,7 @@ func rewriteFile(file string, api *source.APIJSON, write bool, rewrite func([]by func rewriteAPI(_ []byte, api *source.APIJSON) ([]byte, error) { var buf bytes.Buffer fmt.Fprintf(&buf, "// Code generated by \"golang.org/x/tools/gopls/doc/generate\"; DO NOT EDIT.\n\npackage source\n\nvar GeneratedAPIJSON = ") - if err := printsrc.NewPrinter("golang.org/x/tools/internal/lsp/source").Fprint(&buf, api); err != nil { + if err := printsrc.NewPrinter("golang.org/x/tools/gopls/internal/lsp/source").Fprint(&buf, api); err != nil { return nil, err } return format.Source(buf.Bytes()) @@ -571,7 +622,7 @@ func rewriteSettings(doc []byte, api *source.APIJSON) ([]byte, error) { writeTitle(section, h.final, level) for _, opt := range h.options { header := strMultiply("#", level+1) - section.Write([]byte(fmt.Sprintf("%s ", header))) + fmt.Fprintf(section, "%s ", header) opt.Write(section) } } @@ -699,6 +750,21 @@ func rewriteAnalyzers(doc []byte, api *source.APIJSON) ([]byte, error) { return replaceSection(doc, "Analyzers", section.Bytes()) } +func rewriteInlayHints(doc []byte, api *source.APIJSON) ([]byte, error) { + section := bytes.NewBuffer(nil) + for _, hint := range api.Hints { + fmt.Fprintf(section, "## **%v**\n\n", hint.Name) + fmt.Fprintf(section, "%s\n\n", hint.Doc) + switch hint.Default { + case true: + fmt.Fprintf(section, "**Enabled by default.**\n\n") + case false: + fmt.Fprintf(section, "**Disabled by default. Enable it by setting `\"hints\": {\"%s\": true}`.**\n\n", hint.Name) + } + } + return replaceSection(doc, "Hints", section.Bytes()) +} + func replaceSection(doc []byte, sectionName string, replacement []byte) ([]byte, error) { re := regexp.MustCompile(fmt.Sprintf(`(?s)\n(.*?)`, sectionName, sectionName)) idx := re.FindSubmatchIndex(doc) diff --git a/gopls/doc/generate_test.go b/gopls/doc/generate_test.go index 137a646cd8d..d33594d6159 100644 --- a/gopls/doc/generate_test.go +++ b/gopls/doc/generate_test.go @@ -16,7 +16,7 @@ import ( func TestGenerated(t *testing.T) { testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code. - ok, err := doMain("../..", false) + ok, err := doMain(false) if err != nil { t.Fatal(err) } diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md new file mode 100644 index 00000000000..2ae9a2828af --- /dev/null +++ b/gopls/doc/inlayHints.md @@ -0,0 +1,80 @@ +# Hints + +This document describes the inlay hints that `gopls` uses inside the editor. + + +## **assignVariableTypes** + +Enable/disable inlay hints for variable types in assign statements: +```go + i/* int*/, j/* int*/ := 0, len(r)-1 +``` + +**Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.** + +## **compositeLiteralFields** + +Enable/disable inlay hints for composite literal field names: +```go + {/*in: */"Hello, world", /*want: */"dlrow ,olleH"} +``` + +**Disabled by default. Enable it by setting `"hints": {"compositeLiteralFields": true}`.** + +## **compositeLiteralTypes** + +Enable/disable inlay hints for composite literal types: +```go + for _, c := range []struct { + in, want string + }{ + /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, + } +``` + +**Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.** + +## **constantValues** + +Enable/disable inlay hints for constant values: +```go + const ( + KindNone Kind = iota/* = 0*/ + KindPrint/* = 1*/ + KindPrintf/* = 2*/ + KindErrorf/* = 3*/ + ) +``` + +**Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.** + +## **functionTypeParameters** + +Enable/disable inlay hints for implicit type parameters on generic functions: +```go + myFoo/*[int, string]*/(1, "hello") +``` + +**Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.** + +## **parameterNames** + +Enable/disable inlay hints for parameter names: +```go + parseInt(/* str: */ "123", /* radix: */ 8) +``` + +**Disabled by default. Enable it by setting `"hints": {"parameterNames": true}`.** + +## **rangeVariableTypes** + +Enable/disable inlay hints for variable types in range statements: +```go + for k/* int*/, v/* string*/ := range []string{} { + fmt.Println(k, v) + } +``` + +**Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.** + + diff --git a/gopls/doc/releases.md b/gopls/doc/releases.md new file mode 100644 index 00000000000..befb92c3966 --- /dev/null +++ b/gopls/doc/releases.md @@ -0,0 +1,25 @@ +# Gopls release policy + +Gopls releases follow [semver](http://semver.org), with major changes and new +features introduced only in new minor versions (i.e. versions of the form +`v*.N.0` for some N). Subsequent patch releases contain only cherry-picked +fixes or superficial updates. + +In order to align with the +[Go release timeline](https://github.com/golang/go/wiki/Go-Release-Cycle#timeline), +we aim to release a new minor version of Gopls approximately every three +months, with patch releases approximately every month, according to the +following table: + +| Month | Version(s) | +| ---- | ------- | +| Jan | `v*..0` | +| Jan-Mar | `v*..*` | +| Apr | `v*..0` | +| Apr-Jun | `v*..*` | +| Jul | `v*..0` | +| Jul-Sep | `v*..*` | +| Oct | `v*..0` | +| Oct-Dec | `v*..*` | + +For more background on this policy, see https://go.dev/issue/55267. diff --git a/gopls/doc/semantictokens.md b/gopls/doc/semantictokens.md index c9124b796e0..a1e140d29ec 100644 --- a/gopls/doc/semantictokens.md +++ b/gopls/doc/semantictokens.md @@ -57,7 +57,7 @@ different runes by their Unicode language assignment, or some other Unicode prop being [confusable](http://www.unicode.org/Public/security/10.0.0/confusables.txt). Gopls does not come close to either of these principles. Semantic tokens are returned for -identifiers, keywords, operators, comments, and literals. (Sematic tokens do not +identifiers, keywords, operators, comments, and literals. (Semantic tokens do not cover the file. They are not returned for white space or punctuation, and there is no semantic token for labels.) The following describes more precisely what gopls diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md index 092a3c7cfaf..52a75391048 100644 --- a/gopls/doc/settings.md +++ b/gopls/doc/settings.md @@ -1,6 +1,6 @@ # Settings - + This document describes the global settings for `gopls` inside the editor. The settings block will be called `"gopls"` and contains a collection of @@ -35,6 +35,7 @@ still be able to independently override specific experimental features. * [Completion](#completion) * [Diagnostic](#diagnostic) * [Documentation](#documentation) + * [Inlayhint](#inlayhint) * [Navigation](#navigation) ### Build @@ -62,15 +63,19 @@ relative to the workspace folder. They are evaluated in order, and the last filter that applies to a path controls whether it is included. The path prefix can be empty, so an initial `-` excludes everything. +DirectoryFilters also supports the `**` operator to match 0 or more directories. + Examples: -Exclude node_modules: `-node_modules` +Exclude node_modules at current depth: `-node_modules` + +Exclude node_modules at any depth: `-**/node_modules` Include only project_a: `-` (exclude everything), `+project_a` Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules` -Default: `["-node_modules"]`. +Default: `["-**/node_modules"]`. #### **templateExtensions** *[]string* @@ -111,29 +116,6 @@ a go.mod file, narrowing the scope to that directory if it exists. Default: `true`. -#### **experimentalWorkspaceModule** *bool* - -**This setting is experimental and may be deleted.** - -experimentalWorkspaceModule opts a user into the experimental support -for multi-module workspaces. - -Default: `false`. - -#### **experimentalPackageCacheKey** *bool* - -**This setting is experimental and may be deleted.** - -experimentalPackageCacheKey controls whether to use a coarser cache key -for package type information to increase cache hits. This setting removes -the user's environment, build flags, and working directory from the cache -key, which should be a safe change as all relevant inputs into the type -checking pass are already hashed into the key. This is temporarily guarded -by an experiment because caching behavior is subtle and difficult to -comprehensively test. - -Default: `true`. - #### **allowModfileModifications** *bool* **This setting is experimental and may be deleted.** @@ -153,16 +135,28 @@ be removed. Default: `false`. -#### **experimentalUseInvalidMetadata** *bool* +#### **standaloneTags** *[]string* -**This setting is experimental and may be deleted.** +standaloneTags specifies a set of build constraints that identify +individual Go source files that make up the entire main package of an +executable. -experimentalUseInvalidMetadata enables gopls to fall back on outdated -package metadata to provide editor features if the go command fails to -load packages for some reason (like an invalid go.mod file). This will -eventually be the default behavior, and this setting will be removed. +A common example of standalone main files is the convention of using the +directive `//go:build ignore` to denote files that are not intended to be +included in any package, for example because they are invoked directly by +the developer using `go run`. -Default: `false`. +Gopls considers a file to be a standalone main file if and only if it has +package name "main" and has a build directive of the exact form +"//go:build tag" or "// +build tag", where tag is among the list of tags +configured by this setting. Notably, if the build constraint is more +complicated than a simple tag (such as the composite constraint +`//go:build tag && go1.18`), the file is not considered to be a standalone +main file. + +This setting is only supported when gopls is built with Go 1.16 or later. + +Default: `["ignore"]`. ### Formatting @@ -214,6 +208,22 @@ semantic tokens to the client. Default: `false`. +#### **noSemanticString** *bool* + +**This setting is experimental and may be deleted.** + +noSemanticString turns off the sending of the semantic token 'string' + +Default: `false`. + +#### **noSemanticNumber** *bool* + +**This setting is experimental and may be deleted.** + +noSemanticNumber turns off the sending of the semantic token 'number' + +Default: `false`. + #### Completion ##### **usePlaceholders** *bool* @@ -265,8 +275,8 @@ Default: `true`. analyses specify analyses that the user would like to enable or disable. A map of the names of analysis passes that should be enabled/disabled. -A full list of analyzers that gopls uses can be found -[here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md). +A full list of analyzers that gopls uses can be found in +[analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md). Example Usage: @@ -286,6 +296,8 @@ Default: `{}`. **This setting is experimental and may be deleted.** staticcheck enables additional analyses from staticcheck.io. +These analyses are documented on +[Staticcheck's website](https://staticcheck.io/docs/checks/). Default: `false`. @@ -305,6 +317,20 @@ Can contain any of: Default: `{"bounds":true,"escape":true,"inline":true,"nil":true}`. +##### **vulncheck** *enum* + +**This setting is experimental and may be deleted.** + +vulncheck enables vulnerability scanning. + +Must be one of: + +* `"Imports"`: In Imports mode, `gopls` will report vulnerabilities that affect packages +directly and indirectly used by the analyzed main module. +* `"Off"`: Disable vulnerability analysis. + +Default: `"Off"`. + ##### **diagnosticsDelay** *time.Duration* **This is an advanced setting and should not be configured by most `gopls` users.** @@ -318,20 +344,6 @@ This option must be set to a valid duration string, for example `"250ms"`. Default: `"250ms"`. -##### **experimentalWatchedFileDelay** *time.Duration* - -**This setting is experimental and may be deleted.** - -experimentalWatchedFileDelay controls the amount of time that gopls waits -for additional workspace/didChangeWatchedFiles notifications to arrive, -before processing all such notifications in a single batch. This is -intended for use by LSP clients that don't support their own batching of -file system notifications. - -This option must be set to a valid duration string, for example `"100ms"`. - -Default: `"0s"`. - #### Documentation ##### **hoverKind** *enum* @@ -362,6 +374,9 @@ It might be one of: If company chooses to use its own `godoc.org`, its address can be used as well. +Modules matching the GOPRIVATE environment variable will not have +documentation links in hover. + Default: `"pkg.go.dev"`. ##### **linksInHover** *bool* @@ -370,6 +385,18 @@ linksInHover toggles the presence of links to documentation in hover. Default: `true`. +#### Inlayhint + +##### **hints** *map[string]bool* + +**This setting is experimental and may be deleted.** + +hints specify inlay hints that users want to see. A full list of hints +that gopls uses can be found in +[inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md). + +Default: `{}`. + #### Navigation ##### **importShortcut** *enum* @@ -439,6 +466,17 @@ Default: `false`. +#### **newDiff** *string* + +newDiff enables the new diff implementation. If this is "both", for now both +diffs will be run and statistics will be generated in a file in $TMPDIR. This +is a risky setting; help in trying it is appreciated. If it is "old" the old +implementation is used, and if it is "new", just the new implementation is +used. This setting will eventually be deleted, once gopls has fully migrated to +the new diff algorithm. + +Default: 'both'. + ## Code Lenses These are the code lenses that `gopls` currently supports. They can be enabled @@ -461,6 +499,11 @@ Runs `go generate` for a given directory. Identifier: `regenerate_cgo` Regenerates cgo definitions. +### **Run govulncheck.** + +Identifier: `run_govulncheck` + +Run vulnerability check (`govulncheck`). ### **Run test(s) (legacy)** Identifier: `test` diff --git a/gopls/doc/vim.md b/gopls/doc/vim.md index d9b33ac34dc..af54a7e088e 100644 --- a/gopls/doc/vim.md +++ b/gopls/doc/vim.md @@ -175,23 +175,22 @@ a helper function in Lua: lua < ../ diff --git a/gopls/go.sum b/gopls/go.sum index 5873afa1968..fd01bae2d38 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -1,23 +1,25 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= -github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM= github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= +github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8= +github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/jba/printsrc v0.2.2 h1:9OHK51UT+/iMAEBlQIIXW04qvKyF3/vvLuwW/hL8tDU= github.com/jba/printsrc v0.2.2/go.mod h1:1xULjw59sL0dPdWpDoVU06TIEO/Wnfv6AHRpiElTwYM= github.com/jba/templatecheck v0.6.0 h1:SwM8C4hlK/YNLsdcXStfnHWE2HKkuTVwy5FKQHt5ro8= @@ -33,62 +35,66 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338 h1:2O2DON6y3XMJiQRAS1UWU+54aec2uopH3x7MAiqGW6Y= +golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be h1:jokAF1mfylAi1iTQx7C44B7vyXUcSEMw8eDv0PzNu8s= -golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be/go.mod h1:twca1SxmF6/i2wHY/mj1vLIkkHdp+nil/yA32ZOP4kg= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815 h1:A9kONVi4+AnuOr1dopsibH6hLi1Huy54cbeJxnq4vmU= +golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815/go.mod h1:XJiVExZgoZfrrxoTeVsFYrSSk1snhfpOEC95JL+A4T0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU= -honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= -mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= -mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= +honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= +honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= mvdan.cc/xurls/v2 v2.4.0 h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc= diff --git a/gopls/internal/coverage/coverage.go b/gopls/internal/coverage/coverage.go index 7bb3640bdbd..9a7d219945e 100644 --- a/gopls/internal/coverage/coverage.go +++ b/gopls/internal/coverage/coverage.go @@ -12,9 +12,13 @@ // -o controls where the coverage file is written, defaulting to /tmp/cover.out // -i coverage-file will generate the report from an existing coverage file // -v controls verbosity (0: only report coverage, 1: report as each directory is finished, -// 2: report on each test, 3: more details, 4: too much) +// +// 2: report on each test, 3: more details, 4: too much) +// // -t tests only tests packages in the given comma-separated list of directories in gopls. -// The names should start with ., as in ./internal/regtest/bench +// +// The names should start with ., as in ./internal/regtest/bench +// // -run tests. If set, -run tests is passed on to the go test command. // // Despite gopls' use of goroutines, the counts are almost deterministic. @@ -60,7 +64,7 @@ func main() { tests = realTestName(tests) // report coverage for packages under internal/lsp - parg := "golang.org/x/tools/internal/lsp/..." + parg := "golang.org/x/tools/gopls/internal/lsp/..." accum := []string{} seen := make(map[string]bool) @@ -184,7 +188,12 @@ func maybePrint(m result) { if *verbose > 3 { fmt.Printf("%s %s %q %.3f\n", m.Action, m.Test, m.Output, m.Elapsed) } + case "pause", "cont": + if *verbose > 2 { + fmt.Printf("%s %s %.3f\n", m.Action, m.Test, m.Elapsed) + } default: + fmt.Printf("%#v\n", m) log.Fatalf("unknown action %s\n", m.Action) } } @@ -224,7 +233,7 @@ func checkCwd() { if err != nil { log.Fatal(err) } - // we expect to be a the root of golang.org/x/tools + // we expect to be at the root of golang.org/x/tools cmd := exec.Command("go", "list", "-m", "-f", "{{.Dir}}", "golang.org/x/tools") buf, err := cmd.Output() buf = bytes.Trim(buf, "\n \t") // remove \n at end @@ -239,10 +248,6 @@ func checkCwd() { if err != nil { log.Fatalf("expected a gopls directory, %v", err) } - _, err = os.Stat("internal/lsp") - if err != nil { - log.Fatalf("expected to see internal/lsp, %v", err) - } } func listDirs(dir string) []string { diff --git a/gopls/internal/govulncheck/README.md b/gopls/internal/govulncheck/README.md deleted file mode 100644 index d8339c506f6..00000000000 --- a/gopls/internal/govulncheck/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# internal/govulncheck package - -This package is a literal copy of the cmd/govulncheck/internal/govulncheck -package in the vuln repo (https://go.googlesource.com/vuln). - -The `copy.sh` does the copying, after removing all .go files here. To use it: - -1. Clone the vuln repo to a directory next to the directory holding this repo - (tools). After doing that your directory structure should look something like - ``` - ~/repos/x/tools/gopls/... - ~/repos/x/vuln/... - ``` - -2. cd to this directory. - -3. Run `copy.sh`. diff --git a/gopls/internal/govulncheck/cache.go b/gopls/internal/govulncheck/cache.go deleted file mode 100644 index 404c3567320..00000000000 --- a/gopls/internal/govulncheck/cache.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -// Package govulncheck supports the govulncheck command. -package govulncheck - -import ( - "encoding/json" - "go/build" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/vuln/client" - "golang.org/x/vuln/osv" -) - -// The cache uses a single JSON index file for each vulnerability database -// which contains the map from packages to the time the last -// vulnerability for that package was added/modified and the time that -// the index was retrieved from the vulnerability database. The JSON -// format is as follows: -// -// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/indexes/index.json -// { -// Retrieved time.Time -// Index client.DBIndex -// } -// -// Each package also has a JSON file which contains the array of vulnerability -// entries for the package. The JSON format is as follows: -// -// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/{import path}/vulns.json -// []*osv.Entry - -// FSCache is a thread-safe file-system cache implementing osv.Cache -// -// TODO: use something like cmd/go/internal/lockedfile for thread safety? -type FSCache struct { - mu sync.Mutex - rootDir string -} - -// Assert that *FSCache implements client.Cache. -var _ client.Cache = (*FSCache)(nil) - -// use cfg.GOMODCACHE available in cmd/go/internal? -var defaultCacheRoot = filepath.Join(build.Default.GOPATH, "/pkg/mod/cache/download/vulndb") - -func DefaultCache() *FSCache { - return &FSCache{rootDir: defaultCacheRoot} -} - -type cachedIndex struct { - Retrieved time.Time - Index client.DBIndex -} - -func (c *FSCache) ReadIndex(dbName string) (client.DBIndex, time.Time, error) { - c.mu.Lock() - defer c.mu.Unlock() - - b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, "index.json")) - if err != nil { - if os.IsNotExist(err) { - return nil, time.Time{}, nil - } - return nil, time.Time{}, err - } - var index cachedIndex - if err := json.Unmarshal(b, &index); err != nil { - return nil, time.Time{}, err - } - return index.Index, index.Retrieved, nil -} - -func (c *FSCache) WriteIndex(dbName string, index client.DBIndex, retrieved time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - - path := filepath.Join(c.rootDir, dbName) - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - j, err := json.Marshal(cachedIndex{ - Index: index, - Retrieved: retrieved, - }) - if err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(path, "index.json"), j, 0666); err != nil { - return err - } - return nil -} - -func (c *FSCache) ReadEntries(dbName string, p string) ([]*osv.Entry, error) { - c.mu.Lock() - defer c.mu.Unlock() - - b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, p, "vulns.json")) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - var entries []*osv.Entry - if err := json.Unmarshal(b, &entries); err != nil { - return nil, err - } - return entries, nil -} - -func (c *FSCache) WriteEntries(dbName string, p string, entries []*osv.Entry) error { - c.mu.Lock() - defer c.mu.Unlock() - - path := filepath.Join(c.rootDir, dbName, p) - if err := os.MkdirAll(path, 0777); err != nil { - return err - } - j, err := json.Marshal(entries) - if err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(path, "vulns.json"), j, 0666); err != nil { - return err - } - return nil -} diff --git a/gopls/internal/govulncheck/cache_test.go b/gopls/internal/govulncheck/cache_test.go deleted file mode 100644 index 5a25c781020..00000000000 --- a/gopls/internal/govulncheck/cache_test.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package govulncheck - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "golang.org/x/sync/errgroup" - "golang.org/x/vuln/client" - "golang.org/x/vuln/osv" -) - -func TestCache(t *testing.T) { - tmpDir := t.TempDir() - - cache := &FSCache{rootDir: tmpDir} - dbName := "vulndb.golang.org" - - _, _, err := cache.ReadIndex(dbName) - if err != nil { - t.Fatalf("ReadIndex failed for non-existent database: %v", err) - } - - if err = os.Mkdir(filepath.Join(tmpDir, dbName), 0777); err != nil { - t.Fatalf("os.Mkdir failed: %v", err) - } - _, _, err = cache.ReadIndex(dbName) - if err != nil { - t.Fatalf("ReadIndex failed for database without cached index: %v", err) - } - - now := time.Now() - expectedIdx := client.DBIndex{ - "a.vuln.example.com": time.Time{}.Add(time.Hour), - "b.vuln.example.com": time.Time{}.Add(time.Hour * 2), - "c.vuln.example.com": time.Time{}.Add(time.Hour * 3), - } - if err = cache.WriteIndex(dbName, expectedIdx, now); err != nil { - t.Fatalf("WriteIndex failed to write index: %v", err) - } - - idx, retrieved, err := cache.ReadIndex(dbName) - if err != nil { - t.Fatalf("ReadIndex failed for database with cached index: %v", err) - } - if !reflect.DeepEqual(idx, expectedIdx) { - t.Errorf("ReadIndex returned unexpected index, got:\n%s\nwant:\n%s", idx, expectedIdx) - } - if !retrieved.Equal(now) { - t.Errorf("ReadIndex returned unexpected retrieved: got %s, want %s", retrieved, now) - } - - if _, err = cache.ReadEntries(dbName, "vuln.example.com"); err != nil { - t.Fatalf("ReadEntires failed for non-existent package: %v", err) - } - - expectedEntries := []*osv.Entry{ - {ID: "001"}, - {ID: "002"}, - {ID: "003"}, - } - if err := cache.WriteEntries(dbName, "vuln.example.com", expectedEntries); err != nil { - t.Fatalf("WriteEntries failed: %v", err) - } - - entries, err := cache.ReadEntries(dbName, "vuln.example.com") - if err != nil { - t.Fatalf("ReadEntries failed for cached package: %v", err) - } - if !reflect.DeepEqual(entries, expectedEntries) { - t.Errorf("ReadEntries returned unexpected entries, got:\n%v\nwant:\n%v", entries, expectedEntries) - } -} - -func TestConcurrency(t *testing.T) { - tmpDir := t.TempDir() - - cache := &FSCache{rootDir: tmpDir} - dbName := "vulndb.golang.org" - - g := new(errgroup.Group) - for i := 0; i < 1000; i++ { - i := i - g.Go(func() error { - id := i % 5 - p := fmt.Sprintf("package%d", id) - - entries, err := cache.ReadEntries(dbName, p) - if err != nil { - return err - } - - err = cache.WriteEntries(dbName, p, append(entries, &osv.Entry{ID: fmt.Sprint(id)})) - if err != nil { - return err - } - return nil - }) - } - - if err := g.Wait(); err != nil { - t.Errorf("error in parallel cache entries read/write: %v", err) - } - - // sanity checking - for i := 0; i < 5; i++ { - id := fmt.Sprint(i) - p := fmt.Sprintf("package%s", id) - - es, err := cache.ReadEntries(dbName, p) - if err != nil { - t.Fatalf("failed to read entries: %v", err) - } - for _, e := range es { - if e.ID != id { - t.Errorf("want %s ID for vuln entry; got %s", id, e.ID) - } - } - } - - // do similar for cache index - start := time.Now() - for i := 0; i < 1000; i++ { - i := i - g.Go(func() error { - id := i % 5 - p := fmt.Sprintf("package%v", id) - - idx, _, err := cache.ReadIndex(dbName) - if err != nil { - return err - } - - if idx == nil { - idx = client.DBIndex{} - } - - // sanity checking - if rt, ok := idx[p]; ok && rt.Before(start) { - return fmt.Errorf("unexpected past time in index: %v before start %v", rt, start) - } - - now := time.Now() - idx[p] = now - if err := cache.WriteIndex(dbName, idx, now); err != nil { - return err - } - return nil - }) - } - - if err := g.Wait(); err != nil { - t.Errorf("error in parallel cache index read/write: %v", err) - } -} diff --git a/gopls/internal/govulncheck/copy.sh b/gopls/internal/govulncheck/copy.sh deleted file mode 100755 index 24ed45bfe5a..00000000000 --- a/gopls/internal/govulncheck/copy.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -eu - -# Copyright 2020 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -o pipefail - -# Copy golang.org/x/vuln/cmd/govulncheck/internal/govulncheck into this directory. -# Assume the x/vuln repo is a sibling of the tools repo. - -rm -f *.go -cp ../../../../vuln/cmd/govulncheck/internal/govulncheck/*.go . diff --git a/gopls/internal/govulncheck/semver/semver.go b/gopls/internal/govulncheck/semver/semver.go new file mode 100644 index 00000000000..4ab298d137b --- /dev/null +++ b/gopls/internal/govulncheck/semver/semver.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +// Package semver provides shared utilities for manipulating +// Go semantic versions. +package semver + +import ( + "regexp" + "strings" +) + +// addSemverPrefix adds a 'v' prefix to s if it isn't already prefixed +// with 'v' or 'go'. This allows us to easily test go-style SEMVER +// strings against normal SEMVER strings. +func addSemverPrefix(s string) string { + if !strings.HasPrefix(s, "v") && !strings.HasPrefix(s, "go") { + return "v" + s + } + return s +} + +// removeSemverPrefix removes the 'v' or 'go' prefixes from go-style +// SEMVER strings, for usage in the public vulnerability format. +func removeSemverPrefix(s string) string { + s = strings.TrimPrefix(s, "v") + s = strings.TrimPrefix(s, "go") + return s +} + +// CanonicalizeSemverPrefix turns a SEMVER string into the canonical +// representation using the 'v' prefix, as used by the OSV format. +// Input may be a bare SEMVER ("1.2.3"), Go prefixed SEMVER ("go1.2.3"), +// or already canonical SEMVER ("v1.2.3"). +func CanonicalizeSemverPrefix(s string) string { + return addSemverPrefix(removeSemverPrefix(s)) +} + +var ( + // Regexp for matching go tags. The groups are: + // 1 the major.minor version + // 2 the patch version, or empty if none + // 3 the entire prerelease, if present + // 4 the prerelease type ("beta" or "rc") + // 5 the prerelease number + tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`) +) diff --git a/gopls/internal/govulncheck/semver/semver_test.go b/gopls/internal/govulncheck/semver/semver_test.go new file mode 100644 index 00000000000..6daead6855b --- /dev/null +++ b/gopls/internal/govulncheck/semver/semver_test.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package semver + +import ( + "testing" +) + +func TestCanonicalize(t *testing.T) { + for _, test := range []struct { + v string + want string + }{ + {"v1.2.3", "v1.2.3"}, + {"1.2.3", "v1.2.3"}, + {"go1.2.3", "v1.2.3"}, + } { + got := CanonicalizeSemverPrefix(test.v) + if got != test.want { + t.Errorf("want %s; got %s", test.want, got) + } + } +} diff --git a/gopls/internal/govulncheck/source.go b/gopls/internal/govulncheck/source.go deleted file mode 100644 index 752a8313091..00000000000 --- a/gopls/internal/govulncheck/source.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package govulncheck - -import ( - "context" - "fmt" - "sort" - "strings" - - "golang.org/x/tools/go/packages" - "golang.org/x/vuln/client" - "golang.org/x/vuln/vulncheck" -) - -// A PackageError contains errors from loading a set of packages. -type PackageError struct { - Errors []packages.Error -} - -func (e *PackageError) Error() string { - var b strings.Builder - fmt.Fprintln(&b, "Packages contain errors:") - for _, e := range e.Errors { - fmt.Println(&b, e) - } - return b.String() -} - -// LoadPackages loads the packages matching patterns using cfg, after setting -// the cfg mode flags that vulncheck needs for analysis. -// If the packages contain errors, a PackageError is returned containing a list of the errors, -// along with the packages themselves. -func LoadPackages(cfg *packages.Config, patterns ...string) ([]*vulncheck.Package, error) { - cfg.Mode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | - packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes | - packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps | - packages.NeedModule - - pkgs, err := packages.Load(cfg, patterns...) - vpkgs := vulncheck.Convert(pkgs) - if err != nil { - return nil, err - } - var perrs []packages.Error - packages.Visit(pkgs, nil, func(p *packages.Package) { - perrs = append(perrs, p.Errors...) - }) - if len(perrs) > 0 { - err = &PackageError{perrs} - } - return vpkgs, err -} - -// Source calls vulncheck.Source on the Go source in pkgs. It returns the result -// with Vulns trimmed to those that are actually called. -func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*vulncheck.Result, error) { - r, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{Client: c}) - if err != nil { - return nil, err - } - // Keep only the vulns that are called. - var vulns []*vulncheck.Vuln - for _, v := range r.Vulns { - if v.CallSink != 0 { - vulns = append(vulns, v) - } - } - r.Vulns = vulns - return r, nil -} - -// CallInfo is information about calls to vulnerable functions. -type CallInfo struct { - CallStacks map[*vulncheck.Vuln][]vulncheck.CallStack // all call stacks - VulnGroups [][]*vulncheck.Vuln // vulns grouped by ID and package - ModuleVersions map[string]string // map from module paths to versions - TopPackages map[string]bool // top-level packages -} - -// GetCallInfo computes call stacks and related information from a vulncheck.Result. -// I also makes a set of top-level packages from pkgs. -func GetCallInfo(r *vulncheck.Result, pkgs []*vulncheck.Package) *CallInfo { - pset := map[string]bool{} - for _, p := range pkgs { - pset[p.PkgPath] = true - } - return &CallInfo{ - CallStacks: vulncheck.CallStacks(r), - VulnGroups: groupByIDAndPackage(r.Vulns), - ModuleVersions: moduleVersionMap(r.Modules), - TopPackages: pset, - } -} - -func groupByIDAndPackage(vs []*vulncheck.Vuln) [][]*vulncheck.Vuln { - groups := map[[2]string][]*vulncheck.Vuln{} - for _, v := range vs { - key := [2]string{v.OSV.ID, v.PkgPath} - groups[key] = append(groups[key], v) - } - - var res [][]*vulncheck.Vuln - for _, g := range groups { - res = append(res, g) - } - sort.Slice(res, func(i, j int) bool { - return res[i][0].PkgPath < res[j][0].PkgPath - }) - return res -} - -// moduleVersionMap builds a map from module paths to versions. -func moduleVersionMap(mods []*vulncheck.Module) map[string]string { - moduleVersions := map[string]string{} - for _, m := range mods { - v := m.Version - if m.Replace != nil { - v = m.Replace.Version - } - moduleVersions[m.Path] = v - } - return moduleVersions -} diff --git a/gopls/internal/govulncheck/types.go b/gopls/internal/govulncheck/types.go new file mode 100644 index 00000000000..2881cf4bc40 --- /dev/null +++ b/gopls/internal/govulncheck/types.go @@ -0,0 +1,37 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package govulncheck + +import "time" + +// Result is the result of vulnerability scanning. +type Result struct { + // Vulns contains all vulnerabilities that are called or imported by + // the analyzed module. + Vulns []*Vuln `json:",omitempty"` + + // Mode contains the source of the vulnerability info. + // Clients of the gopls.fetch_vulncheck_result command may need + // to interpret the vulnerabilities differently based on the + // analysis mode. For example, Vuln without callstack traces + // indicate a vulnerability that is not used if the result was + // from 'govulncheck' analysis mode. On the other hand, Vuln + // without callstack traces just implies the package with the + // vulnerability is known to the workspace and we do not know + // whether the vulnerable symbols are actually used or not. + Mode AnalysisMode `json:",omitempty"` + + // AsOf describes when this Result was computed using govulncheck. + // It is valid only with the govulncheck analysis mode. + AsOf time.Time `json:",omitempty"` +} + +type AnalysisMode string + +const ( + ModeInvalid AnalysisMode = "" // zero value + ModeGovulncheck AnalysisMode = "govulncheck" + ModeImports AnalysisMode = "imports" +) diff --git a/gopls/internal/govulncheck/types_118.go b/gopls/internal/govulncheck/types_118.go new file mode 100644 index 00000000000..7b354d622a8 --- /dev/null +++ b/gopls/internal/govulncheck/types_118.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +// Package govulncheck provides an experimental govulncheck API. +package govulncheck + +import ( + "golang.org/x/vuln/exp/govulncheck" +) + +var ( + // Source reports vulnerabilities that affect the analyzed packages. + Source = govulncheck.Source + + // DefaultCache constructs cache for a vulnerability database client. + DefaultCache = govulncheck.DefaultCache +) + +type ( + // Config is the configuration for Main. + Config = govulncheck.Config + + // Vuln represents a single OSV entry. + Vuln = govulncheck.Vuln + + // Module represents a specific vulnerability relevant to a + // single module or package. + Module = govulncheck.Module + + // Package is a Go package with known vulnerable symbols. + Package = govulncheck.Package + + // CallStacks contains a representative call stack for each + // vulnerable symbol that is called. + CallStack = govulncheck.CallStack + + // StackFrame represents a call stack entry. + StackFrame = govulncheck.StackFrame +) diff --git a/gopls/internal/govulncheck/types_not118.go b/gopls/internal/govulncheck/types_not118.go new file mode 100644 index 00000000000..faf5a7055b5 --- /dev/null +++ b/gopls/internal/govulncheck/types_not118.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package govulncheck + +import ( + "go/token" + + "golang.org/x/vuln/osv" +) + +// Vuln represents a single OSV entry. +type Vuln struct { + // OSV contains all data from the OSV entry for this vulnerability. + OSV *osv.Entry + + // Modules contains all of the modules in the OSV entry where a + // vulnerable package is imported by the target source code or binary. + // + // For example, a module M with two packages M/p1 and M/p2, where only p1 + // is vulnerable, will appear in this list if and only if p1 is imported by + // the target source code or binary. + Modules []*Module +} + +func (v *Vuln) IsCalled() bool { + return false +} + +// Module represents a specific vulnerability relevant to a single module. +type Module struct { + // Path is the module path of the module containing the vulnerability. + // + // Importable packages in the standard library will have the path "stdlib". + Path string + + // FoundVersion is the module version where the vulnerability was found. + FoundVersion string + + // FixedVersion is the module version where the vulnerability was + // fixed. If there are multiple fixed versions in the OSV report, this will + // be the latest fixed version. + // + // This is empty if a fix is not available. + FixedVersion string + + // Packages contains all the vulnerable packages in OSV entry that are + // imported by the target source code or binary. + // + // For example, given a module M with two packages M/p1 and M/p2, where + // both p1 and p2 are vulnerable, p1 and p2 will each only appear in this + // list they are individually imported by the target source code or binary. + Packages []*Package +} + +// Package is a Go package with known vulnerable symbols. +type Package struct { + // Path is the import path of the package containing the vulnerability. + Path string + + // CallStacks contains a representative call stack for each + // vulnerable symbol that is called. + // + // For vulnerabilities found from binary analysis, only CallStack.Symbol + // will be provided. + // + // For non-affecting vulnerabilities reported from the source mode + // analysis, this will be empty. + CallStacks []CallStack +} + +// CallStacks contains a representative call stack for a vulnerable +// symbol. +type CallStack struct { + // Symbol is the name of the detected vulnerable function + // or method. + // + // This follows the naming convention in the OSV report. + Symbol string + + // Summary is a one-line description of the callstack, used by the + // default govulncheck mode. + // + // Example: module3.main calls github.com/shiyanhui/dht.DHT.Run + Summary string + + // Frames contains an entry for each stack in the call stack. + // + // Frames are sorted starting from the entry point to the + // imported vulnerable symbol. The last frame in Frames should match + // Symbol. + Frames []*StackFrame +} + +// StackFrame represents a call stack entry. +type StackFrame struct { + // PackagePath is the import path. + PkgPath string + + // FuncName is the function name. + FuncName string + + // RecvType is the fully qualified receiver type, + // if the called symbol is a method. + // + // The client can create the final symbol name by + // prepending RecvType to FuncName. + RecvType string + + // Position describes an arbitrary source position + // including the file, line, and column location. + // A Position is valid if the line number is > 0. + Position token.Position +} + +func (sf *StackFrame) Name() string { + return "" +} + +func (sf *StackFrame) Pos() string { + return "" +} diff --git a/gopls/internal/govulncheck/util.go b/gopls/internal/govulncheck/util.go index baa2d961329..544fba2a593 100644 --- a/gopls/internal/govulncheck/util.go +++ b/gopls/internal/govulncheck/util.go @@ -8,23 +8,24 @@ package govulncheck import ( - "fmt" - "strings" - "golang.org/x/mod/semver" + isem "golang.org/x/tools/gopls/internal/govulncheck/semver" "golang.org/x/vuln/osv" - "golang.org/x/vuln/vulncheck" ) // LatestFixed returns the latest fixed version in the list of affected ranges, // or the empty string if there are no fixed versions. -func LatestFixed(as []osv.Affected) string { +func LatestFixed(modulePath string, as []osv.Affected) string { v := "" for _, a := range as { + if a.Package.Name != modulePath { + continue + } for _, r := range a.Ranges { if r.Type == osv.TypeSemver { for _, e := range r.Events { - if e.Fixed != "" && (v == "" || semver.Compare(e.Fixed, v) > 0) { + if e.Fixed != "" && (v == "" || + semver.Compare(isem.CanonicalizeSemverPrefix(e.Fixed), isem.CanonicalizeSemverPrefix(v)) > 0) { v = e.Fixed } } @@ -33,77 +34,3 @@ func LatestFixed(as []osv.Affected) string { } return v } - -// SummarizeCallStack returns a short description of the call stack. -// It uses one of two forms, depending on what the lowest function F in topPkgs -// calls: -// - If it calls a function V from the vulnerable package, then summarizeCallStack -// returns "F calls V". -// - If it calls a function G in some other package, which eventually calls V, -// it returns "F calls G, which eventually calls V". -// -// If it can't find any of these functions, summarizeCallStack returns the empty string. -func SummarizeCallStack(cs vulncheck.CallStack, topPkgs map[string]bool, vulnPkg string) string { - // Find the lowest function in the top packages. - iTop := lowest(cs, func(e vulncheck.StackEntry) bool { - return topPkgs[PkgPath(e.Function)] - }) - if iTop < 0 { - return "" - } - // Find the highest function in the vulnerable package that is below iTop. - iVuln := highest(cs[iTop+1:], func(e vulncheck.StackEntry) bool { - return PkgPath(e.Function) == vulnPkg - }) - if iVuln < 0 { - return "" - } - iVuln += iTop + 1 // adjust for slice in call to highest. - topName := FuncName(cs[iTop].Function) - vulnName := FuncName(cs[iVuln].Function) - if iVuln == iTop+1 { - return fmt.Sprintf("%s calls %s", topName, vulnName) - } - return fmt.Sprintf("%s calls %s, which eventually calls %s", - topName, FuncName(cs[iTop+1].Function), vulnName) -} - -// highest returns the highest (one with the smallest index) entry in the call -// stack for which f returns true. -func highest(cs vulncheck.CallStack, f func(e vulncheck.StackEntry) bool) int { - for i := 0; i < len(cs); i++ { - if f(cs[i]) { - return i - } - } - return -1 -} - -// lowest returns the lowest (one with the largets index) entry in the call -// stack for which f returns true. -func lowest(cs vulncheck.CallStack, f func(e vulncheck.StackEntry) bool) int { - for i := len(cs) - 1; i >= 0; i-- { - if f(cs[i]) { - return i - } - } - return -1 -} - -// PkgPath returns the package path from fn. -func PkgPath(fn *vulncheck.FuncNode) string { - if fn.PkgPath != "" { - return fn.PkgPath - } - s := strings.TrimPrefix(fn.RecvType, "*") - if i := strings.LastIndexByte(s, '.'); i > 0 { - s = s[:i] - } - return s -} - -// FuncName returns the function name from fn, adjusted -// to remove pointer annotations. -func FuncName(fn *vulncheck.FuncNode) string { - return strings.TrimPrefix(fn.String(), "*") -} diff --git a/gopls/internal/govulncheck/util_test.go b/gopls/internal/govulncheck/util_test.go deleted file mode 100644 index 3288cd84c83..00000000000 --- a/gopls/internal/govulncheck/util_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package govulncheck - -import ( - "strings" - "testing" - - "golang.org/x/vuln/vulncheck" -) - -func TestPkgPath(t *testing.T) { - for _, test := range []struct { - in vulncheck.FuncNode - want string - }{ - { - vulncheck.FuncNode{PkgPath: "math", Name: "Floor"}, - "math", - }, - { - vulncheck.FuncNode{RecvType: "a.com/b.T", Name: "M"}, - "a.com/b", - }, - { - vulncheck.FuncNode{RecvType: "*a.com/b.T", Name: "M"}, - "a.com/b", - }, - } { - got := PkgPath(&test.in) - if got != test.want { - t.Errorf("%+v: got %q, want %q", test.in, got, test.want) - } - } -} - -func TestSummarizeCallStack(t *testing.T) { - topPkgs := map[string]bool{"t1": true, "t2": true} - vulnPkg := "v" - - for _, test := range []struct { - in, want string - }{ - {"a.F", ""}, - {"t1.F", ""}, - {"v.V", ""}, - { - "t1.F v.V", - "t1.F calls v.V", - }, - { - "t1.F t2.G v.V1 v.v2", - "t2.G calls v.V1", - }, - { - "t1.F x.Y t2.G a.H b.I c.J v.V", - "t2.G calls a.H, which eventually calls v.V", - }, - } { - in := stringToCallStack(test.in) - got := SummarizeCallStack(in, topPkgs, vulnPkg) - if got != test.want { - t.Errorf("%s:\ngot %s\nwant %s", test.in, got, test.want) - } - } -} - -func stringToCallStack(s string) vulncheck.CallStack { - var cs vulncheck.CallStack - for _, e := range strings.Fields(s) { - parts := strings.Split(e, ".") - cs = append(cs, vulncheck.StackEntry{ - Function: &vulncheck.FuncNode{ - PkgPath: parts[0], - Name: parts[1], - }, - }) - } - return cs -} diff --git a/gopls/internal/govulncheck/vulncache.go b/gopls/internal/govulncheck/vulncache.go new file mode 100644 index 00000000000..a259f027336 --- /dev/null +++ b/gopls/internal/govulncheck/vulncache.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package govulncheck + +import ( + "sync" + "time" + + vulnc "golang.org/x/vuln/client" + "golang.org/x/vuln/osv" +) + +// inMemoryCache is an implementation of the [client.Cache] interface +// that "decorates" another instance of that interface to provide +// an additional layer of (memory-based) caching. +type inMemoryCache struct { + mu sync.Mutex + underlying vulnc.Cache + db map[string]*db +} + +var _ vulnc.Cache = &inMemoryCache{} + +type db struct { + retrieved time.Time + index vulnc.DBIndex + entry map[string][]*osv.Entry +} + +// NewInMemoryCache returns a new memory-based cache that decorates +// the provided cache (file-based, perhaps). +func NewInMemoryCache(underlying vulnc.Cache) *inMemoryCache { + return &inMemoryCache{ + underlying: underlying, + db: make(map[string]*db), + } +} + +func (c *inMemoryCache) lookupDBLocked(dbName string) *db { + cached := c.db[dbName] + if cached == nil { + cached = &db{entry: make(map[string][]*osv.Entry)} + c.db[dbName] = cached + } + return cached +} + +// ReadIndex returns the index for dbName from the cache, or returns zero values +// if it is not present. +func (c *inMemoryCache) ReadIndex(dbName string) (vulnc.DBIndex, time.Time, error) { + c.mu.Lock() + defer c.mu.Unlock() + cached := c.lookupDBLocked(dbName) + + if cached.retrieved.IsZero() { + // First time ReadIndex is called. + index, retrieved, err := c.underlying.ReadIndex(dbName) + if err != nil { + return index, retrieved, err + } + cached.index, cached.retrieved = index, retrieved + } + return cached.index, cached.retrieved, nil +} + +// WriteIndex puts the index and retrieved time into the cache. +func (c *inMemoryCache) WriteIndex(dbName string, index vulnc.DBIndex, retrieved time.Time) error { + c.mu.Lock() + defer c.mu.Unlock() + cached := c.lookupDBLocked(dbName) + cached.index, cached.retrieved = index, retrieved + // TODO(hyangah): shouldn't we invalidate all cached entries? + return c.underlying.WriteIndex(dbName, index, retrieved) +} + +// ReadEntries returns the vulndb entries for path from the cache. +func (c *inMemoryCache) ReadEntries(dbName, path string) ([]*osv.Entry, error) { + c.mu.Lock() + defer c.mu.Unlock() + cached := c.lookupDBLocked(dbName) + entries, ok := cached.entry[path] + if !ok { + // cache miss + entries, err := c.underlying.ReadEntries(dbName, path) + if err != nil { + return entries, err + } + cached.entry[path] = entries + } + return entries, nil +} + +// WriteEntries puts the entries for path into the cache. +func (c *inMemoryCache) WriteEntries(dbName, path string, entries []*osv.Entry) error { + c.mu.Lock() + defer c.mu.Unlock() + cached := c.lookupDBLocked(dbName) + cached.entry[path] = entries + return c.underlying.WriteEntries(dbName, path, entries) +} diff --git a/gopls/internal/hooks/analysis.go b/gopls/internal/hooks/analysis.go deleted file mode 100644 index 51048991d5a..00000000000 --- a/gopls/internal/hooks/analysis.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.17 -// +build go1.17 - -package hooks - -import ( - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "honnef.co/go/tools/analysis/lint" - "honnef.co/go/tools/quickfix" - "honnef.co/go/tools/simple" - "honnef.co/go/tools/staticcheck" - "honnef.co/go/tools/stylecheck" -) - -func updateAnalyzers(options *source.Options) { - options.StaticcheckSupported = true - - mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity { - switch severity { - case lint.SeverityError: - return protocol.SeverityError - case lint.SeverityDeprecated: - // TODO(dh): in LSP, deprecated is a tag, not a severity. - // We'll want to support this once we enable SA5011. - return protocol.SeverityWarning - case lint.SeverityWarning: - return protocol.SeverityWarning - case lint.SeverityInfo: - return protocol.SeverityInformation - case lint.SeverityHint: - return protocol.SeverityHint - default: - return protocol.SeverityWarning - } - } - add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) { - for _, a := range analyzers { - if _, ok := skip[a.Analyzer.Name]; ok { - continue - } - - enabled := !a.Doc.NonDefault - options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity)) - } - } - - add(simple.Analyzers, nil) - add(staticcheck.Analyzers, map[string]struct{}{ - // This check conflicts with the vet printf check (golang/go#34494). - "SA5009": {}, - // This check relies on facts from dependencies, which - // we don't currently compute. - "SA5011": {}, - }) - add(stylecheck.Analyzers, nil) - add(quickfix.Analyzers, nil) -} diff --git a/gopls/internal/hooks/analysis_116.go b/gopls/internal/hooks/analysis_116.go new file mode 100644 index 00000000000..dd429dea898 --- /dev/null +++ b/gopls/internal/hooks/analysis_116.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.17 +// +build !go1.17 + +package hooks + +import "golang.org/x/tools/gopls/internal/lsp/source" + +func updateAnalyzers(options *source.Options) { + options.StaticcheckSupported = false +} diff --git a/gopls/internal/hooks/analysis_117.go b/gopls/internal/hooks/analysis_117.go index 02f9170ab63..27ab9a699f9 100644 --- a/gopls/internal/hooks/analysis_117.go +++ b/gopls/internal/hooks/analysis_117.go @@ -1,14 +1,62 @@ -// Copyright 2021 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.17 -// +build !go1.17 +//go:build go1.17 +// +build go1.17 package hooks -import "golang.org/x/tools/internal/lsp/source" +import ( + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "honnef.co/go/tools/analysis/lint" + "honnef.co/go/tools/quickfix" + "honnef.co/go/tools/simple" + "honnef.co/go/tools/staticcheck" + "honnef.co/go/tools/stylecheck" +) func updateAnalyzers(options *source.Options) { - options.StaticcheckSupported = false + options.StaticcheckSupported = true + + mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity { + switch severity { + case lint.SeverityError: + return protocol.SeverityError + case lint.SeverityDeprecated: + // TODO(dh): in LSP, deprecated is a tag, not a severity. + // We'll want to support this once we enable SA5011. + return protocol.SeverityWarning + case lint.SeverityWarning: + return protocol.SeverityWarning + case lint.SeverityInfo: + return protocol.SeverityInformation + case lint.SeverityHint: + return protocol.SeverityHint + default: + return protocol.SeverityWarning + } + } + add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) { + for _, a := range analyzers { + if _, ok := skip[a.Analyzer.Name]; ok { + continue + } + + enabled := !a.Doc.NonDefault + options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity)) + } + } + + add(simple.Analyzers, nil) + add(staticcheck.Analyzers, map[string]struct{}{ + // This check conflicts with the vet printf check (golang/go#34494). + "SA5009": {}, + // This check relies on facts from dependencies, which + // we don't currently compute. + "SA5011": {}, + }) + add(stylecheck.Analyzers, nil) + add(quickfix.Analyzers, nil) } diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go index a307ba77fd6..f7fec5a7bb2 100644 --- a/gopls/internal/hooks/diff.go +++ b/gopls/internal/hooks/diff.go @@ -5,37 +5,165 @@ package hooks import ( + "encoding/json" "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "sync" + "time" "github.com/sergi/go-diff/diffmatchpatch" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/diff" ) -func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) { +// structure for saving information about diffs +// while the new code is being rolled out +type diffstat struct { + Before, After int + Oldedits, Newedits int + Oldtime, Newtime time.Duration + Stack string + Msg string `json:",omitempty"` // for errors + Ignored int `json:",omitempty"` // numbr of skipped records with 0 edits +} + +var ( + ignoredMu sync.Mutex + ignored int // counter of diff requests on equal strings + + diffStatsOnce sync.Once + diffStats *os.File // never closed +) + +// save writes a JSON record of statistics about diff requests to a temporary file. +func (s *diffstat) save() { + diffStatsOnce.Do(func() { + f, err := ioutil.TempFile("", "gopls-diff-stats-*") + if err != nil { + log.Printf("can't create diff stats temp file: %v", err) // e.g. disk full + return + } + diffStats = f + }) + if diffStats == nil { + return + } + + // diff is frequently called with equal strings, + // so we count repeated instances but only print every 15th. + ignoredMu.Lock() + if s.Oldedits == 0 && s.Newedits == 0 { + ignored++ + if ignored < 15 { + ignoredMu.Unlock() + return + } + } + s.Ignored = ignored + ignored = 0 + ignoredMu.Unlock() + + // Record the name of the file in which diff was called. + // There aren't many calls, so only the base name is needed. + if _, file, line, ok := runtime.Caller(2); ok { + s.Stack = fmt.Sprintf("%s:%d", filepath.Base(file), line) + } + x, err := json.Marshal(s) + if err != nil { + log.Fatalf("internal error marshalling JSON: %v", err) + } + fmt.Fprintf(diffStats, "%s\n", x) +} + +// disaster is called when the diff algorithm panics or produces a +// diff that cannot be applied. It saves the broken input in a +// new temporary file and logs the file name, which is returned. +func disaster(before, after string) string { + // We use the pid to salt the name, not os.TempFile, + // so that each process creates at most one file. + // One is sufficient for a bug report. + filename := fmt.Sprintf("%s/gopls-diff-bug-%x", os.TempDir(), os.Getpid()) + + // We use NUL as a separator: it should never appear in Go source. + data := before + "\x00" + after + + if err := ioutil.WriteFile(filename, []byte(data), 0600); err != nil { + log.Printf("failed to write diff bug report: %v", err) + return "" + } + + bug.Reportf("Bug detected in diff algorithm! Please send file %s to the maintainers of gopls if you are comfortable sharing its contents.", filename) + + return filename +} + +// BothDiffs edits calls both the new and old diffs, checks that the new diffs +// change before into after, and attempts to preserve some statistics. +func BothDiffs(before, after string) (edits []diff.Edit) { + // The new diff code contains a lot of internal checks that panic when they + // fail. This code catches the panics, or other failures, tries to save + // the failing example (and it would ask the user to send it back to us, and + // changes options.newDiff to 'old', if only we could figure out how.) + stat := diffstat{Before: len(before), After: len(after)} + now := time.Now() + oldedits := ComputeEdits(before, after) + stat.Oldedits = len(oldedits) + stat.Oldtime = time.Since(now) + defer func() { + if r := recover(); r != nil { + disaster(before, after) + edits = oldedits + } + }() + now = time.Now() + newedits := diff.Strings(before, after) + stat.Newedits = len(newedits) + stat.Newtime = time.Now().Sub(now) + got, err := diff.Apply(before, newedits) + if err != nil || got != after { + stat.Msg += "FAIL" + disaster(before, after) + stat.save() + return oldedits + } + stat.save() + return newedits +} + +// ComputeEdits computes a diff using the github.com/sergi/go-diff implementation. +func ComputeEdits(before, after string) (edits []diff.Edit) { // The go-diff library has an unresolved panic (see golang/go#278774). // TODO(rstambler): Remove the recover once the issue has been fixed // upstream. defer func() { if r := recover(); r != nil { - edits = nil - err = fmt.Errorf("unable to compute edits for %s: %s", uri.Filename(), r) + bug.Reportf("unable to compute edits: %s", r) + // Report one big edit for the whole file. + edits = []diff.Edit{{ + Start: 0, + End: len(before), + New: after, + }} } }() diffs := diffmatchpatch.New().DiffMain(before, after, true) - edits = make([]diff.TextEdit, 0, len(diffs)) + edits = make([]diff.Edit, 0, len(diffs)) offset := 0 for _, d := range diffs { - start := span.NewPoint(0, 0, offset) + start := offset switch d.Type { case diffmatchpatch.DiffDelete: offset += len(d.Text) - edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.NewPoint(0, 0, offset))}) + edits = append(edits, diff.Edit{Start: start, End: offset}) case diffmatchpatch.DiffEqual: offset += len(d.Text) case diffmatchpatch.DiffInsert: - edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.Point{}), NewText: d.Text}) + edits = append(edits, diff.Edit{Start: start, End: start, New: d.Text}) } } - return edits, nil + return edits } diff --git a/gopls/internal/hooks/diff_test.go b/gopls/internal/hooks/diff_test.go index d979be78dbe..a46bf3b2d28 100644 --- a/gopls/internal/hooks/diff_test.go +++ b/gopls/internal/hooks/diff_test.go @@ -2,15 +2,32 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package hooks_test +package hooks import ( + "io/ioutil" + "os" "testing" - "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/diff/difftest" + "golang.org/x/tools/internal/diff/difftest" ) func TestDiff(t *testing.T) { - difftest.DiffTest(t, hooks.ComputeEdits) + difftest.DiffTest(t, ComputeEdits) +} + +func TestDisaster(t *testing.T) { + a := "This is a string,(\u0995) just for basic\nfunctionality" + b := "This is another string, (\u0996) to see if disaster will store stuff correctly" + fname := disaster(a, b) + buf, err := ioutil.ReadFile(fname) + if err != nil { + t.Fatal(err) + } + if string(buf) != a+"\x00"+b { + t.Error("failed to record original strings") + } + if err := os.Remove(fname); err != nil { + t.Error(err) + } } diff --git a/gopls/internal/hooks/gen-licenses.sh b/gopls/internal/hooks/gen-licenses.sh index 7d6bab79f54..c35c91260d4 100755 --- a/gopls/internal/hooks/gen-licenses.sh +++ b/gopls/internal/hooks/gen-licenses.sh @@ -27,7 +27,7 @@ mods=$(go list -deps -f '{{with .Module}}{{.Path}}{{end}}' golang.org/x/tools/go for mod in $mods; do # Find the license file, either LICENSE or COPYING, and add it to the result. dir=$(go list -m -f {{.Dir}} $mod) - license=$(ls -1 $dir | egrep -i '^(LICENSE|COPYING)$') + license=$(ls -1 $dir | grep -E -i '^(LICENSE|COPYING)$') echo "-- $mod $license --" >> $tempfile echo >> $tempfile sed 's/^-- / &/' $dir/$license >> $tempfile diff --git a/gopls/internal/hooks/gofumpt_117.go b/gopls/internal/hooks/gofumpt_117.go new file mode 100644 index 00000000000..71886357704 --- /dev/null +++ b/gopls/internal/hooks/gofumpt_117.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package hooks + +import "golang.org/x/tools/gopls/internal/lsp/source" + +func updateGofumpt(options *source.Options) { +} diff --git a/gopls/internal/hooks/gofumpt_118.go b/gopls/internal/hooks/gofumpt_118.go new file mode 100644 index 00000000000..4eb523261dc --- /dev/null +++ b/gopls/internal/hooks/gofumpt_118.go @@ -0,0 +1,24 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package hooks + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/source" + "mvdan.cc/gofumpt/format" +) + +func updateGofumpt(options *source.Options) { + options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) { + return format.Source(src, format.Options{ + LangVersion: langVersion, + ModulePath: modulePath, + }) + } +} diff --git a/gopls/internal/hooks/hooks.go b/gopls/internal/hooks/hooks.go index 023aefeab98..5624a5eb386 100644 --- a/gopls/internal/hooks/hooks.go +++ b/gopls/internal/hooks/hooks.go @@ -8,27 +8,24 @@ package hooks // import "golang.org/x/tools/gopls/internal/hooks" import ( - "context" - - "golang.org/x/tools/gopls/internal/vulncheck" - "golang.org/x/tools/internal/lsp/source" - "mvdan.cc/gofumpt/format" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/diff" "mvdan.cc/xurls/v2" ) func Options(options *source.Options) { options.LicensesText = licensesText if options.GoDiff { - options.ComputeEdits = ComputeEdits + switch options.NewDiff { + case "old": + options.ComputeEdits = ComputeEdits + case "new": + options.ComputeEdits = diff.Strings + default: + options.ComputeEdits = BothDiffs + } } options.URLRegexp = xurls.Relaxed() - options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) { - return format.Source(src, format.Options{ - LangVersion: langVersion, - ModulePath: modulePath, - }) - } updateAnalyzers(options) - - options.Govulncheck = vulncheck.Govulncheck + updateGofumpt(options) } diff --git a/gopls/internal/hooks/licenses_test.go b/gopls/internal/hooks/licenses_test.go index 3b61d348d95..b10d7e2b36c 100644 --- a/gopls/internal/hooks/licenses_test.go +++ b/gopls/internal/hooks/licenses_test.go @@ -15,9 +15,9 @@ import ( ) func TestLicenses(t *testing.T) { - // License text differs for older Go versions because staticcheck isn't - // supported for those versions. - testenv.NeedsGo1Point(t, 17) + // License text differs for older Go versions because staticcheck or gofumpt + // isn't supported for those versions. + testenv.NeedsGo1Point(t, 18) if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skip("generating licenses only works on Unixes") diff --git a/internal/lsp/README.md b/gopls/internal/lsp/README.md similarity index 100% rename from internal/lsp/README.md rename to gopls/internal/lsp/README.md diff --git a/internal/lsp/analysis/embeddirective/embeddirective.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective.go similarity index 100% rename from internal/lsp/analysis/embeddirective/embeddirective.go rename to gopls/internal/lsp/analysis/embeddirective/embeddirective.go diff --git a/internal/lsp/analysis/embeddirective/embeddirective_test.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go similarity index 100% rename from internal/lsp/analysis/embeddirective/embeddirective_test.go rename to gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/a.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/embeddirective/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/b.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go similarity index 100% rename from internal/lsp/analysis/embeddirective/testdata/src/a/b.go rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/embedText b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText similarity index 100% rename from internal/lsp/analysis/embeddirective/testdata/src/a/embedText rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go similarity index 87% rename from internal/lsp/analysis/fillreturns/fillreturns.go rename to gopls/internal/lsp/analysis/fillreturns/fillreturns.go index 72fe65d79ca..c8146df2dd0 100644 --- a/internal/lsp/analysis/fillreturns/fillreturns.go +++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go @@ -19,6 +19,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/fuzzy" "golang.org/x/tools/internal/typeparams" ) @@ -51,9 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, fmt.Errorf("nil TypeInfo") } - errors := analysisinternal.GetTypeErrors(pass) outer: - for _, typeErr := range errors { + for _, typeErr := range pass.TypeErrors { // Filter out the errors that are not relevant to this analyzer. if !FixesError(typeErr) { continue @@ -70,6 +70,8 @@ outer: } // Get the end position of the error. + // (This heuristic assumes that the buffer is formatted, + // at least up to the end position of the error.) var buf bytes.Buffer if err := format.Node(&buf, pass.Fset, file); err != nil { continue @@ -112,7 +114,7 @@ outer: break } } - if enclosingFunc == nil { + if enclosingFunc == nil || enclosingFunc.Results == nil { continue } @@ -155,19 +157,23 @@ outer: fixed := make([]ast.Expr, len(enclosingFunc.Results.List)) // For each value in the return function declaration, find the leftmost element - // in the return statement that has the desired type. If no such element exits, + // in the return statement that has the desired type. If no such element exists, // fill in the missing value with the appropriate "zero" value. + // Beware that type information may be incomplete. var retTyps []types.Type for _, ret := range enclosingFunc.Results.List { - retTyps = append(retTyps, info.TypeOf(ret.Type)) + retTyp := info.TypeOf(ret.Type) + if retTyp == nil { + return nil, nil + } + retTyps = append(retTyps, retTyp) } - matches := - analysisinternal.FindMatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg) + matches := analysisinternal.MatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg) for i, retTyp := range retTyps { var match ast.Expr var idx int for j, val := range remaining { - if !matchingTypes(info.TypeOf(val), retTyp) { + if t := info.TypeOf(val); t == nil || !matchingTypes(t, retTyp) { continue } if !analysisinternal.IsZeroValue(val) { @@ -184,21 +190,19 @@ outer: fixed[i] = match remaining = append(remaining[:idx], remaining[idx+1:]...) } else { - idents, ok := matches[retTyp] + names, ok := matches[retTyp] if !ok { return nil, fmt.Errorf("invalid return type: %v", retTyp) } - // Find the identifier whose name is most similar to the return type. - // If we do not find any identifier that matches the pattern, - // generate a zero value. - value := analysisinternal.FindBestMatch(retTyp.String(), idents) - if value == nil { - value = analysisinternal.ZeroValue(file, pass.Pkg, retTyp) - } - if value == nil { + // Find the identifier most similar to the return type. + // If no identifier matches the pattern, generate a zero value. + if best := fuzzy.BestMatch(retTyp.String(), names); best != "" { + fixed[i] = ast.NewIdent(best) + } else if zero := analysisinternal.ZeroValue(file, pass.Pkg, retTyp); zero != nil { + fixed[i] = zero + } else { return nil, nil } - fixed[i] = value } } diff --git a/internal/lsp/analysis/fillreturns/fillreturns_test.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go similarity index 89% rename from internal/lsp/analysis/fillreturns/fillreturns_test.go rename to gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go index 7ef0d46792e..1f7627551a0 100644 --- a/internal/lsp/analysis/fillreturns/fillreturns_test.go +++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/fillreturns" + "golang.org/x/tools/gopls/internal/lsp/analysis/fillreturns" "golang.org/x/tools/internal/typeparams" ) diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/fillreturns/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go similarity index 100% rename from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden similarity index 100% rename from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go similarity index 76% rename from internal/lsp/analysis/fillstruct/fillstruct.go rename to gopls/internal/lsp/analysis/fillstruct/fillstruct.go index f160d4422ae..af29a3632f1 100644 --- a/internal/lsp/analysis/fillstruct/fillstruct.go +++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go @@ -4,6 +4,12 @@ // Package fillstruct defines an Analyzer that automatically // fills in a struct declaration with zero value elements for each field. +// +// The analyzer's diagnostic is merely a prompt. +// The actual fix is created by a separate direct call from gopls to +// the SuggestedFixes function. +// Tests of Analyzer.Run can be found in ./testdata/src. +// Tests of the SuggestedFixes logic live in ../../testdata/fillstruct. package fillstruct import ( @@ -20,8 +26,9 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/gopls/internal/lsp/safetoken" "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/fuzzy" "golang.org/x/tools/internal/typeparams" ) @@ -45,12 +52,10 @@ func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{(*ast.CompositeLit)(nil)} inspect.Preorder(nodeFilter, func(n ast.Node) { - info := pass.TypesInfo - if info == nil { - return - } expr := n.(*ast.CompositeLit) + // Find enclosing file. + // TODO(adonovan): use inspect.WithStack? var file *ast.File for _, f := range pass.Files { if f.Pos() <= expr.Pos() && expr.Pos() <= f.End() { @@ -62,73 +67,49 @@ func run(pass *analysis.Pass) (interface{}, error) { return } - typ := info.TypeOf(expr) + typ := pass.TypesInfo.TypeOf(expr) if typ == nil { return } - // Ignore types that have type parameters for now. - // TODO: support type params. - if typ, ok := typ.(*types.Named); ok { - if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 { - return - } - } - // Find reference to the type declaration of the struct being initialized. - for { - p, ok := typ.Underlying().(*types.Pointer) - if !ok { - break - } - typ = p.Elem() - } - typ = typ.Underlying() - - obj, ok := typ.(*types.Struct) + typ = deref(typ) + tStruct, ok := typ.Underlying().(*types.Struct) if !ok { return } - fieldCount := obj.NumFields() + // Inv: typ is the possibly-named struct type. + + fieldCount := tStruct.NumFields() // Skip any struct that is already populated or that has no fields. if fieldCount == 0 || fieldCount == len(expr.Elts) { return } - var fillable bool + // Are any fields in need of filling? var fillableFields []string for i := 0; i < fieldCount; i++ { - field := obj.Field(i) + field := tStruct.Field(i) // Ignore fields that are not accessible in the current package. if field.Pkg() != nil && field.Pkg() != pass.Pkg && !field.Exported() { continue } - // Ignore structs containing fields that have type parameters for now. - // TODO: support type params. - if typ, ok := field.Type().(*types.Named); ok { - if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 { - return - } - } - if _, ok := field.Type().(*typeparams.TypeParam); ok { - return - } - fillable = true fillableFields = append(fillableFields, fmt.Sprintf("%s: %s", field.Name(), field.Type().String())) } - if !fillable { + if len(fillableFields) == 0 { return } + + // Derive a name for the struct type. var name string - switch typ := expr.Type.(type) { - case *ast.Ident: - name = typ.Name - case *ast.SelectorExpr: - name = fmt.Sprintf("%s.%s", typ.X, typ.Sel.Name) - default: + if typ != tStruct { + // named struct type (e.g. pkg.S[T]) + name = types.TypeString(typ, types.RelativeTo(pass.Pkg)) + } else { + // anonymous struct type totalFields := len(fillableFields) - maxLen := 20 + const maxLen = 20 // Find the index to cut off printing of fields. var i, fieldLen int for i = range fillableFields { @@ -152,8 +133,14 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { - pos := rng.Start // don't use the end +// SuggestedFix computes the suggested fix for the kinds of +// diagnostics produced by the Analyzer above. +func SuggestedFix(fset *token.FileSet, start, end token.Pos, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + if info == nil { + return nil, fmt.Errorf("nil types.Info") + } + + pos := start // don't use the end // TODO(rstambler): Using ast.Inspect would probably be more efficient than // calling PathEnclosingInterval. Switch this approach. @@ -169,37 +156,29 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast } } - if info == nil { - return nil, fmt.Errorf("nil types.Info") - } typ := info.TypeOf(expr) if typ == nil { return nil, fmt.Errorf("no composite literal") } // Find reference to the type declaration of the struct being initialized. - for { - p, ok := typ.Underlying().(*types.Pointer) - if !ok { - break - } - typ = p.Elem() - } - typ = typ.Underlying() - - obj, ok := typ.(*types.Struct) + typ = deref(typ) + tStruct, ok := typ.Underlying().(*types.Struct) if !ok { - return nil, fmt.Errorf("unexpected type %v (%T), expected *types.Struct", typ, typ) + return nil, fmt.Errorf("%s is not a (pointer to) struct type", + types.TypeString(typ, types.RelativeTo(pkg))) } - fieldCount := obj.NumFields() + // Inv: typ is the the possibly-named struct type. + + fieldCount := tStruct.NumFields() // Check which types have already been filled in. (we only want to fill in // the unfilled types, or else we'll blat user-supplied details) - prefilledTypes := map[string]ast.Expr{} + prefilledFields := map[string]ast.Expr{} for _, e := range expr.Elts { if kv, ok := e.(*ast.KeyValueExpr); ok { if key, ok := kv.Key.(*ast.Ident); ok { - prefilledTypes[key.Name] = kv.Value + prefilledFields[key.Name] = kv.Value } } } @@ -209,14 +188,16 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast // each field we're going to set. format.Node only cares about line // numbers, so we don't need to set columns, and each line can be // 1 byte long. + // TODO(adonovan): why is this necessary? The position information + // is going to be wrong for the existing trees in prefilledFields. + // Can't the formatter just do its best with an empty fileset? fakeFset := token.NewFileSet() tok := fakeFset.AddFile("", -1, fieldCount+2) line := 2 // account for 1-based lines and the left brace - var elts []ast.Expr var fieldTyps []types.Type for i := 0; i < fieldCount; i++ { - field := obj.Field(i) + field := tStruct.Field(i) // Ignore fields that are not accessible in the current package. if field.Pkg() != nil && field.Pkg() != pkg && !field.Exported() { fieldTyps = append(fieldTyps, nil) @@ -224,11 +205,13 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast } fieldTyps = append(fieldTyps, field.Type()) } - matches := analysisinternal.FindMatchingIdents(fieldTyps, file, rng.Start, info, pkg) + matches := analysisinternal.MatchingIdents(fieldTyps, file, start, info, pkg) + var elts []ast.Expr for i, fieldTyp := range fieldTyps { if fieldTyp == nil { - continue + continue // TODO(adonovan): is this reachable? } + fieldName := tStruct.Field(i).Name() tok.AddLine(line - 1) // add 1 byte per line if line > tok.LineCount() { @@ -239,30 +222,28 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast kv := &ast.KeyValueExpr{ Key: &ast.Ident{ NamePos: pos, - Name: obj.Field(i).Name(), + Name: fieldName, }, Colon: pos, } - if expr, ok := prefilledTypes[obj.Field(i).Name()]; ok { + if expr, ok := prefilledFields[fieldName]; ok { kv.Value = expr } else { - idents, ok := matches[fieldTyp] + names, ok := matches[fieldTyp] if !ok { return nil, fmt.Errorf("invalid struct field type: %v", fieldTyp) } - // Find the identifier whose name is most similar to the name of the field's key. - // If we do not find any identifier that matches the pattern, generate a new value. + // Find the name most similar to the field name. + // If no name matches the pattern, generate a zero value. // NOTE: We currently match on the name of the field key rather than the field type. - value := analysisinternal.FindBestMatch(obj.Field(i).Name(), idents) - if value == nil { - value = populateValue(file, pkg, fieldTyp) - } - if value == nil { + if best := fuzzy.BestMatch(fieldName, names); best != "" { + kv.Value = ast.NewIdent(best) + } else if v := populateValue(file, pkg, fieldTyp); v != nil { + kv.Value = v + } else { return nil, nil } - - kv.Value = value } elts = append(elts, kv) line++ @@ -290,7 +271,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast // Find the line on which the composite literal is declared. split := bytes.Split(content, []byte("\n")) - lineNumber := fset.Position(expr.Lbrace).Line + lineNumber := safetoken.StartPosition(fset, expr.Lbrace).Line firstLine := split[lineNumber-1] // lines are 1-indexed // Trim the whitespace from the left of the line, and use the index @@ -306,7 +287,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast } sug := indent(formatBuf.Bytes(), whitespace) - if len(prefilledTypes) > 0 { + if len(prefilledFields) > 0 { // Attempt a second pass through the formatter to line up columns. sourced, err := format.Source(sug) if err == nil { @@ -350,16 +331,12 @@ func indent(str, ind []byte) []byte { // // When the type of a struct field is a basic literal or interface, we return // default values. For other types, such as maps, slices, and channels, we create -// expressions rather than using default values. +// empty expressions such as []T{} or make(chan T) rather than using default values. // // The reasoning here is that users will call fillstruct with the intention of // initializing the struct, in which case setting these fields to nil has no effect. func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - under := typ - if n, ok := typ.(*types.Named); ok { - under = n.Underlying() - } - switch u := under.(type) { + switch u := typ.Underlying().(type) { case *types.Basic: switch { case u.Info()&types.IsNumeric != 0: @@ -373,6 +350,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { default: panic("unknown basic type") } + case *types.Map: k := analysisinternal.TypeExpr(f, pkg, u.Key()) v := analysisinternal.TypeExpr(f, pkg, u.Elem()) @@ -395,6 +373,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { Elt: s, }, } + case *types.Array: a := analysisinternal.TypeExpr(f, pkg, u.Elem()) if a == nil { @@ -408,6 +387,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { }, }, } + case *types.Chan: v := analysisinternal.TypeExpr(f, pkg, u.Elem()) if v == nil { @@ -426,6 +406,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { }, }, } + case *types.Struct: s := analysisinternal.TypeExpr(f, pkg, typ) if s == nil { @@ -434,6 +415,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { return &ast.CompositeLit{ Type: s, } + case *types.Signature: var params []*ast.Field for i := 0; i < u.Params().Len(); i++ { @@ -471,6 +453,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { }, Body: &ast.BlockStmt{}, } + case *types.Pointer: switch u.Elem().(type) { case *types.Basic: @@ -490,8 +473,34 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { X: populateValue(f, pkg, u.Elem()), } } + case *types.Interface: + if param, ok := typ.(*typeparams.TypeParam); ok { + // *new(T) is the zero value of a type parameter T. + // TODO(adonovan): one could give a more specific zero + // value if the type has a core type that is, say, + // always a number or a pointer. See go/ssa for details. + return &ast.StarExpr{ + X: &ast.CallExpr{ + Fun: ast.NewIdent("new"), + Args: []ast.Expr{ + ast.NewIdent(param.Obj().Name()), + }, + }, + } + } + return ast.NewIdent("nil") } return nil } + +func deref(t types.Type) types.Type { + for { + ptr, ok := t.Underlying().(*types.Pointer) + if !ok { + return t + } + t = ptr.Elem() + } +} diff --git a/internal/lsp/analysis/fillstruct/fillstruct_test.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go similarity index 89% rename from internal/lsp/analysis/fillstruct/fillstruct_test.go rename to gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go index 51a516cdfdb..66642b7ab59 100644 --- a/internal/lsp/analysis/fillstruct/fillstruct_test.go +++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/fillstruct" + "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct" "golang.org/x/tools/internal/typeparams" ) diff --git a/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go new file mode 100644 index 00000000000..9ee3860fcae --- /dev/null +++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go @@ -0,0 +1,113 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fillstruct + +import ( + data "b" + "go/ast" + "go/token" + "unsafe" +) + +type emptyStruct struct{} + +var _ = emptyStruct{} + +type basicStruct struct { + foo int +} + +var _ = basicStruct{} // want `Fill basicStruct` + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{} // want `Fill twoArgStruct` + +var _ = twoArgStruct{ // want `Fill twoArgStruct` + bar: "bar", +} + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{} // want `Fill nestedStruct` + +var _ = data.B{} // want `Fill b.B` + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{} // want `Fill typedStruct` + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{} // want `Fill funStruct` + +type funStructComplex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructComplex{} // want `Fill funStructComplex` + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{} // want `Fill funStructEmpty` + +type Foo struct { + A int +} + +type Bar struct { + X *Foo + Y *Foo +} + +var _ = Bar{} // want `Fill Bar` + +type importedStruct struct { + m map[*ast.CompositeLit]ast.Field + s []ast.BadExpr + a [3]token.Token + c chan ast.EmptyStmt + fn func(ast_decl ast.DeclStmt) ast.Ellipsis + st ast.CompositeLit +} + +var _ = importedStruct{} // want `Fill importedStruct` + +type pointerBuiltinStruct struct { + b *bool + s *string + i *int +} + +var _ = pointerBuiltinStruct{} // want `Fill pointerBuiltinStruct` + +var _ = []ast.BasicLit{ + {}, // want `Fill go/ast.BasicLit` +} + +var _ = []ast.BasicLit{{}, // want "go/ast.BasicLit" +} + +type unsafeStruct struct { + foo unsafe.Pointer +} + +var _ = unsafeStruct{} // want `Fill unsafeStruct` diff --git a/internal/lsp/analysis/fillstruct/testdata/src/b/b.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go similarity index 100% rename from internal/lsp/analysis/fillstruct/testdata/src/b/b.go rename to gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go diff --git a/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go new file mode 100644 index 00000000000..46bb8ae4027 --- /dev/null +++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go @@ -0,0 +1,50 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fillstruct + +type emptyStruct[A any] struct{} + +var _ = emptyStruct[int]{} + +type basicStruct[T any] struct { + foo T +} + +var _ = basicStruct[int]{} // want `Fill basicStruct\[int\]` + +type twoArgStruct[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStruct[string, int]{} // want `Fill twoArgStruct\[string, int\]` + +var _ = twoArgStruct[int, string]{ // want `Fill twoArgStruct\[int, string\]` + bar: "bar", +} + +type nestedStruct struct { + bar string + basic basicStruct[int] +} + +var _ = nestedStruct{} // want "Fill nestedStruct" + +func _[T any]() { + type S struct{ t T } + x := S{} // want "Fill S" + _ = x +} + +func Test() { + var tests = []struct { + a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p string + }{ + {}, // want "Fill anonymous struct { a: string, b: string, c: string, ... }" + } + for _, test := range tests { + _ = test + } +} diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/infertypeargs.go rename to gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go similarity index 80% rename from internal/lsp/analysis/infertypeargs/infertypeargs_test.go rename to gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go index 2957f46e367..70855e1ab3e 100644 --- a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go +++ b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go @@ -8,13 +8,11 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/infertypeargs" - "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/gopls/internal/lsp/analysis/infertypeargs" "golang.org/x/tools/internal/typeparams" ) func Test(t *testing.T) { - testenv.NeedsGo1Point(t, 13) if !typeparams.Enabled { t.Skip("type params are not enabled") } diff --git a/internal/lsp/analysis/infertypeargs/run_go117.go b/gopls/internal/lsp/analysis/infertypeargs/run_go117.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/run_go117.go rename to gopls/internal/lsp/analysis/infertypeargs/run_go117.go diff --git a/internal/lsp/analysis/infertypeargs/run_go118.go b/gopls/internal/lsp/analysis/infertypeargs/run_go118.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/run_go118.go rename to gopls/internal/lsp/analysis/infertypeargs/run_go118.go diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden similarity index 100% rename from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden diff --git a/internal/lsp/analysis/nonewvars/nonewvars.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go similarity index 94% rename from internal/lsp/analysis/nonewvars/nonewvars.go rename to gopls/internal/lsp/analysis/nonewvars/nonewvars.go index e7fa430cc53..6937b36d1f5 100644 --- a/internal/lsp/analysis/nonewvars/nonewvars.go +++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go @@ -30,7 +30,7 @@ will turn into ` var Analyzer = &analysis.Analyzer{ - Name: string(analysisinternal.NoNewVars), + Name: "nonewvars", Doc: Doc, Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,9 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - errors := analysisinternal.GetTypeErrors(pass) + if len(pass.TypeErrors) == 0 { + return nil, nil + } nodeFilter := []ast.Node{(*ast.AssignStmt)(nil)} inspect.Preorder(nodeFilter, func(n ast.Node) { @@ -60,7 +62,7 @@ func run(pass *analysis.Pass) (interface{}, error) { return } - for _, err := range errors { + for _, err := range pass.TypeErrors { if !FixesError(err.Msg) { continue } diff --git a/internal/lsp/analysis/nonewvars/nonewvars_test.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go similarity index 89% rename from internal/lsp/analysis/nonewvars/nonewvars_test.go rename to gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go index dc58ab0ff5e..8f6f0a51fb4 100644 --- a/internal/lsp/analysis/nonewvars/nonewvars_test.go +++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/nonewvars" + "golang.org/x/tools/gopls/internal/lsp/analysis/nonewvars" "golang.org/x/tools/internal/typeparams" ) diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/nonewvars/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go similarity index 100% rename from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden similarity index 100% rename from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden diff --git a/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go new file mode 100644 index 00000000000..41952a5479e --- /dev/null +++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go @@ -0,0 +1,92 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package noresultvalues defines an Analyzer that applies suggested fixes +// to errors of the type "no result values expected". +package noresultvalues + +import ( + "bytes" + "go/ast" + "go/format" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" +) + +const Doc = `suggested fixes for unexpected return values + +This checker provides suggested fixes for type errors of the +type "no result values expected" or "too many return values". +For example: + func z() { return nil } +will turn into + func z() { return } +` + +var Analyzer = &analysis.Analyzer{ + Name: "noresultvalues", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, + RunDespiteErrors: true, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if len(pass.TypeErrors) == 0 { + return nil, nil + } + + nodeFilter := []ast.Node{(*ast.ReturnStmt)(nil)} + inspect.Preorder(nodeFilter, func(n ast.Node) { + retStmt, _ := n.(*ast.ReturnStmt) + + var file *ast.File + for _, f := range pass.Files { + if f.Pos() <= retStmt.Pos() && retStmt.Pos() < f.End() { + file = f + break + } + } + if file == nil { + return + } + + for _, err := range pass.TypeErrors { + if !FixesError(err.Msg) { + continue + } + if retStmt.Pos() >= err.Pos || err.Pos >= retStmt.End() { + continue + } + var buf bytes.Buffer + if err := format.Node(&buf, pass.Fset, file); err != nil { + continue + } + pass.Report(analysis.Diagnostic{ + Pos: err.Pos, + End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos), + Message: err.Msg, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Delete return values", + TextEdits: []analysis.TextEdit{{ + Pos: retStmt.Pos(), + End: retStmt.End(), + NewText: []byte("return"), + }}, + }}, + }) + } + }) + return nil, nil +} + +func FixesError(msg string) bool { + return msg == "no result values expected" || + strings.HasPrefix(msg, "too many return values") && strings.Contains(msg, "want ()") +} diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go similarity index 89% rename from internal/lsp/analysis/noresultvalues/noresultvalues_test.go rename to gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go index 12198a1c130..24ce39207ee 100644 --- a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go +++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/noresultvalues" + "golang.org/x/tools/gopls/internal/lsp/analysis/noresultvalues" "golang.org/x/tools/internal/typeparams" ) diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go similarity index 100% rename from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden similarity index 100% rename from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go similarity index 100% rename from internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go rename to gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go similarity index 85% rename from internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go rename to gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go index e60f7d6b055..b0365a6b3da 100644 --- a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go +++ b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/simplifycompositelit" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifycompositelit" ) func Test(t *testing.T) { diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go similarity index 100% rename from internal/lsp/analysis/simplifyrange/simplifyrange.go rename to gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go similarity index 86% rename from internal/lsp/analysis/simplifyrange/simplifyrange_test.go rename to gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go index ecc7a969257..fbd57ec2d65 100644 --- a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go +++ b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/simplifyrange" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyrange" ) func Test(t *testing.T) { diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go similarity index 100% rename from internal/lsp/analysis/simplifyslice/simplifyslice.go rename to gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go similarity index 89% rename from internal/lsp/analysis/simplifyslice/simplifyslice_test.go rename to gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go index cff6267c679..41914ba3170 100644 --- a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go +++ b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/simplifyslice" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyslice" "golang.org/x/tools/internal/typeparams" ) diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go similarity index 100% rename from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden similarity index 100% rename from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden diff --git a/internal/lsp/analysis/stubmethods/stubmethods.go b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go similarity index 91% rename from internal/lsp/analysis/stubmethods/stubmethods.go rename to gopls/internal/lsp/analysis/stubmethods/stubmethods.go index f9dc69a9652..9ff869225ff 100644 --- a/internal/lsp/analysis/stubmethods/stubmethods.go +++ b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go @@ -35,7 +35,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (interface{}, error) { - for _, err := range analysisinternal.GetTypeErrors(pass) { + for _, err := range pass.TypeErrors { ifaceErr := strings.Contains(err.Msg, "missing method") || strings.HasPrefix(err.Msg, "cannot convert") if !ifaceErr { continue @@ -84,7 +84,7 @@ type StubInfo struct { // in the case where the concrete type file requires a new import that happens to be renamed // in the interface file. // TODO(marwan-at-work): implement interface literals. - Interface types.Object + Interface *types.TypeName Concrete *types.Named Pointer bool } @@ -269,19 +269,21 @@ func fromAssignStmt(ti *types.Info, as *ast.AssignStmt, pos token.Pos) *StubInfo } } -// RelativeToFiles returns a types.Qualifier that formats package names -// according to the files where the concrete and interface types are defined. +// RelativeToFiles returns a types.Qualifier that formats package +// names according to the import environments of the files that define +// the concrete type and the interface type. (Only the imports of the +// latter file are provided.) // // This is similar to types.RelativeTo except if a file imports the package with a different name, // then it will use it. And if the file does import the package but it is ignored, -// then it will return the original name. It also prefers package names in ifaceFile in case -// an import is missing from concFile but is present in ifaceFile. +// then it will return the original name. It also prefers package names in importEnv in case +// an import is missing from concFile but is present among importEnv. // // Additionally, if missingImport is not nil, the function will be called whenever the concFile // is presented with a package that is not imported. This is useful so that as types.TypeString is // formatting a function signature, it is identifying packages that will need to be imported when // stubbing an interface. -func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, missingImport func(name, path string)) types.Qualifier { +func RelativeToFiles(concPkg *types.Package, concFile *ast.File, ifaceImports []*ast.ImportSpec, missingImport func(name, path string)) types.Qualifier { return func(other *types.Package) string { if other == concPkg { return "" @@ -292,6 +294,7 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss for _, imp := range concFile.Imports { impPath, _ := strconv.Unquote(imp.Path.Value) isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_") + // TODO(adonovan): this comparison disregards a vendor prefix in 'other'. if impPath == other.Path() && !isIgnored { importName := other.Name() if imp.Name != nil { @@ -304,16 +307,15 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss // If the concrete file does not have the import, check if the package // is renamed in the interface file and prefer that. var importName string - if ifaceFile != nil { - for _, imp := range ifaceFile.Imports { - impPath, _ := strconv.Unquote(imp.Path.Value) - isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_") - if impPath == other.Path() && !isIgnored { - if imp.Name != nil && imp.Name.Name != concPkg.Name() { - importName = imp.Name.Name - } - break + for _, imp := range ifaceImports { + impPath, _ := strconv.Unquote(imp.Path.Value) + isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_") + // TODO(adonovan): this comparison disregards a vendor prefix in 'other'. + if impPath == other.Path() && !isIgnored { + if imp.Name != nil && imp.Name.Name != concPkg.Name() { + importName = imp.Name.Name } + break } } @@ -333,7 +335,7 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss // ifaceType will try to extract the types.Object that defines // the interface given the ast.Expr where the "missing method" // or "conversion" errors happen. -func ifaceType(n ast.Expr, ti *types.Info) types.Object { +func ifaceType(n ast.Expr, ti *types.Info) *types.TypeName { tv, ok := ti.Types[n] if !ok { return nil @@ -341,7 +343,7 @@ func ifaceType(n ast.Expr, ti *types.Info) types.Object { return ifaceObjFromType(tv.Type) } -func ifaceObjFromType(t types.Type) types.Object { +func ifaceObjFromType(t types.Type) *types.TypeName { named, ok := t.(*types.Named) if !ok { return nil diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go new file mode 100644 index 00000000000..c5d8a2d789c --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func x() int { + var z int + z = y // want "(undeclared name|undefined): y" + + if z == m { // want "(undeclared name|undefined): m" + z = 1 + } + + if z == 1 { + z = 1 + } else if z == n+1 { // want "(undeclared name|undefined): n" + z = 1 + } + + switch z { + case 10: + z = 1 + case a: // want "(undeclared name|undefined): a" + z = 1 + } + return z +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go new file mode 100644 index 00000000000..76c7ba685e1 --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func channels(s string) { + undefinedChannels(c()) // want "(undeclared name|undefined): undefinedChannels" +} + +func c() (<-chan string, chan string) { + return make(<-chan string), make(chan string) +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go new file mode 100644 index 00000000000..73beace102c --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go @@ -0,0 +1,10 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func consecutiveParams() { + var s string + undefinedConsecutiveParams(s, s) // want "(undeclared name|undefined): undefinedConsecutiveParams" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go new file mode 100644 index 00000000000..5de9254112d --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go @@ -0,0 +1,10 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func errorParam() { + var err error + undefinedErrorParam(err) // want "(undeclared name|undefined): undefinedErrorParam" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go new file mode 100644 index 00000000000..c62174ec947 --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +type T struct{} + +func literals() { + undefinedLiterals("hey compiler", T{}, &T{}) // want "(undeclared name|undefined): undefinedLiterals" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go new file mode 100644 index 00000000000..9396da4bd9d --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +import "time" + +func operation() { + undefinedOperation(10 * time.Second) // want "(undeclared name|undefined): undefinedOperation" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go new file mode 100644 index 00000000000..a4ed290d466 --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go @@ -0,0 +1,10 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func selector() { + m := map[int]bool{} + undefinedSelector(m[1]) // want "(undeclared name|undefined): undefinedSelector" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go new file mode 100644 index 00000000000..5cde299add3 --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func slice() { + undefinedSlice([]int{1, 2}) // want "(undeclared name|undefined): undefinedSlice" +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go new file mode 100644 index 00000000000..9e91c59c25e --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func tuple() { + undefinedTuple(b()) // want "(undeclared name|undefined): undefinedTuple" +} + +func b() (string, error) { + return "", nil +} diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go new file mode 100644 index 00000000000..5b4241425e5 --- /dev/null +++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package undeclared + +func uniqueArguments() { + var s string + var i int + undefinedUniqueArguments(s, i, s) // want "(undeclared name|undefined): undefinedUniqueArguments" +} diff --git a/internal/lsp/analysis/undeclaredname/undeclared.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go similarity index 90% rename from internal/lsp/analysis/undeclaredname/undeclared.go rename to gopls/internal/lsp/analysis/undeclaredname/undeclared.go index faa14091aee..043979408d0 100644 --- a/internal/lsp/analysis/undeclaredname/undeclared.go +++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go @@ -18,8 +18,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/safetoken" "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/span" ) const Doc = `suggested fixes for "undeclared name: <>" @@ -38,27 +38,34 @@ func <>(inferred parameters) { ` var Analyzer = &analysis.Analyzer{ - Name: string(analysisinternal.UndeclaredName), + Name: "undeclaredname", Doc: Doc, Requires: []*analysis.Analyzer{}, Run: run, RunDespiteErrors: true, } -const undeclaredNamePrefix = "undeclared name: " +// The prefix for this error message changed in Go 1.20. +var undeclaredNamePrefixes = []string{"undeclared name: ", "undefined: "} func run(pass *analysis.Pass) (interface{}, error) { - for _, err := range analysisinternal.GetTypeErrors(pass) { + for _, err := range pass.TypeErrors { runForError(pass, err) } return nil, nil } func runForError(pass *analysis.Pass, err types.Error) { - if !strings.HasPrefix(err.Msg, undeclaredNamePrefix) { + var name string + for _, prefix := range undeclaredNamePrefixes { + if !strings.HasPrefix(err.Msg, prefix) { + continue + } + name = strings.TrimPrefix(err.Msg, prefix) + } + if name == "" { return } - name := strings.TrimPrefix(err.Msg, undeclaredNamePrefix) var file *ast.File for _, f := range pass.Files { if f.Pos() <= err.Pos && err.Pos < f.End() { @@ -105,8 +112,8 @@ func runForError(pass *analysis.Pass, err types.Error) { if tok == nil { return } - offset := pass.Fset.Position(err.Pos).Offset - end := tok.Pos(offset + len(name)) + offset := safetoken.StartPosition(pass.Fset, err.Pos).Offset + end := tok.Pos(offset + len(name)) // TODO(adonovan): dubious! err.Pos + len(name)?? pass.Report(analysis.Diagnostic{ Pos: err.Pos, End: end, @@ -114,8 +121,8 @@ func runForError(pass *analysis.Pass, err types.Error) { }) } -func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { - pos := rng.Start // don't use the end +func SuggestedFix(fset *token.FileSet, start, end token.Pos, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + pos := start // don't use the end path, _ := astutil.PathEnclosingInterval(file, pos, pos) if len(path) < 2 { return nil, fmt.Errorf("no expression found") @@ -139,7 +146,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast return nil, fmt.Errorf("could not locate insertion point") } - insertBefore := fset.Position(insertBeforeStmt.Pos()).Offset + insertBefore := safetoken.StartPosition(fset, insertBeforeStmt.Pos()).Offset // Get the indent to add on the line after the new statement. // Since this will have a parse error, we can not use format.Source(). diff --git a/internal/lsp/analysis/undeclaredname/undeclared_test.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go similarity index 85% rename from internal/lsp/analysis/undeclaredname/undeclared_test.go rename to gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go index b7154393742..306c3f03941 100644 --- a/internal/lsp/analysis/undeclaredname/undeclared_test.go +++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/undeclaredname" + "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname" ) func Test(t *testing.T) { diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/unusedparams/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go similarity index 100% rename from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden similarity index 100% rename from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden diff --git a/internal/lsp/analysis/unusedparams/unusedparams.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams.go similarity index 100% rename from internal/lsp/analysis/unusedparams/unusedparams.go rename to gopls/internal/lsp/analysis/unusedparams/unusedparams.go diff --git a/internal/lsp/analysis/unusedparams/unusedparams_test.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go similarity index 89% rename from internal/lsp/analysis/unusedparams/unusedparams_test.go rename to gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go index dff17c95e5d..fdd43b821fe 100644 --- a/internal/lsp/analysis/unusedparams/unusedparams_test.go +++ b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/unusedparams" + "golang.org/x/tools/gopls/internal/lsp/analysis/unusedparams" "golang.org/x/tools/internal/typeparams" ) diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go new file mode 100644 index 00000000000..aa9f46e5b31 --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go @@ -0,0 +1,74 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import ( + "fmt" + "os" +) + +type A struct { + b int +} + +func singleAssignment() { + v := "s" // want `v declared (and|but) not used` + + s := []int{ // want `s declared (and|but) not used` + 1, + 2, + } + + a := func(s string) bool { // want `a declared (and|but) not used` + return false + } + + if 1 == 1 { + s := "v" // want `s declared (and|but) not used` + } + + panic("I should survive") +} + +func noOtherStmtsInBlock() { + v := "s" // want `v declared (and|but) not used` +} + +func partOfMultiAssignment() { + f, err := os.Open("file") // want `f declared (and|but) not used` + panic(err) +} + +func sideEffects(cBool chan bool, cInt chan int) { + b := <-c // want `b declared (and|but) not used` + s := fmt.Sprint("") // want `s declared (and|but) not used` + a := A{ // want `a declared (and|but) not used` + b: func() int { + return 1 + }(), + } + c := A{<-cInt} // want `c declared (and|but) not used` + d := fInt() + <-cInt // want `d declared (and|but) not used` + e := fBool() && <-cBool // want `e declared (and|but) not used` + f := map[int]int{ // want `f declared (and|but) not used` + fInt(): <-cInt, + } + g := []int{<-cInt} // want `g declared (and|but) not used` + h := func(s string) {} // want `h declared (and|but) not used` + i := func(s string) {}() // want `i declared (and|but) not used` +} + +func commentAbove() { + // v is a variable + v := "s" // want `v declared (and|but) not used` +} + +func fBool() bool { + return true +} + +func fInt() int { + return 1 +} diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden new file mode 100644 index 00000000000..18173ce0bf9 --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden @@ -0,0 +1,59 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import ( + "fmt" + "os" +) + +type A struct { + b int +} + +func singleAssignment() { + if 1 == 1 { + } + + panic("I should survive") +} + +func noOtherStmtsInBlock() { +} + +func partOfMultiAssignment() { + _, err := os.Open("file") // want `f declared (and|but) not used` + panic(err) +} + +func sideEffects(cBool chan bool, cInt chan int) { + <-c // want `b declared (and|but) not used` + fmt.Sprint("") // want `s declared (and|but) not used` + A{ // want `a declared (and|but) not used` + b: func() int { + return 1 + }(), + } + A{<-cInt} // want `c declared (and|but) not used` + fInt() + <-cInt // want `d declared (and|but) not used` + fBool() && <-cBool // want `e declared (and|but) not used` + map[int]int{ // want `f declared (and|but) not used` + fInt(): <-cInt, + } + []int{<-cInt} // want `g declared (and|but) not used` + func(s string) {}() // want `i declared (and|but) not used` +} + +func commentAbove() { + // v is a variable +} + +func fBool() bool { + return true +} + +func fInt() int { + return 1 +} diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go new file mode 100644 index 00000000000..8e843024a54 --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package decl + +func a() { + var b, c bool // want `b declared (and|but) not used` + panic(c) + + if 1 == 1 { + var s string // want `s declared (and|but) not used` + } +} + +func b() { + // b is a variable + var b bool // want `b declared (and|but) not used` +} + +func c() { + var ( + d string + + // some comment for c + c bool // want `c declared (and|but) not used` + ) + + panic(d) +} diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden new file mode 100644 index 00000000000..6ed97332eea --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden @@ -0,0 +1,24 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package decl + +func a() { + var c bool // want `b declared (and|but) not used` + panic(c) + + if 1 == 1 { + } +} + +func b() { + // b is a variable +} + +func c() { + var ( + d string + ) + panic(d) +} diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go new file mode 100644 index 00000000000..904016be71e --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go @@ -0,0 +1,300 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedvariable defines an analyzer that checks for unused variables. +package unusedvariable + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" +) + +const Doc = `check for unused variables + +The unusedvariable analyzer suggests fixes for unused variables errors. +` + +var Analyzer = &analysis.Analyzer{ + Name: "unusedvariable", + Doc: Doc, + Requires: []*analysis.Analyzer{}, + Run: run, + RunDespiteErrors: true, // an unusedvariable diagnostic is a compile error +} + +// The suffix for this error message changed in Go 1.20. +var unusedVariableSuffixes = []string{" declared and not used", " declared but not used"} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, typeErr := range pass.TypeErrors { + for _, suffix := range unusedVariableSuffixes { + if strings.HasSuffix(typeErr.Msg, suffix) { + varName := strings.TrimSuffix(typeErr.Msg, suffix) + err := runForError(pass, typeErr, varName) + if err != nil { + return nil, err + } + } + } + } + + return nil, nil +} + +func runForError(pass *analysis.Pass, err types.Error, name string) error { + var file *ast.File + for _, f := range pass.Files { + if f.Pos() <= err.Pos && err.Pos < f.End() { + file = f + break + } + } + if file == nil { + return nil + } + + path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos) + if len(path) < 2 { + return nil + } + + ident, ok := path[0].(*ast.Ident) + if !ok || ident.Name != name { + return nil + } + + diag := analysis.Diagnostic{ + Pos: ident.Pos(), + End: ident.End(), + Message: err.Msg, + } + + for i := range path { + switch stmt := path[i].(type) { + case *ast.ValueSpec: + // Find GenDecl to which offending ValueSpec belongs. + if decl, ok := path[i+1].(*ast.GenDecl); ok { + fixes := removeVariableFromSpec(pass, path, stmt, decl, ident) + // fixes may be nil + if len(fixes) > 0 { + diag.SuggestedFixes = fixes + pass.Report(diag) + } + } + + case *ast.AssignStmt: + if stmt.Tok != token.DEFINE { + continue + } + + containsIdent := false + for _, expr := range stmt.Lhs { + if expr == ident { + containsIdent = true + } + } + if !containsIdent { + continue + } + + fixes := removeVariableFromAssignment(pass, path, stmt, ident) + // fixes may be nil + if len(fixes) > 0 { + diag.SuggestedFixes = fixes + pass.Report(diag) + } + } + } + + return nil +} + +func removeVariableFromSpec(pass *analysis.Pass, path []ast.Node, stmt *ast.ValueSpec, decl *ast.GenDecl, ident *ast.Ident) []analysis.SuggestedFix { + newDecl := new(ast.GenDecl) + *newDecl = *decl + newDecl.Specs = nil + + for _, spec := range decl.Specs { + if spec != stmt { + newDecl.Specs = append(newDecl.Specs, spec) + continue + } + + newSpec := new(ast.ValueSpec) + *newSpec = *stmt + newSpec.Names = nil + + for _, n := range stmt.Names { + if n != ident { + newSpec.Names = append(newSpec.Names, n) + } + } + + if len(newSpec.Names) > 0 { + newDecl.Specs = append(newDecl.Specs, newSpec) + } + } + + // decl.End() does not include any comments, so if a comment is present we + // need to account for it when we delete the statement + end := decl.End() + if stmt.Comment != nil && stmt.Comment.End() > end { + end = stmt.Comment.End() + } + + // There are no other specs left in the declaration, the whole statement can + // be deleted + if len(newDecl.Specs) == 0 { + // Find parent DeclStmt and delete it + for _, node := range path { + if declStmt, ok := node.(*ast.DeclStmt); ok { + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: deleteStmtFromBlock(path, declStmt), + }, + } + } + } + } + + var b bytes.Buffer + if err := format.Node(&b, pass.Fset, newDecl); err != nil { + return nil + } + + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: decl.Pos(), + // Avoid adding a new empty line + End: end + 1, + NewText: b.Bytes(), + }, + }, + }, + } +} + +func removeVariableFromAssignment(pass *analysis.Pass, path []ast.Node, stmt *ast.AssignStmt, ident *ast.Ident) []analysis.SuggestedFix { + // The only variable in the assignment is unused + if len(stmt.Lhs) == 1 { + // If LHS has only one expression to be valid it has to have 1 expression + // on RHS + // + // RHS may have side effects, preserve RHS + if exprMayHaveSideEffects(stmt.Rhs[0]) { + // Delete until RHS + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: ident.Pos(), + End: stmt.Rhs[0].Pos(), + }, + }, + }, + } + } + + // RHS does not have any side effects, delete the whole statement + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: deleteStmtFromBlock(path, stmt), + }, + } + } + + // Otherwise replace ident with `_` + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: ident.Pos(), + End: ident.End(), + NewText: []byte("_"), + }, + }, + }, + } +} + +func suggestedFixMessage(name string) string { + return fmt.Sprintf("Remove variable %s", name) +} + +func deleteStmtFromBlock(path []ast.Node, stmt ast.Stmt) []analysis.TextEdit { + // Find innermost enclosing BlockStmt. + var block *ast.BlockStmt + for i := range path { + if blockStmt, ok := path[i].(*ast.BlockStmt); ok { + block = blockStmt + break + } + } + + nodeIndex := -1 + for i, blockStmt := range block.List { + if blockStmt == stmt { + nodeIndex = i + break + } + } + + // The statement we need to delete was not found in BlockStmt + if nodeIndex == -1 { + return nil + } + + // Delete until the end of the block unless there is another statement after + // the one we are trying to delete + end := block.Rbrace + if nodeIndex < len(block.List)-1 { + end = block.List[nodeIndex+1].Pos() + } + + return []analysis.TextEdit{ + { + Pos: stmt.Pos(), + End: end, + }, + } +} + +// exprMayHaveSideEffects reports whether the expression may have side effects +// (because it contains a function call or channel receive). We disregard +// runtime panics as well written programs should not encounter them. +func exprMayHaveSideEffects(expr ast.Expr) bool { + var mayHaveSideEffects bool + ast.Inspect(expr, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.CallExpr: // possible function call + mayHaveSideEffects = true + return false + case *ast.UnaryExpr: + if n.Op == token.ARROW { // channel receive + mayHaveSideEffects = true + return false + } + case *ast.FuncLit: + return false // evaluating what's inside a FuncLit has no effect + } + return true + }) + + return mayHaveSideEffects +} diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go new file mode 100644 index 00000000000..08223155f6e --- /dev/null +++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unusedvariable_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/gopls/internal/lsp/analysis/unusedvariable" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + + t.Run("decl", func(t *testing.T) { + analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "decl") + }) + + t.Run("assign", func(t *testing.T) { + analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "assign") + }) +} diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go similarity index 100% rename from internal/lsp/analysis/useany/testdata/src/a/a.go rename to gopls/internal/lsp/analysis/useany/testdata/src/a/a.go diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden similarity index 100% rename from internal/lsp/analysis/useany/testdata/src/a/a.go.golden rename to gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden diff --git a/internal/lsp/analysis/useany/useany.go b/gopls/internal/lsp/analysis/useany/useany.go similarity index 100% rename from internal/lsp/analysis/useany/useany.go rename to gopls/internal/lsp/analysis/useany/useany.go diff --git a/internal/lsp/analysis/useany/useany_test.go b/gopls/internal/lsp/analysis/useany/useany_test.go similarity index 89% rename from internal/lsp/analysis/useany/useany_test.go rename to gopls/internal/lsp/analysis/useany/useany_test.go index 535d9152665..083c3d54fd4 100644 --- a/internal/lsp/analysis/useany/useany_test.go +++ b/gopls/internal/lsp/analysis/useany/useany_test.go @@ -8,7 +8,7 @@ import ( "testing" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/internal/lsp/analysis/useany" + "golang.org/x/tools/gopls/internal/lsp/analysis/useany" "golang.org/x/tools/internal/typeparams" ) diff --git a/internal/lsp/browser/README.md b/gopls/internal/lsp/browser/README.md similarity index 100% rename from internal/lsp/browser/README.md rename to gopls/internal/lsp/browser/README.md diff --git a/internal/lsp/browser/browser.go b/gopls/internal/lsp/browser/browser.go similarity index 100% rename from internal/lsp/browser/browser.go rename to gopls/internal/lsp/browser/browser.go diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go new file mode 100644 index 00000000000..1e1910834ff --- /dev/null +++ b/gopls/internal/lsp/cache/analysis.go @@ -0,0 +1,1228 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +// This file defines gopls' driver for modular static analysis (go/analysis). + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/gob" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "log" + "reflect" + "runtime/debug" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/gopls/internal/lsp/filecache" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/facts" + "golang.org/x/tools/internal/gcimporter" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +/* + + DESIGN + + An analysis request is for a set of analyzers and an individual + package ID, notated (a*, p). The result is the set of diagnostics + for that package. It could easily be generalized to a set of + packages, (a*, p*), and perhaps should be, to improve performance + versus calling it in a loop. + + The snapshot holds a cache (persistent.Map) of entries keyed by + (a*, p) pairs ("analysisKey") that have been requested so far. Some + of these entries may be invalidated during snapshot cloning after a + modification event. The cache maps each (a*, p) to a promise of + the analysis result or "analysisSummary". The summary contains the + results of analysis (e.g. diagnostics) as well as the intermediate + results required by the recursion, such as serialized types and + facts. + + The promise represents the result of a call to analyzeImpl, which + type-checks a package and then applies a graph of analyzers to it + in parallel postorder. (These graph edges are "horizontal": within + the same package.) First, analyzeImpl reads the source files of + package p, and obtains (recursively) the results of the "vertical" + dependencies (i.e. analyzers applied to the packages imported by + p). Only the subset of analyzers that use facts need be executed + recursively, but even if this subset is empty, the step is still + necessary because it provides type information. It is possible that + a package may need to be type-checked and analyzed twice, for + different subsets of analyzers, but the overlap is typically + insignificant. + + With the file contents and the results of vertical dependencies, + analyzeImpl is then in a position to produce a key representing the + unit of work (parsing, type-checking, and analysis) that it has to + do. The key is a cryptographic hash of the "recipe" for this step, + including the Metadata, the file contents, the set of analyzers, + and the type and fact information from the vertical dependencies. + + The key is sought in a machine-global persistent file-system based + cache. If this gopls process, or another gopls process on the same + machine, has already performed this analysis step, analyzeImpl will + make a cache hit and load the serialized summary of the results. If + not, it will have to proceed to type-checking and analysis, and + write a new cache entry. The entry contains serialized types + (export data) and analysis facts. + + For types, we use "shallow" export data. Historically, the Go + compiler always produced a summary of the types for a given package + that included types from other packages that it indirectly + referenced: "deep" export data. This had the advantage that the + compiler (and analogous tools such as gopls) need only load one + file per direct import. However, it meant that the files tended to + get larger based on the level of the package in the import + graph. For example, higher-level packages in the kubernetes module + have over 1MB of "deep" export data, even when they have almost no + content of their own, merely because they mention a major type that + references many others. In pathological cases the export data was + 300x larger than the source for a package due to this quadratic + growth. + + "Shallow" export data means that the serialized types describe only + a single package. If those types mention types from other packages, + the type checker may need to request additional packages beyond + just the direct imports. This means type information for the entire + transitive closure of imports may need to be available just in + case. After a cache hit or a cache miss, the summary is + postprocessed so that it contains the union of export data payloads + of all its direct dependencies. + + For correct dependency analysis, the digest used as a cache key + must reflect the "deep" export data, so it is derived recursively + from the transitive closure. As an optimization, we needn't include + every package of the transitive closure in the deep hash, only the + packages that were actually requested by the type checker. This + allows changes to a package that have no effect on its export data + to be "pruned". The direct consumer will need to be re-executed, + but if its export data is unchanged as a result, then indirect + consumers may not need to be re-executed. This allows, for example, + one to insert a print statement in a function and not "rebuild" the + whole application (though export data does record line numbers of + types which may be perturbed by otherwise insignificant changes.) + + The summary must record whether a package is transitively + error-free (whether it would compile) because many analyzers are + not safe to run on packages with inconsistent types. + + For fact encoding, we use the same fact set as the unitchecker + (vet) to record and serialize analysis facts. The fact + serialization mechanism is analogous to "deep" export data. + +*/ + +// TODO(adonovan): +// - Profile + optimize: +// - on a cold run, mostly type checking + export data, unsurprisingly. +// - on a hot-disk run, mostly type checking the IWL. +// Would be nice to have a benchmark that separates this out. +// - measure and record in the code the typical operation times +// and file sizes (export data + facts = cache entries). +// - Do "port the old logic" tasks (see TODO in actuallyAnalyze). +// - Add a (white-box) test of pruning when a change doesn't affect export data. +// - Optimise pruning based on subset of packages mentioned in exportdata. +// - Better logging so that it is possible to deduce why an analyzer +// is not being run--often due to very indirect failures. +// Even if the ultimate consumer decides to ignore errors, +// tests and other situations want to be assured of freedom from +// errors, not just missing results. This should be recorded. +// - Check that the event trace is intelligible. +// - Split this into a subpackage, gopls/internal/lsp/cache/driver, +// consisting of this file and three helpers from errors.go. +// The (*snapshot).Analyze method would stay behind and make calls +// to the driver package. +// Steps: +// - define a narrow driver.Snapshot interface with only these methods: +// Metadata(PackageID) source.Metadata +// GetFile(Context, URI) (source.FileHandle, error) +// View() *View // for Options +// - define a State type that encapsulates the persistent map +// (with its own mutex), and has methods: +// New() *State +// Clone(invalidate map[PackageID]bool) *State +// Destroy() +// - share cache.{goVersionRx,parseGoImpl} + +var born = time.Now() + +// Analyze applies a set of analyzers to the package denoted by id, +// and returns their diagnostics for that package. +// +// The analyzers list must be duplicate free; order does not matter. +// +// Precondition: all analyzers within the process have distinct names. +// (The names are relied on by the serialization logic.) +func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) { + if false { // debugging + log.Println("Analyze@", time.Since(born)) // called after the 7s IWL in k8s + } + + // Filter and sort enabled root analyzers. + // A disabled analyzer may still be run if required by another. + toSrc := make(map[*analysis.Analyzer]*source.Analyzer) + var enabled []*analysis.Analyzer + for _, a := range analyzers { + if a.IsEnabled(s.view.Options()) { + toSrc[a.Analyzer] = a + enabled = append(enabled, a.Analyzer) + } + } + sort.Slice(enabled, func(i, j int) bool { + return enabled[i].Name < enabled[j].Name + }) + + // Register fact types of required analyzers. + for _, a := range requiredAnalyzers(enabled) { + for _, f := range a.FactTypes { + gob.Register(f) + } + } + + if false { // debugging + // TODO(adonovan): use proper tracing. + t0 := time.Now() + defer func() { + log.Printf("%v for analyze(%s, %s)", time.Since(t0), id, enabled) + }() + } + + // Run the analysis. + res, err := s.analyze(ctx, id, enabled) + if err != nil { + return nil, err + } + + // Report diagnostics only from enabled actions that succeeded. + // Errors from creating or analyzing packages are ignored. + // Diagnostics are reported in the order of the analyzers argument. + // + // TODO(adonovan): ignoring action errors gives the caller no way + // to distinguish "there are no problems in this code" from + // "the code (or analyzers!) are so broken that we couldn't even + // begin the analysis you asked for". + // Even if current callers choose to discard the + // results, we should propagate the per-action errors. + var results []*source.Diagnostic + for _, a := range enabled { + summary := res.Actions[a.Name] + if summary.Err != "" { + continue // action failed + } + for _, gobDiag := range summary.Diagnostics { + results = append(results, toSourceDiagnostic(toSrc[a], &gobDiag)) + } + } + return results, nil +} + +// analysisKey is the type of keys in the snapshot.analyses map. +type analysisKey struct { + analyzerNames string + pkgid PackageID +} + +func (key analysisKey) String() string { + return fmt.Sprintf("%s@%s", key.analyzerNames, key.pkgid) +} + +// analyzeSummary is a gob-serializable summary of successfully +// applying a list of analyzers to a package. +type analyzeSummary struct { + PkgPath PackagePath // types.Package.Path() (needed to decode export data) + Export []byte + DeepExportHash source.Hash // hash of reflexive transitive closure of export data + Compiles bool // transitively free of list/parse/type errors + Actions actionsMap // map from analyzer name to analysis results (*actionSummary) + + // Not serialized: populated after the summary is computed or deserialized. + allExport map[PackagePath][]byte // transitive export data +} + +// actionsMap defines a stable Gob encoding for a map. +// TODO(adonovan): generalize and move to a library when we can use generics. +type actionsMap map[string]*actionSummary + +var _ gob.GobEncoder = (actionsMap)(nil) +var _ gob.GobDecoder = (*actionsMap)(nil) + +type actionsMapEntry struct { + K string + V *actionSummary +} + +func (m actionsMap) GobEncode() ([]byte, error) { + entries := make([]actionsMapEntry, 0, len(m)) + for k, v := range m { + entries = append(entries, actionsMapEntry{k, v}) + } + sort.Slice(entries, func(i, j int) bool { + return entries[i].K < entries[j].K + }) + var buf bytes.Buffer + err := gob.NewEncoder(&buf).Encode(entries) + return buf.Bytes(), err +} + +func (m *actionsMap) GobDecode(data []byte) error { + var entries []actionsMapEntry + if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil { + return err + } + *m = make(actionsMap, len(entries)) + for _, e := range entries { + (*m)[e.K] = e.V + } + return nil +} + +// actionSummary is a gob-serializable summary of one possibly failed analysis action. +// If Err is non-empty, the other fields are undefined. +type actionSummary struct { + Facts []byte // the encoded facts.Set + FactsHash source.Hash // hash(Facts) + Diagnostics []gobDiagnostic + Err string // "" => success +} + +// analyze is a memoization of analyzeImpl. +func (s *snapshot) analyze(ctx context.Context, id PackageID, analyzers []*analysis.Analyzer) (*analyzeSummary, error) { + // Use the sorted list of names of analyzers in the key. + // + // TODO(adonovan): opt: account for analysis results at a + // finer grain to avoid duplicate work when a + // a proper subset of analyzers is requested? + // In particular, TypeErrorAnalyzers don't use facts + // but need to request vdeps just for type information. + names := make([]string, 0, len(analyzers)) + for _, a := range analyzers { + names = append(names, a.Name) + } + // This key describes the result of applying a list of analyzers to a package. + key := analysisKey{strings.Join(names, ","), id} + + // An analysisPromise represents the result of loading, parsing, + // type-checking and analyzing a single package. + type analysisPromise struct { + promise *memoize.Promise // [analyzeImplResult] + } + + type analyzeImplResult struct { + summary *analyzeSummary + err error + } + + // Access the map once, briefly, and atomically. + s.mu.Lock() + entry, hit := s.analyses.Get(key) + if !hit { + entry = analysisPromise{ + promise: memoize.NewPromise("analysis", func(ctx context.Context, arg interface{}) interface{} { + summary, err := analyzeImpl(ctx, arg.(*snapshot), analyzers, id) + return analyzeImplResult{summary, err} + }), + } + s.analyses.Set(key, entry, nil) // nothing needs releasing + } + s.mu.Unlock() + + // Await result. + ap := entry.(analysisPromise) + v, err := s.awaitPromise(ctx, ap.promise) + if err != nil { + return nil, err // e.g. cancelled + } + res := v.(analyzeImplResult) + return res.summary, res.err +} + +// analyzeImpl applies a list of analyzers (plus any others +// transitively required by them) to a package. It succeeds as long +// as it could produce a types.Package, even if there were direct or +// indirect list/parse/type errors, and even if all the analysis +// actions failed. It usually fails only if the package was unknown, +// a file was missing, or the operation was cancelled. +// +// Postcondition: analyzeImpl must not continue to use the snapshot +// (in background goroutines) after it has returned; see memoize.RefCounted. +func analyzeImpl(ctx context.Context, snapshot *snapshot, analyzers []*analysis.Analyzer, id PackageID) (*analyzeSummary, error) { + m := snapshot.Metadata(id) + if m == nil { + return nil, fmt.Errorf("no metadata for %s", id) + } + + // Recursively analyze each "vertical" dependency + // for its types.Package and (perhaps) analysis.Facts. + // If any of them fails to produce a package, we cannot continue. + // We request only the analyzers that produce facts. + // + // Also, load the contents of each "compiled" Go file through + // the snapshot's cache. + // + // Both loops occur in parallel, and parallel with each other. + vdeps := make(map[PackageID]*analyzeSummary) + compiledGoFiles := make([]source.FileHandle, len(m.CompiledGoFiles)) + { + var group errgroup.Group + + // Analyze vertical dependencies. + // We request only the required analyzers that use facts. + var useFacts []*analysis.Analyzer + for _, a := range requiredAnalyzers(analyzers) { + if len(a.FactTypes) > 0 { + useFacts = append(useFacts, a) + } + } + var vdepsMu sync.Mutex + for _, id := range m.DepsByPkgPath { + id := id + group.Go(func() error { + res, err := snapshot.analyze(ctx, id, useFacts) + if err != nil { + return err // cancelled, or failed to produce a package + } + + vdepsMu.Lock() + vdeps[id] = res + vdepsMu.Unlock() + return nil + }) + } + + // Read file contents. + // (In practice these will be cache hits + // on reads done by the initial workspace load + // or after a change modification event.) + for i, uri := range m.CompiledGoFiles { + i, uri := i, uri + group.Go(func() error { + fh, err := snapshot.GetFile(ctx, uri) // ~25us + compiledGoFiles[i] = fh + return err // e.g. cancelled + }) + } + + if err := group.Wait(); err != nil { + return nil, err + } + } + + // Inv: analyze() of all vdeps succeeded (though some actions may have failed). + + // We no longer depend on the snapshot. + snapshot = nil + + // At this point we have the action results (serialized + // packages and facts) of our immediate dependencies, + // and the metadata and content of this package. + // + // We now compute a hash for all our inputs, and consult a + // global cache of promised results. If nothing material + // has changed, we'll make a hit in the shared cache. + // + // The hash of our inputs is based on the serialized export + // data and facts so that immaterial changes can be pruned + // without decoding. + key := analysisCacheKey(analyzers, m, compiledGoFiles, vdeps) + + // Access the cache. + var summary *analyzeSummary + const cacheKind = "analysis" + if data, err := filecache.Get(cacheKind, key); err == nil { + // cache hit + mustDecode(data, &summary) + + } else if err != filecache.ErrNotFound { + return nil, bug.Errorf("internal error reading shared cache: %v", err) + + } else { + // Cache miss: do the work. + var err error + summary, err = actuallyAnalyze(ctx, analyzers, m, vdeps, compiledGoFiles) + if err != nil { + return nil, err + } + data := mustEncode(summary) + if false { + log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), id) + } + if err := filecache.Set(cacheKind, key, data); err != nil { + return nil, fmt.Errorf("internal error updating shared cache: %v", err) + } + } + + // Hit or miss, we need to merge the export data from + // dependencies so that it includes all the types + // that might be summoned by the type checker. + // + // TODO(adonovan): opt: reduce this set by recording + // which packages were actually summoned by insert(). + // (Just makes map smaller; probably marginal?) + allExport := make(map[PackagePath][]byte) + for _, vdep := range vdeps { + for k, v := range vdep.allExport { + allExport[k] = v + } + } + allExport[m.PkgPath] = summary.Export + summary.allExport = allExport + + return summary, nil +} + +// analysisCacheKey returns a cache key that is a cryptographic digest +// of the all the values that might affect type checking and analysis: +// the analyzer names, package metadata, names and contents of +// compiled Go files, and vdeps information (export data and facts). +// +// TODO(adonovan): safety: define our own flavor of Metadata +// containing just the fields we need, and using it in the subsequent +// logic, to keep us honest about hashing all parts that matter? +func analysisCacheKey(analyzers []*analysis.Analyzer, m *source.Metadata, compiledGoFiles []source.FileHandle, vdeps map[PackageID]*analyzeSummary) [sha256.Size]byte { + hasher := sha256.New() + + // In principle, a key must be the hash of an + // unambiguous encoding of all the relevant data. + // If it's ambiguous, we risk collisions. + + // analyzers + fmt.Fprintf(hasher, "analyzers: %d\n", len(analyzers)) + for _, a := range analyzers { + fmt.Fprintln(hasher, a.Name) + } + + // package metadata + fmt.Fprintf(hasher, "package: %s %s %s\n", m.ID, m.Name, m.PkgPath) + // We can ignore m.DepsBy{Pkg,Import}Path: although the logic + // uses those fields, we account for them by hashing vdeps. + + // type sizes + // This assertion is safe, but if a black-box implementation + // is ever needed, record Sizeof(*int) and Alignof(int64). + sz := m.TypesSizes.(*types.StdSizes) + fmt.Fprintf(hasher, "sizes: %d %d\n", sz.WordSize, sz.MaxAlign) + + // metadata errors: used for 'compiles' field + fmt.Fprintf(hasher, "errors: %d", len(m.Errors)) + + // module Go version + if m.Module != nil && m.Module.GoVersion != "" { + fmt.Fprintf(hasher, "go %s\n", m.Module.GoVersion) + } + + // file names and contents + fmt.Fprintf(hasher, "files: %d\n", len(compiledGoFiles)) + for _, fh := range compiledGoFiles { + fmt.Fprintln(hasher, fh.FileIdentity()) + } + + // vdeps, in PackageID order + depIDs := make([]string, 0, len(vdeps)) + for depID := range vdeps { + depIDs = append(depIDs, string(depID)) + } + sort.Strings(depIDs) + for _, depID := range depIDs { + vdep := vdeps[PackageID(depID)] + fmt.Fprintf(hasher, "dep: %s\n", vdep.PkgPath) + fmt.Fprintf(hasher, "export: %s\n", vdep.DeepExportHash) + + // action results: errors and facts + names := make([]string, 0, len(vdep.Actions)) + for name := range vdep.Actions { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + summary := vdep.Actions[name] + fmt.Fprintf(hasher, "action %s\n", name) + if summary.Err != "" { + fmt.Fprintf(hasher, "error %s\n", summary.Err) + } else { + fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash) + // We can safely omit summary.diagnostics + // from the key since they have no downstream effect. + } + } + } + + var hash [sha256.Size]byte + hasher.Sum(hash[:0]) + return hash +} + +// actuallyAnalyze implements the cache-miss case. +// This function does not access the snapshot. +func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *source.Metadata, vdeps map[PackageID]*analyzeSummary, compiledGoFiles []source.FileHandle) (*analyzeSummary, error) { + + // Create a local FileSet for processing this package only. + fset := token.NewFileSet() + + // Parse only the "compiled" Go files. + // Do the computation in parallel. + parsed := make([]*source.ParsedGoFile, len(compiledGoFiles)) + { + var group errgroup.Group + for i, fh := range compiledGoFiles { + i, fh := i, fh + group.Go(func() error { + // Call parseGoImpl directly, not the caching wrapper, + // as cached ASTs require the global FileSet. + pgf, err := parseGoImpl(ctx, fset, fh, source.ParseFull) + parsed[i] = pgf + return err + }) + } + if err := group.Wait(); err != nil { + return nil, err // cancelled, or catastrophic error (e.g. missing file) + } + } + + // Type-check the package. + pkg := typeCheckForAnalysis(fset, parsed, m, vdeps) + + // Build a map of PkgPath to *Package for all packages mentioned + // in exportdata for use by facts. + pkg.factsDecoder = facts.NewDecoder(pkg.types) + + // Poll cancellation state. + if err := ctx.Err(); err != nil { + return nil, err + } + + // TODO(adonovan): port the old logic to: + // - gather go/packages diagnostics from m.Errors? (port goPackagesErrorDiagnostics) + // - record unparseable file URIs so we can suppress type errors for these files. + // - gather diagnostics from expandErrors + typeErrorDiagnostics + depsErrors. + + // -- analysis -- + + // Build action graph for this package. + // Each graph node (action) is one unit of analysis. + actions := make(map[*analysis.Analyzer]*action) + var mkAction func(a *analysis.Analyzer) *action + mkAction = func(a *analysis.Analyzer) *action { + act, ok := actions[a] + if !ok { + var hdeps []*action + for _, req := range a.Requires { + hdeps = append(hdeps, mkAction(req)) + } + act = &action{a: a, pkg: pkg, vdeps: vdeps, hdeps: hdeps} + actions[a] = act + } + return act + } + + // Build actions for initial package. + var roots []*action + for _, a := range analyzers { + roots = append(roots, mkAction(a)) + } + + // Execute the graph in parallel. + execActions(roots) + + // Don't return (or cache) the result in case of cancellation. + if err := ctx.Err(); err != nil { + return nil, err // cancelled + } + + // Return summaries only for the requested actions. + summaries := make(map[string]*actionSummary) + for _, act := range roots { + summaries[act.a.Name] = act.summary + } + + return &analyzeSummary{ + PkgPath: PackagePath(pkg.types.Path()), + Export: pkg.export, + DeepExportHash: pkg.deepExportHash, + Compiles: pkg.compiles, + Actions: summaries, + }, nil +} + +func typeCheckForAnalysis(fset *token.FileSet, parsed []*source.ParsedGoFile, m *source.Metadata, vdeps map[PackageID]*analyzeSummary) *analysisPackage { + if false { // debugging + log.Println("typeCheckForAnalysis", m.PkgPath) + } + + pkg := &analysisPackage{ + m: m, + fset: fset, + parsed: parsed, + files: make([]*ast.File, len(parsed)), + compiles: len(m.Errors) == 0, // false => list error + types: types.NewPackage(string(m.PkgPath), string(m.Name)), + typesInfo: &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + Scopes: make(map[ast.Node]*types.Scope), + }, + typesSizes: m.TypesSizes, + } + typeparams.InitInstanceInfo(pkg.typesInfo) + + for i, p := range parsed { + pkg.files[i] = p.File + if p.ParseErr != nil { + pkg.compiles = false // parse error + } + } + + // Unsafe is special. + if m.PkgPath == "unsafe" { + pkg.types = types.Unsafe + return pkg + } + + // Compute the union of transitive export data. + // (The actual values are shared, and not serialized.) + allExport := make(map[PackagePath][]byte) + for _, vdep := range vdeps { + for k, v := range vdep.allExport { + allExport[k] = v + } + + if !vdep.Compiles { + pkg.compiles = false // transitive error + } + } + + // exportHasher computes a hash of the names and export data of + // each package that was actually loaded during type checking. + // + // Because we use shallow export data, the hash for dependency + // analysis must incorporate indirect dependencies. As an + // optimization, we include only those that were actually + // used, which may be a small subset of those available. + // + // TODO(adonovan): opt: even better would be to implement a + // traversal over the package API like facts.NewDecoder does + // and only mention that set of packages in the hash. + // Perhaps there's a way to do that more efficiently. + // + // TODO(adonovan): opt: record the shallow hash alongside the + // shallow export data in the allExport map to avoid repeatedly + // hashing the export data. + // + // The writes to hasher below assume that type checking imports + // packages in a deterministic order. + exportHasher := sha256.New() + hashExport := func(pkgPath PackagePath, export []byte) { + fmt.Fprintf(exportHasher, "%s %d ", pkgPath, len(export)) + exportHasher.Write(export) + } + + // importer state + var ( + insert func(p *types.Package, name string) + importMap = make(map[string]*types.Package) // keys are PackagePaths + ) + loadFromExportData := func(pkgPath PackagePath) (*types.Package, error) { + export, ok := allExport[pkgPath] + if !ok { + return nil, bug.Errorf("missing export data for %q", pkgPath) + } + hashExport(pkgPath, export) + imported, err := gcimporter.IImportShallow(fset, importMap, export, string(pkgPath), insert) + if err != nil { + return nil, bug.Errorf("invalid export data for %q: %v", pkgPath, err) + } + return imported, nil + } + insert = func(p *types.Package, name string) { + imported, err := loadFromExportData(PackagePath(p.Path())) + if err != nil { + log.Fatalf("internal error: %v", err) + } + if imported != p { + log.Fatalf("internal error: inconsistent packages") + } + } + + cfg := &types.Config{ + Sizes: m.TypesSizes, + Error: func(e error) { + pkg.compiles = false // type error + pkg.typeErrors = append(pkg.typeErrors, e.(types.Error)) + }, + Importer: importerFunc(func(importPath string) (*types.Package, error) { + if importPath == "unsafe" { + return types.Unsafe, nil // unsafe has no export data + } + + // Beware that returning an error from this function + // will cause the type checker to synthesize a fake + // package whose Path is importPath, potentially + // losing a vendor/ prefix. If type-checking errors + // are swallowed, these packages may be confusing. + + id, ok := m.DepsByImpPath[ImportPath(importPath)] + if !ok { + // The import syntax is inconsistent with the metadata. + // This could be because the import declaration was + // incomplete and the metadata only includes complete + // imports; or because the metadata ignores import + // edges that would lead to cycles in the graph. + return nil, fmt.Errorf("missing metadata for import of %q", importPath) + } + + depResult, ok := vdeps[id] // id may be "" + if !ok { + // Analogous to (*snapshot).missingPkgError + // in the logic for regular type-checking, + // but without a snapshot we can't provide + // such detail, and anyway most analysis + // failures aren't surfaced in the UI. + return nil, fmt.Errorf("no required module provides package %q (id=%q)", importPath, id) + } + + // (Duplicates logic from check.go.) + if !source.IsValidImport(m.PkgPath, depResult.PkgPath) { + return nil, fmt.Errorf("invalid use of internal package %s", importPath) + } + + return loadFromExportData(depResult.PkgPath) + }), + } + + // Set Go dialect. + if m.Module != nil && m.Module.GoVersion != "" { + goVersion := "go" + m.Module.GoVersion + // types.NewChecker panics if GoVersion is invalid. + // An unparsable mod file should probably stop us + // before we get here, but double check just in case. + if goVersionRx.MatchString(goVersion) { + typesinternal.SetGoVersion(cfg, goVersion) + } + } + + // We want to type check cgo code if go/types supports it. + // We passed typecheckCgo to go/packages when we Loaded. + // TODO(adonovan): do we actually need this?? + typesinternal.SetUsesCgo(cfg) + + check := types.NewChecker(cfg, fset, pkg.types, pkg.typesInfo) + + // Type checking errors are handled via the config, so ignore them here. + _ = check.Files(pkg.files) + + // debugging (type errors are quite normal) + if false { + if pkg.typeErrors != nil { + log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors) + } + } + + // Emit the export data and compute the deep hash. + export, err := gcimporter.IExportShallow(pkg.fset, pkg.types) + if err != nil { + // TODO(adonovan): in light of exporter bugs such as #57729, + // consider using bug.Report here and retrying the IExportShallow + // call here using an empty types.Package. + log.Fatalf("internal error writing shallow export data: %v", err) + } + pkg.export = export + hashExport(m.PkgPath, export) + exportHasher.Sum(pkg.deepExportHash[:0]) + + return pkg +} + +// analysisPackage contains information about a package, including +// syntax trees, used transiently during its type-checking and analysis. +type analysisPackage struct { + m *source.Metadata + fset *token.FileSet // local to this package + parsed []*source.ParsedGoFile + files []*ast.File // same as parsed[i].File + types *types.Package + compiles bool // package is transitively free of list/parse/type errors + factsDecoder *facts.Decoder + export []byte // encoding of types.Package + deepExportHash source.Hash // reflexive transitive hash of export data + typesInfo *types.Info + typeErrors []types.Error + typesSizes types.Sizes +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). +type action struct { + once sync.Once + a *analysis.Analyzer + pkg *analysisPackage + hdeps []*action // horizontal dependencies + vdeps map[PackageID]*analyzeSummary // vertical dependencies + + // results of action.exec(): + result interface{} // result of Run function, of type a.ResultType + summary *actionSummary + err error +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.m.ID) +} + +// execActions executes a set of action graph nodes in parallel. +func execActions(actions []*action) { + var wg sync.WaitGroup + for _, act := range actions { + act := act + wg.Add(1) + go func() { + defer wg.Done() + act.once.Do(func() { + execActions(act.hdeps) // analyze "horizontal" dependencies + act.result, act.summary, act.err = act.exec() + if act.err != nil { + act.summary = &actionSummary{Err: act.err.Error()} + // TODO(adonovan): suppress logging. But + // shouldn't the root error's causal chain + // include this information? + if false { // debugging + log.Printf("act.exec(%v) failed: %v", act, act.err) + } + } + }) + }() + } + wg.Wait() +} + +// exec defines the execution of a single action. +// It returns the (ephemeral) result of the analyzer's Run function, +// along with its (serializable) facts and diagnostics. +// Or it returns an error if the analyzer did not run to +// completion and deliver a valid result. +func (act *action) exec() (interface{}, *actionSummary, error) { + analyzer := act.a + pkg := act.pkg + + hasFacts := len(analyzer.FactTypes) > 0 + + // Report an error if any action dependency (vertical or horizontal) failed. + // To avoid long error messages describing chains of failure, + // we return the dependencies' error' unadorned. + if hasFacts { + // TODO(adonovan): use deterministic order. + for _, res := range act.vdeps { + if vdep := res.Actions[analyzer.Name]; vdep.Err != "" { + return nil, nil, errors.New(vdep.Err) + } + } + } + for _, dep := range act.hdeps { + if dep.err != nil { + return nil, nil, dep.err + } + } + // Inv: all action dependencies succeeded. + + // Were there list/parse/type errors that might prevent analysis? + if !pkg.compiles && !analyzer.RunDespiteErrors { + return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.m.ID) + } + // Inv: package is well-formed enough to proceed with analysis. + + if false { // debugging + log.Println("action.exec", act) + } + + // Gather analysis Result values from horizontal dependencies. + var inputs = make(map[*analysis.Analyzer]interface{}) + for _, dep := range act.hdeps { + inputs[dep.a] = dep.result + } + + // TODO(adonovan): opt: facts.Set works but it may be more + // efficient to fork and tailor it to our precise needs. + // + // We've already sharded the fact encoding by action + // so that it can be done in parallel (hoisting the + // ImportMap call so that we build the map once per package). + // We could eliminate locking. + // We could also dovetail more closely with the export data + // decoder to obtain a more compact representation of + // packages and objects (e.g. its internal IDs, instead + // of PkgPaths and objectpaths.) + + // Read and decode analysis facts for each imported package. + factset, err := pkg.factsDecoder.Decode(func(imp *types.Package) ([]byte, error) { + if !hasFacts { + return nil, nil // analyzer doesn't use facts, so no vdeps + } + + // Package.Imports() may contain a fake "C" package. Ignore it. + if imp.Path() == "C" { + return nil, nil + } + + id, ok := pkg.m.DepsByPkgPath[PackagePath(imp.Path())] + if !ok { + // This may mean imp was synthesized by the type + // checker because it failed to import it for any reason + // (e.g. bug processing export data; metadata ignoring + // a cycle-forming import). + // In that case, the fake package's imp.Path + // is set to the failed importPath (and thus + // it may lack a "vendor/" prefix). + // + // For now, silently ignore it on the assumption + // that the error is already reported elsewhere. + // return nil, fmt.Errorf("missing metadata") + return nil, nil + } + + vdep, ok := act.vdeps[id] + if !ok { + return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id) + } + return vdep.Actions[analyzer.Name].Facts, nil + }) + if err != nil { + return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err) + } + + // TODO(adonovan): make Export*Fact panic rather than discarding + // undeclared fact types, so that we discover bugs in analyzers. + factFilter := make(map[reflect.Type]bool) + for _, f := range analyzer.FactTypes { + factFilter[reflect.TypeOf(f)] = true + } + + // posToLocation converts from token.Pos to protocol form. + // TODO(adonovan): improve error messages. + posToLocation := func(start, end token.Pos) (protocol.Location, error) { + tokFile := pkg.fset.File(start) + for _, p := range pkg.parsed { + if p.Tok == tokFile { + if end == token.NoPos { + end = start + } + return p.PosLocation(start, end) + } + } + return protocol.Location{}, + bug.Errorf("internal error: token.Pos not within package") + } + + // Now run the (pkg, analyzer) action. + var diagnostics []gobDiagnostic + pass := &analysis.Pass{ + Analyzer: analyzer, + Fset: pkg.fset, + Files: pkg.files, + Pkg: pkg.types, + TypesInfo: pkg.typesInfo, + TypesSizes: pkg.typesSizes, + TypeErrors: pkg.typeErrors, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { + // Prefix the diagnostic category with the analyzer's name. + if d.Category == "" { + d.Category = analyzer.Name + } else { + d.Category = analyzer.Name + "." + d.Category + } + + diagnostic, err := toGobDiagnostic(posToLocation, d) + if err != nil { + bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err) + return + } + diagnostics = append(diagnostics, diagnostic) + }, + ImportObjectFact: factset.ImportObjectFact, + ExportObjectFact: factset.ExportObjectFact, + ImportPackageFact: factset.ImportPackageFact, + ExportPackageFact: factset.ExportPackageFact, + AllObjectFacts: func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) }, + AllPackageFacts: func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) }, + } + + // Recover from panics (only) within the analyzer logic. + // (Use an anonymous function to limit the recover scope.) + var result interface{} + func() { + defer func() { + if r := recover(); r != nil { + // An Analyzer panicked, likely due to a bug. + // + // In general we want to discover and fix such panics quickly, + // so we don't suppress them, but some bugs in third-party + // analyzers cannot be quickly fixed, so we use an allowlist + // to suppress panics. + const strict = true + if strict && bug.PanicOnBugs && + analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343 + // Uncomment this when debugging suspected failures + // in the driver, not the analyzer. + if false { + debug.SetTraceback("all") // show all goroutines + } + panic(r) + } else { + // In production, suppress the panic and press on. + err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r) + } + } + }() + result, err = pass.Analyzer.Run(pass) + }() + if err != nil { + return nil, nil, err + } + + if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want { + return nil, nil, bug.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + + // Disallow Export*Fact calls after Run. + // (A panic means the Analyzer is abusing concurrency.) + pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) { + panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)) + } + pass.ExportPackageFact = func(fact analysis.Fact) { + panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact)) + } + + factsdata := factset.Encode() + return result, &actionSummary{ + Diagnostics: diagnostics, + Facts: factsdata, + FactsHash: source.HashOf(factsdata), + }, nil +} + +// requiredAnalyzers returns the transitive closure of required analyzers in preorder. +func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer { + var result []*analysis.Analyzer + seen := make(map[*analysis.Analyzer]bool) + var visitAll func([]*analysis.Analyzer) + visitAll = func(analyzers []*analysis.Analyzer) { + for _, a := range analyzers { + if !seen[a] { + seen[a] = true + result = append(result, a) + visitAll(a.Requires) + } + } + } + visitAll(analyzers) + return result +} + +func mustEncode(x interface{}) []byte { + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(x); err != nil { + log.Fatalf("internal error encoding %T: %v", x, err) + } + return buf.Bytes() +} + +func mustDecode(data []byte, ptr interface{}) { + if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil { + log.Fatalf("internal error decoding %T: %v", ptr, err) + } +} + +// -- data types for serialization of analysis.Diagnostic -- + +type gobDiagnostic struct { + Location protocol.Location + Category string + Message string + SuggestedFixes []gobSuggestedFix + Related []gobRelatedInformation +} + +type gobRelatedInformation struct { + Location protocol.Location + Message string +} + +type gobSuggestedFix struct { + Message string + TextEdits []gobTextEdit +} + +type gobTextEdit struct { + Location protocol.Location + NewText []byte +} + +// toGobDiagnostic converts an analysis.Diagnosic to a serializable gobDiagnostic, +// which requires expanding token.Pos positions into protocol.Location form. +func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), diag analysis.Diagnostic) (gobDiagnostic, error) { + var fixes []gobSuggestedFix + for _, fix := range diag.SuggestedFixes { + var gobEdits []gobTextEdit + for _, textEdit := range fix.TextEdits { + loc, err := posToLocation(textEdit.Pos, textEdit.End) + if err != nil { + return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err) + } + gobEdits = append(gobEdits, gobTextEdit{ + Location: loc, + NewText: textEdit.NewText, + }) + } + fixes = append(fixes, gobSuggestedFix{ + Message: fix.Message, + TextEdits: gobEdits, + }) + } + + var related []gobRelatedInformation + for _, r := range diag.Related { + loc, err := posToLocation(r.Pos, r.End) + if err != nil { + return gobDiagnostic{}, fmt.Errorf("in Related: %w", err) + } + related = append(related, gobRelatedInformation{ + Location: loc, + Message: r.Message, + }) + } + + loc, err := posToLocation(diag.Pos, diag.End) + if err != nil { + return gobDiagnostic{}, err + } + return gobDiagnostic{ + Location: loc, + Category: diag.Category, + Message: diag.Message, + Related: related, + SuggestedFixes: fixes, + }, nil +} diff --git a/gopls/internal/lsp/cache/cache.go b/gopls/internal/lsp/cache/cache.go new file mode 100644 index 00000000000..edf1d0eeae6 --- /dev/null +++ b/gopls/internal/lsp/cache/cache.go @@ -0,0 +1,187 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + "html/template" + "reflect" + "sort" + "strconv" + "sync/atomic" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/robustio" +) + +// New Creates a new cache for gopls operation results, using the given file +// set, shared store, and session options. +// +// Both the fset and store may be nil, but if store is non-nil so must be fset +// (and they must always be used together), otherwise it may be possible to get +// cached data referencing token.Pos values not mapped by the FileSet. +func New(fset *token.FileSet, store *memoize.Store) *Cache { + index := atomic.AddInt64(&cacheIndex, 1) + + if store != nil && fset == nil { + panic("non-nil store with nil fset") + } + if fset == nil { + fset = token.NewFileSet() + } + if store == nil { + store = &memoize.Store{} + } + + c := &Cache{ + id: strconv.FormatInt(index, 10), + fset: fset, + store: store, + memoizedFS: &memoizedFS{filesByID: map[robustio.FileID][]*DiskFile{}}, + } + return c +} + +// A Cache holds caching stores that are bundled together for consistency. +// +// TODO(rfindley): once fset and store need not be bundled together, the Cache +// type can be eliminated. +type Cache struct { + id string + fset *token.FileSet + + store *memoize.Store + + *memoizedFS // implements source.FileSource +} + +// NewSession creates a new gopls session with the given cache and options overrides. +// +// The provided optionsOverrides may be nil. +// +// TODO(rfindley): move this to session.go. +func NewSession(ctx context.Context, c *Cache, optionsOverrides func(*source.Options)) *Session { + index := atomic.AddInt64(&sessionIndex, 1) + options := source.DefaultOptions().Clone() + if optionsOverrides != nil { + optionsOverrides(options) + } + s := &Session{ + id: strconv.FormatInt(index, 10), + cache: c, + gocmdRunner: &gocommand.Runner{}, + options: options, + overlayFS: newOverlayFS(c), + } + event.Log(ctx, "New session", KeyCreateSession.Of(s)) + return s +} + +var cacheIndex, sessionIndex, viewIndex int64 + +func (c *Cache) ID() string { return c.id } +func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() } + +type packageStat struct { + id PackageID + mode source.ParseMode + file int64 + ast int64 + types int64 + typesInfo int64 + total int64 +} + +func (c *Cache) PackageStats(withNames bool) template.HTML { + var packageStats []packageStat + c.store.DebugOnlyIterate(func(k, v interface{}) { + switch k.(type) { + case packageHandleKey: + v := v.(typeCheckResult) + if v.pkg == nil { + break + } + typsCost := typesCost(v.pkg.types.Scope()) + typInfoCost := typesInfoCost(v.pkg.typesInfo) + stat := packageStat{ + id: v.pkg.id, + mode: v.pkg.mode, + types: typsCost, + typesInfo: typInfoCost, + } + for _, f := range v.pkg.compiledGoFiles { + stat.file += int64(len(f.Src)) + stat.ast += astCost(f.File) + } + stat.total = stat.file + stat.ast + stat.types + stat.typesInfo + packageStats = append(packageStats, stat) + } + }) + var totalCost int64 + for _, stat := range packageStats { + totalCost += stat.total + } + sort.Slice(packageStats, func(i, j int) bool { + return packageStats[i].total > packageStats[j].total + }) + html := "\n" + human := func(n int64) string { + return fmt.Sprintf("%.2f", float64(n)/(1024*1024)) + } + var printedCost int64 + for _, stat := range packageStats { + name := stat.id + if !withNames { + name = "-" + } + html += fmt.Sprintf("\n", name, stat.mode, + human(stat.total), human(stat.file), human(stat.ast), human(stat.types), human(stat.typesInfo)) + printedCost += stat.total + if float64(printedCost) > float64(totalCost)*.9 { + break + } + } + html += "
    Nametotal = file + ast + types + types info
    %v (%v)%v = %v + %v + %v + %v
    \n" + return template.HTML(html) +} + +func astCost(f *ast.File) int64 { + if f == nil { + return 0 + } + var count int64 + ast.Inspect(f, func(_ ast.Node) bool { + count += 32 // nodes are pretty small. + return true + }) + return count +} + +func typesCost(scope *types.Scope) int64 { + cost := 64 + int64(scope.Len())*128 // types.object looks pretty big + for i := 0; i < scope.NumChildren(); i++ { + cost += typesCost(scope.Child(i)) + } + return cost +} + +func typesInfoCost(info *types.Info) int64 { + // Most of these refer to existing objects, with the exception of InitOrder, Selections, and Types. + cost := 24*len(info.Defs) + + 32*len(info.Implicits) + + 256*len(info.InitOrder) + // these are big, but there aren't many of them. + 32*len(info.Scopes) + + 128*len(info.Selections) + // wild guess + 128*len(info.Types) + // wild guess + 32*len(info.Uses) + return int64(cost) +} diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go new file mode 100644 index 00000000000..b8d7d0298d5 --- /dev/null +++ b/gopls/internal/lsp/cache/check.go @@ -0,0 +1,854 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "go/ast" + "go/types" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + + "golang.org/x/mod/module" + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/lsp/source/xrefs" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A packageKey identifies a packageHandle in the snapshot.packages map. +type packageKey struct { + mode source.ParseMode + id PackageID +} + +type packageHandleKey source.Hash + +// A packageHandle holds package information, some of which may not be fully +// evaluated. +// +// The only methods on packageHandle that are safe to call before calling await +// are Metadata and await itself. +type packageHandle struct { + // TODO(rfindley): remove metadata from packageHandle. It is only used for + // bug detection. + m *source.Metadata + + // key is the hashed key for the package. + // + // It includes the all bits of the transitive closure of + // dependencies's sources. This is more than type checking + // really depends on: export data of direct deps should be + // enough. (The key for analysis actions could similarly + // hash only Facts of direct dependencies.) + key packageHandleKey + + // The shared type-checking promise. + promise *memoize.Promise // [typeCheckResult] +} + +// typeCheckInputs contains the inputs of a call to typeCheckImpl, which +// type-checks a package. +type typeCheckInputs struct { + id PackageID + pkgPath PackagePath + name PackageName + mode source.ParseMode + goFiles, compiledGoFiles []source.FileHandle + sizes types.Sizes + deps map[PackageID]*packageHandle + depsByImpPath map[ImportPath]PackageID + goVersion string // packages.Module.GoVersion, e.g. "1.18" +} + +// typeCheckResult contains the result of a call to +// typeCheckImpl, which type-checks a package. +type typeCheckResult struct { + pkg *syntaxPackage + err error +} + +// buildPackageHandle returns a handle for the future results of +// type-checking the package identified by id in the given mode. +// It assumes that the given ID already has metadata available, so it does not +// attempt to reload missing or invalid metadata. The caller must reload +// metadata if needed. +func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) { + packageKey := packageKey{id: id, mode: mode} + + s.mu.Lock() + entry, hit := s.packages.Get(packageKey) + m := s.meta.metadata[id] + s.mu.Unlock() + + if m == nil { + return nil, fmt.Errorf("no metadata for %s", id) + } + + if hit { + return entry.(*packageHandle), nil + } + + // Begin computing the key by getting the depKeys for all dependencies. + // This requires reading the transitive closure of dependencies' source files. + // + // It is tempting to parallelize the recursion here, but + // without de-duplication of subtasks this would lead to an + // exponential amount of work, and computing the key is + // expensive as it reads all the source files transitively. + // Notably, we don't update the s.packages cache until the + // entire key has been computed. + // TODO(adonovan): use a promise cache to ensure that the key + // for each package is computed by at most one thread, then do + // the recursive key building of dependencies in parallel. + deps := make(map[PackageID]*packageHandle) + for _, depID := range m.DepsByPkgPath { + depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID)) + // Don't use invalid metadata for dependencies if the top-level + // metadata is valid. We only load top-level packages, so if the + // top-level is valid, all of its dependencies should be as well. + if err != nil { + event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, source.SnapshotLabels(s)...) + + // This check ensures we break out of the slow + // buildPackageHandle recursion quickly when + // context cancelation is detected within GetFile. + if ctx.Err() != nil { + return nil, ctx.Err() // cancelled + } + + // One bad dependency should not prevent us from + // checking the entire package. Leave depKeys[i] unset. + continue + } + deps[depID] = depHandle + } + + // Read both lists of files of this package, in parallel. + // + // goFiles aren't presented to the type checker--nor + // are they included in the key, unsoundly--but their + // syntax trees are available from (*pkg).File(URI). + // TODO(adonovan): consider parsing them on demand? + // The need should be rare. + goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m) + if err != nil { + return nil, err + } + + goVersion := "" + if m.Module != nil && m.Module.GoVersion != "" { + goVersion = m.Module.GoVersion + } + + inputs := typeCheckInputs{ + id: m.ID, + pkgPath: m.PkgPath, + name: m.Name, + mode: mode, + goFiles: goFiles, + compiledGoFiles: compiledGoFiles, + sizes: m.TypesSizes, + deps: deps, + depsByImpPath: m.DepsByImpPath, + goVersion: goVersion, + } + + // All the file reading has now been done. + // Create a handle for the result of type checking. + phKey := computePackageKey(s, inputs) + promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} { + pkg, err := typeCheckImpl(ctx, arg.(*snapshot), inputs) + return typeCheckResult{pkg, err} + }) + + ph := &packageHandle{ + promise: promise, + m: m, + key: phKey, + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Check that the metadata has not changed + // (which should invalidate this handle). + // + // (In future, handles should form a graph with edges from a + // packageHandle to the handles for parsing its files and the + // handles for type-checking its immediate deps, at which + // point there will be no need to even access s.meta.) + if s.meta.metadata[ph.m.ID] != ph.m { + // TODO(rfindley): this should be bug.Errorf. + return nil, fmt.Errorf("stale metadata for %s", ph.m.ID) + } + + // Check cache again in case another goroutine got there first. + if prev, ok := s.packages.Get(packageKey); ok { + prevPH := prev.(*packageHandle) + release() + if prevPH.m != ph.m { + return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID) + } + return prevPH, nil + } + + // Update the map. + s.packages.Set(packageKey, ph, func(_, _ interface{}) { release() }) + + return ph, nil +} + +// readGoFiles reads the content of Metadata.GoFiles and +// Metadata.CompiledGoFiles, in parallel. +func readGoFiles(ctx context.Context, s *snapshot, m *source.Metadata) (goFiles, compiledGoFiles []source.FileHandle, err error) { + var group errgroup.Group + getFileHandles := func(files []span.URI) []source.FileHandle { + fhs := make([]source.FileHandle, len(files)) + for i, uri := range files { + i, uri := i, uri + group.Go(func() (err error) { + fhs[i], err = s.GetFile(ctx, uri) // ~25us + return + }) + } + return fhs + } + return getFileHandles(m.GoFiles), + getFileHandles(m.CompiledGoFiles), + group.Wait() +} + +func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { + s.mu.Lock() + defer s.mu.Unlock() + _, ws := s.workspacePackages[id] + if !ws { + return source.ParseExported + } + if s.view.Options().MemoryMode == source.ModeNormal { + return source.ParseFull + } + if s.isActiveLocked(id) { + return source.ParseFull + } + return source.ParseExported +} + +// computePackageKey returns a key representing the act of type checking +// a package named id containing the specified files, metadata, and +// combined dependency hash. +func computePackageKey(s *snapshot, inputs typeCheckInputs) packageHandleKey { + hasher := sha256.New() + + // In principle, a key must be the hash of an + // unambiguous encoding of all the relevant data. + // If it's ambiguous, we risk collisions. + + // package identifiers + fmt.Fprintf(hasher, "package: %s %s %s\n", inputs.id, inputs.name, inputs.pkgPath) + + // module Go version + fmt.Fprintf(hasher, "go %s\n", inputs.goVersion) + + // parse mode + fmt.Fprintf(hasher, "mode %d\n", inputs.mode) + + // import map + importPaths := make([]string, 0, len(inputs.depsByImpPath)) + for impPath := range inputs.depsByImpPath { + importPaths = append(importPaths, string(impPath)) + } + sort.Strings(importPaths) + for _, impPath := range importPaths { + fmt.Fprintf(hasher, "import %s %s", impPath, string(inputs.depsByImpPath[ImportPath(impPath)])) + } + + // deps, in PackageID order + depIDs := make([]string, 0, len(inputs.deps)) + for depID := range inputs.deps { + depIDs = append(depIDs, string(depID)) + } + sort.Strings(depIDs) + for _, depID := range depIDs { + dep := inputs.deps[PackageID(depID)] + fmt.Fprintf(hasher, "dep: %s key:%s\n", dep.m.PkgPath, dep.key) + } + + // file names and contents + fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(inputs.compiledGoFiles)) + for _, fh := range inputs.compiledGoFiles { + fmt.Fprintln(hasher, fh.FileIdentity()) + } + fmt.Fprintf(hasher, "goFiles: %d\n", len(inputs.goFiles)) + for _, fh := range inputs.goFiles { + fmt.Fprintln(hasher, fh.FileIdentity()) + } + + // types sizes + sz := inputs.sizes.(*types.StdSizes) + fmt.Fprintf(hasher, "sizes: %d %d\n", sz.WordSize, sz.MaxAlign) + + var hash [sha256.Size]byte + hasher.Sum(hash[:0]) + return packageHandleKey(hash) +} + +// await waits for typeCheckImpl to complete and returns its result. +func (ph *packageHandle) await(ctx context.Context, s *snapshot) (*syntaxPackage, error) { + v, err := s.awaitPromise(ctx, ph.promise) + if err != nil { + return nil, err + } + data := v.(typeCheckResult) + return data.pkg, data.err +} + +func (ph *packageHandle) cached() (*syntaxPackage, error) { + v := ph.promise.Cached() + if v == nil { + return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath) + } + data := v.(typeCheckResult) + return data.pkg, data.err +} + +// typeCheckImpl type checks the parsed source files in compiledGoFiles. +// (The resulting pkg also holds the parsed but not type-checked goFiles.) +// deps holds the future results of type-checking the direct dependencies. +func typeCheckImpl(ctx context.Context, snapshot *snapshot, inputs typeCheckInputs) (*syntaxPackage, error) { + // Start type checking of direct dependencies, + // in parallel and asynchronously. + // As the type checker imports each of these + // packages, it will wait for its completion. + var wg sync.WaitGroup + for _, dep := range inputs.deps { + wg.Add(1) + go func(dep *packageHandle) { + dep.await(ctx, snapshot) // ignore result + wg.Done() + }(dep) + } + // The 'defer' below is unusual but intentional: + // it is not necessary that each call to dep.check + // complete before type checking begins, as the type + // checker will wait for those it needs. But they do + // need to complete before this function returns and + // the snapshot is possibly destroyed. + defer wg.Wait() + + var filter *unexportedFilter + if inputs.mode == source.ParseExported { + filter = &unexportedFilter{uses: map[string]bool{}} + } + pkg, err := doTypeCheck(ctx, snapshot, inputs, filter) + if err != nil { + return nil, err + } + + if inputs.mode == source.ParseExported { + // The AST filtering is a little buggy and may remove things it + // shouldn't. If we only got undeclared name errors, try one more + // time keeping those names. + missing, unexpected := filter.ProcessErrors(pkg.typeErrors) + if len(unexpected) == 0 && len(missing) != 0 { + pkg, err = doTypeCheck(ctx, snapshot, inputs, filter) + if err != nil { + return nil, err + } + missing, unexpected = filter.ProcessErrors(pkg.typeErrors) + } + if len(unexpected) != 0 || len(missing) != 0 { + pkg, err = doTypeCheck(ctx, snapshot, inputs, nil) + if err != nil { + return nil, err + } + } + } + + // We don't care about a package's errors unless we have parsed it in full. + if inputs.mode != source.ParseFull { + return pkg, nil + } + + // Our heuristic for whether to show type checking errors is: + // + If any file was 'fixed', don't show type checking errors as we + // can't guarantee that they reference accurate locations in the source. + // + If there is a parse error _in the current file_, suppress type + // errors in that file. + // + Otherwise, show type errors even in the presence of parse errors in + // other package files. go/types attempts to suppress follow-on errors + // due to bad syntax, so on balance type checking errors still provide + // a decent signal/noise ratio as long as the file in question parses. + + // Track URIs with parse errors so that we can suppress type errors for these + // files. + unparseable := map[span.URI]bool{} + for _, e := range pkg.parseErrors { + diags, err := parseErrorDiagnostics(snapshot, pkg, e) + if err != nil { + event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(string(inputs.id))) + continue + } + for _, diag := range diags { + unparseable[diag.URI] = true + pkg.diagnostics = append(pkg.diagnostics, diag) + } + } + + if pkg.hasFixedFiles { + return pkg, nil + } + + unexpanded := pkg.typeErrors + pkg.typeErrors = nil + for _, e := range expandErrors(unexpanded, snapshot.View().Options().RelatedInformationSupported) { + diags, err := typeErrorDiagnostics(snapshot, pkg, e) + if err != nil { + event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(string(inputs.id))) + continue + } + pkg.typeErrors = append(pkg.typeErrors, e.primary) + for _, diag := range diags { + // If the file didn't parse cleanly, it is highly likely that type + // checking errors will be confusing or redundant. But otherwise, type + // checking usually provides a good enough signal to include. + if !unparseable[diag.URI] { + pkg.diagnostics = append(pkg.diagnostics, diag) + } + } + } + + return pkg, nil +} + +var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) + +func doTypeCheck(ctx context.Context, snapshot *snapshot, inputs typeCheckInputs, astFilter *unexportedFilter) (*syntaxPackage, error) { + ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(inputs.id))) + defer done() + + pkg := &syntaxPackage{ + id: inputs.id, + mode: inputs.mode, + fset: snapshot.FileSet(), // must match parse call below (snapshot.ParseGo for now) + types: types.NewPackage(string(inputs.pkgPath), string(inputs.name)), + typesInfo: &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + Scopes: make(map[ast.Node]*types.Scope), + }, + } + typeparams.InitInstanceInfo(pkg.typesInfo) + + // Parse the non-compiled GoFiles. (These aren't presented to + // the type checker but are part of the returned pkg.) + // TODO(adonovan): opt: parallelize parsing. + for _, fh := range inputs.goFiles { + goMode := inputs.mode + if inputs.mode == source.ParseExported { + // This package is being loaded only for type information, + // to which non-compiled Go files are irrelevant, + // so parse only the header. + goMode = source.ParseHeader + } + pgf, err := snapshot.ParseGo(ctx, fh, goMode) + if err != nil { + return nil, err + } + pkg.goFiles = append(pkg.goFiles, pgf) + } + + // Parse the CompiledGoFiles: those seen by the compiler/typechecker. + if err := parseCompiledGoFiles(ctx, inputs.compiledGoFiles, snapshot, inputs.mode, pkg, astFilter); err != nil { + return nil, err + } + + // Use the default type information for the unsafe package. + if inputs.pkgPath == "unsafe" { + // Don't type check Unsafe: it's unnecessary, and doing so exposes a data + // race to Unsafe.completed. + // TODO(adonovan): factor (tail-merge) with the normal control path. + pkg.types = types.Unsafe + pkg.methodsets = methodsets.NewIndex(pkg.fset, pkg.types) + pkg.xrefs = xrefs.Index(pkg.compiledGoFiles, pkg.types, pkg.typesInfo) + return pkg, nil + } + + if len(pkg.compiledGoFiles) == 0 { + // No files most likely means go/packages failed. + // + // TODO(rfindley): in the past, we would capture go list errors in this + // case, to present go list errors to the user. However we had no tests for + // this behavior. It is unclear if anything better can be done here. + return nil, fmt.Errorf("no parsed files for package %s", inputs.pkgPath) + } + + cfg := &types.Config{ + Sizes: inputs.sizes, + Error: func(e error) { + pkg.typeErrors = append(pkg.typeErrors, e.(types.Error)) + }, + Importer: importerFunc(func(path string) (*types.Package, error) { + // While all of the import errors could be reported + // based on the metadata before we start type checking, + // reporting them via types.Importer places the errors + // at the correct source location. + id, ok := inputs.depsByImpPath[ImportPath(path)] + if !ok { + // If the import declaration is broken, + // go list may fail to report metadata about it. + // See TestFixImportDecl for an example. + return nil, fmt.Errorf("missing metadata for import of %q", path) + } + dep, ok := inputs.deps[id] // id may be "" + if !ok { + return nil, snapshot.missingPkgError(path) + } + if !source.IsValidImport(inputs.pkgPath, dep.m.PkgPath) { + return nil, fmt.Errorf("invalid use of internal package %s", path) + } + depPkg, err := dep.await(ctx, snapshot) + if err != nil { + return nil, err + } + return depPkg.types, nil + }), + } + + if inputs.goVersion != "" { + goVersion := "go" + inputs.goVersion + // types.NewChecker panics if GoVersion is invalid. An unparsable mod + // file should probably stop us before we get here, but double check + // just in case. + if goVersionRx.MatchString(goVersion) { + typesinternal.SetGoVersion(cfg, goVersion) + } + } + + if inputs.mode != source.ParseFull { + cfg.DisableUnusedImportCheck = true + cfg.IgnoreFuncBodies = true + } + + // We want to type check cgo code if go/types supports it. + // We passed typecheckCgo to go/packages when we Loaded. + typesinternal.SetUsesCgo(cfg) + + check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo) + + var files []*ast.File + for _, cgf := range pkg.compiledGoFiles { + files = append(files, cgf.File) + } + + // Type checking errors are handled via the config, so ignore them here. + _ = check.Files(files) // 50us-15ms, depending on size of package + + // Build global index of method sets for 'implementations' queries. + pkg.methodsets = methodsets.NewIndex(pkg.fset, pkg.types) + + // Build global index of outbound cross-references. + pkg.xrefs = xrefs.Index(pkg.compiledGoFiles, pkg.types, pkg.typesInfo) + + // If the context was cancelled, we may have returned a ton of transient + // errors to the type checker. Swallow them. + if ctx.Err() != nil { + return nil, ctx.Err() + } + return pkg, nil +} + +func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHandle, snapshot *snapshot, mode source.ParseMode, pkg *syntaxPackage, astFilter *unexportedFilter) error { + // TODO(adonovan): opt: parallelize this loop, which takes 1-25ms. + for _, fh := range compiledGoFiles { + var pgf *source.ParsedGoFile + var err error + // Only parse Full through the cache -- we need to own Exported ASTs + // to prune them. + if mode == source.ParseFull { + pgf, err = snapshot.ParseGo(ctx, fh, mode) + } else { + pgf, err = parseGoImpl(ctx, pkg.fset, fh, mode) // ~20us/KB + } + if err != nil { + return err + } + pkg.compiledGoFiles = append(pkg.compiledGoFiles, pgf) + if pgf.ParseErr != nil { + pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr) + } + // If we have fixed parse errors in any of the files, we should hide type + // errors, as they may be completely nonsensical. + pkg.hasFixedFiles = pkg.hasFixedFiles || pgf.Fixed + } + + // Optionally remove parts that don't affect the exported API. + if mode == source.ParseExported { + // TODO(adonovan): opt: experiment with pre-parser + // trimming, either a scanner-based implementation + // such as https://go.dev/play/p/KUrObH1YkX8 (~31% + // speedup), or a byte-oriented implementation (2x + // speedup). + if astFilter != nil { + // aggressive pruning based on reachability + var files []*ast.File + for _, cgf := range pkg.compiledGoFiles { + files = append(files, cgf.File) + } + astFilter.Filter(files) + } else { + // simple trimming of function bodies + for _, cgf := range pkg.compiledGoFiles { + trimAST(cgf.File) + } + } + } + + return nil +} + +// depsErrors creates diagnostics for each metadata error (e.g. import cycle). +// These may be attached to import declarations in the transitive source files +// of pkg, or to 'requires' declarations in the package's go.mod file. +// +// TODO(rfindley): move this to errors.go +func (s *snapshot) depsErrors(ctx context.Context, pkg *syntaxPackage, depsErrors []*packagesinternal.PackageError) ([]*source.Diagnostic, error) { + // Select packages that can't be found, and were imported in non-workspace packages. + // Workspace packages already show their own errors. + var relevantErrors []*packagesinternal.PackageError + for _, depsError := range depsErrors { + // Up to Go 1.15, the missing package was included in the stack, which + // was presumably a bug. We want the next one up. + directImporterIdx := len(depsError.ImportStack) - 1 + if directImporterIdx < 0 { + continue + } + + directImporter := depsError.ImportStack[directImporterIdx] + if s.isWorkspacePackage(PackageID(directImporter)) { + continue + } + relevantErrors = append(relevantErrors, depsError) + } + + // Don't build the import index for nothing. + if len(relevantErrors) == 0 { + return nil, nil + } + + // Build an index of all imports in the package. + type fileImport struct { + cgf *source.ParsedGoFile + imp *ast.ImportSpec + } + allImports := map[string][]fileImport{} + for _, cgf := range pkg.compiledGoFiles { + // TODO(adonovan): modify Imports() to accept a single token.File (cgf.Tok). + for _, group := range astutil.Imports(pkg.fset, cgf.File) { + for _, imp := range group { + if imp.Path == nil { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + allImports[path] = append(allImports[path], fileImport{cgf, imp}) + } + } + } + + // Apply a diagnostic to any import involved in the error, stopping once + // we reach the workspace. + var errors []*source.Diagnostic + for _, depErr := range relevantErrors { + for i := len(depErr.ImportStack) - 1; i >= 0; i-- { + item := depErr.ImportStack[i] + if s.isWorkspacePackage(PackageID(item)) { + break + } + + for _, imp := range allImports[item] { + rng, err := imp.cgf.NodeRange(imp.imp) + if err != nil { + return nil, err + } + fixes, err := goGetQuickFixes(s, imp.cgf.URI, item) + if err != nil { + return nil, err + } + errors = append(errors, &source.Diagnostic{ + URI: imp.cgf.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.TypeError, + Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), + SuggestedFixes: fixes, + }) + } + } + } + + if len(pkg.compiledGoFiles) == 0 { + return errors, nil + } + mod := s.GoModForFile(pkg.compiledGoFiles[0].URI) + if mod == "" { + return errors, nil + } + fh, err := s.GetFile(ctx, mod) + if err != nil { + return nil, err + } + pm, err := s.ParseMod(ctx, fh) + if err != nil { + return nil, err + } + + // Add a diagnostic to the module that contained the lowest-level import of + // the missing package. + for _, depErr := range relevantErrors { + for i := len(depErr.ImportStack) - 1; i >= 0; i-- { + item := depErr.ImportStack[i] + m := s.Metadata(PackageID(item)) + if m == nil || m.Module == nil { + continue + } + modVer := module.Version{Path: m.Module.Path, Version: m.Module.Version} + reference := findModuleReference(pm.File, modVer) + if reference == nil { + continue + } + rng, err := pm.Mapper.OffsetRange(reference.Start.Byte, reference.End.Byte) + if err != nil { + return nil, err + } + fixes, err := goGetQuickFixes(s, pm.URI, item) + if err != nil { + return nil, err + } + errors = append(errors, &source.Diagnostic{ + URI: pm.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.TypeError, + Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), + SuggestedFixes: fixes, + }) + break + } + } + return errors, nil +} + +// missingPkgError returns an error message for a missing package that varies +// based on the user's workspace mode. +func (s *snapshot) missingPkgError(pkgPath string) error { + var b strings.Builder + if s.workspaceMode()&moduleMode == 0 { + gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath)) + fmt.Fprintf(&b, "cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg) + for _, gopath := range filepath.SplitList(s.view.gopath) { + gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath)) + fmt.Fprintf(&b, "\n\t%s (from $GOPATH)", gopathSrcPkg) + } + } else { + fmt.Fprintf(&b, "no required module provides package %q", pkgPath) + if err := s.getInitializationError(); err != nil { + fmt.Fprintf(&b, "\n(workspace configuration error: %s)", err.MainError) + } + } + return errors.New(b.String()) +} + +type extendedError struct { + primary types.Error + secondaries []types.Error +} + +func (e extendedError) Error() string { + return e.primary.Error() +} + +// expandErrors duplicates "secondary" errors by mapping them to their main +// error. Some errors returned by the type checker are followed by secondary +// errors which give more information about the error. These are errors in +// their own right, and they are marked by starting with \t. For instance, when +// there is a multiply-defined function, the secondary error points back to the +// definition first noticed. +// +// This function associates the secondary error with its primary error, which can +// then be used as RelatedInformation when the error becomes a diagnostic. +// +// If supportsRelatedInformation is false, the secondary is instead embedded as +// additional context in the primary error. +func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extendedError { + var result []extendedError + for i := 0; i < len(errs); { + original := extendedError{ + primary: errs[i], + } + for i++; i < len(errs); i++ { + spl := errs[i] + if len(spl.Msg) == 0 || spl.Msg[0] != '\t' { + break + } + spl.Msg = spl.Msg[1:] + original.secondaries = append(original.secondaries, spl) + } + + // Clone the error to all its related locations -- VS Code, at least, + // doesn't do it for us. + result = append(result, original) + for i, mainSecondary := range original.secondaries { + // Create the new primary error, with a tweaked message, in the + // secondary's location. We need to start from the secondary to + // capture its unexported location fields. + relocatedSecondary := mainSecondary + if supportsRelatedInformation { + relocatedSecondary.Msg = fmt.Sprintf("%v (see details)", original.primary.Msg) + } else { + relocatedSecondary.Msg = fmt.Sprintf("%v (this error: %v)", original.primary.Msg, mainSecondary.Msg) + } + relocatedSecondary.Soft = original.primary.Soft + + // Copy over the secondary errors, noting the location of the + // current error we're cloning. + clonedError := extendedError{primary: relocatedSecondary, secondaries: []types.Error{original.primary}} + for j, secondary := range original.secondaries { + if i == j { + secondary.Msg += " (this error)" + } + clonedError.secondaries = append(clonedError.secondaries, secondary) + } + result = append(result, clonedError) + } + + } + return result +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/gopls/internal/lsp/cache/debug.go b/gopls/internal/lsp/cache/debug.go new file mode 100644 index 00000000000..fd82aff301e --- /dev/null +++ b/gopls/internal/lsp/cache/debug.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "os" + "sort" +) + +// This file contains helpers that can be used to instrument code while +// debugging. + +// debugEnabled toggles the helpers below. +const debugEnabled = false + +// If debugEnabled is true, debugf formats its arguments and prints to stderr. +// If debugEnabled is false, it is a no-op. +func debugf(format string, args ...interface{}) { + if !debugEnabled { + return + } + if false { + _ = fmt.Sprintf(format, args...) // encourage vet to validate format strings + } + fmt.Fprintf(os.Stderr, ">>> "+format+"\n", args...) +} + +// If debugEnabled is true, dumpWorkspace prints a summary of workspace +// packages to stderr. If debugEnabled is false, it is a no-op. +func (s *snapshot) dumpWorkspace(context string) { + if !debugEnabled { + return + } + + debugf("workspace (after %s):", context) + var ids []PackageID + for id := range s.workspacePackages { + ids = append(ids, id) + } + + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + + for _, id := range ids { + pkgPath := s.workspacePackages[id] + _, ok := s.meta.metadata[id] + debugf(" %s:%s (metadata: %t)", id, pkgPath, ok) + } +} diff --git a/gopls/internal/lsp/cache/errors.go b/gopls/internal/lsp/cache/errors.go new file mode 100644 index 00000000000..92e2330a974 --- /dev/null +++ b/gopls/internal/lsp/cache/errors.go @@ -0,0 +1,383 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +// This file defines routines to convert diagnostics from go list, go +// get, go/packages, parsing, type checking, and analysis into +// source.Diagnostic form, and suggesting quick fixes. + +import ( + "fmt" + "go/scanner" + "go/types" + "log" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/packages" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/typesinternal" +) + +func goPackagesErrorDiagnostics(e packages.Error, pkg *syntaxPackage, fromDir string) (diags []*source.Diagnostic, rerr error) { + if diag, ok := parseGoListImportCycleError(e, pkg); ok { + return []*source.Diagnostic{diag}, nil + } + + var spn span.Span + if e.Pos == "" { + spn = parseGoListError(e.Msg, fromDir) + // We may not have been able to parse a valid span. Apply the errors to all files. + if _, err := spanToRange(pkg, spn); err != nil { + var diags []*source.Diagnostic + for _, pgf := range pkg.compiledGoFiles { + diags = append(diags, &source.Diagnostic{ + URI: pgf.URI, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: e.Msg, + }) + } + return diags, nil + } + } else { + spn = span.ParseInDir(e.Pos, fromDir) + } + + // TODO(rfindley): in some cases the go command outputs invalid spans, for + // example (from TestGoListErrors): + // + // package a + // import + // + // In this case, the go command will complain about a.go:2:8, which is after + // the trailing newline but still considered to be on the second line, most + // likely because *token.File lacks information about newline termination. + // + // We could do better here by handling that case. + rng, err := spanToRange(pkg, spn) + if err != nil { + return nil, err + } + return []*source.Diagnostic{{ + URI: spn.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: e.Msg, + }}, nil +} + +func parseErrorDiagnostics(snapshot *snapshot, pkg *syntaxPackage, errList scanner.ErrorList) ([]*source.Diagnostic, error) { + // The first parser error is likely the root cause of the problem. + if errList.Len() <= 0 { + return nil, fmt.Errorf("no errors in %v", errList) + } + e := errList[0] + pgf, err := pkg.File(span.URIFromPath(e.Pos.Filename)) + if err != nil { + return nil, err + } + rng, err := pgf.Mapper.OffsetRange(e.Pos.Offset, e.Pos.Offset) + if err != nil { + return nil, err + } + return []*source.Diagnostic{{ + URI: pgf.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ParseError, + Message: e.Msg, + }}, nil +} + +var importErrorRe = regexp.MustCompile(`could not import ([^\s]+)`) +var unsupportedFeatureRe = regexp.MustCompile(`.*require.* go(\d+\.\d+) or later`) + +func typeErrorDiagnostics(snapshot *snapshot, pkg *syntaxPackage, e extendedError) ([]*source.Diagnostic, error) { + code, loc, err := typeErrorData(pkg, e.primary) + if err != nil { + return nil, err + } + diag := &source.Diagnostic{ + URI: loc.URI.SpanURI(), + Range: loc.Range, + Severity: protocol.SeverityError, + Source: source.TypeError, + Message: e.primary.Msg, + } + if code != 0 { + diag.Code = code.String() + diag.CodeHref = typesCodeHref(snapshot, code) + } + switch code { + case typesinternal.UnusedVar, typesinternal.UnusedImport: + diag.Tags = append(diag.Tags, protocol.Unnecessary) + } + + for _, secondary := range e.secondaries { + _, secondaryLoc, err := typeErrorData(pkg, secondary) + if err != nil { + return nil, err + } + diag.Related = append(diag.Related, source.RelatedInformation{ + URI: secondaryLoc.URI.SpanURI(), + Range: secondaryLoc.Range, + Message: secondary.Msg, + }) + } + + if match := importErrorRe.FindStringSubmatch(e.primary.Msg); match != nil { + diag.SuggestedFixes, err = goGetQuickFixes(snapshot, loc.URI.SpanURI(), match[1]) + if err != nil { + return nil, err + } + } + if match := unsupportedFeatureRe.FindStringSubmatch(e.primary.Msg); match != nil { + diag.SuggestedFixes, err = editGoDirectiveQuickFix(snapshot, loc.URI.SpanURI(), match[1]) + if err != nil { + return nil, err + } + } + return []*source.Diagnostic{diag}, nil +} + +func goGetQuickFixes(snapshot *snapshot, uri span.URI, pkg string) ([]source.SuggestedFix, error) { + // Go get only supports module mode for now. + if snapshot.workspaceMode()&moduleMode == 0 { + return nil, nil + } + title := fmt.Sprintf("go get package %v", pkg) + cmd, err := command.NewGoGetPackageCommand(title, command.GoGetPackageArgs{ + URI: protocol.URIFromSpanURI(uri), + AddRequire: true, + Pkg: pkg, + }) + if err != nil { + return nil, err + } + return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil +} + +func editGoDirectiveQuickFix(snapshot *snapshot, uri span.URI, version string) ([]source.SuggestedFix, error) { + // Go mod edit only supports module mode. + if snapshot.workspaceMode()&moduleMode == 0 { + return nil, nil + } + title := fmt.Sprintf("go mod edit -go=%s", version) + cmd, err := command.NewEditGoDirectiveCommand(title, command.EditGoDirectiveArgs{ + URI: protocol.URIFromSpanURI(uri), + Version: version, + }) + if err != nil { + return nil, err + } + return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil +} + +// toSourceDiagnostic converts a gobDiagnostic to "source" form. +func toSourceDiagnostic(srcAnalyzer *source.Analyzer, gobDiag *gobDiagnostic) *source.Diagnostic { + kinds := srcAnalyzer.ActionKind + if len(srcAnalyzer.ActionKind) == 0 { + kinds = append(kinds, protocol.QuickFix) + } + fixes := suggestedAnalysisFixes(gobDiag, kinds) + if srcAnalyzer.Fix != "" { + cmd, err := command.NewApplyFixCommand(gobDiag.Message, command.ApplyFixArgs{ + URI: gobDiag.Location.URI, + Range: gobDiag.Location.Range, + Fix: srcAnalyzer.Fix, + }) + if err != nil { + // JSON marshalling of these argument values cannot fail. + log.Fatalf("internal error in NewApplyFixCommand: %v", err) + } + for _, kind := range kinds { + fixes = append(fixes, source.SuggestedFixFromCommand(cmd, kind)) + } + } + + severity := srcAnalyzer.Severity + if severity == 0 { + severity = protocol.SeverityWarning + } + diag := &source.Diagnostic{ + // TODO(adonovan): is this sound? See dual conversion in posToLocation. + URI: span.URI(gobDiag.Location.URI), + Range: gobDiag.Location.Range, + Severity: severity, + Source: source.AnalyzerErrorKind(gobDiag.Category), + Message: gobDiag.Message, + Related: relatedInformation(gobDiag), + SuggestedFixes: fixes, + } + // If the fixes only delete code, assume that the diagnostic is reporting dead code. + if onlyDeletions(fixes) { + diag.Tags = []protocol.DiagnosticTag{protocol.Unnecessary} + } + return diag +} + +// onlyDeletions returns true if all of the suggested fixes are deletions. +func onlyDeletions(fixes []source.SuggestedFix) bool { + for _, fix := range fixes { + if fix.Command != nil { + return false + } + for _, edits := range fix.Edits { + for _, edit := range edits { + if edit.NewText != "" { + return false + } + if protocol.ComparePosition(edit.Range.Start, edit.Range.End) == 0 { + return false + } + } + } + } + return len(fixes) > 0 +} + +func typesCodeHref(snapshot *snapshot, code typesinternal.ErrorCode) string { + target := snapshot.View().Options().LinkTarget + return source.BuildLink(target, "golang.org/x/tools/internal/typesinternal", code.String()) +} + +func suggestedAnalysisFixes(diag *gobDiagnostic, kinds []protocol.CodeActionKind) []source.SuggestedFix { + var fixes []source.SuggestedFix + for _, fix := range diag.SuggestedFixes { + edits := make(map[span.URI][]protocol.TextEdit) + for _, e := range fix.TextEdits { + uri := span.URI(e.Location.URI) + edits[uri] = append(edits[uri], protocol.TextEdit{ + Range: e.Location.Range, + NewText: string(e.NewText), + }) + } + for _, kind := range kinds { + fixes = append(fixes, source.SuggestedFix{ + Title: fix.Message, + Edits: edits, + ActionKind: kind, + }) + } + + } + return fixes +} + +func relatedInformation(diag *gobDiagnostic) []source.RelatedInformation { + var out []source.RelatedInformation + for _, related := range diag.Related { + out = append(out, source.RelatedInformation{ + URI: span.URI(related.Location.URI), + Range: related.Location.Range, + Message: related.Message, + }) + } + return out +} + +func typeErrorData(pkg *syntaxPackage, terr types.Error) (typesinternal.ErrorCode, protocol.Location, error) { + ecode, start, end, ok := typesinternal.ReadGo116ErrorData(terr) + if !ok { + start, end = terr.Pos, terr.Pos + ecode = 0 + } + // go/types may return invalid positions in some cases, such as + // in errors on tokens missing from the syntax tree. + if !start.IsValid() { + return 0, protocol.Location{}, fmt.Errorf("type error (%q, code %d, go116=%t) without position", terr.Msg, ecode, ok) + } + // go/types errors retain their FileSet. + // Sanity-check that we're using the right one. + fset := pkg.fset + if fset != terr.Fset { + return 0, protocol.Location{}, bug.Errorf("wrong FileSet for type error") + } + posn := safetoken.StartPosition(fset, start) + if !posn.IsValid() { + return 0, protocol.Location{}, fmt.Errorf("position %d of type error %q (code %q) not found in FileSet", start, start, terr) + } + pgf, err := pkg.File(span.URIFromPath(posn.Filename)) + if err != nil { + return 0, protocol.Location{}, err + } + if !end.IsValid() || end == start { + end = analysisinternal.TypeErrorEndPos(fset, pgf.Src, start) + } + loc, err := pgf.Mapper.PosLocation(pgf.Tok, start, end) + return ecode, loc, err +} + +// spanToRange converts a span.Span to a protocol.Range, +// assuming that the span belongs to the package whose diagnostics are being computed. +func spanToRange(pkg *syntaxPackage, spn span.Span) (protocol.Range, error) { + pgf, err := pkg.File(spn.URI()) + if err != nil { + return protocol.Range{}, err + } + return pgf.Mapper.SpanRange(spn) +} + +// parseGoListError attempts to parse a standard `go list` error message +// by stripping off the trailing error message. +// +// It works only on errors whose message is prefixed by colon, +// followed by a space (": "). For example: +// +// attributes.go:13:1: expected 'package', found 'type' +func parseGoListError(input, wd string) span.Span { + input = strings.TrimSpace(input) + msgIndex := strings.Index(input, ": ") + if msgIndex < 0 { + return span.Parse(input) + } + return span.ParseInDir(input[:msgIndex], wd) +} + +func parseGoListImportCycleError(e packages.Error, pkg *syntaxPackage) (*source.Diagnostic, bool) { + re := regexp.MustCompile(`(.*): import stack: \[(.+)\]`) + matches := re.FindStringSubmatch(strings.TrimSpace(e.Msg)) + if len(matches) < 3 { + return nil, false + } + msg := matches[1] + importList := strings.Split(matches[2], " ") + // Since the error is relative to the current package. The import that is causing + // the import cycle error is the second one in the list. + if len(importList) < 2 { + return nil, false + } + // Imports have quotation marks around them. + circImp := strconv.Quote(importList[1]) + for _, pgf := range pkg.compiledGoFiles { + // Search file imports for the import that is causing the import cycle. + for _, imp := range pgf.File.Imports { + if imp.Path.Value == circImp { + rng, err := pgf.NodeMappedRange(imp) + if err != nil { + return nil, false + } + + return &source.Diagnostic{ + URI: pgf.URI, + Range: rng.Range(), + Severity: protocol.SeverityError, + Source: source.ListError, + Message: msg, + }, true + } + } + } + return nil, false +} diff --git a/internal/lsp/cache/error_test.go b/gopls/internal/lsp/cache/errors_test.go similarity index 100% rename from internal/lsp/cache/error_test.go rename to gopls/internal/lsp/cache/errors_test.go diff --git a/gopls/internal/lsp/cache/fs_memoized.go b/gopls/internal/lsp/cache/fs_memoized.go new file mode 100644 index 00000000000..9acd872762f --- /dev/null +++ b/gopls/internal/lsp/cache/fs_memoized.go @@ -0,0 +1,149 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "os" + "sync" + "time" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/robustio" +) + +// A memoizedFS is a file source that memoizes reads, to reduce IO. +type memoizedFS struct { + mu sync.Mutex + + // filesByID maps existing file inodes to the result of a read. + // (The read may have failed, e.g. due to EACCES or a delete between stat+read.) + // Each slice is a non-empty list of aliases: different URIs. + filesByID map[robustio.FileID][]*DiskFile +} + +func newMemoizedFS() *memoizedFS { + return &memoizedFS{filesByID: make(map[robustio.FileID][]*DiskFile)} +} + +// A DiskFile is a file on the filesystem, or a failure to read one. +// It implements the source.FileHandle interface. +type DiskFile struct { + uri span.URI + modTime time.Time + content []byte + hash source.Hash + err error +} + +func (h *DiskFile) URI() span.URI { return h.uri } + +func (h *DiskFile) FileIdentity() source.FileIdentity { + return source.FileIdentity{ + URI: h.uri, + Hash: h.hash, + } +} + +func (h *DiskFile) Saved() bool { return true } +func (h *DiskFile) Version() int32 { return 0 } +func (h *DiskFile) Read() ([]byte, error) { return h.content, h.err } + +// GetFile stats and (maybe) reads the file, updates the cache, and returns it. +func (fs *memoizedFS) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { + id, mtime, err := robustio.GetFileID(uri.Filename()) + if err != nil { + // file does not exist + return &DiskFile{ + err: err, + uri: uri, + }, nil + } + + // We check if the file has changed by comparing modification times. Notably, + // this is an imperfect heuristic as various systems have low resolution + // mtimes (as much as 1s on WSL or s390x builders), so we only cache + // filehandles if mtime is old enough to be reliable, meaning that we don't + // expect a subsequent write to have the same mtime. + // + // The coarsest mtime precision we've seen in practice is 1s, so consider + // mtime to be unreliable if it is less than 2s old. Capture this before + // doing anything else. + recentlyModified := time.Since(mtime) < 2*time.Second + + fs.mu.Lock() + fhs, ok := fs.filesByID[id] + if ok && fhs[0].modTime.Equal(mtime) { + var fh *DiskFile + // We have already seen this file and it has not changed. + for _, h := range fhs { + if h.uri == uri { + fh = h + break + } + } + // No file handle for this exact URI. Create an alias, but share content. + if fh == nil { + newFH := *fhs[0] + newFH.uri = uri + fh = &newFH + fhs = append(fhs, fh) + fs.filesByID[id] = fhs + } + fs.mu.Unlock() + return fh, nil + } + fs.mu.Unlock() + + // Unknown file, or file has changed. Read (or re-read) it. + fh, err := readFile(ctx, uri, mtime) // ~25us + if err != nil { + return nil, err // e.g. cancelled (not: read failed) + } + + fs.mu.Lock() + if !recentlyModified { + fs.filesByID[id] = []*DiskFile{fh} + } else { + delete(fs.filesByID, id) + } + fs.mu.Unlock() + return fh, nil +} + +// ioLimit limits the number of parallel file reads per process. +var ioLimit = make(chan struct{}, 128) + +func readFile(ctx context.Context, uri span.URI, mtime time.Time) (*DiskFile, error) { + select { + case ioLimit <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + defer func() { <-ioLimit }() + + ctx, done := event.Start(ctx, "cache.readFile", tag.File.Of(uri.Filename())) + _ = ctx + defer done() + + // It is possible that a race causes us to read a file with different file + // ID, or whose mtime differs from the given mtime. However, in these cases + // we expect the client to notify of a subsequent file change, and the file + // content should be eventually consistent. + content, err := os.ReadFile(uri.Filename()) // ~20us + if err != nil { + content = nil // just in case + } + return &DiskFile{ + modTime: mtime, + uri: uri, + content: content, + hash: source.HashOf(content), + err: err, + }, nil +} diff --git a/gopls/internal/lsp/cache/fs_overlay.go b/gopls/internal/lsp/cache/fs_overlay.go new file mode 100644 index 00000000000..36a7194cebb --- /dev/null +++ b/gopls/internal/lsp/cache/fs_overlay.go @@ -0,0 +1,78 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "sync" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +// An overlayFS is a source.FileSource that keeps track of overlays on top of a +// delegate FileSource. +type overlayFS struct { + delegate source.FileSource + + mu sync.Mutex + overlays map[span.URI]*Overlay +} + +func newOverlayFS(delegate source.FileSource) *overlayFS { + return &overlayFS{ + delegate: delegate, + overlays: make(map[span.URI]*Overlay), + } +} + +// Overlays returns a new unordered array of overlays. +func (fs *overlayFS) Overlays() []*Overlay { + fs.mu.Lock() + defer fs.mu.Unlock() + overlays := make([]*Overlay, 0, len(fs.overlays)) + for _, overlay := range fs.overlays { + overlays = append(overlays, overlay) + } + return overlays +} + +func (fs *overlayFS) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { + fs.mu.Lock() + overlay, ok := fs.overlays[uri] + fs.mu.Unlock() + if ok { + return overlay, nil + } + return fs.delegate.GetFile(ctx, uri) +} + +// An Overlay is a file open in the editor. It may have unsaved edits. +// It implements the source.FileHandle interface. +type Overlay struct { + uri span.URI + content []byte + hash source.Hash + version int32 + kind source.FileKind + + // saved is true if a file matches the state on disk, + // and therefore does not need to be part of the overlay sent to go/packages. + saved bool +} + +func (o *Overlay) URI() span.URI { return o.uri } + +func (o *Overlay) FileIdentity() source.FileIdentity { + return source.FileIdentity{ + URI: o.uri, + Hash: o.hash, + } +} + +func (o *Overlay) Read() ([]byte, error) { return o.content, nil } +func (o *Overlay) Version() int32 { return o.version } +func (o *Overlay) Saved() bool { return o.saved } +func (o *Overlay) Kind() source.FileKind { return o.kind } diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go new file mode 100644 index 00000000000..8e9e5d92c4f --- /dev/null +++ b/gopls/internal/lsp/cache/graph.go @@ -0,0 +1,124 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "sort" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +// A metadataGraph is an immutable and transitively closed import +// graph of Go packages, as obtained from go/packages. +type metadataGraph struct { + // metadata maps package IDs to their associated metadata. + metadata map[PackageID]*source.Metadata + + // importedBy maps package IDs to the list of packages that import them. + importedBy map[PackageID][]PackageID + + // ids maps file URIs to package IDs, sorted by (!valid, cli, packageID). + // A single file may belong to multiple packages due to tests packages. + ids map[span.URI][]PackageID +} + +// Clone creates a new metadataGraph, applying the given updates to the +// receiver. +func (g *metadataGraph) Clone(updates map[PackageID]*source.Metadata) *metadataGraph { + if len(updates) == 0 { + // Optimization: since the graph is immutable, we can return the receiver. + return g + } + result := &metadataGraph{metadata: make(map[PackageID]*source.Metadata, len(g.metadata))} + // Copy metadata. + for id, m := range g.metadata { + result.metadata[id] = m + } + for id, m := range updates { + if m == nil { + delete(result.metadata, id) + } else { + result.metadata[id] = m + } + } + result.build() + return result +} + +// build constructs g.importedBy and g.uris from g.metadata. +func (g *metadataGraph) build() { + // Build the import graph. + g.importedBy = make(map[PackageID][]PackageID) + for id, m := range g.metadata { + for _, depID := range m.DepsByPkgPath { + g.importedBy[depID] = append(g.importedBy[depID], id) + } + } + + // Collect file associations. + g.ids = make(map[span.URI][]PackageID) + for id, m := range g.metadata { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + for uri := range uris { + g.ids[uri] = append(g.ids[uri], id) + } + } + + // Sort and filter file associations. + for uri, ids := range g.ids { + sort.Slice(ids, func(i, j int) bool { + cli := source.IsCommandLineArguments(ids[i]) + clj := source.IsCommandLineArguments(ids[j]) + if cli != clj { + return clj + } + + // 2. packages appear in name order. + return ids[i] < ids[j] + }) + + // Choose the best IDs for each URI, according to the following rules: + // - If there are any valid real packages, choose them. + // - Else, choose the first valid command-line-argument package, if it exists. + // + // TODO(rfindley): it might be better to track all IDs here, and exclude + // them later when type checking, but this is the existing behavior. + for i, id := range ids { + // If we've seen *anything* prior to command-line arguments package, take + // it. Note that ids[0] may itself be command-line-arguments. + if i > 0 && source.IsCommandLineArguments(id) { + g.ids[uri] = ids[:i] + break + } + } + } +} + +// reverseReflexiveTransitiveClosure returns a new mapping containing the +// metadata for the specified packages along with any package that +// transitively imports one of them, keyed by ID, including all the initial packages. +func (g *metadataGraph) reverseReflexiveTransitiveClosure(ids ...PackageID) map[PackageID]*source.Metadata { + seen := make(map[PackageID]*source.Metadata) + var visitAll func([]PackageID) + visitAll = func(ids []PackageID) { + for _, id := range ids { + if seen[id] == nil { + if m := g.metadata[id]; m != nil { + seen[id] = m + visitAll(g.importedBy[id]) + } + } + } + } + visitAll(ids) + return seen +} diff --git a/gopls/internal/lsp/cache/imports.go b/gopls/internal/lsp/cache/imports.go new file mode 100644 index 00000000000..46b8d151fc5 --- /dev/null +++ b/gopls/internal/lsp/cache/imports.go @@ -0,0 +1,188 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "fmt" + "reflect" + "strings" + "sync" + "time" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" +) + +type importsState struct { + ctx context.Context + + mu sync.Mutex + processEnv *imports.ProcessEnv + cacheRefreshDuration time.Duration + cacheRefreshTimer *time.Timer + cachedModFileHash source.Hash + cachedBuildFlags []string + cachedDirectoryFilters []string + + // runOnce records whether runProcessEnvFunc has been called at least once. + // This is necessary to avoid resetting state before the process env is + // populated. + // + // TODO(rfindley): this shouldn't be necessary. + runOnce bool +} + +func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Find the hash of active mod files, if any. Using the unsaved content + // is slightly wasteful, since we'll drop caches a little too often, but + // the mod file shouldn't be changing while people are autocompleting. + // + // TODO(rfindley): consider instead hashing on-disk modfiles here. + var modFileHash source.Hash + for m := range snapshot.workspaceModFiles { + fh, err := snapshot.GetFile(ctx, m) + if err != nil { + return err + } + modFileHash.XORWith(fh.FileIdentity().Hash) + } + + // view.goEnv is immutable -- changes make a new view. Options can change. + // We can't compare build flags directly because we may add -modfile. + snapshot.view.optionsMu.Lock() + localPrefix := snapshot.view.options.Local + currentBuildFlags := snapshot.view.options.BuildFlags + currentDirectoryFilters := snapshot.view.options.DirectoryFilters + changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) || + snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) || + modFileHash != s.cachedModFileHash || + !reflect.DeepEqual(snapshot.view.options.DirectoryFilters, s.cachedDirectoryFilters) + snapshot.view.optionsMu.Unlock() + + // If anything relevant to imports has changed, clear caches and + // update the processEnv. Clearing caches blocks on any background + // scans. + if changed { + // As a special case, skip cleanup the first time -- we haven't fully + // initialized the environment yet and calling GetResolver will do + // unnecessary work and potentially mess up the go.mod file. + if s.runOnce { + if resolver, err := s.processEnv.GetResolver(); err == nil { + if modResolver, ok := resolver.(*imports.ModuleResolver); ok { + modResolver.ClearForNewMod() + } + } + } + + s.cachedModFileHash = modFileHash + s.cachedBuildFlags = currentBuildFlags + s.cachedDirectoryFilters = currentDirectoryFilters + if err := s.populateProcessEnv(ctx, snapshot); err != nil { + return err + } + s.runOnce = true + } + + // Run the user function. + opts := &imports.Options{ + // Defaults. + AllErrors: true, + Comments: true, + Fragment: true, + FormatOnly: false, + TabIndent: true, + TabWidth: 8, + Env: s.processEnv, + LocalPrefix: localPrefix, + } + + if err := fn(opts); err != nil { + return err + } + + if s.cacheRefreshTimer == nil { + // Don't refresh more than twice per minute. + delay := 30 * time.Second + // Don't spend more than a couple percent of the time refreshing. + if adaptive := 50 * s.cacheRefreshDuration; adaptive > delay { + delay = adaptive + } + s.cacheRefreshTimer = time.AfterFunc(delay, s.refreshProcessEnv) + } + + return nil +} + +// populateProcessEnv sets the dynamically configurable fields for the view's +// process environment. Assumes that the caller is holding the s.view.importsMu. +func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapshot) error { + pe := s.processEnv + + if snapshot.view.Options().VerboseOutput { + pe.Logf = func(format string, args ...interface{}) { + event.Log(ctx, fmt.Sprintf(format, args...)) + } + } else { + pe.Logf = nil + } + + // Extract invocation details from the snapshot to use with goimports. + // + // TODO(rfindley): refactor to extract the necessary invocation logic into + // separate functions. Using goCommandInvocation is unnecessarily indirect, + // and has led to memory leaks in the past, when the snapshot was + // unintentionally held past its lifetime. + _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ + WorkingDir: snapshot.view.workingDir().Filename(), + }) + if err != nil { + return err + } + + pe.BuildFlags = inv.BuildFlags + pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile + pe.Env = map[string]string{} + for _, kv := range inv.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) != 2 { + continue + } + pe.Env[split[0]] = split[1] + } + // We don't actually use the invocation, so clean it up now. + cleanupInvocation() + // TODO(rfindley): should this simply be inv.WorkingDir? + pe.WorkingDir = snapshot.view.workingDir().Filename() + return nil +} + +func (s *importsState) refreshProcessEnv() { + start := time.Now() + + s.mu.Lock() + env := s.processEnv + if resolver, err := s.processEnv.GetResolver(); err == nil { + resolver.ClearForNewScan() + } + s.mu.Unlock() + + event.Log(s.ctx, "background imports cache refresh starting") + if err := imports.PrimeCache(context.Background(), env); err == nil { + event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start))) + } else { + event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err)) + } + s.mu.Lock() + s.cacheRefreshDuration = time.Since(start) + s.cacheRefreshTimer = nil + s.mu.Unlock() +} diff --git a/internal/lsp/cache/keys.go b/gopls/internal/lsp/cache/keys.go similarity index 100% rename from internal/lsp/cache/keys.go rename to gopls/internal/lsp/cache/keys.go diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go new file mode 100644 index 00000000000..98a8fece431 --- /dev/null +++ b/gopls/internal/lsp/cache/load.go @@ -0,0 +1,726 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "context" + "errors" + "fmt" + "path/filepath" + "sort" + "strings" + "sync/atomic" + "time" + + "golang.org/x/tools/go/packages" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +var loadID uint64 // atomic identifier for loads + +// errNoPackages indicates that a load query matched no packages. +var errNoPackages = errors.New("no packages returned") + +// load calls packages.Load for the given scopes, updating package metadata, +// import graph, and mapped files with the result. +// +// The resulting error may wrap the moduleErrorMap error type, representing +// errors associated with specific modules. +func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadScope) (err error) { + id := atomic.AddUint64(&loadID, 1) + eventName := fmt.Sprintf("go/packages.Load #%d", id) // unique name for logging + + var query []string + var containsDir bool // for logging + + // Keep track of module query -> module path so that we can later correlate query + // errors with errors. + moduleQueries := make(map[string]string) + for _, scope := range scopes { + switch scope := scope.(type) { + case packageLoadScope: + // The only time we pass package paths is when we're doing a + // partial workspace load. In those cases, the paths came back from + // go list and should already be GOPATH-vendorized when appropriate. + query = append(query, string(scope)) + + case fileLoadScope: + uri := span.URI(scope) + fh := s.FindFile(uri) + if fh == nil || s.View().FileKind(fh) != source.Go { + // Don't try to load a file that doesn't exist, or isn't a go file. + continue + } + contents, err := fh.Read() + if err != nil { + continue + } + if isStandaloneFile(contents, s.view.Options().StandaloneTags) { + query = append(query, uri.Filename()) + } else { + query = append(query, fmt.Sprintf("file=%s", uri.Filename())) + } + + case moduleLoadScope: + switch scope { + case "std", "cmd": + query = append(query, string(scope)) + default: + modQuery := fmt.Sprintf("%s/...", scope) + query = append(query, modQuery) + moduleQueries[modQuery] = string(scope) + } + + case viewLoadScope: + // If we are outside of GOPATH, a module, or some other known + // build system, don't load subdirectories. + if !s.ValidBuildConfiguration() { + query = append(query, "./") + } else { + query = append(query, "./...") + } + + default: + panic(fmt.Sprintf("unknown scope type %T", scope)) + } + switch scope.(type) { + case viewLoadScope, moduleLoadScope: + containsDir = true + } + } + if len(query) == 0 { + return nil + } + sort.Strings(query) // for determinism + + ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query)) + defer done() + + flags := source.LoadWorkspace + if allowNetwork { + flags |= source.AllowNetwork + } + _, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{ + WorkingDir: s.view.workingDir().Filename(), + }) + if err != nil { + return err + } + + // Set a last resort deadline on packages.Load since it calls the go + // command, which may hang indefinitely if it has a bug. golang/go#42132 + // and golang/go#42255 have more context. + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() + + cfg := s.config(ctx, inv) + pkgs, err := packages.Load(cfg, query...) + cleanup() + + // If the context was canceled, return early. Otherwise, we might be + // type-checking an incomplete result. Check the context directly, + // because go/packages adds extra information to the error. + if ctx.Err() != nil { + return ctx.Err() + } + + // This log message is sought for by TestReloadOnlyOnce. + labels := append(source.SnapshotLabels(s), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) + if err != nil { + event.Error(ctx, eventName, err, labels...) + } else { + event.Log(ctx, eventName, labels...) + } + + if len(pkgs) == 0 { + if err == nil { + err = errNoPackages + } + return fmt.Errorf("packages.Load error: %w", err) + } + + moduleErrs := make(map[string][]packages.Error) // module path -> errors + filterFunc := s.view.filterFunc() + newMetadata := make(map[PackageID]*source.Metadata) + for _, pkg := range pkgs { + // The Go command returns synthetic list results for module queries that + // encountered module errors. + // + // For example, given a module path a.mod, we'll query for "a.mod/..." and + // the go command will return a package named "a.mod/..." holding this + // error. Save it for later interpretation. + // + // See golang/go#50862 for more details. + if mod := moduleQueries[pkg.PkgPath]; mod != "" { // a synthetic result for the unloadable module + if len(pkg.Errors) > 0 { + moduleErrs[mod] = pkg.Errors + } + continue + } + + if !containsDir || s.view.Options().VerboseOutput { + event.Log(ctx, eventName, append( + source.SnapshotLabels(s), + tag.Package.Of(pkg.ID), + tag.Files.Of(pkg.CompiledGoFiles))...) + } + + // Ignore packages with no sources, since we will never be able to + // correctly invalidate that metadata. + if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 { + continue + } + // Special case for the builtin package, as it has no dependencies. + if pkg.PkgPath == "builtin" { + if len(pkg.GoFiles) != 1 { + return fmt.Errorf("only expected 1 file for builtin, got %v", len(pkg.GoFiles)) + } + s.setBuiltin(pkg.GoFiles[0]) + continue + } + // Skip test main packages. + if isTestMain(pkg, s.view.gocache) { + continue + } + // Skip filtered packages. They may be added anyway if they're + // dependencies of non-filtered packages. + // + // TODO(rfindley): why exclude metadata arbitrarily here? It should be safe + // to capture all metadata. + // TODO(rfindley): what about compiled go files? + if allFilesExcluded(pkg.GoFiles, filterFunc) { + continue + } + if err := buildMetadata(ctx, pkg, cfg, query, newMetadata, nil); err != nil { + return err + } + } + + s.mu.Lock() + + // Compute the minimal metadata updates (for Clone) + // required to preserve this invariant: + // for all id, s.packages.Get(id).m == s.meta.metadata[id]. + updates := make(map[PackageID]*source.Metadata) + for _, m := range newMetadata { + if existing := s.meta.metadata[m.ID]; existing == nil { + updates[m.ID] = m + delete(s.shouldLoad, m.ID) + } + } + // Assert the invariant. + s.packages.Range(func(k, v interface{}) { + pk, ph := k.(packageKey), v.(*packageHandle) + if s.meta.metadata[pk.id] != ph.m { + // TODO(adonovan): upgrade to unconditional panic after Jan 2023. + bug.Reportf("inconsistent metadata") + } + }) + + event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates))) + + s.meta = s.meta.Clone(updates) + s.resetIsActivePackageLocked() + + s.workspacePackages = computeWorkspacePackagesLocked(s, s.meta) + s.dumpWorkspace("load") + s.mu.Unlock() + + // Recompute the workspace package handle for any packages we invalidated. + // + // This is (putatively) an optimization since handle + // construction prefetches the content of all Go source files. + // It is safe to ignore errors, or omit this step entirely. + for _, m := range updates { + s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)) // ignore error + } + + if len(moduleErrs) > 0 { + return &moduleErrorMap{moduleErrs} + } + + return nil +} + +type moduleErrorMap struct { + errs map[string][]packages.Error // module path -> errors +} + +func (m *moduleErrorMap) Error() string { + var paths []string // sort for stability + for path, errs := range m.errs { + if len(errs) > 0 { // should always be true, but be cautious + paths = append(paths, path) + } + } + sort.Strings(paths) + + var buf bytes.Buffer + fmt.Fprintf(&buf, "%d modules have errors:\n", len(paths)) + for _, path := range paths { + fmt.Fprintf(&buf, "\t%s:%s\n", path, m.errs[path][0].Msg) + } + + return buf.String() +} + +// workspaceLayoutError returns an error describing a misconfiguration of the +// workspace, along with related diagnostic. +// +// The unusual argument ordering of results is intentional: if the resulting +// error is nil, so must be the resulting diagnostics. +// +// If ctx is cancelled, it may return ctx.Err(), nil. +// +// TODO(rfindley): separate workspace diagnostics from critical workspace +// errors. +func (s *snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.Diagnostic) { + // TODO(rfindley): both of the checks below should be delegated to the workspace. + + if s.view.effectiveGO111MODULE() == off { + return nil, nil + } + + // If the user is using a go.work file, we assume that they know what they + // are doing. + // + // TODO(golang/go#53880): improve orphaned file diagnostics when using go.work. + if s.view.gowork != "" { + return nil, nil + } + + // Apply diagnostics about the workspace configuration to relevant open + // files. + openFiles := s.openFiles() + + // If the snapshot does not have a valid build configuration, it may be + // that the user has opened a directory that contains multiple modules. + // Check for that an warn about it. + if !s.ValidBuildConfiguration() { + var msg string + if s.view.goversion >= 18 { + msg = `gopls was not able to find modules in your workspace. +When outside of GOPATH, gopls needs to know which modules you are working on. +You can fix this by opening your workspace to a folder inside a Go module, or +by using a go.work file to specify multiple modules. +See the documentation for more information on setting up your workspace: +https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.` + } else { + msg = `gopls requires a module at the root of your workspace. +You can work with multiple modules by upgrading to Go 1.18 or later, and using +go workspaces (go.work files). +See the documentation for more information on setting up your workspace: +https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.` + } + return fmt.Errorf(msg), s.applyCriticalErrorToFiles(ctx, msg, openFiles) + } + + // If the user has one active go.mod file, they may still be editing files + // in nested modules. Check the module of each open file and add warnings + // that the nested module must be opened as a workspace folder. + if len(s.workspaceModFiles) == 1 { + // Get the active root go.mod file to compare against. + var rootMod string + for uri := range s.workspaceModFiles { + rootMod = uri.Filename() + } + rootDir := filepath.Dir(rootMod) + nestedModules := make(map[string][]source.FileHandle) + for _, fh := range openFiles { + mod, err := findRootPattern(ctx, filepath.Dir(fh.URI().Filename()), "go.mod", s) + if err != nil { + if ctx.Err() != nil { + return ctx.Err(), nil + } + continue + } + if mod == "" { + continue + } + if mod != rootMod && source.InDir(rootDir, mod) { + modDir := filepath.Dir(mod) + nestedModules[modDir] = append(nestedModules[modDir], fh) + } + } + var multiModuleMsg string + if s.view.goversion >= 18 { + multiModuleMsg = `To work on multiple modules at once, please use a go.work file. +See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.` + } else { + multiModuleMsg = `To work on multiple modules at once, please upgrade to Go 1.18 and use a go.work file. +See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.` + } + // Add a diagnostic to each file in a nested module to mark it as + // "orphaned". Don't show a general diagnostic in the progress bar, + // because the user may still want to edit a file in a nested module. + var srcDiags []*source.Diagnostic + for modDir, uris := range nestedModules { + msg := fmt.Sprintf("This file is in %s, which is a nested module in the %s module.\n%s", modDir, rootMod, multiModuleMsg) + srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...) + } + if len(srcDiags) != 0 { + return fmt.Errorf("You have opened a nested module.\n%s", multiModuleMsg), srcDiags + } + } + return nil, nil +} + +func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.FileHandle) []*source.Diagnostic { + var srcDiags []*source.Diagnostic + for _, fh := range files { + // Place the diagnostics on the package or module declarations. + var rng protocol.Range + switch s.view.FileKind(fh) { + case source.Go: + if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil { + // Check that we have a valid `package foo` range to use for positioning the error. + if pgf.File.Package.IsValid() && pgf.File.Name != nil && pgf.File.Name.End().IsValid() { + rng, _ = pgf.PosRange(pgf.File.Package, pgf.File.Name.End()) + } + } + case source.Mod: + if pmf, err := s.ParseMod(ctx, fh); err == nil { + if mod := pmf.File.Module; mod != nil && mod.Syntax != nil { + rng, _ = pmf.Mapper.OffsetRange(mod.Syntax.Start.Byte, mod.Syntax.End.Byte) + } + } + } + srcDiags = append(srcDiags, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: msg, + }) + } + return srcDiags +} + +// buildMetadata populates the updates map with metadata updates to +// apply, based on the given pkg. It recurs through pkg.Imports to ensure that +// metadata exists for all dependencies. +func buildMetadata(ctx context.Context, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*source.Metadata, path []PackageID) error { + // Allow for multiple ad-hoc packages in the workspace (see #47584). + pkgPath := PackagePath(pkg.PkgPath) + id := PackageID(pkg.ID) + if source.IsCommandLineArguments(id) { + suffix := ":" + strings.Join(query, ",") + id = PackageID(pkg.ID + suffix) + pkgPath = PackagePath(pkg.PkgPath + suffix) + } + + if _, ok := updates[id]; ok { + // If we've already seen this dependency, there may be an import cycle, or + // we may have reached the same package transitively via distinct paths. + // Check the path to confirm. + + // TODO(rfindley): this doesn't look sufficient. Any single piece of new + // metadata could theoretically introduce import cycles in the metadata + // graph. What's the point of this limited check here (and is it even + // possible to get an import cycle in data from go/packages)? Consider + // simply returning, so that this function need not return an error. + // + // We should consider doing a more complete guard against import cycles + // elsewhere. + for _, prev := range path { + if prev == id { + return fmt.Errorf("import cycle detected: %q", id) + } + } + return nil + } + + // Recreate the metadata rather than reusing it to avoid locking. + m := &source.Metadata{ + ID: id, + PkgPath: pkgPath, + Name: PackageName(pkg.Name), + ForTest: PackagePath(packagesinternal.GetForTest(pkg)), + TypesSizes: pkg.TypesSizes, + LoadDir: cfg.Dir, + Module: pkg.Module, + Errors: pkg.Errors, + DepsErrors: packagesinternal.GetDepsErrors(pkg), + } + + updates[id] = m + + for _, filename := range pkg.CompiledGoFiles { + uri := span.URIFromPath(filename) + m.CompiledGoFiles = append(m.CompiledGoFiles, uri) + } + for _, filename := range pkg.GoFiles { + uri := span.URIFromPath(filename) + m.GoFiles = append(m.GoFiles, uri) + } + + depsByImpPath := make(map[ImportPath]PackageID) + depsByPkgPath := make(map[PackagePath]PackageID) + for importPath, imported := range pkg.Imports { + importPath := ImportPath(importPath) + + // It is not an invariant that importPath == imported.PkgPath. + // For example, package "net" imports "golang.org/x/net/dns/dnsmessage" + // which refers to the package whose ID and PkgPath are both + // "vendor/golang.org/x/net/dns/dnsmessage". Notice the ImportMap, + // which maps ImportPaths to PackagePaths: + // + // $ go list -json net vendor/golang.org/x/net/dns/dnsmessage + // { + // "ImportPath": "net", + // "Name": "net", + // "Imports": [ + // "C", + // "vendor/golang.org/x/net/dns/dnsmessage", + // "vendor/golang.org/x/net/route", + // ... + // ], + // "ImportMap": { + // "golang.org/x/net/dns/dnsmessage": "vendor/golang.org/x/net/dns/dnsmessage", + // "golang.org/x/net/route": "vendor/golang.org/x/net/route" + // }, + // ... + // } + // { + // "ImportPath": "vendor/golang.org/x/net/dns/dnsmessage", + // "Name": "dnsmessage", + // ... + // } + // + // (Beware that, for historical reasons, go list uses + // the JSON field "ImportPath" for the package's + // path--effectively the linker symbol prefix.) + // + // The example above is slightly special to go list + // because it's in the std module. Otherwise, + // vendored modules are simply modules whose directory + // is vendor/ instead of GOMODCACHE, and the + // import path equals the package path. + // + // But in GOPATH (non-module) mode, it's possible for + // package vendoring to cause a non-identity ImportMap, + // as in this example: + // + // $ cd $HOME/src + // $ find . -type f + // ./b/b.go + // ./vendor/example.com/a/a.go + // $ cat ./b/b.go + // package b + // import _ "example.com/a" + // $ cat ./vendor/example.com/a/a.go + // package a + // $ GOPATH=$HOME GO111MODULE=off go list -json ./b | grep -A2 ImportMap + // "ImportMap": { + // "example.com/a": "vendor/example.com/a" + // }, + + // Don't remember any imports with significant errors. + // + // The len=0 condition is a heuristic check for imports of + // non-existent packages (for which go/packages will create + // an edge to a synthesized node). The heuristic is unsound + // because some valid packages have zero files, for example, + // a directory containing only the file p_test.go defines an + // empty package p. + // TODO(adonovan): clarify this. Perhaps go/packages should + // report which nodes were synthesized. + if importPath != "unsafe" && len(imported.CompiledGoFiles) == 0 { + depsByImpPath[importPath] = "" // missing + continue + } + + depsByImpPath[importPath] = PackageID(imported.ID) + depsByPkgPath[PackagePath(imported.PkgPath)] = PackageID(imported.ID) + if err := buildMetadata(ctx, imported, cfg, query, updates, append(path, id)); err != nil { + event.Error(ctx, "error in dependency", err) + } + } + m.DepsByImpPath = depsByImpPath + m.DepsByPkgPath = depsByPkgPath + + return nil +} + +// containsPackageLocked reports whether p is a workspace package for the +// snapshot s. +// +// s.mu must be held while calling this function. +func containsPackageLocked(s *snapshot, m *source.Metadata) bool { + // In legacy workspace mode, or if a package does not have an associated + // module, a package is considered inside the workspace if any of its files + // are under the workspace root (and not excluded). + // + // Otherwise if the package has a module it must be an active module (as + // defined by the module root or go.work file) and at least one file must not + // be filtered out by directoryFilters. + // + // TODO(rfindley): revisit this function. We should not need to predicate on + // gowork != "". It should suffice to consider workspace mod files (also, we + // will hopefully eliminate the concept of a workspace package soon). + if m.Module != nil && s.view.gowork != "" { + modURI := span.URIFromPath(m.Module.GoMod) + _, ok := s.workspaceModFiles[modURI] + if !ok { + return false + } + + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + filterFunc := s.view.filterFunc() + for uri := range uris { + // Don't use view.contains here. go.work files may include modules + // outside of the workspace folder. + if !strings.Contains(string(uri), "/vendor/") && !filterFunc(uri) { + return true + } + } + return false + } + + return containsFileInWorkspaceLocked(s, m) +} + +// containsOpenFileLocked reports whether any file referenced by m is open in +// the snapshot s. +// +// s.mu must be held while calling this function. +func containsOpenFileLocked(s *snapshot, m *source.Metadata) bool { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + for uri := range uris { + if s.isOpenLocked(uri) { + return true + } + } + return false +} + +// containsFileInWorkspaceLocked reports whether m contains any file inside the +// workspace of the snapshot s. +// +// s.mu must be held while calling this function. +func containsFileInWorkspaceLocked(s *snapshot, m *source.Metadata) bool { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + for uri := range uris { + // In order for a package to be considered for the workspace, at least one + // file must be contained in the workspace and not vendored. + + // The package's files are in this view. It may be a workspace package. + // Vendored packages are not likely to be interesting to the user. + if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) { + return true + } + } + return false +} + +// computeWorkspacePackagesLocked computes workspace packages in the snapshot s +// for the given metadata graph. +// +// s.mu must be held while calling this function. +func computeWorkspacePackagesLocked(s *snapshot, meta *metadataGraph) map[PackageID]PackagePath { + workspacePackages := make(map[PackageID]PackagePath) + for _, m := range meta.metadata { + if !containsPackageLocked(s, m) { + continue + } + + if source.IsCommandLineArguments(m.ID) { + // If all the files contained in m have a real package, we don't need to + // keep m as a workspace package. + if allFilesHaveRealPackages(meta, m) { + continue + } + + // We only care about command-line-arguments packages if they are still + // open. + if !containsOpenFileLocked(s, m) { + continue + } + } + + switch { + case m.ForTest == "": + // A normal package. + workspacePackages[m.ID] = m.PkgPath + case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath: + // The test variant of some workspace package or its x_test. + // To load it, we need to load the non-test variant with -test. + // + // Notably, this excludes intermediate test variants from workspace + // packages. + workspacePackages[m.ID] = m.ForTest + } + } + return workspacePackages +} + +// allFilesHaveRealPackages reports whether all files referenced by m are +// contained in a "real" package (not command-line-arguments). +// +// If m is valid but all "real" packages containing any file are invalid, this +// function returns false. +// +// If m is not a command-line-arguments package, this is trivially true. +func allFilesHaveRealPackages(g *metadataGraph, m *source.Metadata) bool { + n := len(m.CompiledGoFiles) +checkURIs: + for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) { + for _, id := range g.ids[uri] { + if !source.IsCommandLineArguments(id) { + continue checkURIs + } + } + return false + } + return true +} + +func isTestMain(pkg *packages.Package, gocache string) bool { + // Test mains must have an import path that ends with ".test". + if !strings.HasSuffix(pkg.PkgPath, ".test") { + return false + } + // Test main packages are always named "main". + if pkg.Name != "main" { + return false + } + // Test mains always have exactly one GoFile that is in the build cache. + if len(pkg.GoFiles) > 1 { + return false + } + if !source.InDir(gocache, pkg.GoFiles[0]) { + return false + } + return true +} diff --git a/gopls/internal/lsp/cache/maps.go b/gopls/internal/lsp/cache/maps.go new file mode 100644 index 00000000000..baa0debc174 --- /dev/null +++ b/gopls/internal/lsp/cache/maps.go @@ -0,0 +1,218 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "strings" + + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/persistent" +) + +// TODO(euroelessar): Use generics once support for go1.17 is dropped. + +type filesMap struct { + impl *persistent.Map +} + +// uriLessInterface is the < relation for "any" values containing span.URIs. +func uriLessInterface(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) +} + +func newFilesMap() filesMap { + return filesMap{ + impl: persistent.NewMap(uriLessInterface), + } +} + +func (m filesMap) Clone() filesMap { + return filesMap{ + impl: m.impl.Clone(), + } +} + +func (m filesMap) Destroy() { + m.impl.Destroy() +} + +func (m filesMap) Get(key span.URI) (source.FileHandle, bool) { + value, ok := m.impl.Get(key) + if !ok { + return nil, false + } + return value.(source.FileHandle), true +} + +func (m filesMap) Range(do func(key span.URI, value source.FileHandle)) { + m.impl.Range(func(key, value interface{}) { + do(key.(span.URI), value.(source.FileHandle)) + }) +} + +func (m filesMap) Set(key span.URI, value source.FileHandle) { + m.impl.Set(key, value, nil) +} + +func (m filesMap) Delete(key span.URI) { + m.impl.Delete(key) +} + +func parseKeyLessInterface(a, b interface{}) bool { + return parseKeyLess(a.(parseKey), b.(parseKey)) +} + +func parseKeyLess(a, b parseKey) bool { + if a.mode != b.mode { + return a.mode < b.mode + } + if a.file.Hash != b.file.Hash { + return a.file.Hash.Less(b.file.Hash) + } + return a.file.URI < b.file.URI +} + +type isActivePackageCacheMap struct { + impl *persistent.Map +} + +func newIsActivePackageCacheMap() isActivePackageCacheMap { + return isActivePackageCacheMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(PackageID) < b.(PackageID) + }), + } +} + +func (m isActivePackageCacheMap) Clone() isActivePackageCacheMap { + return isActivePackageCacheMap{ + impl: m.impl.Clone(), + } +} + +func (m isActivePackageCacheMap) Destroy() { + m.impl.Destroy() +} + +func (m isActivePackageCacheMap) Get(key PackageID) (bool, bool) { + value, ok := m.impl.Get(key) + if !ok { + return false, false + } + return value.(bool), true +} + +func (m isActivePackageCacheMap) Set(key PackageID, value bool) { + m.impl.Set(key, value, nil) +} + +type parseKeysByURIMap struct { + impl *persistent.Map +} + +func newParseKeysByURIMap() parseKeysByURIMap { + return parseKeysByURIMap{ + impl: persistent.NewMap(uriLessInterface), + } +} + +func (m parseKeysByURIMap) Clone() parseKeysByURIMap { + return parseKeysByURIMap{ + impl: m.impl.Clone(), + } +} + +func (m parseKeysByURIMap) Destroy() { + m.impl.Destroy() +} + +func (m parseKeysByURIMap) Get(key span.URI) ([]parseKey, bool) { + value, ok := m.impl.Get(key) + if !ok { + return nil, false + } + return value.([]parseKey), true +} + +func (m parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) { + m.impl.Range(func(key, value interface{}) { + do(key.(span.URI), value.([]parseKey)) + }) +} + +func (m parseKeysByURIMap) Set(key span.URI, value []parseKey) { + m.impl.Set(key, value, nil) +} + +func (m parseKeysByURIMap) Delete(key span.URI) { + m.impl.Delete(key) +} + +func packageKeyLessInterface(x, y interface{}) bool { + return packageKeyLess(x.(packageKey), y.(packageKey)) +} + +func packageKeyLess(x, y packageKey) bool { + if x.mode != y.mode { + return x.mode < y.mode + } + return x.id < y.id +} + +type knownDirsSet struct { + impl *persistent.Map +} + +func newKnownDirsSet() knownDirsSet { + return knownDirsSet{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) + }), + } +} + +func (s knownDirsSet) Clone() knownDirsSet { + return knownDirsSet{ + impl: s.impl.Clone(), + } +} + +func (s knownDirsSet) Destroy() { + s.impl.Destroy() +} + +func (s knownDirsSet) Contains(key span.URI) bool { + _, ok := s.impl.Get(key) + return ok +} + +func (s knownDirsSet) Range(do func(key span.URI)) { + s.impl.Range(func(key, value interface{}) { + do(key.(span.URI)) + }) +} + +func (s knownDirsSet) SetAll(other knownDirsSet) { + s.impl.SetAll(other.impl) +} + +func (s knownDirsSet) Insert(key span.URI) { + s.impl.Set(key, nil, nil) +} + +func (s knownDirsSet) Remove(key span.URI) { + s.impl.Delete(key) +} + +// analysisKeyLessInterface is the less-than relation for analysisKey +// values wrapped in an interface. +func analysisKeyLessInterface(a, b interface{}) bool { + x, y := a.(analysisKey), b.(analysisKey) + if cmp := strings.Compare(x.analyzerNames, y.analyzerNames); cmp != 0 { + return cmp < 0 + } + return x.pkgid < y.pkgid +} diff --git a/gopls/internal/lsp/cache/mod.go b/gopls/internal/lsp/cache/mod.go new file mode 100644 index 00000000000..4a3d8db1b80 --- /dev/null +++ b/gopls/internal/lsp/cache/mod.go @@ -0,0 +1,522 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/memoize" +) + +// ParseMod parses a go.mod file, using a cache. It may return partial results and an error. +func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) { + uri := fh.URI() + + s.mu.Lock() + entry, hit := s.parseModHandles.Get(uri) + s.mu.Unlock() + + type parseModKey source.FileIdentity + type parseModResult struct { + parsed *source.ParsedModule + err error + } + + // cache miss? + if !hit { + promise, release := s.store.Promise(parseModKey(fh.FileIdentity()), func(ctx context.Context, _ interface{}) interface{} { + parsed, err := parseModImpl(ctx, fh) + return parseModResult{parsed, err} + }) + + entry = promise + s.mu.Lock() + s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(parseModResult) + return res.parsed, res.err +} + +// parseModImpl parses the go.mod file whose name and contents are in fh. +// It may return partial results and an error. +func parseModImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) { + _, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI())) + defer done() + + contents, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewMapper(fh.URI(), contents) + file, parseErr := modfile.Parse(fh.URI().Filename(), contents, nil) + // Attempt to convert the error to a standardized parse error. + var parseErrors []*source.Diagnostic + if parseErr != nil { + mfErrList, ok := parseErr.(modfile.ErrorList) + if !ok { + return nil, fmt.Errorf("unexpected parse error type %v", parseErr) + } + for _, mfErr := range mfErrList { + rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte) + if err != nil { + return nil, err + } + parseErrors = append(parseErrors, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ParseError, + Message: mfErr.Err.Error(), + }) + } + } + return &source.ParsedModule{ + URI: fh.URI(), + Mapper: m, + File: file, + ParseErrors: parseErrors, + }, parseErr +} + +// ParseWork parses a go.work file, using a cache. It may return partial results and an error. +// TODO(adonovan): move to new work.go file. +func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) { + uri := fh.URI() + + s.mu.Lock() + entry, hit := s.parseWorkHandles.Get(uri) + s.mu.Unlock() + + type parseWorkKey source.FileIdentity + type parseWorkResult struct { + parsed *source.ParsedWorkFile + err error + } + + // cache miss? + if !hit { + handle, release := s.store.Promise(parseWorkKey(fh.FileIdentity()), func(ctx context.Context, _ interface{}) interface{} { + parsed, err := parseWorkImpl(ctx, fh) + return parseWorkResult{parsed, err} + }) + + entry = handle + s.mu.Lock() + s.parseWorkHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(parseWorkResult) + return res.parsed, res.err +} + +// parseWorkImpl parses a go.work file. It may return partial results and an error. +func parseWorkImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) { + _, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI())) + defer done() + + contents, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewMapper(fh.URI(), contents) + file, parseErr := modfile.ParseWork(fh.URI().Filename(), contents, nil) + // Attempt to convert the error to a standardized parse error. + var parseErrors []*source.Diagnostic + if parseErr != nil { + mfErrList, ok := parseErr.(modfile.ErrorList) + if !ok { + return nil, fmt.Errorf("unexpected parse error type %v", parseErr) + } + for _, mfErr := range mfErrList { + rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte) + if err != nil { + return nil, err + } + parseErrors = append(parseErrors, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ParseError, + Message: mfErr.Err.Error(), + }) + } + } + return &source.ParsedWorkFile{ + URI: fh.URI(), + Mapper: m, + File: file, + ParseErrors: parseErrors, + }, parseErr +} + +// goSum reads the go.sum file for the go.mod file at modURI, if it exists. If +// it doesn't exist, it returns nil. +func (s *snapshot) goSum(ctx context.Context, modURI span.URI) []byte { + // Get the go.sum file, either from the snapshot or directly from the + // cache. Avoid (*snapshot).GetFile here, as we don't want to add + // nonexistent file handles to the snapshot if the file does not exist. + // + // TODO(rfindley): but that's not right. Changes to sum files should + // invalidate content, even if it's nonexistent content. + sumURI := span.URIFromPath(sumFilename(modURI)) + var sumFH source.FileHandle = s.FindFile(sumURI) + if sumFH == nil { + var err error + sumFH, err = s.view.fs.GetFile(ctx, sumURI) + if err != nil { + return nil + } + } + content, err := sumFH.Read() + if err != nil { + return nil + } + return content +} + +func sumFilename(modURI span.URI) string { + return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum" +} + +// ModWhy returns the "go mod why" result for each module named in a +// require statement in the go.mod file. +// TODO(adonovan): move to new mod_why.go file. +func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) { + uri := fh.URI() + + if s.View().FileKind(fh) != source.Mod { + return nil, fmt.Errorf("%s is not a go.mod file", uri) + } + + s.mu.Lock() + entry, hit := s.modWhyHandles.Get(uri) + s.mu.Unlock() + + type modWhyResult struct { + why map[string]string + err error + } + + // cache miss? + if !hit { + handle := memoize.NewPromise("modWhy", func(ctx context.Context, arg interface{}) interface{} { + why, err := modWhyImpl(ctx, arg.(*snapshot), fh) + return modWhyResult{why, err} + }) + + entry = handle + s.mu.Lock() + s.modWhyHandles.Set(uri, entry, nil) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(modWhyResult) + return res.why, res.err +} + +// modWhyImpl returns the result of "go mod why -m" on the specified go.mod file. +func modWhyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) (map[string]string, error) { + ctx, done := event.Start(ctx, "cache.ModWhy", tag.URI.Of(fh.URI())) + defer done() + + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + return nil, err + } + // No requires to explain. + if len(pm.File.Require) == 0 { + return nil, nil // empty result + } + // Run `go mod why` on all the dependencies. + inv := &gocommand.Invocation{ + Verb: "mod", + Args: []string{"why", "-m"}, + WorkingDir: filepath.Dir(fh.URI().Filename()), + } + for _, req := range pm.File.Require { + inv.Args = append(inv.Args, req.Mod.Path) + } + stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv) + if err != nil { + return nil, err + } + whyList := strings.Split(stdout.String(), "\n\n") + if len(whyList) != len(pm.File.Require) { + return nil, fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)) + } + why := make(map[string]string, len(pm.File.Require)) + for i, req := range pm.File.Require { + why[req.Mod.Path] = whyList[i] + } + return why, nil +} + +// extractGoCommandErrors tries to parse errors that come from the go command +// and shape them into go.mod diagnostics. +// TODO: rename this to 'load errors' +func (s *snapshot) extractGoCommandErrors(ctx context.Context, goCmdError error) []*source.Diagnostic { + if goCmdError == nil { + return nil + } + + type locatedErr struct { + spn span.Span + msg string + } + diagLocations := map[*source.ParsedModule]locatedErr{} + backupDiagLocations := map[*source.ParsedModule]locatedErr{} + + // If moduleErrs is non-nil, go command errors are scoped to specific + // modules. + var moduleErrs *moduleErrorMap + _ = errors.As(goCmdError, &moduleErrs) + + // Match the error against all the mod files in the workspace. + for _, uri := range s.ModFiles() { + fh, err := s.GetFile(ctx, uri) + if err != nil { + event.Error(ctx, "getting modfile for Go command error", err) + continue + } + pm, err := s.ParseMod(ctx, fh) + if err != nil { + // Parsing errors are reported elsewhere + return nil + } + var msgs []string // error messages to consider + if moduleErrs != nil { + if pm.File.Module != nil { + for _, mes := range moduleErrs.errs[pm.File.Module.Mod.Path] { + msgs = append(msgs, mes.Error()) + } + } + } else { + msgs = append(msgs, goCmdError.Error()) + } + for _, msg := range msgs { + if strings.Contains(goCmdError.Error(), "errors parsing go.mod") { + // The go command emits parse errors for completely invalid go.mod files. + // Those are reported by our own diagnostics and can be ignored here. + // As of writing, we are not aware of any other errors that include + // file/position information, so don't even try to find it. + continue + } + spn, found, err := s.matchErrorToModule(ctx, pm, msg) + if err != nil { + event.Error(ctx, "matching error to module", err) + continue + } + le := locatedErr{ + spn: spn, + msg: msg, + } + if found { + diagLocations[pm] = le + } else { + backupDiagLocations[pm] = le + } + } + } + + // If we didn't find any good matches, assign diagnostics to all go.mod files. + if len(diagLocations) == 0 { + diagLocations = backupDiagLocations + } + + var srcErrs []*source.Diagnostic + for pm, le := range diagLocations { + diag, err := s.goCommandDiagnostic(pm, le.spn, le.msg) + if err != nil { + event.Error(ctx, "building go command diagnostic", err) + continue + } + srcErrs = append(srcErrs, diag) + } + return srcErrs +} + +var moduleVersionInErrorRe = regexp.MustCompile(`[:\s]([+-._~0-9A-Za-z]+)@([+-._~0-9A-Za-z]+)[:\s]`) + +// matchErrorToModule matches a go command error message to a go.mod file. +// Some examples: +// +// example.com@v1.2.2: reading example.com/@v/v1.2.2.mod: no such file or directory +// go: github.com/cockroachdb/apd/v2@v2.0.72: reading github.com/cockroachdb/apd/go.mod at revision v2.0.72: unknown revision v2.0.72 +// go: example.com@v1.2.3 requires\n\trandom.org@v1.2.3: parsing go.mod:\n\tmodule declares its path as: bob.org\n\tbut was required as: random.org +// +// It returns the location of a reference to the one of the modules and true +// if one exists. If none is found it returns a fallback location and false. +func (s *snapshot) matchErrorToModule(ctx context.Context, pm *source.ParsedModule, goCmdError string) (span.Span, bool, error) { + var reference *modfile.Line + matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1) + + for i := len(matches) - 1; i >= 0; i-- { + ver := module.Version{Path: matches[i][1], Version: matches[i][2]} + if err := module.Check(ver.Path, ver.Version); err != nil { + continue + } + reference = findModuleReference(pm.File, ver) + if reference != nil { + break + } + } + + if reference == nil { + // No match for the module path was found in the go.mod file. + // Show the error on the module declaration, if one exists, or + // just the first line of the file. + if pm.File.Module == nil { + return span.New(pm.URI, span.NewPoint(1, 1, 0), span.Point{}), false, nil + } + syntax := pm.File.Module.Syntax + spn, err := pm.Mapper.OffsetSpan(syntax.Start.Byte, syntax.End.Byte) + return spn, false, err + } + + spn, err := pm.Mapper.OffsetSpan(reference.Start.Byte, reference.End.Byte) + return spn, true, err +} + +// goCommandDiagnostic creates a diagnostic for a given go command error. +func (s *snapshot) goCommandDiagnostic(pm *source.ParsedModule, spn span.Span, goCmdError string) (*source.Diagnostic, error) { + rng, err := pm.Mapper.SpanRange(spn) + if err != nil { + return nil, err + } + + matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1) + var innermost *module.Version + for i := len(matches) - 1; i >= 0; i-- { + ver := module.Version{Path: matches[i][1], Version: matches[i][2]} + if err := module.Check(ver.Path, ver.Version); err != nil { + continue + } + innermost = &ver + break + } + + switch { + case strings.Contains(goCmdError, "inconsistent vendoring"): + cmd, err := command.NewVendorCommand("Run go mod vendor", command.URIArg{URI: protocol.URIFromSpanURI(pm.URI)}) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: pm.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: `Inconsistent vendoring detected. Please re-run "go mod vendor". +See https://github.com/golang/go/issues/39164 for more detail on this issue.`, + SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, + }, nil + + case strings.Contains(goCmdError, "updates to go.sum needed"), strings.Contains(goCmdError, "missing go.sum entry"): + var args []protocol.DocumentURI + for _, uri := range s.ModFiles() { + args = append(args, protocol.URIFromSpanURI(uri)) + } + tidyCmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: args}) + if err != nil { + return nil, err + } + updateCmd, err := command.NewUpdateGoSumCommand("Update go.sum", command.URIArgs{URIs: args}) + if err != nil { + return nil, err + } + msg := "go.sum is out of sync with go.mod. Please update it by applying the quick fix." + if innermost != nil { + msg = fmt.Sprintf("go.sum is out of sync with go.mod: entry for %v is missing. Please updating it by applying the quick fix.", innermost) + } + return &source.Diagnostic{ + URI: pm.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: msg, + SuggestedFixes: []source.SuggestedFix{ + source.SuggestedFixFromCommand(tidyCmd, protocol.QuickFix), + source.SuggestedFixFromCommand(updateCmd, protocol.QuickFix), + }, + }, nil + case strings.Contains(goCmdError, "disabled by GOPROXY=off") && innermost != nil: + title := fmt.Sprintf("Download %v@%v", innermost.Path, innermost.Version) + cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{ + URI: protocol.URIFromSpanURI(pm.URI), + AddRequire: false, + GoCmdArgs: []string{fmt.Sprintf("%v@%v", innermost.Path, innermost.Version)}, + }) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: pm.URI, + Range: rng, + Severity: protocol.SeverityError, + Message: fmt.Sprintf("%v@%v has not been downloaded", innermost.Path, innermost.Version), + Source: source.ListError, + SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, + }, nil + default: + return &source.Diagnostic{ + URI: pm.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: goCmdError, + }, nil + } +} + +func findModuleReference(mf *modfile.File, ver module.Version) *modfile.Line { + for _, req := range mf.Require { + if req.Mod == ver { + return req.Syntax + } + } + for _, ex := range mf.Exclude { + if ex.Mod == ver { + return ex.Syntax + } + } + for _, rep := range mf.Replace { + if rep.New == ver || rep.Old == ver { + return rep.Syntax + } + } + return nil +} diff --git a/gopls/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go new file mode 100644 index 00000000000..def10d55fb6 --- /dev/null +++ b/gopls/internal/lsp/cache/mod_tidy.go @@ -0,0 +1,464 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "fmt" + "go/ast" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/memoize" +) + +// ModTidy returns the go.mod file that would be obtained by running +// "go mod tidy". Concurrent requests are combined into a single command. +func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { + uri := pm.URI + if pm.File == nil { + return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", uri) + } + + s.mu.Lock() + entry, hit := s.modTidyHandles.Get(uri) + s.mu.Unlock() + + type modTidyResult struct { + tidied *source.TidiedModule + err error + } + + // Cache miss? + if !hit { + // If the file handle is an overlay, it may not be written to disk. + // The go.mod file has to be on disk for `go mod tidy` to work. + // TODO(rfindley): is this still true with Go 1.16 overlay support? + fh, err := s.GetFile(ctx, pm.URI) + if err != nil { + return nil, err + } + if _, ok := fh.(*Overlay); ok { + if info, _ := os.Stat(uri.Filename()); info == nil { + return nil, source.ErrNoModOnDisk + } + } + + if criticalErr := s.GetCriticalError(ctx); criticalErr != nil { + return &source.TidiedModule{ + Diagnostics: criticalErr.Diagnostics, + }, nil + } + if ctx.Err() != nil { // must check ctx after GetCriticalError + return nil, ctx.Err() + } + + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } + + handle := memoize.NewPromise("modTidy", func(ctx context.Context, arg interface{}) interface{} { + tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm) + return modTidyResult{tidied, err} + }) + + entry = handle + s.mu.Lock() + s.modTidyHandles.Set(uri, entry, nil) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(modTidyResult) + return res.tidied, res.err +} + +// modTidyImpl runs "go mod tidy" on a go.mod file. +func modTidyImpl(ctx context.Context, snapshot *snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) { + ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename)) + defer done() + + inv := &gocommand.Invocation{ + Verb: "mod", + Args: []string{"tidy"}, + WorkingDir: filepath.Dir(filename), + } + // TODO(adonovan): ensure that unsaved overlays are passed through to 'go'. + tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv) + if err != nil { + return nil, err + } + // Keep the temporary go.mod file around long enough to parse it. + defer cleanup() + + if _, err := snapshot.view.gocmdRunner.Run(ctx, *inv); err != nil { + return nil, err + } + + // Go directly to disk to get the temporary mod file, + // since it is always on disk. + tempContents, err := ioutil.ReadFile(tmpURI.Filename()) + if err != nil { + return nil, err + } + ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil) + if err != nil { + // We do not need to worry about the temporary file's parse errors + // since it has been "tidied". + return nil, err + } + + // Compare the original and tidied go.mod files to compute errors and + // suggested fixes. + diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal) + if err != nil { + return nil, err + } + + return &source.TidiedModule{ + Diagnostics: diagnostics, + TidiedContent: tempContents, + }, nil +} + +// modTidyDiagnostics computes the differences between the original and tidied +// go.mod files to produce diagnostic and suggested fixes. Some diagnostics +// may appear on the Go files that import packages from missing modules. +func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) { + // First, determine which modules are unused and which are missing from the + // original go.mod file. + var ( + unused = make(map[string]*modfile.Require, len(pm.File.Require)) + missing = make(map[string]*modfile.Require, len(ideal.Require)) + wrongDirectness = make(map[string]*modfile.Require, len(pm.File.Require)) + ) + for _, req := range pm.File.Require { + unused[req.Mod.Path] = req + } + for _, req := range ideal.Require { + origReq := unused[req.Mod.Path] + if origReq == nil { + missing[req.Mod.Path] = req + continue + } else if origReq.Indirect != req.Indirect { + wrongDirectness[req.Mod.Path] = origReq + } + delete(unused, req.Mod.Path) + } + for _, req := range wrongDirectness { + // Handle dependencies that are incorrectly labeled indirect and + // vice versa. + srcDiag, err := directnessDiagnostic(pm.Mapper, req, snapshot.View().Options().ComputeEdits) + if err != nil { + // We're probably in a bad state if we can't compute a + // directnessDiagnostic, but try to keep going so as to not suppress + // other, valid diagnostics. + event.Error(ctx, "computing directness diagnostic", err) + continue + } + diagnostics = append(diagnostics, srcDiag) + } + // Next, compute any diagnostics for modules that are missing from the + // go.mod file. The fixes will be for the go.mod file, but the + // diagnostics should also appear in both the go.mod file and the import + // statements in the Go files in which the dependencies are used. + missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{} + for _, req := range missing { + srcDiag, err := missingModuleDiagnostic(pm, req) + if err != nil { + return nil, err + } + missingModuleFixes[req] = srcDiag.SuggestedFixes + diagnostics = append(diagnostics, srcDiag) + } + // Add diagnostics for missing modules anywhere they are imported in the + // workspace. + // TODO(adonovan): opt: opportunities for parallelism abound. + for _, m := range snapshot.workspaceMetadata() { + // Read both lists of files of this package, in parallel. + goFiles, compiledGoFiles, err := readGoFiles(ctx, snapshot, m) + if err != nil { + return nil, err + } + + missingImports := map[string]*modfile.Require{} + + // If -mod=readonly is not set we may have successfully imported + // packages from missing modules. Otherwise they'll be in + // MissingDependencies. Combine both. + for imp := range parseImports(ctx, snapshot, goFiles) { + if req, ok := missing[imp]; ok { + missingImports[imp] = req + break + } + // If the import is a package of the dependency, then add the + // package to the map, this will eliminate the need to do this + // prefix package search on each import for each file. + // Example: + // + // import ( + // "golang.org/x/tools/go/expect" + // "golang.org/x/tools/go/packages" + // ) + // They both are related to the same module: "golang.org/x/tools". + var match string + for _, req := range ideal.Require { + if strings.HasPrefix(imp, req.Mod.Path) && len(req.Mod.Path) > len(match) { + match = req.Mod.Path + } + } + if req, ok := missing[match]; ok { + missingImports[imp] = req + } + } + // None of this package's imports are from missing modules. + if len(missingImports) == 0 { + continue + } + for _, goFile := range compiledGoFiles { + pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader) + if err != nil { + continue + } + file, m := pgf.File, pgf.Mapper + if file == nil || m == nil { + continue + } + imports := make(map[string]*ast.ImportSpec) + for _, imp := range file.Imports { + if imp.Path == nil { + continue + } + if target, err := strconv.Unquote(imp.Path.Value); err == nil { + imports[target] = imp + } + } + if len(imports) == 0 { + continue + } + for importPath, req := range missingImports { + imp, ok := imports[importPath] + if !ok { + continue + } + fixes, ok := missingModuleFixes[req] + if !ok { + return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path) + } + srcErr, err := missingModuleForImport(pgf, imp, req, fixes) + if err != nil { + return nil, err + } + diagnostics = append(diagnostics, srcErr) + } + } + } + // Finally, add errors for any unused dependencies. + onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1 + for _, req := range unused { + srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic) + if err != nil { + return nil, err + } + diagnostics = append(diagnostics, srcErr) + } + return diagnostics, nil +} + +// unusedDiagnostic returns a source.Diagnostic for an unused require. +func unusedDiagnostic(m *protocol.Mapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) { + rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte) + if err != nil { + return nil, err + } + title := fmt.Sprintf("Remove dependency: %s", req.Mod.Path) + cmd, err := command.NewRemoveDependencyCommand(title, command.RemoveDependencyArgs{ + URI: protocol.URIFromSpanURI(m.URI), + OnlyDiagnostic: onlyDiagnostic, + ModulePath: req.Mod.Path, + }) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: m.URI, + Range: rng, + Severity: protocol.SeverityWarning, + Source: source.ModTidyError, + Message: fmt.Sprintf("%s is not used in this module", req.Mod.Path), + SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, + }, nil +} + +// directnessDiagnostic extracts errors when a dependency is labeled indirect when +// it should be direct and vice versa. +func directnessDiagnostic(m *protocol.Mapper, req *modfile.Require, computeEdits source.DiffFunction) (*source.Diagnostic, error) { + rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte) + if err != nil { + return nil, err + } + direction := "indirect" + if req.Indirect { + direction = "direct" + + // If the dependency should be direct, just highlight the // indirect. + if comments := req.Syntax.Comment(); comments != nil && len(comments.Suffix) > 0 { + end := comments.Suffix[0].Start + end.LineRune += len(comments.Suffix[0].Token) + end.Byte += len(comments.Suffix[0].Token) + rng, err = m.OffsetRange(comments.Suffix[0].Start.Byte, end.Byte) + if err != nil { + return nil, err + } + } + } + // If the dependency should be indirect, add the // indirect. + edits, err := switchDirectness(req, m, computeEdits) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: m.URI, + Range: rng, + Severity: protocol.SeverityWarning, + Source: source.ModTidyError, + Message: fmt.Sprintf("%s should be %s", req.Mod.Path, direction), + SuggestedFixes: []source.SuggestedFix{{ + Title: fmt.Sprintf("Change %s to %s", req.Mod.Path, direction), + Edits: map[span.URI][]protocol.TextEdit{ + m.URI: edits, + }, + ActionKind: protocol.QuickFix, + }}, + }, nil +} + +func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*source.Diagnostic, error) { + var rng protocol.Range + // Default to the start of the file if there is no module declaration. + if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil { + start, end := pm.File.Module.Syntax.Span() + var err error + rng, err = pm.Mapper.OffsetRange(start.Byte, end.Byte) + if err != nil { + return nil, err + } + } + title := fmt.Sprintf("Add %s to your go.mod file", req.Mod.Path) + cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{ + URI: protocol.URIFromSpanURI(pm.Mapper.URI), + AddRequire: !req.Indirect, + GoCmdArgs: []string{req.Mod.Path + "@" + req.Mod.Version}, + }) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: pm.Mapper.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ModTidyError, + Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path), + SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, + }, nil +} + +// switchDirectness gets the edits needed to change an indirect dependency to +// direct and vice versa. +func switchDirectness(req *modfile.Require, m *protocol.Mapper, computeEdits source.DiffFunction) ([]protocol.TextEdit, error) { + // We need a private copy of the parsed go.mod file, since we're going to + // modify it. + copied, err := modfile.Parse("", m.Content, nil) + if err != nil { + return nil, err + } + // Change the directness in the matching require statement. To avoid + // reordering the require statements, rewrite all of them. + var requires []*modfile.Require + seenVersions := make(map[string]string) + for _, r := range copied.Require { + if seen := seenVersions[r.Mod.Path]; seen != "" && seen != r.Mod.Version { + // Avoid a panic in SetRequire below, which panics on conflicting + // versions. + return nil, fmt.Errorf("%q has conflicting versions: %q and %q", r.Mod.Path, seen, r.Mod.Version) + } + seenVersions[r.Mod.Path] = r.Mod.Version + if r.Mod.Path == req.Mod.Path { + requires = append(requires, &modfile.Require{ + Mod: r.Mod, + Syntax: r.Syntax, + Indirect: !r.Indirect, + }) + continue + } + requires = append(requires, r) + } + copied.SetRequire(requires) + newContent, err := copied.Format() + if err != nil { + return nil, err + } + // Calculate the edits to be made due to the change. + edits := computeEdits(string(m.Content), string(newContent)) + return source.ToProtocolEdits(m, edits) +} + +// missingModuleForImport creates an error for a given import path that comes +// from a missing module. +func missingModuleForImport(pgf *source.ParsedGoFile, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) { + if req.Syntax == nil { + return nil, fmt.Errorf("no syntax for %v", req) + } + rng, err := pgf.NodeRange(imp.Path) + if err != nil { + return nil, err + } + return &source.Diagnostic{ + URI: pgf.URI, + Range: rng, + Severity: protocol.SeverityError, + Source: source.ModTidyError, + Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path), + SuggestedFixes: fixes, + }, nil +} + +// parseImports parses the headers of the specified files and returns +// the set of strings that appear in import declarations within +// GoFiles. Errors are ignored. +// +// (We can't simply use Metadata.Imports because it is based on +// CompiledGoFiles, after cgo processing.) +func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) map[string]bool { + s.mu.Lock() // peekOrParse requires a locked snapshot (!) + defer s.mu.Unlock() + seen := make(map[string]bool) + for _, file := range files { + f, err := peekOrParse(ctx, s, file, source.ParseHeader) + if err != nil { + continue + } + for _, spec := range f.File.Imports { + path, _ := strconv.Unquote(spec.Path.Value) + seen[path] = true + } + } + return seen +} diff --git a/gopls/internal/lsp/cache/mod_vuln.go b/gopls/internal/lsp/cache/mod_vuln.go new file mode 100644 index 00000000000..88d1a1cb4d2 --- /dev/null +++ b/gopls/internal/lsp/cache/mod_vuln.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "os" + + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/gopls/internal/vulncheck" + "golang.org/x/tools/internal/memoize" +) + +// ModVuln returns import vulnerability analysis for the given go.mod URI. +// Concurrent requests are combined into a single command. +func (s *snapshot) ModVuln(ctx context.Context, modURI span.URI) (*govulncheck.Result, error) { + s.mu.Lock() + entry, hit := s.modVulnHandles.Get(modURI) + s.mu.Unlock() + + type modVuln struct { + result *govulncheck.Result + err error + } + + // Cache miss? + if !hit { + // If the file handle is an overlay, it may not be written to disk. + // The go.mod file has to be on disk for vulncheck to work. + // + // TODO(hyangah): use overlays for vulncheck. + fh, err := s.GetFile(ctx, modURI) + if err != nil { + return nil, err + } + if _, ok := fh.(*Overlay); ok { + if info, _ := os.Stat(modURI.Filename()); info == nil { + return nil, source.ErrNoModOnDisk + } + } + + handle := memoize.NewPromise("modVuln", func(ctx context.Context, arg interface{}) interface{} { + result, err := modVulnImpl(ctx, arg.(*snapshot), modURI) + return modVuln{result, err} + }) + + entry = handle + s.mu.Lock() + s.modVulnHandles.Set(modURI, entry, nil) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(modVuln) + return res.result, res.err +} + +func modVulnImpl(ctx context.Context, s *snapshot, uri span.URI) (*govulncheck.Result, error) { + if vulncheck.VulnerablePackages == nil { + return &govulncheck.Result{}, nil + } + fh, err := s.GetFile(ctx, uri) + if err != nil { + return nil, err + } + return vulncheck.VulnerablePackages(ctx, s, fh) +} diff --git a/internal/lsp/cache/os_darwin.go b/gopls/internal/lsp/cache/os_darwin.go similarity index 100% rename from internal/lsp/cache/os_darwin.go rename to gopls/internal/lsp/cache/os_darwin.go diff --git a/internal/lsp/cache/os_windows.go b/gopls/internal/lsp/cache/os_windows.go similarity index 99% rename from internal/lsp/cache/os_windows.go rename to gopls/internal/lsp/cache/os_windows.go index 7ff1cce7469..2feded84d7a 100644 --- a/internal/lsp/cache/os_windows.go +++ b/gopls/internal/lsp/cache/os_windows.go @@ -1,6 +1,7 @@ // Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package cache import ( diff --git a/gopls/internal/lsp/cache/parse.go b/gopls/internal/lsp/cache/parse.go new file mode 100644 index 00000000000..4a3d9b7c53a --- /dev/null +++ b/gopls/internal/lsp/cache/parse.go @@ -0,0 +1,1329 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "path/filepath" + "reflect" + "strconv" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/memoize" +) + +// parseKey uniquely identifies a parsed Go file. +type parseKey struct { + file source.FileIdentity + mode source.ParseMode +} + +// ParseGo parses the file whose contents are provided by fh, using a cache. +// The resulting tree may have be fixed up. +// +// Token position information will be added to the snapshot's FileSet. +// +// The parser mode must not be ParseExported: that mode is used during +// type checking to destructively trim the tree to reduce work, +// which is not safe for values from a shared cache. +// TODO(adonovan): opt: shouldn't parseGoImpl do the trimming? +// Then we can cache the result since it would never change. +// +// TODO(adonovan): in the absence of any way to add existing an +// token.File to a new FileSet (see go.dev/issue/53200), caching ASTs +// implies a global FileSet. +func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + if mode == source.ParseExported { + panic("only type checking should use Exported") + } + + key := parseKey{ + file: fh.FileIdentity(), + mode: mode, + } + + s.mu.Lock() + entry, hit := s.parsedGoFiles.Get(key) + s.mu.Unlock() + + // cache miss? + if !hit { + promise, release := s.store.Promise(key, func(ctx context.Context, arg interface{}) interface{} { + parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode) + return parseGoResult{parsed, err} + }) + + s.mu.Lock() + // Check cache again in case another thread got there first. + if prev, ok := s.parsedGoFiles.Get(key); ok { + entry = prev + release() + } else { + entry = promise + s.parsedGoFiles.Set(key, entry, func(_, _ interface{}) { release() }) + + // In order to correctly invalidate the key above, we must keep track of + // the parse key just created. + // + // TODO(rfindley): use a two-level map URI->parseKey->promise. + keys, _ := s.parseKeysByURI.Get(fh.URI()) + + // Only record the new key if it doesn't exist. This is overly cautious: + // we should only be setting the key if it doesn't exist. However, this + // logic will be replaced soon, and erring on the side of caution seemed + // wise. + foundKey := false + for _, existing := range keys { + if existing == key { + foundKey = true + break + } + } + if !foundKey { + keys = append(keys, key) + s.parseKeysByURI.Set(fh.URI(), keys) + } + } + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(parseGoResult) + return res.parsed, res.err +} + +// peekParseGoLocked peeks at the cache used by ParseGo but does not +// populate it or wait for other threads to do so. On cache hit, it returns +// the cache result of parseGoImpl; otherwise it returns (nil, nil). +func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + entry, hit := s.parsedGoFiles.Get(parseKey{fh.FileIdentity(), mode}) + if !hit { + return nil, nil // no-one has requested this file + } + v := entry.(*memoize.Promise).Cached() + if v == nil { + return nil, nil // parsing is still in progress + } + res := v.(parseGoResult) + return res.parsed, res.err +} + +// parseGoResult holds the result of a call to parseGoImpl. +type parseGoResult struct { + parsed *source.ParsedGoFile + err error +} + +// parseGoImpl parses the Go source file whose content is provided by fh. +func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename())) + defer done() + + ext := filepath.Ext(fh.URI().Filename()) + if ext != ".go" && ext != "" { // files generated by cgo have no extension + return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI()) + } + src, err := fh.Read() + if err != nil { + return nil, err + } + + parserMode := parser.AllErrors | parser.ParseComments + if mode == source.ParseHeader { + parserMode = parser.ImportsOnly | parser.ParseComments + } + + file, err := parser.ParseFile(fset, fh.URI().Filename(), src, parserMode) + var parseErr scanner.ErrorList + if err != nil { + // We passed a byte slice, so the only possible error is a parse error. + parseErr = err.(scanner.ErrorList) + } + + tok := fset.File(file.Pos()) + if tok == nil { + // file.Pos is the location of the package declaration (issue #53202). If there was + // none, we can't find the token.File that ParseFile created, and we + // have no choice but to recreate it. + tok = fset.AddFile(fh.URI().Filename(), -1, len(src)) + tok.SetLinesForContent(src) + } + + fixed := false + // If there were parse errors, attempt to fix them up. + if parseErr != nil { + // Fix any badly parsed parts of the AST. + fixed = fixAST(file, tok, src) + + for i := 0; i < 10; i++ { + // Fix certain syntax errors that render the file unparseable. + newSrc := fixSrc(file, tok, src) + if newSrc == nil { + break + } + + // If we thought there was something to fix 10 times in a row, + // it is likely we got stuck in a loop somehow. Log out a diff + // of the last changes we made to aid in debugging. + if i == 9 { + unified := diff.Unified("before", "after", string(src), string(newSrc)) + event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name())) + } + + newFile, _ := parser.ParseFile(fset, fh.URI().Filename(), newSrc, parserMode) + if newFile != nil { + // Maintain the original parseError so we don't try formatting the doctored file. + file = newFile + src = newSrc + tok = fset.File(file.Pos()) + + fixed = fixAST(file, tok, src) + } + } + } + + return &source.ParsedGoFile{ + URI: fh.URI(), + Mode: mode, + Src: src, + Fixed: fixed, + File: file, + Tok: tok, + Mapper: protocol.NewMapper(fh.URI(), src), + ParseErr: parseErr, + }, nil +} + +// An unexportedFilter removes as much unexported AST from a set of Files as possible. +type unexportedFilter struct { + uses map[string]bool +} + +// Filter records uses of unexported identifiers and filters out all other +// unexported declarations. +func (f *unexportedFilter) Filter(files []*ast.File) { + // Iterate to fixed point -- unexported types can include other unexported types. + oldLen := len(f.uses) + for { + for _, file := range files { + f.recordUses(file) + } + if len(f.uses) == oldLen { + break + } + oldLen = len(f.uses) + } + + for _, file := range files { + var newDecls []ast.Decl + for _, decl := range file.Decls { + if f.filterDecl(decl) { + newDecls = append(newDecls, decl) + } + } + file.Decls = newDecls + file.Scope = nil + file.Unresolved = nil + file.Comments = nil + trimAST(file) + } +} + +func (f *unexportedFilter) keep(ident *ast.Ident) bool { + return ast.IsExported(ident.Name) || f.uses[ident.Name] +} + +func (f *unexportedFilter) filterDecl(decl ast.Decl) bool { + switch decl := decl.(type) { + case *ast.FuncDecl: + if ident := source.RecvIdent(decl.Recv); ident != nil && !f.keep(ident) { + return false + } + return f.keep(decl.Name) + case *ast.GenDecl: + if decl.Tok == token.CONST { + // Constants can involve iota, and iota is hard to deal with. + return true + } + var newSpecs []ast.Spec + for _, spec := range decl.Specs { + if f.filterSpec(spec) { + newSpecs = append(newSpecs, spec) + } + } + decl.Specs = newSpecs + return len(newSpecs) != 0 + case *ast.BadDecl: + return false + } + panic(fmt.Sprintf("unknown ast.Decl %T", decl)) +} + +func (f *unexportedFilter) filterSpec(spec ast.Spec) bool { + switch spec := spec.(type) { + case *ast.ImportSpec: + return true + case *ast.ValueSpec: + var newNames []*ast.Ident + for _, name := range spec.Names { + if f.keep(name) { + newNames = append(newNames, name) + } + } + spec.Names = newNames + return len(spec.Names) != 0 + case *ast.TypeSpec: + if !f.keep(spec.Name) { + return false + } + switch typ := spec.Type.(type) { + case *ast.StructType: + // In practice this no longer filters anything; + // see comment at StructType case in recordUses. + f.filterFieldList(typ.Fields) + case *ast.InterfaceType: + f.filterFieldList(typ.Methods) + } + return true + } + panic(fmt.Sprintf("unknown ast.Spec %T", spec)) +} + +func (f *unexportedFilter) filterFieldList(fields *ast.FieldList) { + var newFields []*ast.Field + for _, field := range fields.List { + if len(field.Names) == 0 { + // Keep embedded fields: they can export methods and fields. + newFields = append(newFields, field) + } + for _, name := range field.Names { + if f.keep(name) { + newFields = append(newFields, field) + break + } + } + } + fields.List = newFields +} + +func (f *unexportedFilter) recordUses(file *ast.File) { + for _, decl := range file.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + // Ignore methods on dropped types. + if ident := source.RecvIdent(decl.Recv); ident != nil && !f.keep(ident) { + break + } + // Ignore functions with dropped names. + if !f.keep(decl.Name) { + break + } + f.recordFuncType(decl.Type) + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + for i, name := range spec.Names { + // Don't mess with constants -- iota is hard. + if f.keep(name) || decl.Tok == token.CONST { + f.recordIdents(spec.Type) + if len(spec.Values) > i { + f.recordIdents(spec.Values[i]) + } + } + } + case *ast.TypeSpec: + switch typ := spec.Type.(type) { + case *ast.StructType: + // We used to trim unexported fields but this + // had observable consequences. For example, + // the 'fieldalignment' analyzer would compute + // incorrect diagnostics from the size and + // offsets, and the UI hover information for + // types was inaccurate. So now we keep them. + if typ.Fields != nil { + for _, field := range typ.Fields.List { + f.recordIdents(field.Type) + } + } + case *ast.InterfaceType: + f.recordInterfaceMethodUses(typ.Methods) + } + } + } + } + } +} + +// recordIdents records unexported identifiers in an Expr in uses. +// These may be types, e.g. in map[key]value, function names, e.g. in foo(), +// or simple variable references. References that will be discarded, such +// as those in function literal bodies, are ignored. +func (f *unexportedFilter) recordIdents(x ast.Expr) { + ast.Inspect(x, func(n ast.Node) bool { + if n == nil { + return false + } + if complit, ok := n.(*ast.CompositeLit); ok { + // We clear out composite literal contents; just record their type. + f.recordIdents(complit.Type) + return false + } + if flit, ok := n.(*ast.FuncLit); ok { + f.recordFuncType(flit.Type) + return false + } + if ident, ok := n.(*ast.Ident); ok && !ast.IsExported(ident.Name) { + f.uses[ident.Name] = true + } + return true + }) +} + +// recordFuncType records the types mentioned by a function type. +func (f *unexportedFilter) recordFuncType(fn *ast.FuncType) { + // Parameter and result types of retained functions need to be retained. + if fn.Params != nil { + for _, field := range fn.Params.List { + f.recordIdents(field.Type) + } + } + if fn.Results != nil { + for _, field := range fn.Results.List { + f.recordIdents(field.Type) + } + } +} + +// recordInterfaceMethodUses records unexported identifiers used in interface methods. +func (f *unexportedFilter) recordInterfaceMethodUses(methods *ast.FieldList) { + if methods != nil { + for _, method := range methods.List { + if len(method.Names) == 0 { + // I, pkg.I, I[T] -- embedded interface: + // may contribute exported names. + f.recordIdents(method.Type) + } else if ft, ok := method.Type.(*ast.FuncType); ok { + // f(T) -- ordinary interface method: + // needs all its types retained. + f.recordFuncType(ft) + } + } + } +} + +// ProcessErrors records additional uses from errors, returning the new uses +// and any unexpected errors. +func (f *unexportedFilter) ProcessErrors(errors []types.Error) (map[string]bool, []types.Error) { + var unexpected []types.Error + missing := map[string]bool{} + for _, err := range errors { + if strings.Contains(err.Msg, "missing return") { + continue + } + const undeclared = "undeclared name: " + if strings.HasPrefix(err.Msg, undeclared) { + missing[strings.TrimPrefix(err.Msg, undeclared)] = true + f.uses[strings.TrimPrefix(err.Msg, undeclared)] = true + continue + } + unexpected = append(unexpected, err) + } + return missing, unexpected +} + +// trimAST clears any part of the AST not relevant to type checking +// the package-level declarations. +func trimAST(file *ast.File) { + // Eliminate bodies of top-level functions, methods, inits. + for _, decl := range file.Decls { + if fn, ok := decl.(*ast.FuncDecl); ok { + fn.Body = nil + } + } + + // Simplify remaining declarations. + ast.Inspect(file, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + // Eliminate bodies of literal functions. + // func() { ... } => func() {} + n.Body.List = nil + case *ast.CompositeLit: + // types.Info.Types for long slice/array literals are particularly + // expensive. Try to clear them out: T{e, ..., e} => T{} + at, ok := n.Type.(*ast.ArrayType) + if !ok { + // Map or struct literal: no harm removing all its fields. + n.Elts = nil + break + } + + // Removing the elements from an ellipsis array changes its type. + // Try to set the length explicitly so we can continue. + // [...]T{e, ..., e} => [3]T[]{} + if _, ok := at.Len.(*ast.Ellipsis); ok { + length, ok := arrayLength(n) + if !ok { + break + } + at.Len = &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprint(length), + ValuePos: at.Len.Pos(), + } + } + n.Elts = nil + } + return true + }) +} + +// arrayLength returns the length of some simple forms of ellipsis array literal. +// Notably, it handles the tables in golang.org/x/text. +func arrayLength(array *ast.CompositeLit) (int, bool) { + litVal := func(expr ast.Expr) (int, bool) { + lit, ok := expr.(*ast.BasicLit) + if !ok { + return 0, false + } + val, err := strconv.ParseInt(lit.Value, 10, 64) + if err != nil { + return 0, false + } + return int(val), true + } + largestKey := -1 + for _, elt := range array.Elts { + kve, ok := elt.(*ast.KeyValueExpr) + if !ok { + continue + } + switch key := kve.Key.(type) { + case *ast.BasicLit: + if val, ok := litVal(key); ok && largestKey < val { + largestKey = val + } + case *ast.BinaryExpr: + // golang.org/x/text uses subtraction (and only subtraction) in its indices. + if key.Op != token.SUB { + break + } + x, ok := litVal(key.X) + if !ok { + break + } + y, ok := litVal(key.Y) + if !ok { + break + } + if val := x - y; largestKey < val { + largestKey = val + } + } + } + if largestKey != -1 { + return largestKey + 1, true + } + return len(array.Elts), true +} + +// fixAST inspects the AST and potentially modifies any *ast.BadStmts so that it can be +// type-checked more effectively. +// +// If fixAST returns true, the resulting AST is considered "fixed", meaning +// positions have been mangled, and type checker errors may not make sense. +func fixAST(n ast.Node, tok *token.File, src []byte) (fixed bool) { + var err error + walkASTWithParent(n, func(n, parent ast.Node) bool { + switch n := n.(type) { + case *ast.BadStmt: + if fixed = fixDeferOrGoStmt(n, parent, tok, src); fixed { + // Recursively fix in our fixed node. + _ = fixAST(parent, tok, src) + } else { + err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err) + } + return false + case *ast.BadExpr: + if fixed = fixArrayType(n, parent, tok, src); fixed { + // Recursively fix in our fixed node. + _ = fixAST(parent, tok, src) + return false + } + + // Fix cases where parser interprets if/for/switch "init" + // statement as "cond" expression, e.g.: + // + // // "i := foo" is init statement, not condition. + // for i := foo + // + fixInitStmt(n, parent, tok, src) + + return false + case *ast.SelectorExpr: + // Fix cases where a keyword prefix results in a phantom "_" selector, e.g.: + // + // foo.var<> // want to complete to "foo.variance" + // + fixPhantomSelector(n, tok, src) + return true + + case *ast.BlockStmt: + switch parent.(type) { + case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + // Adjust closing curly brace of empty switch/select + // statements so we can complete inside them. + fixEmptySwitch(n, tok, src) + } + + return true + default: + return true + } + }) + return fixed +} + +// walkASTWithParent walks the AST rooted at n. The semantics are +// similar to ast.Inspect except it does not call f(nil). +func walkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + defer func() { + if recurse { + ancestors = append(ancestors, n) + } + }() + + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + + return f(n, parent) + }) +} + +// fixSrc attempts to modify the file's source code to fix certain +// syntax errors that leave the rest of the file unparsed. +func fixSrc(f *ast.File, tf *token.File, src []byte) (newSrc []byte) { + walkASTWithParent(f, func(n, parent ast.Node) bool { + if newSrc != nil { + return false + } + + switch n := n.(type) { + case *ast.BlockStmt: + newSrc = fixMissingCurlies(f, n, parent, tf, src) + case *ast.SelectorExpr: + newSrc = fixDanglingSelector(n, tf, src) + } + + return newSrc == nil + }) + + return newSrc +} + +// fixMissingCurlies adds in curly braces for block statements that +// are missing curly braces. For example: +// +// if foo +// +// becomes +// +// if foo {} +func fixMissingCurlies(f *ast.File, b *ast.BlockStmt, parent ast.Node, tok *token.File, src []byte) []byte { + // If the "{" is already in the source code, there isn't anything to + // fix since we aren't missing curlies. + if b.Lbrace.IsValid() { + braceOffset, err := safetoken.Offset(tok, b.Lbrace) + if err != nil { + return nil + } + if braceOffset < len(src) && src[braceOffset] == '{' { + return nil + } + } + + parentLine := tok.Line(parent.Pos()) + + if parentLine >= tok.LineCount() { + // If we are the last line in the file, no need to fix anything. + return nil + } + + // Insert curlies at the end of parent's starting line. The parent + // is the statement that contains the block, e.g. *ast.IfStmt. The + // block's Pos()/End() can't be relied upon because they are based + // on the (missing) curly braces. We assume the statement is a + // single line for now and try sticking the curly braces at the end. + insertPos := tok.LineStart(parentLine+1) - 1 + + // Scootch position backwards until it's not in a comment. For example: + // + // if foo<> // some amazing comment | + // someOtherCode() + // + // insertPos will be located at "|", so we back it out of the comment. + didSomething := true + for didSomething { + didSomething = false + for _, c := range f.Comments { + if c.Pos() < insertPos && insertPos <= c.End() { + insertPos = c.Pos() + didSomething = true + } + } + } + + // Bail out if line doesn't end in an ident or ".". This is to avoid + // cases like below where we end up making things worse by adding + // curlies: + // + // if foo && + // bar<> + switch precedingToken(insertPos, tok, src) { + case token.IDENT, token.PERIOD: + // ok + default: + return nil + } + + var buf bytes.Buffer + buf.Grow(len(src) + 3) + offset, err := safetoken.Offset(tok, insertPos) + if err != nil { + return nil + } + buf.Write(src[:offset]) + + // Detect if we need to insert a semicolon to fix "for" loop situations like: + // + // for i := foo(); foo<> + // + // Just adding curlies is not sufficient to make things parse well. + if fs, ok := parent.(*ast.ForStmt); ok { + if _, ok := fs.Cond.(*ast.BadExpr); !ok { + if xs, ok := fs.Post.(*ast.ExprStmt); ok { + if _, ok := xs.X.(*ast.BadExpr); ok { + buf.WriteByte(';') + } + } + } + } + + // Insert "{}" at insertPos. + buf.WriteByte('{') + buf.WriteByte('}') + buf.Write(src[offset:]) + return buf.Bytes() +} + +// fixEmptySwitch moves empty switch/select statements' closing curly +// brace down one line. This allows us to properly detect incomplete +// "case" and "default" keywords as inside the switch statement. For +// example: +// +// switch { +// def<> +// } +// +// gets parsed like: +// +// switch { +// } +// +// Later we manually pull out the "def" token, but we need to detect +// that our "<>" position is inside the switch block. To do that we +// move the curly brace so it looks like: +// +// switch { +// +// } +func fixEmptySwitch(body *ast.BlockStmt, tok *token.File, src []byte) { + // We only care about empty switch statements. + if len(body.List) > 0 || !body.Rbrace.IsValid() { + return + } + + // If the right brace is actually in the source code at the + // specified position, don't mess with it. + braceOffset, err := safetoken.Offset(tok, body.Rbrace) + if err != nil { + return + } + if braceOffset < len(src) && src[braceOffset] == '}' { + return + } + + braceLine := tok.Line(body.Rbrace) + if braceLine >= tok.LineCount() { + // If we are the last line in the file, no need to fix anything. + return + } + + // Move the right brace down one line. + body.Rbrace = tok.LineStart(braceLine + 1) +} + +// fixDanglingSelector inserts real "_" selector expressions in place +// of phantom "_" selectors. For example: +// +// func _() { +// x.<> +// } +// +// var x struct { i int } +// +// To fix completion at "<>", we insert a real "_" after the "." so the +// following declaration of "x" can be parsed and type checked +// normally. +func fixDanglingSelector(s *ast.SelectorExpr, tf *token.File, src []byte) []byte { + if !isPhantomUnderscore(s.Sel, tf, src) { + return nil + } + + if !s.X.End().IsValid() { + return nil + } + + insertOffset, err := safetoken.Offset(tf, s.X.End()) + if err != nil { + return nil + } + // Insert directly after the selector's ".". + insertOffset++ + if src[insertOffset-1] != '.' { + return nil + } + + var buf bytes.Buffer + buf.Grow(len(src) + 1) + buf.Write(src[:insertOffset]) + buf.WriteByte('_') + buf.Write(src[insertOffset:]) + return buf.Bytes() +} + +// fixPhantomSelector tries to fix selector expressions with phantom +// "_" selectors. In particular, we check if the selector is a +// keyword, and if so we swap in an *ast.Ident with the keyword text. For example: +// +// foo.var +// +// yields a "_" selector instead of "var" since "var" is a keyword. +// +// TODO(rfindley): should this constitute an ast 'fix'? +func fixPhantomSelector(sel *ast.SelectorExpr, tf *token.File, src []byte) { + if !isPhantomUnderscore(sel.Sel, tf, src) { + return + } + + // Only consider selectors directly abutting the selector ".". This + // avoids false positives in cases like: + // + // foo. // don't think "var" is our selector + // var bar = 123 + // + if sel.Sel.Pos() != sel.X.End()+1 { + return + } + + maybeKeyword := readKeyword(sel.Sel.Pos(), tf, src) + if maybeKeyword == "" { + return + } + + replaceNode(sel, sel.Sel, &ast.Ident{ + Name: maybeKeyword, + NamePos: sel.Sel.Pos(), + }) +} + +// isPhantomUnderscore reports whether the given ident is a phantom +// underscore. The parser sometimes inserts phantom underscores when +// it encounters otherwise unparseable situations. +func isPhantomUnderscore(id *ast.Ident, tok *token.File, src []byte) bool { + if id == nil || id.Name != "_" { + return false + } + + // Phantom underscore means the underscore is not actually in the + // program text. + offset, err := safetoken.Offset(tok, id.Pos()) + if err != nil { + return false + } + return len(src) <= offset || src[offset] != '_' +} + +// fixInitStmt fixes cases where the parser misinterprets an +// if/for/switch "init" statement as the "cond" conditional. In cases +// like "if i := 0" the user hasn't typed the semicolon yet so the +// parser is looking for the conditional expression. However, "i := 0" +// are not valid expressions, so we get a BadExpr. +// +// fixInitStmt returns valid AST for the original source. +func fixInitStmt(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) { + if !bad.Pos().IsValid() || !bad.End().IsValid() { + return + } + + // Try to extract a statement from the BadExpr. + start, end, err := safetoken.Offsets(tok, bad.Pos(), bad.End()-1) + if err != nil { + return + } + stmtBytes := src[start : end+1] + stmt, err := parseStmt(bad.Pos(), stmtBytes) + if err != nil { + return + } + + // If the parent statement doesn't already have an "init" statement, + // move the extracted statement into the "init" field and insert a + // dummy expression into the required "cond" field. + switch p := parent.(type) { + case *ast.IfStmt: + if p.Init != nil { + return + } + p.Init = stmt + p.Cond = &ast.Ident{ + Name: "_", + NamePos: stmt.End(), + } + case *ast.ForStmt: + if p.Init != nil { + return + } + p.Init = stmt + p.Cond = &ast.Ident{ + Name: "_", + NamePos: stmt.End(), + } + case *ast.SwitchStmt: + if p.Init != nil { + return + } + p.Init = stmt + p.Tag = nil + } +} + +// readKeyword reads the keyword starting at pos, if any. +func readKeyword(pos token.Pos, tok *token.File, src []byte) string { + var kwBytes []byte + offset, err := safetoken.Offset(tok, pos) + if err != nil { + return "" + } + for i := offset; i < len(src); i++ { + // Use a simplified identifier check since keywords are always lowercase ASCII. + if src[i] < 'a' || src[i] > 'z' { + break + } + kwBytes = append(kwBytes, src[i]) + + // Stop search at arbitrarily chosen too-long-for-a-keyword length. + if len(kwBytes) > 15 { + return "" + } + } + + if kw := string(kwBytes); token.Lookup(kw).IsKeyword() { + return kw + } + + return "" +} + +// fixArrayType tries to parse an *ast.BadExpr into an *ast.ArrayType. +// go/parser often turns lone array types like "[]int" into BadExprs +// if it isn't expecting a type. +func fixArrayType(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) bool { + // Our expected input is a bad expression that looks like "[]someExpr". + + from := bad.Pos() + to := bad.End() + + if !from.IsValid() || !to.IsValid() { + return false + } + + exprBytes := make([]byte, 0, int(to-from)+3) + // Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF. + // It also panics if the position is not in the range of the file, and + // badExprs may not necessarily have good positions, so check first. + fromOffset, toOffset, err := safetoken.Offsets(tok, from, to-1) + if err != nil { + return false + } + exprBytes = append(exprBytes, src[fromOffset:toOffset+1]...) + exprBytes = bytes.TrimSpace(exprBytes) + + // If our expression ends in "]" (e.g. "[]"), add a phantom selector + // so we can complete directly after the "[]". + if len(exprBytes) > 0 && exprBytes[len(exprBytes)-1] == ']' { + exprBytes = append(exprBytes, '_') + } + + // Add "{}" to turn our ArrayType into a CompositeLit. This is to + // handle the case of "[...]int" where we must make it a composite + // literal to be parseable. + exprBytes = append(exprBytes, '{', '}') + + expr, err := parseExpr(from, exprBytes) + if err != nil { + return false + } + + cl, _ := expr.(*ast.CompositeLit) + if cl == nil { + return false + } + + at, _ := cl.Type.(*ast.ArrayType) + if at == nil { + return false + } + + return replaceNode(parent, bad, at) +} + +// precedingToken scans src to find the token preceding pos. +func precedingToken(pos token.Pos, tok *token.File, src []byte) token.Token { + s := &scanner.Scanner{} + s.Init(tok, src, nil, 0) + + var lastTok token.Token + for { + p, t, _ := s.Scan() + if t == token.EOF || p >= pos { + break + } + + lastTok = t + } + return lastTok +} + +// fixDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement. +// +// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because +// it does not include a call expression. This means that go/types skips type-checking +// this statement entirely, and we can't use the type information when completing. +// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST, +// instead of the *ast.BadStmt. +func fixDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) bool { + // Check if we have a bad statement containing either a "go" or "defer". + s := &scanner.Scanner{} + s.Init(tok, src, nil, 0) + + var ( + pos token.Pos + tkn token.Token + ) + for { + if tkn == token.EOF { + return false + } + if pos >= bad.From { + break + } + pos, tkn, _ = s.Scan() + } + + var stmt ast.Stmt + switch tkn { + case token.DEFER: + stmt = &ast.DeferStmt{ + Defer: pos, + } + case token.GO: + stmt = &ast.GoStmt{ + Go: pos, + } + default: + return false + } + + var ( + from, to, last token.Pos + lastToken token.Token + braceDepth int + phantomSelectors []token.Pos + ) +FindTo: + for { + to, tkn, _ = s.Scan() + + if from == token.NoPos { + from = to + } + + switch tkn { + case token.EOF: + break FindTo + case token.SEMICOLON: + // If we aren't in nested braces, end of statement means + // end of expression. + if braceDepth == 0 { + break FindTo + } + case token.LBRACE: + braceDepth++ + } + + // This handles the common dangling selector case. For example in + // + // defer fmt. + // y := 1 + // + // we notice the dangling period and end our expression. + // + // If the previous token was a "." and we are looking at a "}", + // the period is likely a dangling selector and needs a phantom + // "_". Likewise if the current token is on a different line than + // the period, the period is likely a dangling selector. + if lastToken == token.PERIOD && (tkn == token.RBRACE || tok.Line(to) > tok.Line(last)) { + // Insert phantom "_" selector after the dangling ".". + phantomSelectors = append(phantomSelectors, last+1) + // If we aren't in a block then end the expression after the ".". + if braceDepth == 0 { + to = last + 1 + break + } + } + + lastToken = tkn + last = to + + switch tkn { + case token.RBRACE: + braceDepth-- + if braceDepth <= 0 { + if braceDepth == 0 { + // +1 to include the "}" itself. + to += 1 + } + break FindTo + } + } + } + + fromOffset, toOffset, err := safetoken.Offsets(tok, from, to) + if err != nil { + return false + } + if !from.IsValid() || fromOffset >= len(src) { + return false + } + if !to.IsValid() || toOffset >= len(src) { + return false + } + + // Insert any phantom selectors needed to prevent dangling "." from messing + // up the AST. + exprBytes := make([]byte, 0, int(to-from)+len(phantomSelectors)) + for i, b := range src[fromOffset:toOffset] { + if len(phantomSelectors) > 0 && from+token.Pos(i) == phantomSelectors[0] { + exprBytes = append(exprBytes, '_') + phantomSelectors = phantomSelectors[1:] + } + exprBytes = append(exprBytes, b) + } + + if len(phantomSelectors) > 0 { + exprBytes = append(exprBytes, '_') + } + + expr, err := parseExpr(from, exprBytes) + if err != nil { + return false + } + + // Package the expression into a fake *ast.CallExpr and re-insert + // into the function. + call := &ast.CallExpr{ + Fun: expr, + Lparen: to, + Rparen: to, + } + + switch stmt := stmt.(type) { + case *ast.DeferStmt: + stmt.Call = call + case *ast.GoStmt: + stmt.Call = call + } + + return replaceNode(parent, bad, stmt) +} + +// parseStmt parses the statement in src and updates its position to +// start at pos. +func parseStmt(pos token.Pos, src []byte) (ast.Stmt, error) { + // Wrap our expression to make it a valid Go file we can pass to ParseFile. + fileSrc := bytes.Join([][]byte{ + []byte("package fake;func _(){"), + src, + []byte("}"), + }, nil) + + // Use ParseFile instead of ParseExpr because ParseFile has + // best-effort behavior, whereas ParseExpr fails hard on any error. + fakeFile, err := parser.ParseFile(token.NewFileSet(), "", fileSrc, 0) + if fakeFile == nil { + return nil, fmt.Errorf("error reading fake file source: %v", err) + } + + // Extract our expression node from inside the fake file. + if len(fakeFile.Decls) == 0 { + return nil, fmt.Errorf("error parsing fake file: %v", err) + } + + fakeDecl, _ := fakeFile.Decls[0].(*ast.FuncDecl) + if fakeDecl == nil || len(fakeDecl.Body.List) == 0 { + return nil, fmt.Errorf("no statement in %s: %v", src, err) + } + + stmt := fakeDecl.Body.List[0] + + // parser.ParseFile returns undefined positions. + // Adjust them for the current file. + offsetPositions(stmt, pos-1-(stmt.Pos()-1)) + + return stmt, nil +} + +// parseExpr parses the expression in src and updates its position to +// start at pos. +func parseExpr(pos token.Pos, src []byte) (ast.Expr, error) { + stmt, err := parseStmt(pos, src) + if err != nil { + return nil, err + } + + exprStmt, ok := stmt.(*ast.ExprStmt) + if !ok { + return nil, fmt.Errorf("no expr in %s: %v", src, err) + } + + return exprStmt.X, nil +} + +var tokenPosType = reflect.TypeOf(token.NoPos) + +// offsetPositions applies an offset to the positions in an ast.Node. +func offsetPositions(n ast.Node, offset token.Pos) { + ast.Inspect(n, func(n ast.Node) bool { + if n == nil { + return false + } + + v := reflect.ValueOf(n).Elem() + + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + if f.Type() != tokenPosType { + continue + } + + if !f.CanSet() { + continue + } + + // Don't offset invalid positions: they should stay invalid. + if !token.Pos(f.Int()).IsValid() { + continue + } + + f.SetInt(f.Int() + int64(offset)) + } + } + + return true + }) +} + +// replaceNode updates parent's child oldChild to be newChild. It +// returns whether it replaced successfully. +func replaceNode(parent, oldChild, newChild ast.Node) bool { + if parent == nil || oldChild == nil || newChild == nil { + return false + } + + parentVal := reflect.ValueOf(parent).Elem() + if parentVal.Kind() != reflect.Struct { + return false + } + + newChildVal := reflect.ValueOf(newChild) + + tryReplace := func(v reflect.Value) bool { + if !v.CanSet() || !v.CanInterface() { + return false + } + + // If the existing value is oldChild, we found our child. Make + // sure our newChild is assignable and then make the swap. + if v.Interface() == oldChild && newChildVal.Type().AssignableTo(v.Type()) { + v.Set(newChildVal) + return true + } + + return false + } + + // Loop over parent's struct fields. + for i := 0; i < parentVal.NumField(); i++ { + f := parentVal.Field(i) + + switch f.Kind() { + // Check interface and pointer fields. + case reflect.Interface, reflect.Ptr: + if tryReplace(f) { + return true + } + + // Search through any slice fields. + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + if tryReplace(f.Index(i)) { + return true + } + } + } + } + + return false +} diff --git a/internal/lsp/cache/parse_test.go b/gopls/internal/lsp/cache/parse_test.go similarity index 98% rename from internal/lsp/cache/parse_test.go rename to gopls/internal/lsp/cache/parse_test.go index cb620f27432..e8db64530e6 100644 --- a/internal/lsp/cache/parse_test.go +++ b/gopls/internal/lsp/cache/parse_test.go @@ -149,7 +149,7 @@ type Exported struct { } var Var = Exported{foo:1} `, - kept: []string{"Exported", "Var"}, + kept: []string{"Exported", "Var", "x"}, }, { name: "drop_function_literals", diff --git a/internal/lsp/cache/parsemode_go116.go b/gopls/internal/lsp/cache/parsemode_go116.go similarity index 100% rename from internal/lsp/cache/parsemode_go116.go rename to gopls/internal/lsp/cache/parsemode_go116.go diff --git a/internal/lsp/cache/parsemode_go117.go b/gopls/internal/lsp/cache/parsemode_go117.go similarity index 100% rename from internal/lsp/cache/parsemode_go117.go rename to gopls/internal/lsp/cache/parsemode_go117.go diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go new file mode 100644 index 00000000000..bb4823c0326 --- /dev/null +++ b/gopls/internal/lsp/cache/pkg.go @@ -0,0 +1,231 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "fmt" + "go/ast" + "go/scanner" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/lsp/source/xrefs" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/memoize" +) + +// Convenient local aliases for typed strings. +type ( + PackageID = source.PackageID + PackagePath = source.PackagePath + PackageName = source.PackageName + ImportPath = source.ImportPath +) + +// A Package is the union of snapshot-local information (Metadata) and shared +// type-checking information (a syntaxPackage). +// +// TODO(rfindley): for now, we do not persist the post-processing of +// loadDiagnostics, because the value of the snapshot.packages map is just the +// package handle. Fix this. +type Package struct { + m *source.Metadata + pkg *syntaxPackage + loadDiagnostics *memoize.Promise // post-processed errors from loading +} + +func newPackage(m *source.Metadata, pkg *syntaxPackage) *Package { + p := &Package{ + m: m, + pkg: pkg, + } + if len(m.Errors) > 0 || len(m.DepsErrors) > 0 { + p.loadDiagnostics = memoize.NewPromise(fmt.Sprintf("loadDiagnostics(%s)", m.ID), func(ctx context.Context, arg interface{}) interface{} { + s := arg.(*snapshot) + var diags []*source.Diagnostic + for _, packagesErr := range p.m.Errors { + // Filter out parse errors from go list. We'll get them when we + // actually parse, and buggy overlay support may generate spurious + // errors. (See TestNewModule_Issue38207.) + if strings.Contains(packagesErr.Msg, "expected '") { + continue + } + pkgDiags, err := goPackagesErrorDiagnostics(packagesErr, p.pkg, p.m.LoadDir) + if err != nil { + // There are certain cases where the go command returns invalid + // positions, so we cannot panic or even bug.Reportf here. + event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(string(p.m.ID))) + continue + } + diags = append(diags, pkgDiags...) + } + + // TODO(rfindley): this is buggy: an insignificant change to a modfile + // (or an unsaved modfile) could affect the position of deps errors, + // without invalidating the package. + depsDiags, err := s.depsErrors(ctx, p.pkg, p.m.DepsErrors) + if err != nil { + if ctx.Err() == nil { + // TODO(rfindley): consider making this a bug.Reportf. depsErrors should + // not normally fail. + event.Error(ctx, "unable to compute deps errors", err, tag.Package.Of(string(p.m.ID))) + } + return nil + } + diags = append(diags, depsDiags...) + return diags + }) + } + return p +} + +// syntaxPackage contains parse trees and type information for a package. +type syntaxPackage struct { + // -- identifiers -- + id PackageID + mode source.ParseMode + + // -- outputs -- + fset *token.FileSet // for now, same as the snapshot's FileSet + goFiles []*source.ParsedGoFile + compiledGoFiles []*source.ParsedGoFile + diagnostics []*source.Diagnostic + parseErrors []scanner.ErrorList + typeErrors []types.Error + types *types.Package + typesInfo *types.Info + hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors + xrefs []byte // serializable index of outbound cross-references + analyses memoize.Store // maps analyzer.Name to Promise[actionResult] + methodsets *methodsets.Index // index of method sets of package-level types +} + +func (p *Package) String() string { return string(p.m.ID) } + +func (p *Package) Metadata() *source.Metadata { + return p.m +} + +// A loadScope defines a package loading scope for use with go/packages. +// +// TODO(rfindley): move this to load.go. +type loadScope interface { + aScope() +} + +type ( + fileLoadScope span.URI // load packages containing a file (including command-line-arguments) + packageLoadScope string // load a specific package (the value is its PackageID) + moduleLoadScope string // load packages in a specific module + viewLoadScope span.URI // load the workspace +) + +// Implement the loadScope interface. +func (fileLoadScope) aScope() {} +func (packageLoadScope) aScope() {} +func (moduleLoadScope) aScope() {} +func (viewLoadScope) aScope() {} + +func (p *Package) ParseMode() source.ParseMode { + return p.pkg.mode +} + +func (p *Package) CompiledGoFiles() []*source.ParsedGoFile { + return p.pkg.compiledGoFiles +} + +func (p *Package) File(uri span.URI) (*source.ParsedGoFile, error) { + return p.pkg.File(uri) +} + +func (pkg *syntaxPackage) File(uri span.URI) (*source.ParsedGoFile, error) { + for _, cgf := range pkg.compiledGoFiles { + if cgf.URI == uri { + return cgf, nil + } + } + for _, gf := range pkg.goFiles { + if gf.URI == uri { + return gf, nil + } + } + return nil, fmt.Errorf("no parsed file for %s in %v", uri, pkg.id) +} + +func (p *Package) GetSyntax() []*ast.File { + var syntax []*ast.File + for _, pgf := range p.pkg.compiledGoFiles { + syntax = append(syntax, pgf.File) + } + return syntax +} + +func (p *Package) FileSet() *token.FileSet { + return p.pkg.fset +} + +func (p *Package) GetTypes() *types.Package { + return p.pkg.types +} + +func (p *Package) GetTypesInfo() *types.Info { + return p.pkg.typesInfo +} + +func (p *Package) HasParseErrors() bool { + return len(p.pkg.parseErrors) != 0 +} + +func (p *Package) HasTypeErrors() bool { + return len(p.pkg.typeErrors) != 0 +} + +func (p *Package) DiagnosticsForFile(ctx context.Context, s source.Snapshot, uri span.URI) ([]*source.Diagnostic, error) { + var diags []*source.Diagnostic + for _, diag := range p.pkg.diagnostics { + if diag.URI == uri { + diags = append(diags, diag) + } + } + + if p.loadDiagnostics != nil { + res, err := p.loadDiagnostics.Get(ctx, s) + if err != nil { + return nil, err + } + for _, diag := range res.([]*source.Diagnostic) { + if diag.URI == uri { + diags = append(diags, diag) + } + } + } + + return diags, nil +} + +// ReferencesTo returns the location of each reference within package p +// to one of the target objects denoted by the pair (package path, object path). +func (p *Package) ReferencesTo(targets map[PackagePath]map[objectpath.Path]struct{}) []protocol.Location { + // TODO(adonovan): In future, p.xrefs will be retrieved from a + // section of the cache file produced by type checking. + // (Other sections will include the package's export data, + // "implements" relations, exported symbols, etc.) + // For now we just hang it off the pkg. + return xrefs.Lookup(p.m, p.pkg.xrefs, targets) +} + +func (p *Package) MethodSetsIndex() *methodsets.Index { + // TODO(adonovan): In future, p.methodsets will be retrieved from a + // section of the cache file produced by type checking. + return p.pkg.methodsets +} diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go new file mode 100644 index 00000000000..5df540e4f61 --- /dev/null +++ b/gopls/internal/lsp/cache/session.go @@ -0,0 +1,732 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + "sync/atomic" + + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" + "golang.org/x/tools/internal/persistent" + "golang.org/x/tools/internal/xcontext" +) + +type Session struct { + // Unique identifier for this session. + id string + + // Immutable attributes shared across views. + cache *Cache // shared cache + gocmdRunner *gocommand.Runner // limits go command concurrency + + optionsMu sync.Mutex + options *source.Options + + viewMu sync.Mutex + views []*View + viewMap map[span.URI]*View // map of URI->best view + + *overlayFS +} + +// ID returns the unique identifier for this session on this server. +func (s *Session) ID() string { return s.id } +func (s *Session) String() string { return s.id } + +// Options returns a copy of the SessionOptions for this session. +func (s *Session) Options() *source.Options { + s.optionsMu.Lock() + defer s.optionsMu.Unlock() + return s.options +} + +// SetOptions sets the options of this session to new values. +func (s *Session) SetOptions(options *source.Options) { + s.optionsMu.Lock() + defer s.optionsMu.Unlock() + s.options = options +} + +// Shutdown the session and all views it has created. +func (s *Session) Shutdown(ctx context.Context) { + var views []*View + s.viewMu.Lock() + views = append(views, s.views...) + s.views = nil + s.viewMap = nil + s.viewMu.Unlock() + for _, view := range views { + view.shutdown() + } + event.Log(ctx, "Shutdown session", KeyShutdownSession.Of(s)) +} + +// Cache returns the cache that created this session, for debugging only. +func (s *Session) Cache() *Cache { + return s.cache +} + +// NewView creates a new View, returning it and its first snapshot. If a +// non-empty tempWorkspace directory is provided, the View will record a copy +// of its gopls workspace module in that directory, so that client tooling +// can execute in the same main module. On success it also returns a release +// function that must be called when the Snapshot is no longer needed. +func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (*View, source.Snapshot, func(), error) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + for _, view := range s.views { + if span.SameExistingFile(view.folder, folder) { + return nil, nil, nil, source.ErrViewExists + } + } + view, snapshot, release, err := s.createView(ctx, name, folder, options, 0) + if err != nil { + return nil, nil, nil, err + } + s.views = append(s.views, view) + // we always need to drop the view map + s.viewMap = make(map[span.URI]*View) + return view, snapshot, release, nil +} + +// TODO(rfindley): clarify that createView can never be cancelled (with the +// possible exception of server shutdown). +func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, seqID uint64) (*View, *snapshot, func(), error) { + index := atomic.AddInt64(&viewIndex, 1) + + // Get immutable workspace information. + info, err := s.getWorkspaceInformation(ctx, folder, options) + if err != nil { + return nil, nil, func() {}, err + } + + wsModFiles, wsModFilesErr := computeWorkspaceModFiles(ctx, info.gomod, info.effectiveGOWORK(), info.effectiveGO111MODULE(), s) + + // We want a true background context and not a detached context here + // the spans need to be unrelated and no tag values should pollute it. + baseCtx := event.Detach(xcontext.Detach(ctx)) + backgroundCtx, cancel := context.WithCancel(baseCtx) + + v := &View{ + id: strconv.FormatInt(index, 10), + fset: s.cache.fset, + gocmdRunner: s.gocmdRunner, + initialWorkspaceLoad: make(chan struct{}), + initializationSema: make(chan struct{}, 1), + options: options, + baseCtx: baseCtx, + name: name, + folder: folder, + moduleUpgrades: map[span.URI]map[string]string{}, + vulns: map[span.URI]*govulncheck.Result{}, + fs: s.overlayFS, + workspaceInformation: info, + } + v.importsState = &importsState{ + ctx: backgroundCtx, + processEnv: &imports.ProcessEnv{ + GocmdRunner: s.gocmdRunner, + SkipPathInScan: func(dir string) bool { + prefix := strings.TrimSuffix(string(v.folder), "/") + "/" + uri := strings.TrimSuffix(string(span.URIFromPath(dir)), "/") + if !strings.HasPrefix(uri+"/", prefix) { + return false + } + filterer := source.NewFilterer(options.DirectoryFilters) + rel := strings.TrimPrefix(uri, prefix) + disallow := filterer.Disallow(rel) + return disallow + }, + }, + } + v.snapshot = &snapshot{ + sequenceID: seqID, + globalID: nextSnapshotID(), + view: v, + backgroundCtx: backgroundCtx, + cancel: cancel, + store: s.cache.store, + packages: persistent.NewMap(packageKeyLessInterface), + meta: &metadataGraph{}, + files: newFilesMap(), + isActivePackageCache: newIsActivePackageCacheMap(), + parsedGoFiles: persistent.NewMap(parseKeyLessInterface), + parseKeysByURI: newParseKeysByURIMap(), + symbolizeHandles: persistent.NewMap(uriLessInterface), + analyses: persistent.NewMap(analysisKeyLessInterface), + workspacePackages: make(map[PackageID]PackagePath), + unloadableFiles: make(map[span.URI]struct{}), + parseModHandles: persistent.NewMap(uriLessInterface), + parseWorkHandles: persistent.NewMap(uriLessInterface), + modTidyHandles: persistent.NewMap(uriLessInterface), + modVulnHandles: persistent.NewMap(uriLessInterface), + modWhyHandles: persistent.NewMap(uriLessInterface), + knownSubdirs: newKnownDirsSet(), + workspaceModFiles: wsModFiles, + workspaceModFilesErr: wsModFilesErr, + } + // Save one reference in the view. + v.releaseSnapshot = v.snapshot.Acquire() + + // Record the environment of the newly created view in the log. + event.Log(ctx, viewEnv(v)) + + // Initialize the view without blocking. + initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx)) + v.initCancelFirstAttempt = initCancel + snapshot := v.snapshot + + // Pass a second reference to the background goroutine. + bgRelease := snapshot.Acquire() + go func() { + defer bgRelease() + snapshot.initialize(initCtx, true) + }() + + // Return a third reference to the caller. + return v, snapshot, snapshot.Acquire(), nil +} + +// View returns a view with a matching name, if the session has one. +func (s *Session) View(name string) *View { + s.viewMu.Lock() + defer s.viewMu.Unlock() + for _, view := range s.views { + if view.Name() == name { + return view + } + } + return nil +} + +// ViewOf returns a view corresponding to the given URI. +// If the file is not already associated with a view, pick one using some heuristics. +func (s *Session) ViewOf(uri span.URI) (*View, error) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + return s.viewOfLocked(uri) +} + +// Precondition: caller holds s.viewMu lock. +func (s *Session) viewOfLocked(uri span.URI) (*View, error) { + // Check if we already know this file. + if v, found := s.viewMap[uri]; found { + return v, nil + } + // Pick the best view for this file and memoize the result. + if len(s.views) == 0 { + return nil, fmt.Errorf("no views in session") + } + s.viewMap[uri] = bestViewForURI(uri, s.views) + return s.viewMap[uri], nil +} + +func (s *Session) Views() []*View { + s.viewMu.Lock() + defer s.viewMu.Unlock() + result := make([]*View, len(s.views)) + copy(result, s.views) + return result +} + +// bestViewForURI returns the most closely matching view for the given URI +// out of the given set of views. +func bestViewForURI(uri span.URI, views []*View) *View { + // we need to find the best view for this file + var longest *View + for _, view := range views { + if longest != nil && len(longest.Folder()) > len(view.Folder()) { + continue + } + // TODO(rfindley): this should consider the workspace layout (i.e. + // go.work). + if view.contains(uri) { + longest = view + } + } + if longest != nil { + return longest + } + // Try our best to return a view that knows the file. + for _, view := range views { + if view.knownFile(uri) { + return view + } + } + // TODO: are there any more heuristics we can use? + return views[0] +} + +// RemoveView removes the view v from the session +func (s *Session) RemoveView(view *View) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + i := s.dropView(view) + if i == -1 { // error reported elsewhere + return + } + // delete this view... we don't care about order but we do want to make + // sure we can garbage collect the view + s.views = removeElement(s.views, i) +} + +// updateView recreates the view with the given options. +// +// If the resulting error is non-nil, the view may or may not have already been +// dropped from the session. +func (s *Session) updateView(ctx context.Context, view *View, options *source.Options) (*View, error) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + + return s.updateViewLocked(ctx, view, options) +} + +func (s *Session) updateViewLocked(ctx context.Context, view *View, options *source.Options) (*View, error) { + // Preserve the snapshot ID if we are recreating the view. + view.snapshotMu.Lock() + if view.snapshot == nil { + view.snapshotMu.Unlock() + panic("updateView called after View was already shut down") + } + seqID := view.snapshot.sequenceID // Preserve sequence IDs when updating a view in place. + view.snapshotMu.Unlock() + + i := s.dropView(view) + if i == -1 { + return nil, fmt.Errorf("view %q not found", view.id) + } + + v, _, release, err := s.createView(ctx, view.name, view.folder, options, seqID) + release() + + if err != nil { + // we have dropped the old view, but could not create the new one + // this should not happen and is very bad, but we still need to clean + // up the view array if it happens + s.views = removeElement(s.views, i) + return nil, err + } + // substitute the new view into the array where the old view was + s.views[i] = v + return v, nil +} + +// removeElement removes the ith element from the slice replacing it with the last element. +// TODO(adonovan): generics, someday. +func removeElement(slice []*View, index int) []*View { + last := len(slice) - 1 + slice[index] = slice[last] + slice[last] = nil // aid GC + return slice[:last] +} + +// dropView removes v from the set of views for the receiver s and calls +// v.shutdown, returning the index of v in s.views (if found), or -1 if v was +// not found. s.viewMu must be held while calling this function. +func (s *Session) dropView(v *View) int { + // we always need to drop the view map + s.viewMap = make(map[span.URI]*View) + for i := range s.views { + if v == s.views[i] { + // we found the view, drop it and return the index it was found at + s.views[i] = nil + v.shutdown() + return i + } + } + // TODO(rfindley): it looks wrong that we don't shutdown v in this codepath. + // We should never get here. + bug.Reportf("tried to drop nonexistent view %q", v.id) + return -1 +} + +func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error { + _, release, err := s.DidModifyFiles(ctx, changes) + release() + return err +} + +// TODO(rfindley): fileChange seems redundant with source.FileModification. +// De-dupe into a common representation for changes. +type fileChange struct { + content []byte + exists bool + fileHandle source.FileHandle + + // isUnchanged indicates whether the file action is one that does not + // change the actual contents of the file. Opens and closes should not + // be treated like other changes, since the file content doesn't change. + isUnchanged bool +} + +// DidModifyFiles reports a file modification to the session. It returns +// the new snapshots after the modifications have been applied, paired with +// the affected file URIs for those snapshots. +// On success, it returns a release function that +// must be called when the snapshots are no longer needed. +// +// TODO(rfindley): what happens if this function fails? It must leave us in a +// broken state, which we should surface to the user, probably as a request to +// restart gopls. +func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, func(), error) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + + // Update overlays. + // + // TODO(rfindley): I think we do this while holding viewMu to prevent views + // from seeing the updated file content before they have processed + // invalidations, which could lead to a partial view of the changes (i.e. + // spurious diagnostics). However, any such view would immediately be + // invalidated here, so it is possible that we could update overlays before + // acquiring viewMu. + if err := s.updateOverlays(ctx, changes); err != nil { + return nil, nil, err + } + + // Re-create views whose definition may have changed. + // + // checkViews controls whether to re-evaluate view definitions when + // collecting views below. Any addition or deletion of a go.mod or go.work + // file may have affected the definition of the view. + checkViews := false + + for _, c := range changes { + if isGoMod(c.URI) || isGoWork(c.URI) { + // Change, InvalidateMetadata, and UnknownFileAction actions do not cause + // us to re-evaluate views. + redoViews := (c.Action != source.Change && + c.Action != source.InvalidateMetadata && + c.Action != source.UnknownFileAction) + + if redoViews { + checkViews = true + break + } + } + } + + if checkViews { + for _, view := range s.views { + // TODO(rfindley): can we avoid running the go command (go env) + // synchronously to change processing? Can we assume that the env did not + // change, and derive go.work using a combination of the configured + // GOWORK value and filesystem? + info, err := s.getWorkspaceInformation(ctx, view.folder, view.Options()) + if err != nil { + // Catastrophic failure, equivalent to a failure of session + // initialization and therefore should almost never happen. One + // scenario where this failure mode could occur is if some file + // permissions have changed preventing us from reading go.mod + // files. + // + // TODO(rfindley): consider surfacing this error more loudly. We + // could report a bug, but it's not really a bug. + event.Error(ctx, "fetching workspace information", err) + } + + if info != view.workspaceInformation { + _, err := s.updateViewLocked(ctx, view, view.Options()) + if err != nil { + // More catastrophic failure. The view may or may not still exist. + // The best we can do is log and move on. + event.Error(ctx, "recreating view", err) + } + } + } + } + + // Collect information about views affected by these changes. + views := make(map[*View]map[span.URI]*fileChange) + affectedViews := map[span.URI][]*View{} + // forceReloadMetadata records whether any change is the magic + // source.InvalidateMetadata action. + forceReloadMetadata := false + for _, c := range changes { + if c.Action == source.InvalidateMetadata { + forceReloadMetadata = true + } + // Build the list of affected views. + var changedViews []*View + for _, view := range s.views { + // Don't propagate changes that are outside of the view's scope + // or knowledge. + if !view.relevantChange(c) { + continue + } + changedViews = append(changedViews, view) + } + // If the change is not relevant to any view, but the change is + // happening in the editor, assign it the most closely matching view. + if len(changedViews) == 0 { + if c.OnDisk { + continue + } + bestView, err := s.viewOfLocked(c.URI) + if err != nil { + return nil, nil, err + } + changedViews = append(changedViews, bestView) + } + affectedViews[c.URI] = changedViews + + isUnchanged := c.Action == source.Open || c.Action == source.Close + + // Apply the changes to all affected views. + for _, view := range changedViews { + // Make sure that the file is added to the view's seenFiles set. + view.markKnown(c.URI) + if _, ok := views[view]; !ok { + views[view] = make(map[span.URI]*fileChange) + } + fh, err := s.GetFile(ctx, c.URI) + if err != nil { + return nil, nil, err + } + content, err := fh.Read() + if err != nil { + // Ignore the error: the file may be deleted. + content = nil + } + views[view][c.URI] = &fileChange{ + content: content, + exists: err == nil, + fileHandle: fh, + isUnchanged: isUnchanged, + } + } + } + + var releases []func() + viewToSnapshot := map[*View]*snapshot{} + for view, changed := range views { + snapshot, release := view.invalidateContent(ctx, changed, forceReloadMetadata) + releases = append(releases, release) + viewToSnapshot[view] = snapshot + } + + // The release function is called when the + // returned URIs no longer need to be valid. + release := func() { + for _, release := range releases { + release() + } + } + + // We only want to diagnose each changed file once, in the view to which + // it "most" belongs. We do this by picking the best view for each URI, + // and then aggregating the set of snapshots and their URIs (to avoid + // diagnosing the same snapshot multiple times). + snapshotURIs := map[source.Snapshot][]span.URI{} + for _, mod := range changes { + viewSlice, ok := affectedViews[mod.URI] + if !ok || len(viewSlice) == 0 { + continue + } + view := bestViewForURI(mod.URI, viewSlice) + snapshot, ok := viewToSnapshot[view] + if !ok { + panic(fmt.Sprintf("no snapshot for view %s", view.Folder())) + } + snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI) + } + + return snapshotURIs, release, nil +} + +// ExpandModificationsToDirectories returns the set of changes with the +// directory changes removed and expanded to include all of the files in +// the directory. +func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification { + var snapshots []*snapshot + s.viewMu.Lock() + for _, v := range s.views { + snapshot, release, err := v.getSnapshot() + if err != nil { + continue // view is shut down; continue with others + } + defer release() + snapshots = append(snapshots, snapshot) + } + s.viewMu.Unlock() + + knownDirs := knownDirectories(ctx, snapshots) + defer knownDirs.Destroy() + + var result []source.FileModification + for _, c := range changes { + if !knownDirs.Contains(c.URI) { + result = append(result, c) + continue + } + affectedFiles := knownFilesInDir(ctx, snapshots, c.URI) + var fileChanges []source.FileModification + for uri := range affectedFiles { + fileChanges = append(fileChanges, source.FileModification{ + URI: uri, + Action: c.Action, + LanguageID: "", + OnDisk: c.OnDisk, + // changes to directories cannot include text or versions + }) + } + result = append(result, fileChanges...) + } + return result +} + +// knownDirectories returns all of the directories known to the given +// snapshots, including workspace directories and their subdirectories. +// It is responsibility of the caller to destroy the returned set. +func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet { + result := newKnownDirsSet() + for _, snapshot := range snapshots { + dirs := snapshot.dirs(ctx) + for _, dir := range dirs { + result.Insert(dir) + } + knownSubdirs := snapshot.getKnownSubdirs(dirs) + result.SetAll(knownSubdirs) + knownSubdirs.Destroy() + } + return result +} + +// knownFilesInDir returns the files known to the snapshots in the session. +// It does not respect symlinks. +func knownFilesInDir(ctx context.Context, snapshots []*snapshot, dir span.URI) map[span.URI]struct{} { + files := map[span.URI]struct{}{} + + for _, snapshot := range snapshots { + for _, uri := range snapshot.knownFilesInDir(ctx, dir) { + files[uri] = struct{}{} + } + } + return files +} + +// Precondition: caller holds s.viewMu lock. +// TODO(rfindley): move this to fs_overlay.go. +func (fs *overlayFS) updateOverlays(ctx context.Context, changes []source.FileModification) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + for _, c := range changes { + // Don't update overlays for metadata invalidations. + if c.Action == source.InvalidateMetadata { + continue + } + + o, ok := fs.overlays[c.URI] + + // If the file is not opened in an overlay and the change is on disk, + // there's no need to update an overlay. If there is an overlay, we + // may need to update the overlay's saved value. + if !ok && c.OnDisk { + continue + } + + // Determine the file kind on open, otherwise, assume it has been cached. + var kind source.FileKind + switch c.Action { + case source.Open: + kind = source.FileKindForLang(c.LanguageID) + default: + if !ok { + return fmt.Errorf("updateOverlays: modifying unopened overlay %v", c.URI) + } + kind = o.kind + } + + // Closing a file just deletes its overlay. + if c.Action == source.Close { + delete(fs.overlays, c.URI) + continue + } + + // If the file is on disk, check if its content is the same as in the + // overlay. Saves and on-disk file changes don't come with the file's + // content. + text := c.Text + if text == nil && (c.Action == source.Save || c.OnDisk) { + if !ok { + return fmt.Errorf("no known content for overlay for %s", c.Action) + } + text = o.content + } + // On-disk changes don't come with versions. + version := c.Version + if c.OnDisk || c.Action == source.Save { + version = o.version + } + hash := source.HashOf(text) + var sameContentOnDisk bool + switch c.Action { + case source.Delete: + // Do nothing. sameContentOnDisk should be false. + case source.Save: + // Make sure the version and content (if present) is the same. + if false && o.version != version { // Client no longer sends the version + return fmt.Errorf("updateOverlays: saving %s at version %v, currently at %v", c.URI, c.Version, o.version) + } + if c.Text != nil && o.hash != hash { + return fmt.Errorf("updateOverlays: overlay %s changed on save", c.URI) + } + sameContentOnDisk = true + default: + fh, err := fs.delegate.GetFile(ctx, c.URI) + if err != nil { + return err + } + _, readErr := fh.Read() + sameContentOnDisk = (readErr == nil && fh.FileIdentity().Hash == hash) + } + o = &Overlay{ + uri: c.URI, + version: version, + content: text, + kind: kind, + hash: hash, + saved: sameContentOnDisk, + } + + // NOTE: previous versions of this code checked here that the overlay had a + // view and file kind (but we don't know why). + + fs.overlays[c.URI] = o + } + + return nil +} + +// FileWatchingGlobPatterns returns glob patterns to watch every directory +// known by the view. For views within a module, this is the module root, +// any directory in the module root, and any replace targets. +func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { + s.viewMu.Lock() + defer s.viewMu.Unlock() + patterns := map[string]struct{}{} + for _, view := range s.views { + snapshot, release, err := view.getSnapshot() + if err != nil { + continue // view is shut down; continue with others + } + for k, v := range snapshot.fileWatchingGlobPatterns(ctx) { + patterns[k] = v + } + release() + } + return patterns +} diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go new file mode 100644 index 00000000000..398240aa83c --- /dev/null +++ b/gopls/internal/lsp/cache/snapshot.go @@ -0,0 +1,2248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "context" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unsafe" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/persistent" + "golang.org/x/tools/internal/typesinternal" +) + +type snapshot struct { + sequenceID uint64 + globalID source.GlobalSnapshotID + + // TODO(rfindley): the snapshot holding a reference to the view poses + // lifecycle problems: a view may be shut down and waiting for work + // associated with this snapshot to complete. While most accesses of the view + // are benign (options or workspace information), this is not formalized and + // it is wrong for the snapshot to use a shutdown view. + // + // Fix this by passing options and workspace information to the snapshot, + // both of which should be immutable for the snapshot. + view *View + + cancel func() + backgroundCtx context.Context + + store *memoize.Store // cache of handles shared by all snapshots + + refcount sync.WaitGroup // number of references + destroyedBy *string // atomically set to non-nil in Destroy once refcount = 0 + + // initialized reports whether the snapshot has been initialized. Concurrent + // initialization is guarded by the view.initializationSema. Each snapshot is + // initialized at most once: concurrent initialization is guarded by + // view.initializationSema. + initialized bool + // initializedErr holds the last error resulting from initialization. If + // initialization fails, we only retry when the the workspace modules change, + // to avoid too many go/packages calls. + initializedErr *source.CriticalError + + // mu guards all of the maps in the snapshot, as well as the builtin URI. + mu sync.Mutex + + // builtin pins the AST and package for builtin.go in memory. + builtin span.URI + + // meta holds loaded metadata. + // + // meta is guarded by mu, but the metadataGraph itself is immutable. + // TODO(rfindley): in many places we hold mu while operating on meta, even + // though we only need to hold mu while reading the pointer. + meta *metadataGraph + + // files maps file URIs to their corresponding FileHandles. + // It may invalidated when a file's content changes. + files filesMap + + // parsedGoFiles maps a parseKey to the handle of the future result of parsing it. + parsedGoFiles *persistent.Map // from parseKey to *memoize.Promise[parseGoResult] + + // parseKeysByURI records the set of keys of parsedGoFiles that + // need to be invalidated for each URI. + // TODO(adonovan): opt: parseKey = ParseMode + URI, so this could + // be just a set of ParseModes, or we could loop over AllParseModes. + parseKeysByURI parseKeysByURIMap + + // symbolizeHandles maps each file URI to a handle for the future + // result of computing the symbols declared in that file. + symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult] + + // packages maps a packageKey to a *packageHandle. + // It may be invalidated when a file's content changes. + // + // Invariants to preserve: + // - packages.Get(id).meta == meta.metadata[id] for all ids + // - if a package is in packages, then all of its dependencies should also + // be in packages, unless there is a missing import + packages *persistent.Map // from packageKey to *packageHandle + + // isActivePackageCache maps package ID to the cached value if it is active or not. + // It may be invalidated when metadata changes or a new file is opened or closed. + isActivePackageCache isActivePackageCacheMap + + // analyses maps an analysisKey (which identifies a package + // and a set of analyzers) to the handle for the future result + // of loading the package and analyzing it. + analyses *persistent.Map // from analysisKey to analysisPromise + + // workspacePackages contains the workspace's packages, which are loaded + // when the view is created. + workspacePackages map[PackageID]PackagePath + + // shouldLoad tracks packages that need to be reloaded, mapping a PackageID + // to the package paths that should be used to reload it + // + // When we try to load a package, we clear it from the shouldLoad map + // regardless of whether the load succeeded, to prevent endless loads. + shouldLoad map[PackageID][]PackagePath + + // unloadableFiles keeps track of files that we've failed to load. + unloadableFiles map[span.URI]struct{} + + // parseModHandles keeps track of any parseModHandles for the snapshot. + // The handles need not refer to only the view's go.mod file. + parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult] + + // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. + // The handles need not refer to only the view's go.work file. + parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult] + + // Preserve go.mod-related handles to avoid garbage-collecting the results + // of various calls to the go command. The handles need not refer to only + // the view's go.mod file. + modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult] + modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult] + modVulnHandles *persistent.Map // from span.URI to *memoize.Promise[modVulnResult] + + // knownSubdirs is the set of subdirectories in the workspace, used to + // create glob patterns for file watching. + knownSubdirs knownDirsSet + knownSubdirsPatternCache string + // unprocessedSubdirChanges are any changes that might affect the set of + // subdirectories in the workspace. They are not reflected to knownSubdirs + // during the snapshot cloning step as it can slow down cloning. + unprocessedSubdirChanges []*fileChange + + // workspaceModFiles holds the set of mod files active in this snapshot. + // + // This is either empty, a single entry for the workspace go.mod file, or the + // set of mod files used by the workspace go.work file. + // + // This set is immutable inside the snapshot, and therefore is not guarded by mu. + workspaceModFiles map[span.URI]struct{} + workspaceModFilesErr error // error encountered computing workspaceModFiles +} + +var globalSnapshotID uint64 + +func nextSnapshotID() source.GlobalSnapshotID { + return source.GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1)) +} + +var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted + +// Acquire prevents the snapshot from being destroyed until the returned function is called. +// +// (s.Acquire().release() could instead be expressed as a pair of +// method calls s.IncRef(); s.DecRef(). The latter has the advantage +// that the DecRefs are fungible and don't require holding anything in +// addition to the refcounted object s, but paradoxically that is also +// an advantage of the current approach, which forces the caller to +// consider the release function at every stage, making a reference +// leak more obvious.) +func (s *snapshot) Acquire() func() { + type uP = unsafe.Pointer + if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil { + log.Panicf("%d: acquire() after Destroy(%q)", s.globalID, *(*string)(destroyedBy)) + } + s.refcount.Add(1) + return s.refcount.Done +} + +func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) { + return p.Get(ctx, s) +} + +// destroy waits for all leases on the snapshot to expire then releases +// any resources (reference counts and files) associated with it. +// Snapshots being destroyed can be awaited using v.destroyWG. +// +// TODO(adonovan): move this logic into the release function returned +// by Acquire when the reference count becomes zero. (This would cost +// us the destroyedBy debug info, unless we add it to the signature of +// memoize.RefCounted.Acquire.) +// +// The destroyedBy argument is used for debugging. +// +// v.snapshotMu must be held while calling this function, in order to preserve +// the invariants described by the the docstring for v.snapshot. +func (v *View) destroy(s *snapshot, destroyedBy string) { + v.snapshotWG.Add(1) + go func() { + defer v.snapshotWG.Done() + s.destroy(destroyedBy) + }() +} + +func (s *snapshot) destroy(destroyedBy string) { + // Wait for all leases to end before commencing destruction. + s.refcount.Wait() + + // Report bad state as a debugging aid. + // Not foolproof: another thread could acquire() at this moment. + type uP = unsafe.Pointer // looking forward to generics... + if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil { + log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.globalID, destroyedBy, *(*string)(old)) + } + + s.packages.Destroy() + s.isActivePackageCache.Destroy() + s.analyses.Destroy() + s.files.Destroy() + s.parsedGoFiles.Destroy() + s.parseKeysByURI.Destroy() + s.knownSubdirs.Destroy() + s.symbolizeHandles.Destroy() + s.parseModHandles.Destroy() + s.parseWorkHandles.Destroy() + s.modTidyHandles.Destroy() + s.modVulnHandles.Destroy() + s.modWhyHandles.Destroy() +} + +func (s *snapshot) SequenceID() uint64 { + return s.sequenceID +} + +func (s *snapshot) GlobalID() source.GlobalSnapshotID { + return s.globalID +} + +func (s *snapshot) View() source.View { + return s.view +} + +func (s *snapshot) BackgroundContext() context.Context { + return s.backgroundCtx +} + +func (s *snapshot) FileSet() *token.FileSet { + return s.view.fset +} + +func (s *snapshot) ModFiles() []span.URI { + var uris []span.URI + for modURI := range s.workspaceModFiles { + uris = append(uris, modURI) + } + return uris +} + +func (s *snapshot) WorkFile() span.URI { + return s.view.effectiveGOWORK() +} + +func (s *snapshot) Templates() map[span.URI]source.FileHandle { + s.mu.Lock() + defer s.mu.Unlock() + + tmpls := map[span.URI]source.FileHandle{} + s.files.Range(func(k span.URI, fh source.FileHandle) { + if s.view.FileKind(fh) == source.Tmpl { + tmpls[k] = fh + } + }) + return tmpls +} + +func (s *snapshot) ValidBuildConfiguration() bool { + // Since we only really understand the `go` command, if the user has a + // different GOPACKAGESDRIVER, assume that their configuration is valid. + if s.view.hasGopackagesDriver { + return true + } + // Check if the user is working within a module or if we have found + // multiple modules in the workspace. + if len(s.workspaceModFiles) > 0 { + return true + } + // The user may have a multiple directories in their GOPATH. + // Check if the workspace is within any of them. + // TODO(rfindley): this should probably be subject to "if GO111MODULES = off {...}". + for _, gp := range filepath.SplitList(s.view.gopath) { + if source.InDir(filepath.Join(gp, "src"), s.view.folder.Filename()) { + return true + } + } + return false +} + +// moduleMode reports whether the current snapshot uses Go modules. +// +// From https://go.dev/ref/mod, module mode is active if either of the +// following hold: +// - GO111MODULE=on +// - GO111MODULE=auto and we are inside a module or have a GOWORK value. +// +// Additionally, this method returns false if GOPACKAGESDRIVER is set. +// +// TODO(rfindley): use this more widely. +func (s *snapshot) moduleMode() bool { + // Since we only really understand the `go` command, if the user has a + // different GOPACKAGESDRIVER, assume that their configuration is valid. + if s.view.hasGopackagesDriver { + return false + } + + switch s.view.effectiveGO111MODULE() { + case on: + return true + case off: + return false + default: + return len(s.workspaceModFiles) > 0 || s.view.gowork != "" + } +} + +// workspaceMode describes the way in which the snapshot's workspace should +// be loaded. +// +// TODO(rfindley): remove this, in favor of specific methods. +func (s *snapshot) workspaceMode() workspaceMode { + var mode workspaceMode + + // If the view has an invalid configuration, don't build the workspace + // module. + validBuildConfiguration := s.ValidBuildConfiguration() + if !validBuildConfiguration { + return mode + } + // If the view is not in a module and contains no modules, but still has a + // valid workspace configuration, do not create the workspace module. + // It could be using GOPATH or a different build system entirely. + if len(s.workspaceModFiles) == 0 && validBuildConfiguration { + return mode + } + mode |= moduleMode + options := s.view.Options() + if options.TempModfile { + mode |= tempModfile + } + return mode +} + +// config returns the configuration used for the snapshot's interaction with +// the go/packages API. It uses the given working directory. +// +// TODO(rstambler): go/packages requires that we do not provide overlays for +// multiple modules in on config, so buildOverlay needs to filter overlays by +// module. +func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config { + s.view.optionsMu.Lock() + verboseOutput := s.view.options.VerboseOutput + s.view.optionsMu.Unlock() + + cfg := &packages.Config{ + Context: ctx, + Dir: inv.WorkingDir, + Env: inv.Env, + BuildFlags: inv.BuildFlags, + Mode: packages.NeedName | + packages.NeedFiles | + packages.NeedCompiledGoFiles | + packages.NeedImports | + packages.NeedDeps | + packages.NeedTypesSizes | + packages.NeedModule | + packages.LoadMode(packagesinternal.DepsErrors) | + packages.LoadMode(packagesinternal.ForTest), + Fset: nil, // we do our own parsing + Overlay: s.buildOverlay(), + ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) { + panic("go/packages must not be used to parse files") + }, + Logf: func(format string, args ...interface{}) { + if verboseOutput { + event.Log(ctx, fmt.Sprintf(format, args...)) + } + }, + Tests: true, + } + packagesinternal.SetModFile(cfg, inv.ModFile) + packagesinternal.SetModFlag(cfg, inv.ModFlag) + // We want to type check cgo code if go/types supports it. + if typesinternal.SetUsesCgo(&types.Config{}) { + cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo) + } + packagesinternal.SetGoCmdRunner(cfg, s.view.gocmdRunner) + return cfg +} + +func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) { + _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) + if err != nil { + return nil, err + } + defer cleanup() + + return s.view.gocmdRunner.Run(ctx, *inv) +} + +func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error { + _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) + if err != nil { + return err + } + defer cleanup() + return s.view.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr) +} + +func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) { + var flags source.InvocationFlags + if s.workspaceMode()&tempModfile != 0 { + flags = source.WriteTemporaryModFile + } else { + flags = source.Normal + } + if allowNetwork { + flags |= source.AllowNetwork + } + tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd}) + if err != nil { + return false, nil, nil, err + } + defer cleanup() + invoke := func(args ...string) (*bytes.Buffer, error) { + inv.Verb = args[0] + inv.Args = args[1:] + return s.view.gocmdRunner.Run(ctx, *inv) + } + if err := run(invoke); err != nil { + return false, nil, nil, err + } + if flags.Mode() != source.WriteTemporaryModFile { + return false, nil, nil, nil + } + var modBytes, sumBytes []byte + modBytes, err = ioutil.ReadFile(tmpURI.Filename()) + if err != nil && !os.IsNotExist(err) { + return false, nil, nil, err + } + sumBytes, err = ioutil.ReadFile(strings.TrimSuffix(tmpURI.Filename(), ".mod") + ".sum") + if err != nil && !os.IsNotExist(err) { + return false, nil, nil, err + } + return true, modBytes, sumBytes, nil +} + +// goCommandInvocation populates inv with configuration for running go commands on the snapshot. +// +// TODO(rfindley): refactor this function to compose the required configuration +// explicitly, rather than implicitly deriving it from flags and inv. +// +// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but +// it used only after call to tempModFile. Clarify that it is only +// non-nil on success. +func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) { + s.view.optionsMu.Lock() + allowModfileModificationOption := s.view.options.AllowModfileModifications + allowNetworkOption := s.view.options.AllowImplicitNetworkAccess + + // TODO(rfindley): this is very hard to follow, and may not even be doing the + // right thing: should inv.Env really trample view.options? Do we ever invoke + // this with a non-empty inv.Env? + // + // We should refactor to make it clearer that the correct env is being used. + inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.GO111MODULE()) + inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...) + s.view.optionsMu.Unlock() + cleanup = func() {} // fallback + + // All logic below is for module mode. + if s.workspaceMode()&moduleMode == 0 { + return "", inv, cleanup, nil + } + + mode, allowNetwork := flags.Mode(), flags.AllowNetwork() + if !allowNetwork && !allowNetworkOption { + inv.Env = append(inv.Env, "GOPROXY=off") + } + + // What follows is rather complicated logic for how to actually run the go + // command. A word of warning: this is the result of various incremental + // features added to gopls, and varying behavior of the Go command across Go + // versions. It can surely be cleaned up significantly, but tread carefully. + // + // Roughly speaking we need to resolve four things: + // - the working directory. + // - the -mod flag + // - the -modfile flag + // + // These are dependent on a number of factors: whether we need to run in a + // synthetic workspace, whether flags are supported at the current go + // version, and what we're actually trying to achieve (the + // source.InvocationFlags). + + var modURI span.URI + // Select the module context to use. + // If we're type checking, we need to use the workspace context, meaning + // the main (workspace) module. Otherwise, we should use the module for + // the passed-in working dir. + if mode == source.LoadWorkspace { + if s.view.effectiveGOWORK() == "" && s.view.gomod != "" { + modURI = s.view.gomod + } + } else { + modURI = s.GoModForFile(span.URIFromPath(inv.WorkingDir)) + } + + var modContent []byte + if modURI != "" { + modFH, err := s.GetFile(ctx, modURI) + if err != nil { + return "", nil, cleanup, err + } + modContent, err = modFH.Read() + if err != nil { + return "", nil, cleanup, err + } + } + + // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall + // back on the default behavior of vendorEnabled with an empty modURI. Figure + // out what is correct here and implement it explicitly. + vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent) + if err != nil { + return "", nil, cleanup, err + } + + const mutableModFlag = "mod" + // If the mod flag isn't set, populate it based on the mode and workspace. + // TODO(rfindley): this doesn't make sense if we're not in module mode + if inv.ModFlag == "" { + switch mode { + case source.LoadWorkspace, source.Normal: + if vendorEnabled { + inv.ModFlag = "vendor" + } else if !allowModfileModificationOption { + inv.ModFlag = "readonly" + } else { + inv.ModFlag = mutableModFlag + } + case source.WriteTemporaryModFile: + inv.ModFlag = mutableModFlag + // -mod must be readonly when using go.work files - see issue #48941 + inv.Env = append(inv.Env, "GOWORK=off") + } + } + + // Only use a temp mod file if the modfile can actually be mutated. + needTempMod := inv.ModFlag == mutableModFlag + useTempMod := s.workspaceMode()&tempModfile != 0 + if needTempMod && !useTempMod { + return "", nil, cleanup, source.ErrTmpModfileUnsupported + } + + // We should use -modfile if: + // - the workspace mode supports it + // - we're using a go.work file on go1.18+, or we need a temp mod file (for + // example, if running go mod tidy in a go.work workspace) + // + // TODO(rfindley): this is very hard to follow. Refactor. + if !needTempMod && s.view.gowork != "" { + // Since we're running in the workspace root, the go command will resolve GOWORK automatically. + } else if useTempMod { + if modURI == "" { + return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) + } + modFH, err := s.GetFile(ctx, modURI) + if err != nil { + return "", nil, cleanup, err + } + // Use the go.sum if it happens to be available. + gosum := s.goSum(ctx, modURI) + tmpURI, cleanup, err = tempModFile(modFH, gosum) + if err != nil { + return "", nil, cleanup, err + } + inv.ModFile = tmpURI.Filename() + } + + return tmpURI, inv, cleanup, nil +} + +func (s *snapshot) buildOverlay() map[string][]byte { + s.mu.Lock() + defer s.mu.Unlock() + + overlays := make(map[string][]byte) + s.files.Range(func(uri span.URI, fh source.FileHandle) { + overlay, ok := fh.(*Overlay) + if !ok { + return + } + if overlay.saved { + return + } + // TODO(rstambler): Make sure not to send overlays outside of the current view. + overlays[uri.Filename()] = overlay.content + }) + return overlays +} + +// TypeCheck type-checks the specified packages in the given mode. +// +// The resulting packages slice always contains len(ids) entries, though some +// of them may be nil if (and only if) the resulting error is non-nil. +// +// An error is returned if any of the packages fail to type-check. This is +// different from having type-checking errors: a failure to type-check +// indicates context cancellation or otherwise significant failure to perform +// the type-checking operation. +func (s *snapshot) TypeCheck(ctx context.Context, mode source.TypecheckMode, ids ...PackageID) ([]source.Package, error) { + // Build all the handles... + phs := make([]*packageHandle, len(ids)) + pkgs := make([]source.Package, len(ids)) + var firstErr error + for i, id := range ids { + parseMode := source.ParseFull + if mode == source.TypecheckWorkspace { + parseMode = s.workspaceParseMode(id) + } + + ph, err := s.buildPackageHandle(ctx, id, parseMode) + if err != nil { + if firstErr == nil { + firstErr = err + } + if ctx.Err() != nil { + return pkgs, firstErr + } + continue + } + phs[i] = ph + } + + // ...then await them all. + for i, ph := range phs { + if ph == nil { + continue + } + p, err := ph.await(ctx, s) + if err != nil { + if firstErr == nil { + firstErr = err + } + if ctx.Err() != nil { + return pkgs, firstErr + } + continue + } + pkgs[i] = newPackage(ph.m, p) + } + + return pkgs, firstErr +} + +func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source.Metadata, error) { + s.mu.Lock() + + // Start with the set of package associations derived from the last load. + ids := s.meta.ids[uri] + + shouldLoad := false // whether any packages containing uri are marked 'shouldLoad' + for _, id := range ids { + if len(s.shouldLoad[id]) > 0 { + shouldLoad = true + } + } + + // Check if uri is known to be unloadable. + _, unloadable := s.unloadableFiles[uri] + + s.mu.Unlock() + + // Reload if loading is likely to improve the package associations for uri: + // - uri is not contained in any valid packages + // - ...or one of the packages containing uri is marked 'shouldLoad' + // - ...but uri is not unloadable + if (shouldLoad || len(ids) == 0) && !unloadable { + scope := fileLoadScope(uri) + err := s.load(ctx, false, scope) + + // Guard against failed loads due to context cancellation. + // + // Return the context error here as the current operation is no longer + // valid. + if ctxErr := ctx.Err(); ctxErr != nil { + return nil, ctxErr + } + + // We must clear scopes after loading. + // + // TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded + // packages as loaded. We could do this from snapshot.load and avoid + // raciness. + s.clearShouldLoad(scope) + + // Don't return an error here, as we may still return stale IDs. + // Furthermore, the result of MetadataForFile should be consistent upon + // subsequent calls, even if the file is marked as unloadable. + if err != nil && !errors.Is(err, errNoPackages) { + event.Error(ctx, "MetadataForFile", err) + } + } + + // Retrieve the metadata. + s.mu.Lock() + defer s.mu.Unlock() + ids = s.meta.ids[uri] + metas := make([]*source.Metadata, len(ids)) + for i, id := range ids { + metas[i] = s.meta.metadata[id] + if metas[i] == nil { + panic("nil metadata") + } + } + // Metadata is only ever added by loading, + // so if we get here and still have + // no IDs, uri is unloadable. + if !unloadable && len(ids) == 0 { + s.unloadableFiles[uri] = struct{}{} + } + + // Sort packages "narrowest" to "widest" (in practice: non-tests before tests). + sort.Slice(metas, func(i, j int) bool { + return len(metas[i].CompiledGoFiles) < len(metas[j].CompiledGoFiles) + }) + + return metas, nil +} + +func (s *snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*source.Metadata, error) { + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } + s.mu.Lock() + meta := s.meta + s.mu.Unlock() + + var rdeps map[PackageID]*source.Metadata + if transitive { + rdeps = meta.reverseReflexiveTransitiveClosure(id) + + // Remove the original package ID from the map. + // (Callers all want irreflexivity but it's easier + // to compute reflexively then subtract.) + delete(rdeps, id) + + } else { + // direct reverse dependencies + rdeps = make(map[PackageID]*source.Metadata) + for _, rdepID := range meta.importedBy[id] { + if rdep := meta.metadata[rdepID]; rdep != nil { + rdeps[rdepID] = rdep + } + } + } + + return rdeps, nil +} + +func (s *snapshot) workspaceMetadata() (meta []*source.Metadata) { + s.mu.Lock() + defer s.mu.Unlock() + + for id := range s.workspacePackages { + meta = append(meta, s.meta.metadata[id]) + } + return meta +} + +func (s *snapshot) isActiveLocked(id PackageID) (active bool) { + if seen, ok := s.isActivePackageCache.Get(id); ok { + return seen + } + defer func() { + s.isActivePackageCache.Set(id, active) + }() + m, ok := s.meta.metadata[id] + if !ok { + return false + } + for _, cgf := range m.CompiledGoFiles { + if s.isOpenLocked(cgf) { + return true + } + } + // TODO(rfindley): it looks incorrect that we don't also check GoFiles here. + // If a CGo file is open, we want to consider the package active. + for _, dep := range m.DepsByPkgPath { + if s.isActiveLocked(dep) { + return true + } + } + return false +} + +func (s *snapshot) resetIsActivePackageLocked() { + s.isActivePackageCache.Destroy() + s.isActivePackageCache = newIsActivePackageCacheMap() +} + +const fileExtensions = "go,mod,sum,work" + +func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { + extensions := fileExtensions + for _, ext := range s.View().Options().TemplateExtensions { + extensions += "," + ext + } + // Work-around microsoft/vscode#100870 by making sure that we are, + // at least, watching the user's entire workspace. This will still be + // applied to every folder in the workspace. + patterns := map[string]struct{}{ + fmt.Sprintf("**/*.{%s}", extensions): {}, + } + + // If GOWORK is outside the folder, ensure we are watching it. + gowork := s.view.effectiveGOWORK() + if gowork != "" && !source.InDir(s.view.folder.Filename(), gowork.Filename()) { + patterns[gowork.Filename()] = struct{}{} + } + + // Add a pattern for each Go module in the workspace that is not within the view. + dirs := s.dirs(ctx) + for _, dir := range dirs { + dirName := dir.Filename() + + // If the directory is within the view's folder, we're already watching + // it with the first pattern above. + if source.InDir(s.view.folder.Filename(), dirName) { + continue + } + // TODO(rstambler): If microsoft/vscode#3025 is resolved before + // microsoft/vscode#101042, we will need a work-around for Windows + // drive letter casing. + patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{} + } + + // Some clients do not send notifications for changes to directories that + // contain Go code (golang/go#42348). To handle this, explicitly watch all + // of the directories in the workspace. We find them by adding the + // directories of every file in the snapshot's workspace directories. + // There may be thousands. + if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" { + patterns[pattern] = struct{}{} + } + + return patterns +} + +func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string { + s.mu.Lock() + defer s.mu.Unlock() + + // First, process any pending changes and update the set of known + // subdirectories. + // It may change list of known subdirs and therefore invalidate the cache. + s.applyKnownSubdirsChangesLocked(wsDirs) + + if s.knownSubdirsPatternCache == "" { + var builder strings.Builder + s.knownSubdirs.Range(func(uri span.URI) { + if builder.Len() == 0 { + builder.WriteString("{") + } else { + builder.WriteString(",") + } + builder.WriteString(uri.Filename()) + }) + if builder.Len() > 0 { + builder.WriteString("}") + s.knownSubdirsPatternCache = builder.String() + } + } + + return s.knownSubdirsPatternCache +} + +// collectAllKnownSubdirs collects all of the subdirectories within the +// snapshot's workspace directories. None of the workspace directories are +// included. +func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { + dirs := s.dirs(ctx) + + s.mu.Lock() + defer s.mu.Unlock() + + s.knownSubdirs.Destroy() + s.knownSubdirs = newKnownDirsSet() + s.knownSubdirsPatternCache = "" + s.files.Range(func(uri span.URI, fh source.FileHandle) { + s.addKnownSubdirLocked(uri, dirs) + }) +} + +func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet { + s.mu.Lock() + defer s.mu.Unlock() + + // First, process any pending changes and update the set of known + // subdirectories. + s.applyKnownSubdirsChangesLocked(wsDirs) + + return s.knownSubdirs.Clone() +} + +func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) { + for _, c := range s.unprocessedSubdirChanges { + if c.isUnchanged { + continue + } + if !c.exists { + s.removeKnownSubdirLocked(c.fileHandle.URI()) + } else { + s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs) + } + } + s.unprocessedSubdirChanges = nil +} + +func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { + dir := filepath.Dir(uri.Filename()) + // First check if the directory is already known, because then we can + // return early. + if s.knownSubdirs.Contains(span.URIFromPath(dir)) { + return + } + var matched span.URI + for _, wsDir := range dirs { + if source.InDir(wsDir.Filename(), dir) { + matched = wsDir + break + } + } + // Don't watch any directory outside of the workspace directories. + if matched == "" { + return + } + for { + if dir == "" || dir == matched.Filename() { + break + } + uri := span.URIFromPath(dir) + if s.knownSubdirs.Contains(uri) { + break + } + s.knownSubdirs.Insert(uri) + dir = filepath.Dir(dir) + s.knownSubdirsPatternCache = "" + } +} + +func (s *snapshot) removeKnownSubdirLocked(uri span.URI) { + dir := filepath.Dir(uri.Filename()) + for dir != "" { + uri := span.URIFromPath(dir) + if !s.knownSubdirs.Contains(uri) { + break + } + if info, _ := os.Stat(dir); info == nil { + s.knownSubdirs.Remove(uri) + s.knownSubdirsPatternCache = "" + } + dir = filepath.Dir(dir) + } +} + +// knownFilesInDir returns the files known to the given snapshot that are in +// the given directory. It does not respect symlinks. +func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI { + var files []span.URI + s.mu.Lock() + defer s.mu.Unlock() + + s.files.Range(func(uri span.URI, fh source.FileHandle) { + if source.InDir(dir.Filename(), uri.Filename()) { + files = append(files, uri) + } + }) + return files +} + +func (s *snapshot) ActiveMetadata(ctx context.Context) ([]*source.Metadata, error) { + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } + + if s.view.Options().MemoryMode == source.ModeNormal { + return s.workspaceMetadata(), nil + } + + // ModeDegradeClosed + s.mu.Lock() + defer s.mu.Unlock() + var active []*source.Metadata + for id := range s.workspacePackages { + if s.isActiveLocked(id) { + active = append(active, s.Metadata(id)) + } + } + return active, nil +} + +// Symbols extracts and returns the symbols for each file in all the snapshot's views. +func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { + // Read the set of Go files out of the snapshot. + var goFiles []source.FileHandle + s.mu.Lock() + s.files.Range(func(uri span.URI, f source.FileHandle) { + if s.View().FileKind(f) == source.Go { + goFiles = append(goFiles, f) + } + }) + s.mu.Unlock() + + // Symbolize them in parallel. + var ( + group errgroup.Group + nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU + resultMu sync.Mutex + result = make(map[span.URI][]source.Symbol) + ) + group.SetLimit(nprocs) + for _, f := range goFiles { + f := f + group.Go(func() error { + symbols, err := s.symbolize(ctx, f) + if err != nil { + return err + } + resultMu.Lock() + result[f.URI()] = symbols + resultMu.Unlock() + return nil + }) + } + // Keep going on errors, but log the first failure. + // Partial results are better than no symbol results. + if err := group.Wait(); err != nil { + event.Error(ctx, "getting snapshot symbols", err) + } + return result +} + +func (s *snapshot) AllMetadata(ctx context.Context) ([]*source.Metadata, error) { + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } + + s.mu.Lock() + g := s.meta + s.mu.Unlock() + + meta := make([]*source.Metadata, 0, len(g.metadata)) + for _, m := range g.metadata { + meta = append(meta, m) + } + return meta, nil +} + +func (s *snapshot) CachedImportPaths(ctx context.Context) (map[PackagePath]*types.Package, error) { + // Don't reload workspace package metadata. + // This function is meant to only return currently cached information. + s.AwaitInitialized(ctx) + + s.mu.Lock() + defer s.mu.Unlock() + + pkgs := make(map[PackagePath]*syntaxPackage) + + // Find all cached packages that are imported a nonzero amount of time. + // + // TODO(rfindley): this is pre-existing behavior, and a test fails if we + // don't do the importCount filter, but why do we care if a package is + // imported a nonzero amount of times? + imported := make(map[PackagePath]bool) + s.packages.Range(func(_, v interface{}) { + ph := v.(*packageHandle) + for dep := range ph.m.DepsByPkgPath { + imported[dep] = true + } + if ph.m.Name == "main" { + return + } + pkg, err := ph.cached() + if err != nil { + return + } + if old, ok := pkgs[ph.m.PkgPath]; ok { + if len(pkg.compiledGoFiles) < len(old.compiledGoFiles) { + pkgs[ph.m.PkgPath] = pkg + } + } else { + pkgs[ph.m.PkgPath] = pkg + } + }) + results := make(map[PackagePath]*types.Package) + for pkgPath, pkg := range pkgs { + if imported[pkgPath] { + results[pkgPath] = pkg.types + } + } + + return results, nil +} + +// TODO(rfindley): clarify that this is only active modules. Or update to just +// use findRootPattern. +func (s *snapshot) GoModForFile(uri span.URI) span.URI { + return moduleForURI(s.workspaceModFiles, uri) +} + +func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { + var match span.URI + for modURI := range modFiles { + if !source.InDir(span.Dir(modURI).Filename(), uri.Filename()) { + continue + } + if len(modURI) > len(match) { + match = modURI + } + } + return match +} + +func (s *snapshot) Metadata(id PackageID) *source.Metadata { + s.mu.Lock() + defer s.mu.Unlock() + return s.meta.metadata[id] +} + +// clearShouldLoad clears package IDs that no longer need to be reloaded after +// scopes has been loaded. +func (s *snapshot) clearShouldLoad(scopes ...loadScope) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, scope := range scopes { + switch scope := scope.(type) { + case packageLoadScope: + scopePath := PackagePath(scope) + var toDelete []PackageID + for id, pkgPaths := range s.shouldLoad { + for _, pkgPath := range pkgPaths { + if pkgPath == scopePath { + toDelete = append(toDelete, id) + } + } + } + for _, id := range toDelete { + delete(s.shouldLoad, id) + } + case fileLoadScope: + uri := span.URI(scope) + ids := s.meta.ids[uri] + for _, id := range ids { + delete(s.shouldLoad, id) + } + } + } +} + +// noValidMetadataForURILocked reports whether there is any valid metadata for +// the given URI. +func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { + for _, id := range s.meta.ids[uri] { + if _, ok := s.meta.metadata[id]; ok { + return false + } + } + return true +} + +func (s *snapshot) isWorkspacePackage(id PackageID) bool { + s.mu.Lock() + defer s.mu.Unlock() + + _, ok := s.workspacePackages[id] + return ok +} + +func (s *snapshot) FindFile(uri span.URI) source.FileHandle { + s.view.markKnown(uri) + + s.mu.Lock() + defer s.mu.Unlock() + + result, _ := s.files.Get(uri) + return result +} + +// GetFile returns a File for the given URI. If the file is unknown it is added +// to the managed set. +// +// GetFile succeeds even if the file does not exist. A non-nil error return +// indicates some type of internal error, for example if ctx is cancelled. +func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { + s.view.markKnown(uri) + + s.mu.Lock() + defer s.mu.Unlock() + + if fh, ok := s.files.Get(uri); ok { + return fh, nil + } + + fh, err := s.view.fs.GetFile(ctx, uri) // read the file + if err != nil { + return nil, err + } + s.files.Set(uri, fh) + return fh, nil +} + +func (s *snapshot) IsOpen(uri span.URI) bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.isOpenLocked(uri) + +} + +func (s *snapshot) openFiles() []source.FileHandle { + s.mu.Lock() + defer s.mu.Unlock() + + var open []source.FileHandle + s.files.Range(func(uri span.URI, fh source.FileHandle) { + if isFileOpen(fh) { + open = append(open, fh) + } + }) + return open +} + +func (s *snapshot) isOpenLocked(uri span.URI) bool { + fh, _ := s.files.Get(uri) + return isFileOpen(fh) +} + +func isFileOpen(fh source.FileHandle) bool { + _, open := fh.(*Overlay) + return open +} + +func (s *snapshot) awaitLoaded(ctx context.Context) error { + loadErr := s.awaitLoadedAllErrors(ctx) + + // TODO(rfindley): eliminate this function as part of simplifying + // CriticalErrors. + if loadErr != nil { + return loadErr.MainError + } + return nil +} + +func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError { + // If we couldn't compute workspace mod files, then the load below is + // invalid. + // + // TODO(rfindley): is this a clear error to present to the user? + if s.workspaceModFilesErr != nil { + return &source.CriticalError{MainError: s.workspaceModFilesErr} + } + + loadErr := s.awaitLoadedAllErrors(ctx) + if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) { + return nil + } + + // Even if packages didn't fail to load, we still may want to show + // additional warnings. + if loadErr == nil { + active, _ := s.ActiveMetadata(ctx) + if msg := shouldShowAdHocPackagesWarning(s, active); msg != "" { + return &source.CriticalError{ + MainError: errors.New(msg), + } + } + // Even if workspace packages were returned, there still may be an error + // with the user's workspace layout. Workspace packages that only have the + // ID "command-line-arguments" are usually a symptom of a bad workspace + // configuration. + // + // This heuristic is path-dependent: we only get command-line-arguments + // packages when we've loaded using file scopes, which only occurs + // on-demand or via orphaned file reloading. + // + // TODO(rfindley): re-evaluate this heuristic. + if containsCommandLineArguments(active) { + err, diags := s.workspaceLayoutError(ctx) + if err != nil { + if ctx.Err() != nil { + return nil // see the API documentation for source.Snapshot + } + return &source.CriticalError{ + MainError: err, + Diagnostics: diags, + } + } + } + return nil + } + + if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") { + err, diags := s.workspaceLayoutError(ctx) + if err != nil { + if ctx.Err() != nil { + return nil // see the API documentation for source.Snapshot + } + return &source.CriticalError{ + MainError: err, + Diagnostics: diags, + } + } + } + return loadErr +} + +// A portion of this text is expected by TestBrokenWorkspace_OutsideModule. +const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src. +If you are using modules, please open your editor to a directory in your module. +If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.` + +func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, active []*source.Metadata) string { + if !snapshot.ValidBuildConfiguration() { + for _, m := range active { + // A blank entry in DepsByImpPath + // indicates a missing dependency. + for _, importID := range m.DepsByImpPath { + if importID == "" { + return adHocPackagesWarning + } + } + } + } + return "" +} + +func containsCommandLineArguments(metas []*source.Metadata) bool { + for _, m := range metas { + if source.IsCommandLineArguments(m.ID) { + return true + } + } + return false +} + +func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError { + // Do not return results until the snapshot's view has been initialized. + s.AwaitInitialized(ctx) + + // TODO(rfindley): Should we be more careful about returning the + // initialization error? Is it possible for the initialization error to be + // corrected without a successful reinitialization? + if err := s.getInitializationError(); err != nil { + return err + } + + // TODO(rfindley): revisit this handling. Calling reloadWorkspace with a + // cancelled context should have the same effect, so this preemptive handling + // should not be necessary. + // + // Also: GetCriticalError ignores context cancellation errors. Should we be + // returning nil here? + if ctx.Err() != nil { + return &source.CriticalError{MainError: ctx.Err()} + } + + // TODO(rfindley): reloading is not idempotent: if we try to reload or load + // orphaned files below and fail, we won't try again. For that reason, we + // could get different results from subsequent calls to this function, which + // may cause critical errors to be suppressed. + + if err := s.reloadWorkspace(ctx); err != nil { + diags := s.extractGoCommandErrors(ctx, err) + return &source.CriticalError{ + MainError: err, + Diagnostics: diags, + } + } + + if err := s.reloadOrphanedOpenFiles(ctx); err != nil { + diags := s.extractGoCommandErrors(ctx, err) + return &source.CriticalError{ + MainError: err, + Diagnostics: diags, + } + } + return nil +} + +func (s *snapshot) getInitializationError() *source.CriticalError { + s.mu.Lock() + defer s.mu.Unlock() + + return s.initializedErr +} + +func (s *snapshot) AwaitInitialized(ctx context.Context) { + select { + case <-ctx.Done(): + return + case <-s.view.initialWorkspaceLoad: + } + // We typically prefer to run something as intensive as the IWL without + // blocking. I'm not sure if there is a way to do that here. + s.initialize(ctx, false) +} + +// reloadWorkspace reloads the metadata for all invalidated workspace packages. +func (s *snapshot) reloadWorkspace(ctx context.Context) error { + var scopes []loadScope + var seen map[PackagePath]bool + s.mu.Lock() + for _, pkgPaths := range s.shouldLoad { + for _, pkgPath := range pkgPaths { + if seen == nil { + seen = make(map[PackagePath]bool) + } + if seen[pkgPath] { + continue + } + seen[pkgPath] = true + scopes = append(scopes, packageLoadScope(pkgPath)) + } + } + s.mu.Unlock() + + if len(scopes) == 0 { + return nil + } + + // If the view's build configuration is invalid, we cannot reload by + // package path. Just reload the directory instead. + if !s.ValidBuildConfiguration() { + scopes = []loadScope{viewLoadScope("LOAD_INVALID_VIEW")} + } + + err := s.load(ctx, false, scopes...) + + // Unless the context was canceled, set "shouldLoad" to false for all + // of the metadata we attempted to load. + if !errors.Is(err, context.Canceled) { + s.clearShouldLoad(scopes...) + } + + return err +} + +func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { + // When we load ./... or a package path directly, we may not get packages + // that exist only in overlays. As a workaround, we search all of the files + // available in the snapshot and reload their metadata individually using a + // file= query if the metadata is unavailable. + files := s.orphanedOpenFiles() + + // Files without a valid package declaration can't be loaded. Don't try. + var scopes []loadScope + for _, file := range files { + pgf, err := s.ParseGo(ctx, file, source.ParseHeader) + if err != nil { + continue + } + if !pgf.File.Package.IsValid() { + continue + } + + scopes = append(scopes, fileLoadScope(file.URI())) + } + + if len(scopes) == 0 { + return nil + } + + // The regtests match this exact log message, keep them in sync. + event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes)) + err := s.load(ctx, false, scopes...) + + // If we failed to load some files, i.e. they have no metadata, + // mark the failures so we don't bother retrying until the file's + // content changes. + // + // TODO(rstambler): This may be an overestimate if the load stopped + // early for an unrelated errors. Add a fallback? + // + // Check for context cancellation so that we don't incorrectly mark files + // as unloadable, but don't return before setting all workspace packages. + if ctx.Err() == nil && err != nil { + event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes)) + s.mu.Lock() + for _, scope := range scopes { + uri := span.URI(scope.(fileLoadScope)) + if s.noValidMetadataForURILocked(uri) { + s.unloadableFiles[uri] = struct{}{} + } + } + s.mu.Unlock() + } + return nil +} + +func (s *snapshot) orphanedOpenFiles() []source.FileHandle { + s.mu.Lock() + defer s.mu.Unlock() + + var files []source.FileHandle + s.files.Range(func(uri span.URI, fh source.FileHandle) { + // Only consider open files, which will be represented as overlays. + if _, isOverlay := fh.(*Overlay); !isOverlay { + return + } + // Don't try to reload metadata for go.mod files. + if s.view.FileKind(fh) != source.Go { + return + } + // If the URI doesn't belong to this view, then it's not in a workspace + // package and should not be reloaded directly. + if !source.InDir(s.view.folder.Filename(), uri.Filename()) { + return + } + // Don't reload metadata for files we've already deemed unloadable. + if _, ok := s.unloadableFiles[uri]; ok { + return + } + if s.noValidMetadataForURILocked(uri) { + files = append(files, fh) + } + }) + return files +} + +// TODO(golang/go#53756): this function needs to consider more than just the +// absolute URI, for example: +// - the position of /vendor/ with respect to the relevant module root +// - whether or not go.work is in use (as vendoring isn't supported in workspace mode) +// +// Most likely, each call site of inVendor needs to be reconsidered to +// understand and correctly implement the desired behavior. +func inVendor(uri span.URI) bool { + _, after, found := cut(string(uri), "/vendor/") + // Only subdirectories of /vendor/ are considered vendored + // (/vendor/a/foo.go is vendored, /vendor/foo.go is not). + return found && strings.Contains(after, "/") +} + +// TODO(adonovan): replace with strings.Cut when we can assume go1.18. +func cut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +// unappliedChanges is a file source that handles an uncloned snapshot. +type unappliedChanges struct { + originalSnapshot *snapshot + changes map[span.URI]*fileChange +} + +func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { + if c, ok := ac.changes[uri]; ok { + return c.fileHandle, nil + } + return ac.originalSnapshot.GetFile(ctx, uri) +} + +func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) { + ctx, done := event.Start(ctx, "snapshot.clone") + defer done() + + reinit := false + wsModFiles, wsModFilesErr := s.workspaceModFiles, s.workspaceModFilesErr + + if workURI := s.view.effectiveGOWORK(); workURI != "" { + if change, ok := changes[workURI]; ok { + wsModFiles, wsModFilesErr = computeWorkspaceModFiles(ctx, s.view.gomod, workURI, s.view.effectiveGO111MODULE(), &unappliedChanges{ + originalSnapshot: s, + changes: changes, + }) + // TODO(rfindley): don't rely on 'isUnchanged' here. Use a content hash instead. + reinit = change.fileHandle.Saved() && !change.isUnchanged + } + } + + // Reinitialize if any workspace mod file has changed on disk. + for uri, change := range changes { + if _, ok := wsModFiles[uri]; ok && change.fileHandle.Saved() && !change.isUnchanged { + reinit = true + } + } + + // Finally, process sumfile changes that may affect loading. + for uri, change := range changes { + if !change.fileHandle.Saved() { + continue // like with go.mod files, we only reinit when things are saved + } + if filepath.Base(uri.Filename()) == "go.work.sum" && s.view.gowork != "" { + if filepath.Dir(uri.Filename()) == filepath.Dir(s.view.gowork) { + reinit = true + } + } + if filepath.Base(uri.Filename()) == "go.sum" { + dir := filepath.Dir(uri.Filename()) + modURI := span.URIFromPath(filepath.Join(dir, "go.mod")) + if _, active := wsModFiles[modURI]; active { + reinit = true + } + } + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Changes to vendor tree may require reinitialization, + // either because of an initialization error + // (e.g. "inconsistent vendoring detected"), or because + // one or more modules may have moved into or out of the + // vendor tree after 'go mod vendor' or 'rm -fr vendor/'. + for uri := range changes { + if inVendor(uri) && s.initializedErr != nil || + strings.HasSuffix(string(uri), "/vendor/modules.txt") { + reinit = true + break + } + } + + bgCtx, cancel := context.WithCancel(bgCtx) + result := &snapshot{ + sequenceID: s.sequenceID + 1, + globalID: nextSnapshotID(), + store: s.store, + view: s.view, + backgroundCtx: bgCtx, + cancel: cancel, + builtin: s.builtin, + initialized: s.initialized, + initializedErr: s.initializedErr, + packages: s.packages.Clone(), + isActivePackageCache: s.isActivePackageCache.Clone(), + analyses: s.analyses.Clone(), + files: s.files.Clone(), + parsedGoFiles: s.parsedGoFiles.Clone(), + parseKeysByURI: s.parseKeysByURI.Clone(), + symbolizeHandles: s.symbolizeHandles.Clone(), + workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), + unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), + parseModHandles: s.parseModHandles.Clone(), + parseWorkHandles: s.parseWorkHandles.Clone(), + modTidyHandles: s.modTidyHandles.Clone(), + modWhyHandles: s.modWhyHandles.Clone(), + modVulnHandles: s.modVulnHandles.Clone(), + knownSubdirs: s.knownSubdirs.Clone(), + workspaceModFiles: wsModFiles, + workspaceModFilesErr: wsModFilesErr, + } + + // The snapshot should be initialized if either s was uninitialized, or we've + // detected a change that triggers reinitialization. + if reinit { + result.initialized = false + } + + // Create a lease on the new snapshot. + // (Best to do this early in case the code below hides an + // incref/decref operation that might destroy it prematurely.) + release := result.Acquire() + + // Copy the set of unloadable files. + // + // TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on + // changes to environment or workspace layout, or more generally on any + // metadata change? + // + // Maybe not, as major configuration changes cause a new view. + for k, v := range s.unloadableFiles { + result.unloadableFiles[k] = v + } + + // TODO(adonovan): merge loops over "changes". + for uri, change := range changes { + // Optimization: if the content did not change, we don't need to evict the + // parsed file. This is not the case for e.g. the files map, which may + // switch from on-disk state to overlay. Parsed files depend only on + // content and parse mode (which is captured in the parse key). + // + // NOTE: This also makes it less likely that we re-parse a file due to a + // cache-miss but get a cache-hit for the corresponding package. In the + // past, there was code that relied on ParseGo returning the type-checked + // syntax tree. That code was wrong, but avoiding invalidation here limits + // the blast radius of these types of bugs. + if !change.isUnchanged { + keys, ok := result.parseKeysByURI.Get(uri) + if ok { + for _, key := range keys { + result.parsedGoFiles.Delete(key) + } + result.parseKeysByURI.Delete(uri) + } + } + + // Invalidate go.mod-related handles. + result.modTidyHandles.Delete(uri) + result.modWhyHandles.Delete(uri) + result.modVulnHandles.Delete(uri) + + // Invalidate handles for cached symbols. + result.symbolizeHandles.Delete(uri) + } + + // Add all of the known subdirectories, but don't update them for the + // changed files. We need to rebuild the workspace module to know the + // true set of known subdirectories, but we don't want to do that in clone. + result.knownSubdirs = s.knownSubdirs.Clone() + result.knownSubdirsPatternCache = s.knownSubdirsPatternCache + for _, c := range changes { + result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c) + } + + // directIDs keeps track of package IDs that have directly changed. + // Note: this is not a set, it's a map from id to invalidateMetadata. + directIDs := map[PackageID]bool{} + + // Invalidate all package metadata if the workspace module has changed. + if reinit { + for k := range s.meta.metadata { + directIDs[k] = true + } + } + + // Compute invalidations based on file changes. + anyImportDeleted := false // import deletions can resolve cycles + anyFileOpenedOrClosed := false // opened files affect workspace packages + anyFileAdded := false // adding a file can resolve missing dependencies + + for uri, change := range changes { + // The original FileHandle for this URI is cached on the snapshot. + originalFH, _ := s.files.Get(uri) + var originalOpen, newOpen bool + _, originalOpen = originalFH.(*Overlay) + _, newOpen = change.fileHandle.(*Overlay) + anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen) + anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil) + + // If uri is a Go file, check if it has changed in a way that would + // invalidate metadata. Note that we can't use s.view.FileKind here, + // because the file type that matters is not what the *client* tells us, + // but what the Go command sees. + var invalidateMetadata, pkgFileChanged, importDeleted bool + if strings.HasSuffix(uri.Filename(), ".go") { + invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle) + } + + invalidateMetadata = invalidateMetadata || forceReloadMetadata || reinit + anyImportDeleted = anyImportDeleted || importDeleted + + // Mark all of the package IDs containing the given file. + filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged) + for id := range filePackageIDs { + directIDs[id] = directIDs[id] || invalidateMetadata // may insert 'false' + } + + // Invalidate the previous modTidyHandle if any of the files have been + // saved or if any of the metadata has been invalidated. + if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) { + // TODO(maybe): Only delete mod handles for + // which the withoutURI is relevant. + // Requires reverse-engineering the go command. (!) + result.modTidyHandles.Clear() + result.modWhyHandles.Clear() + result.modVulnHandles.Clear() + } + + result.parseModHandles.Delete(uri) + result.parseWorkHandles.Delete(uri) + // Handle the invalidated file; it may have new contents or not exist. + if !change.exists { + result.files.Delete(uri) + } else { + result.files.Set(uri, change.fileHandle) + } + + // Make sure to remove the changed file from the unloadable set. + delete(result.unloadableFiles, uri) + } + + // Deleting an import can cause list errors due to import cycles to be + // resolved. The best we can do without parsing the list error message is to + // hope that list errors may have been resolved by a deleted import. + // + // We could do better by parsing the list error message. We already do this + // to assign a better range to the list error, but for such critical + // functionality as metadata, it's better to be conservative until it proves + // impractical. + // + // We could also do better by looking at which imports were deleted and + // trying to find cycles they are involved in. This fails when the file goes + // from an unparseable state to a parseable state, as we don't have a + // starting point to compare with. + if anyImportDeleted { + for id, metadata := range s.meta.metadata { + if len(metadata.Errors) > 0 { + directIDs[id] = true + } + } + } + + // Adding a file can resolve missing dependencies from existing packages. + // + // We could be smart here and try to guess which packages may have been + // fixed, but until that proves necessary, just invalidate metadata for any + // package with missing dependencies. + if anyFileAdded { + for id, metadata := range s.meta.metadata { + for _, impID := range metadata.DepsByImpPath { + if impID == "" { // missing import + directIDs[id] = true + break + } + } + } + } + + // Invalidate reverse dependencies too. + // idsToInvalidate keeps track of transitive reverse dependencies. + // If an ID is present in the map, invalidate its types. + // If an ID's value is true, invalidate its metadata too. + idsToInvalidate := map[PackageID]bool{} + var addRevDeps func(PackageID, bool) + addRevDeps = func(id PackageID, invalidateMetadata bool) { + current, seen := idsToInvalidate[id] + newInvalidateMetadata := current || invalidateMetadata + + // If we've already seen this ID, and the value of invalidate + // metadata has not changed, we can return early. + if seen && current == newInvalidateMetadata { + return + } + idsToInvalidate[id] = newInvalidateMetadata + for _, rid := range s.meta.importedBy[id] { + addRevDeps(rid, invalidateMetadata) + } + } + for id, invalidateMetadata := range directIDs { + addRevDeps(id, invalidateMetadata) + } + + // Delete invalidated package type information. + for id := range idsToInvalidate { + for _, mode := range source.AllParseModes { + key := packageKey{mode, id} + result.packages.Delete(key) + } + } + + // Delete invalidated analysis actions. + var actionsToDelete []analysisKey + result.analyses.Range(func(k, _ interface{}) { + key := k.(analysisKey) + if _, ok := idsToInvalidate[key.pkgid]; ok { + actionsToDelete = append(actionsToDelete, key) + } + }) + for _, key := range actionsToDelete { + result.analyses.Delete(key) + } + + // If a file has been deleted, we must delete metadata for all packages + // containing that file. + // + // TODO(rfindley): why not keep invalid metadata in this case? If we + // otherwise allow operate on invalid metadata, why not continue to do so, + // skipping the missing file? + skipID := map[PackageID]bool{} + for _, c := range changes { + if c.exists { + continue + } + // The file has been deleted. + if ids, ok := s.meta.ids[c.fileHandle.URI()]; ok { + for _, id := range ids { + skipID[id] = true + } + } + } + + // Any packages that need loading in s still need loading in the new + // snapshot. + for k, v := range s.shouldLoad { + if result.shouldLoad == nil { + result.shouldLoad = make(map[PackageID][]PackagePath) + } + result.shouldLoad[k] = v + } + + // Compute which metadata updates are required. We only need to invalidate + // packages directly containing the affected file, and only if it changed in + // a relevant way. + metadataUpdates := make(map[PackageID]*source.Metadata) + for k, v := range s.meta.metadata { + invalidateMetadata := idsToInvalidate[k] + + // For metadata that has been newly invalidated, capture package paths + // requiring reloading in the shouldLoad map. + if invalidateMetadata && !source.IsCommandLineArguments(v.ID) { + if result.shouldLoad == nil { + result.shouldLoad = make(map[PackageID][]PackagePath) + } + needsReload := []PackagePath{v.PkgPath} + if v.ForTest != "" && v.ForTest != v.PkgPath { + // When reloading test variants, always reload their ForTest package as + // well. Otherwise, we may miss test variants in the resulting load. + // + // TODO(rfindley): is this actually sufficient? Is it possible that + // other test variants may be invalidated? Either way, we should + // determine exactly what needs to be reloaded here. + needsReload = append(needsReload, v.ForTest) + } + result.shouldLoad[k] = needsReload + } + + // Check whether the metadata should be deleted. + if skipID[k] || invalidateMetadata { + metadataUpdates[k] = nil + continue + } + } + + // Update metadata, if necessary. + result.meta = s.meta.Clone(metadataUpdates) + + // Update workspace and active packages, if necessary. + if result.meta != s.meta || anyFileOpenedOrClosed { + result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta) + result.resetIsActivePackageLocked() + } else { + result.workspacePackages = s.workspacePackages + } + + // Don't bother copying the importedBy graph, + // as it changes each time we update metadata. + + // TODO(rfindley): consolidate the this workspace mode detection with + // workspace invalidation. + workspaceModeChanged := s.workspaceMode() != result.workspaceMode() + + // If the snapshot's workspace mode has changed, the packages loaded using + // the previous mode are no longer relevant, so clear them out. + if workspaceModeChanged { + result.workspacePackages = map[PackageID]PackagePath{} + } + result.dumpWorkspace("clone") + return result, release +} + +// invalidatedPackageIDs returns all packages invalidated by a change to uri. +// If we haven't seen this URI before, we guess based on files in the same +// directory. This is of course incorrect in build systems where packages are +// not organized by directory. +// +// If packageFileChanged is set, the file is either a new file, or has a new +// package name. In this case, all known packages in the directory will be +// invalidated. +func invalidatedPackageIDs(uri span.URI, known map[span.URI][]PackageID, packageFileChanged bool) map[PackageID]struct{} { + invalidated := make(map[PackageID]struct{}) + + // At a minimum, we invalidate packages known to contain uri. + for _, id := range known[uri] { + invalidated[id] = struct{}{} + } + + // If the file didn't move to a new package, we should only invalidate the + // packages it is currently contained inside. + if !packageFileChanged && len(invalidated) > 0 { + return invalidated + } + + // This is a file we don't yet know about, or which has moved packages. Guess + // relevant packages by considering files in the same directory. + + // Cache of FileInfo to avoid unnecessary stats for multiple files in the + // same directory. + stats := make(map[string]struct { + os.FileInfo + error + }) + getInfo := func(dir string) (os.FileInfo, error) { + if res, ok := stats[dir]; ok { + return res.FileInfo, res.error + } + fi, err := os.Stat(dir) + stats[dir] = struct { + os.FileInfo + error + }{fi, err} + return fi, err + } + dir := filepath.Dir(uri.Filename()) + fi, err := getInfo(dir) + if err == nil { + // Aggregate all possibly relevant package IDs. + for knownURI, ids := range known { + knownDir := filepath.Dir(knownURI.Filename()) + knownFI, err := getInfo(knownDir) + if err != nil { + continue + } + if os.SameFile(fi, knownFI) { + for _, id := range ids { + invalidated[id] = struct{}{} + } + } + } + } + return invalidated +} + +// fileWasSaved reports whether the FileHandle passed in has been saved. It +// accomplishes this by checking to see if the original and current FileHandles +// are both overlays, and if the current FileHandle is saved while the original +// FileHandle was not saved. +func fileWasSaved(originalFH, currentFH source.FileHandle) bool { + c, ok := currentFH.(*Overlay) + if !ok || c == nil { + return true + } + o, ok := originalFH.(*Overlay) + if !ok || o == nil { + return c.saved + } + return !o.saved && c.saved +} + +// metadataChanges detects features of the change from oldFH->newFH that may +// affect package metadata. +// +// It uses lockedSnapshot to access cached parse information. lockedSnapshot +// must be locked. +// +// The result parameters have the following meaning: +// - invalidate means that package metadata for packages containing the file +// should be invalidated. +// - pkgFileChanged means that the file->package associates for the file have +// changed (possibly because the file is new, or because its package name has +// changed). +// - importDeleted means that an import has been deleted, or we can't +// determine if an import was deleted due to errors. +func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH source.FileHandle) (invalidate, pkgFileChanged, importDeleted bool) { + if oldFH == nil || newFH == nil { // existential changes + changed := (oldFH == nil) != (newFH == nil) + return changed, changed, (newFH == nil) // we don't know if an import was deleted + } + + // If the file hasn't changed, there's no need to reload. + if oldFH.FileIdentity() == newFH.FileIdentity() { + return false, false, false + } + + // Parse headers to compare package names and imports. + oldHead, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseHeader) + newHead, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseHeader) + + if oldErr != nil || newErr != nil { + // TODO(rfindley): we can get here if newFH does not exists. There is + // asymmetry here, in that newFH may be non-nil even if the underlying file + // does not exist. + // + // We should not produce a non-nil filehandle for a file that does not exist. + errChanged := (oldErr == nil) != (newErr == nil) + return errChanged, errChanged, (newErr != nil) // we don't know if an import was deleted + } + + // `go list` fails completely if the file header cannot be parsed. If we go + // from a non-parsing state to a parsing state, we should reload. + if oldHead.ParseErr != nil && newHead.ParseErr == nil { + return true, true, true // We don't know what changed, so fall back on full invalidation. + } + + // If a package name has changed, the set of package imports may have changed + // in ways we can't detect here. Assume an import has been deleted. + if oldHead.File.Name.Name != newHead.File.Name.Name { + return true, true, true + } + + // Check whether package imports have changed. Only consider potentially + // valid imports paths. + oldImports := validImports(oldHead.File.Imports) + newImports := validImports(newHead.File.Imports) + + for path := range newImports { + if _, ok := oldImports[path]; ok { + delete(oldImports, path) + } else { + invalidate = true // a new, potentially valid import was added + } + } + + if len(oldImports) > 0 { + invalidate = true + importDeleted = true + } + + // If the change does not otherwise invalidate metadata, get the full ASTs in + // order to check magic comments. + // + // Note: if this affects performance we can probably avoid parsing in the + // common case by first scanning the source for potential comments. + if !invalidate { + origFull, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseFull) + currFull, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseFull) + if oldErr == nil && newErr == nil { + invalidate = magicCommentsChanged(origFull.File, currFull.File) + } else { + // At this point, we shouldn't ever fail to produce a ParsedGoFile, as + // we're already past header parsing. + bug.Reportf("metadataChanges: unparseable file %v (old error: %v, new error: %v)", oldFH.URI(), oldErr, newErr) + } + } + + return invalidate, pkgFileChanged, importDeleted +} + +// peekOrParse returns the cached ParsedGoFile if it exists, +// otherwise parses without populating the cache. +// +// It returns an error if the file could not be read (note that parsing errors +// are stored in ParsedGoFile.ParseErr). +// +// lockedSnapshot must be locked. +func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + // Peek in the cache without populating it. + // We do this to reduce retained heap, not work. + if parsed, _ := lockedSnapshot.peekParseGoLocked(fh, mode); parsed != nil { + return parsed, nil // cache hit + } + return parseGoImpl(ctx, token.NewFileSet(), fh, mode) +} + +func magicCommentsChanged(original *ast.File, current *ast.File) bool { + oldComments := extractMagicComments(original) + newComments := extractMagicComments(current) + if len(oldComments) != len(newComments) { + return true + } + for i := range oldComments { + if oldComments[i] != newComments[i] { + return true + } + } + return false +} + +// validImports extracts the set of valid import paths from imports. +func validImports(imports []*ast.ImportSpec) map[string]struct{} { + m := make(map[string]struct{}) + for _, spec := range imports { + if path := spec.Path.Value; validImportPath(path) { + m[path] = struct{}{} + } + } + return m +} + +func validImportPath(path string) bool { + path, err := strconv.Unquote(path) + if err != nil { + return false + } + if path == "" { + return false + } + if path[len(path)-1] == '/' { + return false + } + return true +} + +var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`) + +// extractMagicComments finds magic comments that affect metadata in f. +func extractMagicComments(f *ast.File) []string { + var results []string + for _, cg := range f.Comments { + for _, c := range cg.List { + if buildConstraintOrEmbedRe.MatchString(c.Text) { + results = append(results, c.Text) + } + } + } + return results +} + +func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) { + s.AwaitInitialized(ctx) + + s.mu.Lock() + builtin := s.builtin + s.mu.Unlock() + + if builtin == "" { + return nil, fmt.Errorf("no builtin package for view %s", s.view.name) + } + + fh, err := s.GetFile(ctx, builtin) + if err != nil { + return nil, err + } + return s.ParseGo(ctx, fh, source.ParseFull) +} + +func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool { + s.mu.Lock() + defer s.mu.Unlock() + // We should always get the builtin URI in a canonical form, so use simple + // string comparison here. span.CompareURI is too expensive. + return uri == s.builtin +} + +func (s *snapshot) setBuiltin(path string) { + s.mu.Lock() + defer s.mu.Unlock() + + s.builtin = span.URIFromPath(path) +} diff --git a/gopls/internal/lsp/cache/standalone_go115.go b/gopls/internal/lsp/cache/standalone_go115.go new file mode 100644 index 00000000000..79569ae10ec --- /dev/null +++ b/gopls/internal/lsp/cache/standalone_go115.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.16 +// +build !go1.16 + +package cache + +// isStandaloneFile returns false, as the 'standaloneTags' setting is +// unsupported on Go 1.15 and earlier. +func isStandaloneFile(src []byte, standaloneTags []string) bool { + return false +} diff --git a/gopls/internal/lsp/cache/standalone_go116.go b/gopls/internal/lsp/cache/standalone_go116.go new file mode 100644 index 00000000000..2f72d5f5495 --- /dev/null +++ b/gopls/internal/lsp/cache/standalone_go116.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +package cache + +import ( + "go/build/constraint" + "go/parser" + "go/token" +) + +// isStandaloneFile reports whether a file with the given contents should be +// considered a 'standalone main file', meaning a package that consists of only +// a single file. +func isStandaloneFile(src []byte, standaloneTags []string) bool { + f, err := parser.ParseFile(token.NewFileSet(), "", src, parser.PackageClauseOnly|parser.ParseComments) + if err != nil { + return false + } + + if f.Name == nil || f.Name.Name != "main" { + return false + } + + for _, cg := range f.Comments { + // Even with PackageClauseOnly the parser consumes the semicolon following + // the package clause, so we must guard against comments that come after + // the package name. + if cg.Pos() > f.Name.Pos() { + continue + } + for _, comment := range cg.List { + if c, err := constraint.Parse(comment.Text); err == nil { + if tag, ok := c.(*constraint.TagExpr); ok { + for _, t := range standaloneTags { + if t == tag.Tag { + return true + } + } + } + } + } + } + + return false +} diff --git a/gopls/internal/lsp/cache/standalone_go116_test.go b/gopls/internal/lsp/cache/standalone_go116_test.go new file mode 100644 index 00000000000..9adf01e6cea --- /dev/null +++ b/gopls/internal/lsp/cache/standalone_go116_test.go @@ -0,0 +1,96 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +package cache + +import ( + "testing" +) + +func TestIsStandaloneFile(t *testing.T) { + tests := []struct { + desc string + contents string + standaloneTags []string + want bool + }{ + { + "new syntax", + "//go:build ignore\n\npackage main\n", + []string{"ignore"}, + true, + }, + { + "legacy syntax", + "// +build ignore\n\npackage main\n", + []string{"ignore"}, + true, + }, + { + "multiple tags", + "//go:build ignore\n\npackage main\n", + []string{"exclude", "ignore"}, + true, + }, + { + "invalid tag", + "// +build ignore\n\npackage main\n", + []string{"script"}, + false, + }, + { + "non-main package", + "//go:build ignore\n\npackage p\n", + []string{"ignore"}, + false, + }, + { + "alternate tag", + "// +build script\n\npackage main\n", + []string{"script"}, + true, + }, + { + "both syntax", + "//go:build ignore\n// +build ignore\n\npackage main\n", + []string{"ignore"}, + true, + }, + { + "after comments", + "// A non-directive comment\n//go:build ignore\n\npackage main\n", + []string{"ignore"}, + true, + }, + { + "after package decl", + "package main //go:build ignore\n", + []string{"ignore"}, + false, + }, + { + "on line after package decl", + "package main\n\n//go:build ignore\n", + []string{"ignore"}, + false, + }, + { + "combined with other expressions", + "\n\n//go:build ignore || darwin\n\npackage main\n", + []string{"ignore"}, + false, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if got := isStandaloneFile([]byte(test.contents), test.standaloneTags); got != test.want { + t.Errorf("isStandaloneFile(%q, %v) = %t, want %t", test.contents, test.standaloneTags, got, test.want) + } + }) + } +} diff --git a/gopls/internal/lsp/cache/symbols.go b/gopls/internal/lsp/cache/symbols.go new file mode 100644 index 00000000000..8cdb147a957 --- /dev/null +++ b/gopls/internal/lsp/cache/symbols.go @@ -0,0 +1,243 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "go/ast" + "go/parser" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/memoize" +) + +// symbolize returns the result of symbolizing the file identified by fh, using a cache. +func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]source.Symbol, error) { + uri := fh.URI() + + s.mu.Lock() + entry, hit := s.symbolizeHandles.Get(uri) + s.mu.Unlock() + + type symbolizeResult struct { + symbols []source.Symbol + err error + } + + // Cache miss? + if !hit { + type symbolHandleKey source.Hash + key := symbolHandleKey(fh.FileIdentity().Hash) + promise, release := s.store.Promise(key, func(_ context.Context, arg interface{}) interface{} { + symbols, err := symbolizeImpl(arg.(*snapshot), fh) + return symbolizeResult{symbols, err} + }) + + entry = promise + + s.mu.Lock() + s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } + + // Await result. + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) + if err != nil { + return nil, err + } + res := v.(symbolizeResult) + return res.symbols, res.err +} + +// symbolizeImpl reads and parses a file and extracts symbols from it. +// It may use a parsed file already present in the cache but +// otherwise does not populate the cache. +func symbolizeImpl(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) { + src, err := fh.Read() + if err != nil { + return nil, err + } + + var ( + file *ast.File + tokFile *token.File + mapper *protocol.Mapper + ) + + // If the file has already been fully parsed through the + // cache, we can just use the result. But we don't want to + // populate the cache after a miss. + snapshot.mu.Lock() + pgf, _ := snapshot.peekParseGoLocked(fh, source.ParseFull) + snapshot.mu.Unlock() + if pgf != nil { + file = pgf.File + tokFile = pgf.Tok + mapper = pgf.Mapper + } + + // Otherwise, we parse the file ourselves. Notably we don't use parseGo here, + // so that we can avoid parsing comments and can skip object resolution, + // which has a meaningful impact on performance. Neither comments nor objects + // are necessary for symbol construction. + if file == nil { + fset := token.NewFileSet() + file, err = parser.ParseFile(fset, fh.URI().Filename(), src, skipObjectResolution) + if file == nil { + return nil, err + } + tokFile = fset.File(file.Package) + mapper = protocol.NewMapper(fh.URI(), src) + } + + w := &symbolWalker{ + tokFile: tokFile, + mapper: mapper, + } + + w.fileDecls(file.Decls) + + return w.symbols, w.firstError +} + +type symbolWalker struct { + // for computing positions + tokFile *token.File + mapper *protocol.Mapper + + symbols []source.Symbol + firstError error +} + +func (w *symbolWalker) atNode(node ast.Node, name string, kind protocol.SymbolKind, path ...*ast.Ident) { + var b strings.Builder + for _, ident := range path { + if ident != nil { + b.WriteString(ident.Name) + b.WriteString(".") + } + } + b.WriteString(name) + + rng, err := w.mapper.NodeRange(w.tokFile, node) + if err != nil { + w.error(err) + return + } + sym := source.Symbol{ + Name: b.String(), + Kind: kind, + Range: rng, + } + w.symbols = append(w.symbols, sym) +} + +func (w *symbolWalker) error(err error) { + if err != nil && w.firstError == nil { + w.firstError = err + } +} + +func (w *symbolWalker) fileDecls(decls []ast.Decl) { + for _, decl := range decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + kind := protocol.Function + var recv *ast.Ident + if decl.Recv.NumFields() > 0 { + kind = protocol.Method + recv = unpackRecv(decl.Recv.List[0].Type) + } + w.atNode(decl.Name, decl.Name.Name, kind, recv) + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + kind := guessKind(spec) + w.atNode(spec.Name, spec.Name.Name, kind) + w.walkType(spec.Type, spec.Name) + case *ast.ValueSpec: + for _, name := range spec.Names { + kind := protocol.Variable + if decl.Tok == token.CONST { + kind = protocol.Constant + } + w.atNode(name, name.Name, kind) + } + } + } + } + } +} + +func guessKind(spec *ast.TypeSpec) protocol.SymbolKind { + switch spec.Type.(type) { + case *ast.InterfaceType: + return protocol.Interface + case *ast.StructType: + return protocol.Struct + case *ast.FuncType: + return protocol.Function + } + return protocol.Class +} + +func unpackRecv(rtyp ast.Expr) *ast.Ident { + // Extract the receiver identifier. Lifted from go/types/resolver.go +L: + for { + switch t := rtyp.(type) { + case *ast.ParenExpr: + rtyp = t.X + case *ast.StarExpr: + rtyp = t.X + default: + break L + } + } + if name, _ := rtyp.(*ast.Ident); name != nil { + return name + } + return nil +} + +// walkType processes symbols related to a type expression. path is path of +// nested type identifiers to the type expression. +func (w *symbolWalker) walkType(typ ast.Expr, path ...*ast.Ident) { + switch st := typ.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + w.walkField(field, protocol.Field, protocol.Field, path...) + } + case *ast.InterfaceType: + for _, field := range st.Methods.List { + w.walkField(field, protocol.Interface, protocol.Method, path...) + } + } +} + +// walkField processes symbols related to the struct field or interface method. +// +// unnamedKind and namedKind are the symbol kinds if the field is resp. unnamed +// or named. path is the path of nested identifiers containing the field. +func (w *symbolWalker) walkField(field *ast.Field, unnamedKind, namedKind protocol.SymbolKind, path ...*ast.Ident) { + if len(field.Names) == 0 { + switch typ := field.Type.(type) { + case *ast.SelectorExpr: + // embedded qualified type + w.atNode(field, typ.Sel.Name, unnamedKind, path...) + default: + w.atNode(field, types.ExprString(field.Type), unnamedKind, path...) + } + } + for _, name := range field.Names { + w.atNode(name, name.Name, namedKind, path...) + w.walkType(field.Type, append(path, name)...) + } +} diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go new file mode 100644 index 00000000000..51660b3ac87 --- /dev/null +++ b/gopls/internal/lsp/cache/view.go @@ -0,0 +1,1144 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cache implements the caching layer for gopls. +package cache + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "go/token" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/semver" + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" + "golang.org/x/tools/internal/xcontext" +) + +type View struct { + id string + + fset *token.FileSet // shared FileSet + gocmdRunner *gocommand.Runner // limits go command concurrency + + // baseCtx is the context handed to NewView. This is the parent of all + // background contexts created for this view. + baseCtx context.Context + + // name is the user-specified name of this view. + name string + + optionsMu sync.Mutex + options *source.Options + + // Workspace information. The fields below are immutable, and together with + // options define the build list. Any change to these fields results in a new + // View. + folder span.URI // user-specified workspace folder + workspaceInformation // Go environment information + + importsState *importsState + + // moduleUpgrades tracks known upgrades for module paths in each modfile. + // Each modfile has a map of module name to upgrade version. + moduleUpgradesMu sync.Mutex + moduleUpgrades map[span.URI]map[string]string + + // vulns maps each go.mod file's URI to its known vulnerabilities. + vulnsMu sync.Mutex + vulns map[span.URI]*govulncheck.Result + + // fs is the file source used to populate this view. + fs source.FileSource + + // seenFiles tracks files that the view has accessed. + // TODO(golang/go#57558): this notion is fundamentally problematic, and + // should be removed. + knownFilesMu sync.Mutex + knownFiles map[span.URI]bool + + // initCancelFirstAttempt can be used to terminate the view's first + // attempt at initialization. + initCancelFirstAttempt context.CancelFunc + + // Track the latest snapshot via the snapshot field, guarded by snapshotMu. + // + // Invariant: whenever the snapshot field is overwritten, destroy(snapshot) + // is called on the previous (overwritten) snapshot while snapshotMu is held, + // incrementing snapshotWG. During shutdown the final snapshot is + // overwritten with nil and destroyed, guaranteeing that all observed + // snapshots have been destroyed via the destroy method, and snapshotWG may + // be waited upon to let these destroy operations complete. + snapshotMu sync.Mutex + snapshot *snapshot // latest snapshot; nil after shutdown has been called + releaseSnapshot func() // called when snapshot is no longer needed + snapshotWG sync.WaitGroup // refcount for pending destroy operations + + // initialWorkspaceLoad is closed when the first workspace initialization has + // completed. If we failed to load, we only retry if the go.mod file changes, + // to avoid too many go/packages calls. + initialWorkspaceLoad chan struct{} + + // initializationSema is used limit concurrent initialization of snapshots in + // the view. We use a channel instead of a mutex to avoid blocking when a + // context is canceled. + // + // This field (along with snapshot.initialized) guards against duplicate + // initialization of snapshots. Do not change it without adjusting snapshot + // accordingly. + initializationSema chan struct{} +} + +// workspaceInformation holds the defining features of the View workspace. +// +// This type is compared to see if the View needs to be reconstructed. +type workspaceInformation struct { + // `go env` variables that need to be tracked by gopls. + goEnv + + // gomod holds the relevant go.mod file for this workspace. + gomod span.URI + + // The Go version in use: X in Go 1.X. + goversion int + + // The complete output of the go version command. + // (Call gocommand.ParseGoVersionOutput to extract a version + // substring such as go1.19.1 or go1.20-rc.1, go1.21-abcdef01.) + goversionOutput string + + // hasGopackagesDriver is true if the user has a value set for the + // GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on + // their machine. + hasGopackagesDriver bool +} + +// effectiveGO111MODULE reports the value of GO111MODULE effective in the go +// command at this go version, assuming at least Go 1.16. +func (w workspaceInformation) effectiveGO111MODULE() go111module { + switch w.GO111MODULE() { + case "off": + return off + case "on", "": + return on + default: + return auto + } +} + +// effectiveGOWORK returns the effective GOWORK value for this workspace, if +// any, in URI form. +func (w workspaceInformation) effectiveGOWORK() span.URI { + if w.gowork == "off" || w.gowork == "" { + return "" + } + return span.URIFromPath(w.gowork) +} + +// GO111MODULE returns the value of GO111MODULE to use for running the go +// command. It differs from the user's environment in order to allow for the +// more forgiving default value "auto" when using recent go versions. +// +// TODO(rfindley): it is probably not worthwhile diverging from the go command +// here. The extra forgiveness may be nice, but breaks the invariant that +// running the go command from the command line produces the same build list. +// +// Put differently: we shouldn't go out of our way to make GOPATH work, when +// the go command does not. +func (w workspaceInformation) GO111MODULE() string { + if w.go111module == "" { + return "auto" + } + return w.go111module +} + +type go111module int + +const ( + off = go111module(iota) + auto + on +) + +// goEnv holds important environment variables that gopls cares about. +type goEnv struct { + gocache, gopath, goroot, goprivate, gomodcache, gowork, goflags string + + // go111module holds the value of GO111MODULE as reported by go env. + // + // Don't use this value directly, because we choose to use a different + // default (auto) on Go 1.16 and later, to avoid spurious errors. Use + // the effectiveGO111MODULE method instead. + go111module string +} + +// loadGoEnv loads `go env` values into the receiver, using the provided user +// environment and go command runner. +func (env *goEnv) load(ctx context.Context, folder string, configEnv []string, runner *gocommand.Runner) error { + vars := env.vars() + + // We can save ~200 ms by requesting only the variables we care about. + args := []string{"-json"} + for k := range vars { + args = append(args, k) + } + + inv := gocommand.Invocation{ + Verb: "env", + Args: args, + Env: configEnv, + WorkingDir: folder, + } + stdout, err := runner.Run(ctx, inv) + if err != nil { + return err + } + envMap := make(map[string]string) + if err := json.Unmarshal(stdout.Bytes(), &envMap); err != nil { + return fmt.Errorf("internal error unmarshaling JSON from 'go env': %w", err) + } + for key, ptr := range vars { + *ptr = envMap[key] + } + + return nil +} + +func (env goEnv) String() string { + var vars []string + for govar, ptr := range env.vars() { + vars = append(vars, fmt.Sprintf("%s=%s", govar, *ptr)) + } + sort.Strings(vars) + return "[" + strings.Join(vars, ", ") + "]" +} + +// vars returns a map from Go environment variable to field value containing it. +func (env *goEnv) vars() map[string]*string { + return map[string]*string{ + "GOCACHE": &env.gocache, + "GOPATH": &env.gopath, + "GOROOT": &env.goroot, + "GOPRIVATE": &env.goprivate, + "GOMODCACHE": &env.gomodcache, + "GO111MODULE": &env.go111module, + "GOWORK": &env.gowork, + "GOFLAGS": &env.goflags, + } +} + +// workspaceMode holds various flags defining how the gopls workspace should +// behave. They may be derived from the environment, user configuration, or +// depend on the Go version. +// +// TODO(rfindley): remove workspace mode, in favor of explicit checks. +type workspaceMode int + +const ( + moduleMode workspaceMode = 1 << iota + + // tempModfile indicates whether or not the -modfile flag should be used. + tempModfile +) + +func (v *View) ID() string { return v.id } + +// tempModFile creates a temporary go.mod file based on the contents +// of the given go.mod file. On success, it is the caller's +// responsibility to call the cleanup function when the file is no +// longer needed. +func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) { + filenameHash := source.Hashf("%s", modFh.URI().Filename()) + tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash)) + if err != nil { + return "", nil, err + } + defer tmpMod.Close() + + tmpURI = span.URIFromPath(tmpMod.Name()) + tmpSumName := sumFilename(tmpURI) + + content, err := modFh.Read() + if err != nil { + return "", nil, err + } + + if _, err := tmpMod.Write(content); err != nil { + return "", nil, err + } + + // We use a distinct name here to avoid subtlety around the fact + // that both 'return' and 'defer' update the "cleanup" variable. + doCleanup := func() { + _ = os.Remove(tmpSumName) + _ = os.Remove(tmpURI.Filename()) + } + + // Be careful to clean up if we return an error from this function. + defer func() { + if err != nil { + doCleanup() + cleanup = nil + } + }() + + // Create an analogous go.sum, if one exists. + if gosum != nil { + if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil { + return "", nil, err + } + } + + return tmpURI, doCleanup, nil +} + +// Name returns the user visible name of this view. +func (v *View) Name() string { + return v.name +} + +// Folder returns the folder at the base of this view. +func (v *View) Folder() span.URI { + return v.folder +} + +func (v *View) Options() *source.Options { + v.optionsMu.Lock() + defer v.optionsMu.Unlock() + return v.options +} + +func (v *View) FileKind(fh source.FileHandle) source.FileKind { + // The kind of an unsaved buffer comes from the + // TextDocumentItem.LanguageID field in the didChange event, + // not from the file name. They may differ. + if o, ok := fh.(*Overlay); ok { + if o.kind != source.UnknownKind { + return o.kind + } + } + + fext := filepath.Ext(fh.URI().Filename()) + switch fext { + case ".go": + return source.Go + case ".mod": + return source.Mod + case ".sum": + return source.Sum + case ".work": + return source.Work + } + exts := v.Options().TemplateExtensions + for _, ext := range exts { + if fext == ext || fext == "."+ext { + return source.Tmpl + } + } + // and now what? This should never happen, but it does for cgo before go1.15 + return source.Go +} + +func minorOptionsChange(a, b *source.Options) bool { + // Check if any of the settings that modify our understanding of files have + // been changed. + if !reflect.DeepEqual(a.Env, b.Env) { + return false + } + if !reflect.DeepEqual(a.DirectoryFilters, b.DirectoryFilters) { + return false + } + if !reflect.DeepEqual(a.StandaloneTags, b.StandaloneTags) { + return false + } + if a.ExpandWorkspaceToModule != b.ExpandWorkspaceToModule { + return false + } + if a.MemoryMode != b.MemoryMode { + return false + } + aBuildFlags := make([]string, len(a.BuildFlags)) + bBuildFlags := make([]string, len(b.BuildFlags)) + copy(aBuildFlags, a.BuildFlags) + copy(bBuildFlags, b.BuildFlags) + sort.Strings(aBuildFlags) + sort.Strings(bBuildFlags) + // the rest of the options are benign + return reflect.DeepEqual(aBuildFlags, bBuildFlags) +} + +// SetViewOptions sets the options of the given view to new values. Calling +// this may cause the view to be invalidated and a replacement view added to +// the session. If so the new view will be returned, otherwise the original one +// will be returned. +func (s *Session) SetViewOptions(ctx context.Context, v *View, options *source.Options) (*View, error) { + // no need to rebuild the view if the options were not materially changed + v.optionsMu.Lock() + if minorOptionsChange(v.options, options) { + v.options = options + v.optionsMu.Unlock() + return v, nil + } + v.optionsMu.Unlock() + newView, err := s.updateView(ctx, v, options) + return newView, err +} + +// viewEnv returns a string describing the environment of a newly created view. +// +// It must not be called concurrently with any other view methods. +func viewEnv(v *View) string { + env := v.options.EnvSlice() + buildFlags := append([]string{}, v.options.BuildFlags...) + + var buf bytes.Buffer + fmt.Fprintf(&buf, `go info for %v +(go dir %s) +(go version %s) +(valid build configuration = %v) +(build flags: %v) +(selected go env: %v) +`, + v.folder.Filename(), + v.workingDir().Filename(), + strings.TrimRight(v.workspaceInformation.goversionOutput, "\n"), + v.snapshot.ValidBuildConfiguration(), + buildFlags, + v.goEnv, + ) + + for _, v := range env { + s := strings.SplitN(v, "=", 2) + if len(s) != 2 { + continue + } + } + + return buf.String() +} + +func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error { + return s.view.importsState.runProcessEnvFunc(ctx, s, fn) +} + +// separated out from its sole use in locateTemplateFiles for testability +func fileHasExtension(path string, suffixes []string) bool { + ext := filepath.Ext(path) + if ext != "" && ext[0] == '.' { + ext = ext[1:] + } + for _, s := range suffixes { + if s != "" && ext == s { + return true + } + } + return false +} + +// locateTemplateFiles ensures that the snapshot has mapped template files +// within the workspace folder. +func (s *snapshot) locateTemplateFiles(ctx context.Context) { + if len(s.view.Options().TemplateExtensions) == 0 { + return + } + suffixes := s.view.Options().TemplateExtensions + + searched := 0 + filterFunc := s.view.filterFunc() + err := filepath.WalkDir(s.view.folder.Filename(), func(path string, entry os.DirEntry, err error) error { + if err != nil { + return err + } + if entry.IsDir() { + return nil + } + if fileLimit > 0 && searched > fileLimit { + return errExhausted + } + searched++ + if !fileHasExtension(path, suffixes) { + return nil + } + uri := span.URIFromPath(path) + if filterFunc(uri) { + return nil + } + // Get the file in order to include it in the snapshot. + // TODO(golang/go#57558): it is fundamentally broken to track files in this + // way; we may lose them if configuration or layout changes cause a view to + // be recreated. + // + // Furthermore, this operation must ignore errors, including context + // cancellation, or risk leaving the snapshot in an undefined state. + s.GetFile(ctx, uri) + return nil + }) + if err != nil { + event.Error(ctx, "searching for template files failed", err) + } +} + +func (v *View) contains(uri span.URI) bool { + // If we've expanded the go dir to a parent directory, consider if the + // expanded dir contains the uri. + // TODO(rfindley): should we ignore the root here? It is not provided by the + // user. It would be better to explicitly consider the set of active modules + // wherever relevant. + inGoDir := false + if source.InDir(v.workingDir().Filename(), v.folder.Filename()) { + inGoDir = source.InDir(v.workingDir().Filename(), uri.Filename()) + } + inFolder := source.InDir(v.folder.Filename(), uri.Filename()) + + if !inGoDir && !inFolder { + return false + } + + return !v.filterFunc()(uri) +} + +// filterFunc returns a func that reports whether uri is filtered by the currently configured +// directoryFilters. +func (v *View) filterFunc() func(span.URI) bool { + filterer := buildFilterer(v.folder.Filename(), v.gomodcache, v.Options()) + return func(uri span.URI) bool { + // Only filter relative to the configured root directory. + if source.InDir(v.folder.Filename(), uri.Filename()) { + return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), filterer) + } + return false + } +} + +func (v *View) relevantChange(c source.FileModification) bool { + // If the file is known to the view, the change is relevant. + if v.knownFile(c.URI) { + return true + } + // The go.work file may not be "known" because we first access it through the + // session. As a result, treat changes to the view's go.work file as always + // relevant, even if they are only on-disk changes. + // + // TODO(rfindley): Make sure the go.work files are always known + // to the view. + if c.URI == v.effectiveGOWORK() { + return true + } + + // Note: CL 219202 filtered out on-disk changes here that were not known to + // the view, but this introduces a race when changes arrive before the view + // is initialized (and therefore, before it knows about files). Since that CL + // had neither test nor associated issue, and cited only emacs behavior, this + // logic was deleted. + + return v.contains(c.URI) +} + +func (v *View) markKnown(uri span.URI) { + v.knownFilesMu.Lock() + defer v.knownFilesMu.Unlock() + if v.knownFiles == nil { + v.knownFiles = make(map[span.URI]bool) + } + v.knownFiles[uri] = true +} + +// knownFile reports whether the specified valid URI (or an alias) is known to the view. +func (v *View) knownFile(uri span.URI) bool { + v.knownFilesMu.Lock() + defer v.knownFilesMu.Unlock() + return v.knownFiles[uri] +} + +// shutdown releases resources associated with the view, and waits for ongoing +// work to complete. +func (v *View) shutdown() { + // Cancel the initial workspace load if it is still running. + v.initCancelFirstAttempt() + + v.snapshotMu.Lock() + if v.snapshot != nil { + v.releaseSnapshot() + v.destroy(v.snapshot, "View.shutdown") + v.snapshot = nil + v.releaseSnapshot = nil + } + v.snapshotMu.Unlock() + + v.snapshotWG.Wait() +} + +func (s *snapshot) IgnoredFile(uri span.URI) bool { + filename := uri.Filename() + var prefixes []string + if len(s.workspaceModFiles) == 0 { + for _, entry := range filepath.SplitList(s.view.gopath) { + prefixes = append(prefixes, filepath.Join(entry, "src")) + } + } else { + prefixes = append(prefixes, s.view.gomodcache) + for m := range s.workspaceModFiles { + prefixes = append(prefixes, span.Dir(m).Filename()) + } + } + for _, prefix := range prefixes { + if strings.HasPrefix(filename, prefix) { + return checkIgnored(filename[len(prefix):]) + } + } + return false +} + +// checkIgnored implements go list's exclusion rules. +// Quoting ā€œgo help listā€: +// +// Directory and file names that begin with "." or "_" are ignored +// by the go tool, as are directories named "testdata". +func checkIgnored(suffix string) bool { + for _, component := range strings.Split(suffix, string(filepath.Separator)) { + if len(component) == 0 { + continue + } + if component[0] == '.' || component[0] == '_' || component == "testdata" { + return true + } + } + return false +} + +func (v *View) Snapshot() (source.Snapshot, func(), error) { + return v.getSnapshot() +} + +func (v *View) getSnapshot() (*snapshot, func(), error) { + v.snapshotMu.Lock() + defer v.snapshotMu.Unlock() + if v.snapshot == nil { + return nil, nil, errors.New("view is shutdown") + } + return v.snapshot, v.snapshot.Acquire(), nil +} + +func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) { + select { + case <-ctx.Done(): + return + case s.view.initializationSema <- struct{}{}: + } + + defer func() { + <-s.view.initializationSema + }() + + s.mu.Lock() + initialized := s.initialized + s.mu.Unlock() + + if initialized { + return + } + + s.loadWorkspace(ctx, firstAttempt) + s.collectAllKnownSubdirs(ctx) +} + +func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) (loadErr error) { + // A failure is retryable if it may have been due to context cancellation, + // and this is not the initial workspace load (firstAttempt==true). + // + // The IWL runs on a detached context with a long (~10m) timeout, so + // if the context was canceled we consider loading to have failed + // permanently. + retryableFailure := func() bool { + return loadErr != nil && ctx.Err() != nil && !firstAttempt + } + defer func() { + if !retryableFailure() { + s.mu.Lock() + s.initialized = true + s.mu.Unlock() + } + if firstAttempt { + close(s.view.initialWorkspaceLoad) + } + }() + + // TODO(rFindley): we should only locate template files on the first attempt, + // or guard it via a different mechanism. + s.locateTemplateFiles(ctx) + + // Collect module paths to load by parsing go.mod files. If a module fails to + // parse, capture the parsing failure as a critical diagnostic. + var scopes []loadScope // scopes to load + var modDiagnostics []*source.Diagnostic // diagnostics for broken go.mod files + addError := func(uri span.URI, err error) { + modDiagnostics = append(modDiagnostics, &source.Diagnostic{ + URI: uri, + Severity: protocol.SeverityError, + Source: source.ListError, + Message: err.Error(), + }) + } + + if len(s.workspaceModFiles) > 0 { + for modURI := range s.workspaceModFiles { + // Be careful not to add context cancellation errors as critical module + // errors. + fh, err := s.GetFile(ctx, modURI) + if err != nil { + if ctx.Err() == nil { + addError(modURI, err) + } + continue + } + parsed, err := s.ParseMod(ctx, fh) + if err != nil { + if ctx.Err() == nil { + addError(modURI, err) + } + continue + } + if parsed.File == nil || parsed.File.Module == nil { + addError(modURI, fmt.Errorf("no module path for %s", modURI)) + continue + } + path := parsed.File.Module.Mod.Path + scopes = append(scopes, moduleLoadScope(path)) + } + } else { + scopes = append(scopes, viewLoadScope("LOAD_VIEW")) + } + + // If we're loading anything, ensure we also load builtin, + // since it provides fake definitions (and documentation) + // for types like int that are used everywhere. + if len(scopes) > 0 { + scopes = append(scopes, packageLoadScope("builtin")) + } + loadErr = s.load(ctx, true, scopes...) + + if retryableFailure() { + return loadErr + } + + var criticalErr *source.CriticalError + switch { + case loadErr != nil && ctx.Err() != nil: + event.Error(ctx, fmt.Sprintf("initial workspace load: %v", loadErr), loadErr) + criticalErr = &source.CriticalError{ + MainError: loadErr, + } + case loadErr != nil: + event.Error(ctx, "initial workspace load failed", loadErr) + extractedDiags := s.extractGoCommandErrors(ctx, loadErr) + criticalErr = &source.CriticalError{ + MainError: loadErr, + Diagnostics: append(modDiagnostics, extractedDiags...), + } + case len(modDiagnostics) == 1: + criticalErr = &source.CriticalError{ + MainError: fmt.Errorf(modDiagnostics[0].Message), + Diagnostics: modDiagnostics, + } + case len(modDiagnostics) > 1: + criticalErr = &source.CriticalError{ + MainError: fmt.Errorf("error loading module names"), + Diagnostics: modDiagnostics, + } + } + + // Lock the snapshot when setting the initialized error. + s.mu.Lock() + defer s.mu.Unlock() + s.initializedErr = criticalErr + return loadErr +} + +// invalidateContent invalidates the content of a Go file, +// including any position and type information that depends on it. +// +// invalidateContent returns a non-nil snapshot for the new content, along with +// a callback which the caller must invoke to release that snapshot. +func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) { + // Detach the context so that content invalidation cannot be canceled. + ctx = xcontext.Detach(ctx) + + // This should be the only time we hold the view's snapshot lock for any period of time. + v.snapshotMu.Lock() + defer v.snapshotMu.Unlock() + + prevSnapshot, prevReleaseSnapshot := v.snapshot, v.releaseSnapshot + + if prevSnapshot == nil { + panic("invalidateContent called after shutdown") + } + + // Cancel all still-running previous requests, since they would be + // operating on stale data. + prevSnapshot.cancel() + + // Do not clone a snapshot until its view has finished initializing. + prevSnapshot.AwaitInitialized(ctx) + + // Save one lease of the cloned snapshot in the view. + v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) + + prevReleaseSnapshot() + v.destroy(prevSnapshot, "View.invalidateContent") + + // Return a second lease to the caller. + return v.snapshot, v.snapshot.Acquire() +} + +func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (workspaceInformation, error) { + if err := checkPathCase(folder.Filename()); err != nil { + return workspaceInformation{}, fmt.Errorf("invalid workspace folder path: %w; check that the casing of the configured workspace folder path agrees with the casing reported by the operating system", err) + } + var err error + var info workspaceInformation + inv := gocommand.Invocation{ + WorkingDir: folder.Filename(), + Env: options.EnvSlice(), + } + info.goversion, err = gocommand.GoVersion(ctx, inv, s.gocmdRunner) + if err != nil { + return info, err + } + info.goversionOutput, err = gocommand.GoVersionOutput(ctx, inv, s.gocmdRunner) + if err != nil { + return info, err + } + if err := info.goEnv.load(ctx, folder.Filename(), options.EnvSlice(), s.gocmdRunner); err != nil { + return info, err + } + // The value of GOPACKAGESDRIVER is not returned through the go command. + gopackagesdriver := os.Getenv("GOPACKAGESDRIVER") + // A user may also have a gopackagesdriver binary on their machine, which + // works the same way as setting GOPACKAGESDRIVER. + tool, _ := exec.LookPath("gopackagesdriver") + info.hasGopackagesDriver = gopackagesdriver != "off" && (gopackagesdriver != "" || tool != "") + + // filterFunc is the path filter function for this workspace folder. Notably, + // it is relative to folder (which is specified by the user), not root. + filterFunc := pathExcludedByFilterFunc(folder.Filename(), info.gomodcache, options) + info.gomod, err = findWorkspaceModFile(ctx, folder, s, filterFunc) + if err != nil { + return info, err + } + + return info, nil +} + +// findWorkspaceModFile searches for a single go.mod file relative to the given +// folder URI, using the following algorithm: +// 1. if there is a go.mod file in a parent directory, return it +// 2. else, if there is exactly one nested module, return it +// 3. else, return "" +func findWorkspaceModFile(ctx context.Context, folderURI span.URI, fs source.FileSource, excludePath func(string) bool) (span.URI, error) { + folder := folderURI.Filename() + match, err := findRootPattern(ctx, folder, "go.mod", fs) + if err != nil { + if ctxErr := ctx.Err(); ctxErr != nil { + return "", ctxErr + } + return "", err + } + if match != "" { + return span.URIFromPath(match), nil + } + + // ...else we should check if there's exactly one nested module. + all, err := findModules(folderURI, excludePath, 2) + if err == errExhausted { + // Fall-back behavior: if we don't find any modules after searching 10000 + // files, assume there are none. + event.Log(ctx, fmt.Sprintf("stopped searching for modules after %d files", fileLimit)) + return "", nil + } + if err != nil { + return "", err + } + if len(all) == 1 { + // range to access first element. + for uri := range all { + return uri, nil + } + } + return "", nil +} + +// workingDir returns the directory from which to run Go commands. +// +// The only case where this should matter is if we've narrowed the workspace to +// a singular nested module. In that case, the go command won't be able to find +// the module unless we tell it the nested directory. +func (v *View) workingDir() span.URI { + // Note: if gowork is in use, this will default to the workspace folder. In + // the past, we would instead use the folder containing go.work. This should + // not make a difference, and in fact may improve go list error messages. + // + // TODO(golang/go#57514): eliminate the expandWorkspaceToModule setting + // entirely. + if v.Options().ExpandWorkspaceToModule && v.gomod != "" { + return span.Dir(v.gomod) + } + return v.folder +} + +// findRootPattern looks for files with the given basename in dir or any parent +// directory of dir, using the provided FileSource. It returns the first match, +// starting from dir and search parents. +// +// The resulting string is either the file path of a matching file with the +// given basename, or "" if none was found. +func findRootPattern(ctx context.Context, dir, basename string, fs source.FileSource) (string, error) { + for dir != "" { + target := filepath.Join(dir, basename) + exists, err := fileExists(ctx, span.URIFromPath(target), fs) + if err != nil { + return "", err // not readable or context cancelled + } + if exists { + return target, nil + } + // Trailing separators must be trimmed, otherwise filepath.Split is a noop. + next, _ := filepath.Split(strings.TrimRight(dir, string(filepath.Separator))) + if next == dir { + break + } + dir = next + } + return "", nil +} + +// OS-specific path case check, for case-insensitive filesystems. +var checkPathCase = defaultCheckPathCase + +func defaultCheckPathCase(path string) error { + return nil +} + +func (v *View) IsGoPrivatePath(target string) bool { + return globsMatchPath(v.goprivate, target) +} + +func (v *View) ModuleUpgrades(modfile span.URI) map[string]string { + v.moduleUpgradesMu.Lock() + defer v.moduleUpgradesMu.Unlock() + + upgrades := map[string]string{} + for mod, ver := range v.moduleUpgrades[modfile] { + upgrades[mod] = ver + } + return upgrades +} + +func (v *View) RegisterModuleUpgrades(modfile span.URI, upgrades map[string]string) { + // Return early if there are no upgrades. + if len(upgrades) == 0 { + return + } + + v.moduleUpgradesMu.Lock() + defer v.moduleUpgradesMu.Unlock() + + m := v.moduleUpgrades[modfile] + if m == nil { + m = make(map[string]string) + v.moduleUpgrades[modfile] = m + } + for mod, ver := range upgrades { + m[mod] = ver + } +} + +func (v *View) ClearModuleUpgrades(modfile span.URI) { + v.moduleUpgradesMu.Lock() + defer v.moduleUpgradesMu.Unlock() + + delete(v.moduleUpgrades, modfile) +} + +const maxGovulncheckResultAge = 1 * time.Hour // Invalidate results older than this limit. +var timeNow = time.Now // for testing + +func (v *View) Vulnerabilities(modfiles ...span.URI) map[span.URI]*govulncheck.Result { + m := make(map[span.URI]*govulncheck.Result) + now := timeNow() + v.vulnsMu.Lock() + defer v.vulnsMu.Unlock() + + if len(modfiles) == 0 { // empty means all modfiles + for modfile := range v.vulns { + modfiles = append(modfiles, modfile) + } + } + for _, modfile := range modfiles { + vuln := v.vulns[modfile] + if vuln != nil && now.Sub(vuln.AsOf) > maxGovulncheckResultAge { + v.vulns[modfile] = nil // same as SetVulnerabilities(modfile, nil) + vuln = nil + } + m[modfile] = vuln + } + return m +} + +func (v *View) SetVulnerabilities(modfile span.URI, vulns *govulncheck.Result) { + v.vulnsMu.Lock() + defer v.vulnsMu.Unlock() + + v.vulns[modfile] = vulns +} + +func (v *View) GoVersion() int { + return v.workspaceInformation.goversion +} + +func (v *View) GoVersionString() string { + return gocommand.ParseGoVersionOutput(v.workspaceInformation.goversionOutput) +} + +// Copied from +// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a +func globsMatchPath(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. + var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// TODO(rstambler): Consolidate modURI and modContent back into a FileHandle +// after we have a version of the workspace go.mod file on disk. Getting a +// FileHandle from the cache for temporary files is problematic, since we +// cannot delete it. +func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modContent []byte) (bool, error) { + // Legacy GOPATH workspace? + if s.workspaceMode()&moduleMode == 0 { + return false, nil + } + + // Explicit -mod flag? + matches := modFlagRegexp.FindStringSubmatch(s.view.goflags) + if len(matches) != 0 { + modFlag := matches[1] + if modFlag != "" { + // Don't override an explicit '-mod=vendor' argument. + // We do want to override '-mod=readonly': it would break various module code lenses, + // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway. + return modFlag == "vendor", nil + } + } + + modFile, err := modfile.Parse(modURI.Filename(), modContent, nil) + if err != nil { + return false, err + } + + // No vendor directory? + // TODO(golang/go#57514): this is wrong if the working dir is not the module + // root. + if fi, err := os.Stat(filepath.Join(s.view.workingDir().Filename(), "vendor")); err != nil || !fi.IsDir() { + return false, nil + } + + // Vendoring enabled by default by go declaration in go.mod? + vendorEnabled := modFile.Go != nil && modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0 + return vendorEnabled, nil +} + +// TODO(rfindley): clean up the redundancy of allFilesExcluded, +// pathExcludedByFilterFunc, pathExcludedByFilter, view.filterFunc... +func allFilesExcluded(files []string, filterFunc func(span.URI) bool) bool { + for _, f := range files { + uri := span.URIFromPath(f) + if !filterFunc(uri) { + return false + } + } + return true +} + +func pathExcludedByFilterFunc(folder, gomodcache string, opts *source.Options) func(string) bool { + filterer := buildFilterer(folder, gomodcache, opts) + return func(path string) bool { + return pathExcludedByFilter(path, filterer) + } +} + +// pathExcludedByFilter reports whether the path (relative to the workspace +// folder) should be excluded by the configured directory filters. +// +// TODO(rfindley): passing root and gomodcache here makes it confusing whether +// path should be absolute or relative, and has already caused at least one +// bug. +func pathExcludedByFilter(path string, filterer *source.Filterer) bool { + path = strings.TrimPrefix(filepath.ToSlash(path), "/") + return filterer.Disallow(path) +} + +func buildFilterer(folder, gomodcache string, opts *source.Options) *source.Filterer { + filters := opts.DirectoryFilters + + if pref := strings.TrimPrefix(gomodcache, folder); pref != gomodcache { + modcacheFilter := "-" + strings.TrimPrefix(filepath.ToSlash(pref), "/") + filters = append(filters, modcacheFilter) + } + return source.NewFilterer(filters) +} diff --git a/gopls/internal/lsp/cache/view_test.go b/gopls/internal/lsp/cache/view_test.go new file mode 100644 index 00000000000..4b456810577 --- /dev/null +++ b/gopls/internal/lsp/cache/view_test.go @@ -0,0 +1,278 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package cache + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +func TestCaseInsensitiveFilesystem(t *testing.T) { + base, err := ioutil.TempDir("", t.Name()) + if err != nil { + t.Fatal(err) + } + + inner := filepath.Join(base, "a/B/c/DEFgh") + if err := os.MkdirAll(inner, 0777); err != nil { + t.Fatal(err) + } + file := filepath.Join(inner, "f.go") + if err := ioutil.WriteFile(file, []byte("hi"), 0777); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(filepath.Join(inner, "F.go")); err != nil { + t.Skip("filesystem is case-sensitive") + } + + tests := []struct { + path string + err bool + }{ + {file, false}, + {filepath.Join(inner, "F.go"), true}, + {filepath.Join(base, "a/b/c/defgh/f.go"), true}, + } + for _, tt := range tests { + err := checkPathCase(tt.path) + if err != nil != tt.err { + t.Errorf("checkPathCase(%q) = %v, wanted error: %v", tt.path, err, tt.err) + } + } +} + +func TestFindWorkspaceModFile(t *testing.T) { + workspace := ` +-- a/go.mod -- +module a +-- a/x/x.go +package x +-- a/x/y/y.go +package x +-- b/go.mod -- +module b +-- b/c/go.mod -- +module bc +-- d/gopls.mod -- +module d-goplsworkspace +-- d/e/go.mod -- +module de +-- f/g/go.mod -- +module fg +` + dir, err := fake.Tempdir(fake.UnpackTxt(workspace)) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + tests := []struct { + folder, want string + }{ + {"", ""}, // no module at root, and more than one nested module + {"a", "a/go.mod"}, + {"a/x", "a/go.mod"}, + {"a/x/y", "a/go.mod"}, + {"b/c", "b/c/go.mod"}, + {"d", "d/e/go.mod"}, + {"d/e", "d/e/go.mod"}, + {"f", "f/g/go.mod"}, + } + + for _, test := range tests { + ctx := context.Background() + rel := fake.RelativeTo(dir) + folderURI := span.URIFromPath(rel.AbsPath(test.folder)) + excludeNothing := func(string) bool { return false } + got, err := findWorkspaceModFile(ctx, folderURI, New(nil, nil), excludeNothing) + if err != nil { + t.Fatal(err) + } + want := span.URI("") + if test.want != "" { + want = span.URIFromPath(rel.AbsPath(test.want)) + } + if got != want { + t.Errorf("findWorkspaceModFile(%q) = %q, want %q", test.folder, got, want) + } + } +} + +func TestInVendor(t *testing.T) { + for _, tt := range []struct { + path string + inVendor bool + }{ + {"foo/vendor/x.go", false}, + {"foo/vendor/x/x.go", true}, + {"foo/x.go", false}, + {"foo/vendor/foo.txt", false}, + {"foo/vendor/modules.txt", false}, + } { + if got := inVendor(span.URIFromPath(tt.path)); got != tt.inVendor { + t.Errorf("expected %s inVendor %v, got %v", tt.path, tt.inVendor, got) + } + } +} + +func TestFilters(t *testing.T) { + tests := []struct { + filters []string + included []string + excluded []string + }{ + { + included: []string{"x"}, + }, + { + filters: []string{"-"}, + excluded: []string{"x", "x/a"}, + }, + { + filters: []string{"-x", "+y"}, + included: []string{"y", "y/a", "z"}, + excluded: []string{"x", "x/a"}, + }, + { + filters: []string{"-x", "+x/y", "-x/y/z"}, + included: []string{"x/y", "x/y/a", "a"}, + excluded: []string{"x", "x/a", "x/y/z/a"}, + }, + { + filters: []string{"+foobar", "-foo"}, + included: []string{"foobar", "foobar/a"}, + excluded: []string{"foo", "foo/a"}, + }, + } + + for _, tt := range tests { + filterer := source.NewFilterer(tt.filters) + for _, inc := range tt.included { + if pathExcludedByFilter(inc, filterer) { + t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc) + } + } + for _, exc := range tt.excluded { + if !pathExcludedByFilter(exc, filterer) { + t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc) + } + } + } +} + +func TestSuffixes(t *testing.T) { + type file struct { + path string + want bool + } + type cases struct { + option []string + files []file + } + tests := []cases{ + {[]string{"tmpl", "gotmpl"}, []file{ // default + {"foo", false}, + {"foo.tmpl", true}, + {"foo.gotmpl", true}, + {"tmpl", false}, + {"tmpl.go", false}}, + }, + {[]string{"tmpl", "gotmpl", "html", "gohtml"}, []file{ + {"foo.gotmpl", true}, + {"foo.html", true}, + {"foo.gohtml", true}, + {"html", false}}, + }, + {[]string{"tmpl", "gotmpl", ""}, []file{ // possible user mistake + {"foo.gotmpl", true}, + {"foo.go", false}, + {"foo", false}}, + }, + } + for _, a := range tests { + suffixes := a.option + for _, b := range a.files { + got := fileHasExtension(b.path, suffixes) + if got != b.want { + t.Errorf("got %v, want %v, option %q, file %q (%+v)", + got, b.want, a.option, b.path, b) + } + } + } +} + +func TestView_Vulnerabilities(t *testing.T) { + // TODO(hyangah): use t.Cleanup when we get rid of go1.13 legacy CI. + defer func() { timeNow = time.Now }() + + now := time.Now() + + view := &View{ + vulns: make(map[span.URI]*govulncheck.Result), + } + file1, file2 := span.URIFromPath("f1/go.mod"), span.URIFromPath("f2/go.mod") + + vuln1 := &govulncheck.Result{AsOf: now.Add(-(maxGovulncheckResultAge * 3) / 4)} // already ~3/4*maxGovulncheckResultAge old + view.SetVulnerabilities(file1, vuln1) + + vuln2 := &govulncheck.Result{AsOf: now} // fresh. + view.SetVulnerabilities(file2, vuln2) + + t.Run("fresh", func(t *testing.T) { + got := view.Vulnerabilities() + want := map[span.URI]*govulncheck.Result{ + file1: vuln1, + file2: vuln2, + } + + if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" { + t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff) + } + }) + + // maxGovulncheckResultAge/2 later + timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge / 2) } + t.Run("after30min", func(t *testing.T) { + got := view.Vulnerabilities() + want := map[span.URI]*govulncheck.Result{ + file1: nil, // expired. + file2: vuln2, + } + + if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" { + t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff) + } + }) + + // maxGovulncheckResultAge later + timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge + time.Minute) } + + t.Run("after1hr", func(t *testing.T) { + got := view.Vulnerabilities() + want := map[span.URI]*govulncheck.Result{ + file1: nil, + file2: nil, + } + + if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" { + t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff) + } + }) +} + +func toJSON(x interface{}) string { + b, _ := json.MarshalIndent(x, "", " ") + return string(b) +} diff --git a/gopls/internal/lsp/cache/workspace.go b/gopls/internal/lsp/cache/workspace.go new file mode 100644 index 00000000000..e9845e89b23 --- /dev/null +++ b/gopls/internal/lsp/cache/workspace.go @@ -0,0 +1,177 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +// TODO(rfindley): now that experimentalWorkspaceModule is gone, this file can +// be massively cleaned up and/or removed. + +// computeWorkspaceModFiles computes the set of workspace mod files based on the +// value of go.mod, go.work, and GO111MODULE. +func computeWorkspaceModFiles(ctx context.Context, gomod, gowork span.URI, go111module go111module, fs source.FileSource) (map[span.URI]struct{}, error) { + if go111module == off { + return nil, nil + } + if gowork != "" { + fh, err := fs.GetFile(ctx, gowork) + if err != nil { + return nil, err + } + content, err := fh.Read() + if err != nil { + return nil, err + } + filename := gowork.Filename() + dir := filepath.Dir(filename) + workFile, err := modfile.ParseWork(filename, content, nil) + if err != nil { + return nil, fmt.Errorf("parsing go.work: %w", err) + } + modFiles := make(map[span.URI]struct{}) + for _, use := range workFile.Use { + modDir := filepath.FromSlash(use.Path) + if !filepath.IsAbs(modDir) { + modDir = filepath.Join(dir, modDir) + } + modURI := span.URIFromPath(filepath.Join(modDir, "go.mod")) + modFiles[modURI] = struct{}{} + } + return modFiles, nil + } + if gomod != "" { + return map[span.URI]struct{}{gomod: {}}, nil + } + return nil, nil +} + +// dirs returns the workspace directories for the loaded modules. +// +// A workspace directory is, roughly speaking, a directory for which we care +// about file changes. This is used for the purpose of registering file +// watching patterns, and expanding directory modifications to their adjacent +// files. +// +// TODO(rfindley): move this to snapshot.go. +// TODO(rfindley): can we make this abstraction simpler and/or more accurate? +func (s *snapshot) dirs(ctx context.Context) []span.URI { + dirSet := make(map[span.URI]struct{}) + + // Dirs should, at the very least, contain the working directory and folder. + dirSet[s.view.workingDir()] = struct{}{} + dirSet[s.view.folder] = struct{}{} + + // Additionally, if e.g. go.work indicates other workspace modules, we should + // include their directories too. + if s.workspaceModFilesErr == nil { + for modFile := range s.workspaceModFiles { + dir := filepath.Dir(modFile.Filename()) + dirSet[span.URIFromPath(dir)] = struct{}{} + } + } + var dirs []span.URI + for d := range dirSet { + dirs = append(dirs, d) + } + sort.Slice(dirs, func(i, j int) bool { return dirs[i] < dirs[j] }) + return dirs +} + +// isGoMod reports if uri is a go.mod file. +func isGoMod(uri span.URI) bool { + return filepath.Base(uri.Filename()) == "go.mod" +} + +// isGoWork reports if uri is a go.work file. +func isGoWork(uri span.URI) bool { + return filepath.Base(uri.Filename()) == "go.work" +} + +// fileExists reports if the file uri exists within source. +func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) { + fh, err := source.GetFile(ctx, uri) + if err != nil { + return false, err + } + return fileHandleExists(fh) +} + +// fileHandleExists reports if the file underlying fh actually exits. +func fileHandleExists(fh source.FileHandle) (bool, error) { + _, err := fh.Read() + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// errExhausted is returned by findModules if the file scan limit is reached. +var errExhausted = errors.New("exhausted") + +// Limit go.mod search to 1 million files. As a point of reference, +// Kubernetes has 22K files (as of 2020-11-24). +const fileLimit = 1000000 + +// findModules recursively walks the root directory looking for go.mod files, +// returning the set of modules it discovers. If modLimit is non-zero, +// searching stops once modLimit modules have been found. +// +// TODO(rfindley): consider overlays. +func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) { + // Walk the view's folder to find all modules in the view. + modFiles := make(map[span.URI]struct{}) + searched := 0 + errDone := errors.New("done") + err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error { + if err != nil { + // Probably a permission error. Keep looking. + return filepath.SkipDir + } + // For any path that is not the workspace folder, check if the path + // would be ignored by the go command. Vendor directories also do not + // contain workspace modules. + if info.IsDir() && path != root.Filename() { + suffix := strings.TrimPrefix(path, root.Filename()) + switch { + case checkIgnored(suffix), + strings.Contains(filepath.ToSlash(suffix), "/vendor/"), + excludePath(suffix): + return filepath.SkipDir + } + } + // We're only interested in go.mod files. + uri := span.URIFromPath(path) + if isGoMod(uri) { + modFiles[uri] = struct{}{} + } + if modLimit > 0 && len(modFiles) >= modLimit { + return errDone + } + searched++ + if fileLimit > 0 && searched >= fileLimit { + return errExhausted + } + return nil + }) + if err == errDone { + return modFiles, nil + } + return modFiles, err +} diff --git a/gopls/internal/lsp/call_hierarchy.go b/gopls/internal/lsp/call_hierarchy.go new file mode 100644 index 00000000000..79eeb25cc15 --- /dev/null +++ b/gopls/internal/lsp/call_hierarchy.go @@ -0,0 +1,42 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +func (s *Server) prepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + + return source.PrepareCallHierarchy(ctx, snapshot, fh, params.Position) +} + +func (s *Server) incomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go) + defer release() + if !ok { + return nil, err + } + + return source.IncomingCalls(ctx, snapshot, fh, params.Item.Range.Start) +} + +func (s *Server) outgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go) + defer release() + if !ok { + return nil, err + } + + return source.OutgoingCalls(ctx, snapshot, fh, params.Item.Range.Start) +} diff --git a/gopls/internal/lsp/cmd/call_hierarchy.go b/gopls/internal/lsp/cmd/call_hierarchy.go new file mode 100644 index 00000000000..eb5d29de808 --- /dev/null +++ b/gopls/internal/lsp/cmd/call_hierarchy.go @@ -0,0 +1,142 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// callHierarchy implements the callHierarchy verb for gopls. +type callHierarchy struct { + app *Application +} + +func (c *callHierarchy) Name() string { return "call_hierarchy" } +func (c *callHierarchy) Parent() string { return c.app.Name() } +func (c *callHierarchy) Usage() string { return "" } +func (c *callHierarchy) ShortHelp() string { return "display selected identifier's call hierarchy" } +func (c *callHierarchy) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls call_hierarchy helper/helper.go:8:6 + $ gopls call_hierarchy helper/helper.go:#53 +`) + printFlagDefaults(f) +} + +func (c *callHierarchy) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("call_hierarchy expects 1 argument (position)") + } + + conn, err := c.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + + p := protocol.CallHierarchyPrepareParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + + callItems, err := conn.PrepareCallHierarchy(ctx, &p) + if err != nil { + return err + } + if len(callItems) == 0 { + return fmt.Errorf("function declaration identifier not found at %v", args[0]) + } + + for _, item := range callItems { + incomingCalls, err := conn.IncomingCalls(ctx, &protocol.CallHierarchyIncomingCallsParams{Item: item}) + if err != nil { + return err + } + for i, call := range incomingCalls { + // From the spec: CallHierarchyIncomingCall.FromRanges is relative to + // the caller denoted by CallHierarchyIncomingCall.from. + printString, err := callItemPrintString(ctx, conn, call.From, call.From.URI, call.FromRanges) + if err != nil { + return err + } + fmt.Printf("caller[%d]: %s\n", i, printString) + } + + printString, err := callItemPrintString(ctx, conn, item, "", nil) + if err != nil { + return err + } + fmt.Printf("identifier: %s\n", printString) + + outgoingCalls, err := conn.OutgoingCalls(ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: item}) + if err != nil { + return err + } + for i, call := range outgoingCalls { + // From the spec: CallHierarchyOutgoingCall.FromRanges is the range + // relative to the caller, e.g the item passed to + printString, err := callItemPrintString(ctx, conn, call.To, item.URI, call.FromRanges) + if err != nil { + return err + } + fmt.Printf("callee[%d]: %s\n", i, printString) + } + } + + return nil +} + +// callItemPrintString returns a protocol.CallHierarchyItem object represented as a string. +// item and call ranges (protocol.Range) are converted to user friendly spans (1-indexed). +func callItemPrintString(ctx context.Context, conn *connection, item protocol.CallHierarchyItem, callsURI protocol.DocumentURI, calls []protocol.Range) (string, error) { + itemFile := conn.openFile(ctx, item.URI.SpanURI()) + if itemFile.err != nil { + return "", itemFile.err + } + itemSpan, err := itemFile.mapper.LocationSpan(protocol.Location{URI: item.URI, Range: item.Range}) + if err != nil { + return "", err + } + + callsFile := conn.openFile(ctx, callsURI.SpanURI()) + if callsURI != "" && callsFile.err != nil { + return "", callsFile.err + } + var callRanges []string + for _, rng := range calls { + call, err := callsFile.mapper.RangeSpan(rng) + if err != nil { + return "", err + } + callRange := fmt.Sprintf("%d:%d-%d", call.Start().Line(), call.Start().Column(), call.End().Column()) + callRanges = append(callRanges, callRange) + } + + printString := fmt.Sprintf("function %s in %v", item.Name, itemSpan) + if len(calls) > 0 { + printString = fmt.Sprintf("ranges %s in %s from/to %s", strings.Join(callRanges, ", "), callsURI.SpanURI().Filename(), printString) + } + return printString, nil +} diff --git a/internal/lsp/cmd/capabilities_test.go b/gopls/internal/lsp/cmd/capabilities_test.go similarity index 95% rename from internal/lsp/cmd/capabilities_test.go rename to gopls/internal/lsp/cmd/capabilities_test.go index 1d01b4bd0d7..4b38db751a4 100644 --- a/internal/lsp/cmd/capabilities_test.go +++ b/gopls/internal/lsp/cmd/capabilities_test.go @@ -12,9 +12,9 @@ import ( "path/filepath" "testing" - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) // TestCapabilities does some minimal validation of the server's adherence to the LSP. @@ -43,7 +43,7 @@ func TestCapabilities(t *testing.T) { params.Capabilities.Workspace.Configuration = true // Send an initialize request to the server. - c.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), c.Client) + c.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil, nil), app.options), c.Client) result, err := c.Server.Initialize(ctx, params) if err != nil { t.Fatal(err) diff --git a/gopls/internal/lsp/cmd/check.go b/gopls/internal/lsp/cmd/check.go new file mode 100644 index 00000000000..cf081ca2615 --- /dev/null +++ b/gopls/internal/lsp/cmd/check.go @@ -0,0 +1,73 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/span" +) + +// check implements the check verb for gopls. +type check struct { + app *Application +} + +func (c *check) Name() string { return "check" } +func (c *check) Parent() string { return c.app.Name() } +func (c *check) Usage() string { return "" } +func (c *check) ShortHelp() string { return "show diagnostic results for the specified file" } +func (c *check) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: show the diagnostic results of this file: + + $ gopls check internal/lsp/cmd/check.go +`) + printFlagDefaults(f) +} + +// Run performs the check on the files specified by args and prints the +// results to stdout. +func (c *check) Run(ctx context.Context, args ...string) error { + if len(args) == 0 { + // no files, so no results + return nil + } + checking := map[span.URI]*cmdFile{} + var uris []span.URI + // now we ready to kick things off + conn, err := c.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + for _, arg := range args { + uri := span.URIFromPath(arg) + uris = append(uris, uri) + file := conn.openFile(ctx, uri) + if file.err != nil { + return file.err + } + checking[uri] = file + } + if err := conn.diagnoseFiles(ctx, uris); err != nil { + return err + } + conn.Client.filesMu.Lock() + defer conn.Client.filesMu.Unlock() + + for _, file := range checking { + for _, d := range file.diagnostics { + spn, err := file.mapper.RangeSpan(d.Range) + if err != nil { + return fmt.Errorf("Could not convert position %v for %q", d.Range, d.Message) + } + fmt.Printf("%v: %v\n", spn, d.Message) + } + } + return nil +} diff --git a/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go similarity index 91% rename from internal/lsp/cmd/cmd.go rename to gopls/internal/lsp/cmd/cmd.go index a81eb839535..74725f70ee5 100644 --- a/internal/lsp/cmd/cmd.go +++ b/gopls/internal/lsp/cmd/cmd.go @@ -11,7 +11,6 @@ import ( "context" "flag" "fmt" - "go/token" "io/ioutil" "log" "os" @@ -22,14 +21,14 @@ import ( "text/tabwriter" "time" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/lsprpc" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/tool" "golang.org/x/tools/internal/xcontext" ) @@ -230,7 +229,7 @@ func (app *Application) Run(ctx context.Context, args ...string) error { return tool.CommandLineErrorf("Unknown command %v", command) } -// commands returns the set of commands supported by the gopls tool on the +// Commands returns the set of commands supported by the gopls tool on the // command line. // The command is specified by the first non flag argument. func (app *Application) Commands() []tool.Application { @@ -271,7 +270,6 @@ func (app *Application) featureCommands() []tool.Application { &signature{app: app}, &suggestedFix{app: app}, &symbols{app: app}, - newWorkspace(app), &workspaceSymbol{app: app}, &vulncheck{app: app}, } @@ -286,7 +284,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) { switch { case app.Remote == "": connection := newConnection(app) - connection.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), connection.Client) + connection.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil, nil), app.options), connection.Client) ctx = protocol.WithClient(ctx, connection.Client) return connection, connection.initialize(ctx, app.options) case strings.HasPrefix(app.Remote, "internal@"): @@ -385,8 +383,7 @@ type connection struct { type cmdClient struct { protocol.Server - app *Application - fset *token.FileSet + app *Application diagnosticsMu sync.Mutex diagnosticsDone chan struct{} @@ -397,9 +394,9 @@ type cmdClient struct { type cmdFile struct { uri span.URI - mapper *protocol.ColumnMapper + mapper *protocol.Mapper err error - added bool + open bool diagnostics []protocol.Diagnostic } @@ -407,7 +404,6 @@ func newConnection(app *Application) *connection { return &connection{ Client: &cmdClient{ app: app, - fset: token.NewFileSet(), files: make(map[span.URI]*cmdFile), }, } @@ -422,6 +418,10 @@ func fileURI(uri protocol.DocumentURI) span.URI { return sURI } +func (c *cmdClient) CodeLensRefresh(context.Context) error { return nil } + +func (c *cmdClient) LogTrace(context.Context, *protocol.LogTraceParams) error { return nil } + func (c *cmdClient) ShowMessage(ctx context.Context, p *protocol.ShowMessageParams) error { return nil } func (c *cmdClient) ShowMessageRequest(ctx context.Context, p *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) { @@ -528,6 +528,22 @@ func (c *cmdClient) WorkDoneProgressCreate(context.Context, *protocol.WorkDonePr return nil } +func (c *cmdClient) DiagnosticRefresh(context.Context) error { + return nil +} + +func (c *cmdClient) InlayHintRefresh(context.Context) error { + return nil +} + +func (c *cmdClient) SemanticTokensRefresh(context.Context) error { + return nil +} + +func (c *cmdClient) InlineValueRefresh(context.Context) error { + return nil +} + func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile { file, found := c.files[uri] if !found || file.err != nil { @@ -537,39 +553,34 @@ func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile { c.files[uri] = file } if file.mapper == nil { - fname := uri.Filename() - content, err := ioutil.ReadFile(fname) + content, err := ioutil.ReadFile(uri.Filename()) if err != nil { file.err = fmt.Errorf("getFile: %v: %v", uri, err) return file } - f := c.fset.AddFile(fname, -1, len(content)) - f.SetLinesForContent(content) - file.mapper = &protocol.ColumnMapper{ - URI: uri, - TokFile: f, - Content: content, - } + file.mapper = protocol.NewMapper(uri, content) } return file } -func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile { - c.Client.filesMu.Lock() - defer c.Client.filesMu.Unlock() +func (c *cmdClient) openFile(ctx context.Context, uri span.URI) *cmdFile { + c.filesMu.Lock() + defer c.filesMu.Unlock() - file := c.Client.getFile(ctx, uri) - // This should never happen. - if file == nil { - return &cmdFile{ - uri: uri, - err: fmt.Errorf("no file found for %s", uri), - } + file := c.getFile(ctx, uri) + if file.err != nil || file.open { + return file } - if file.err != nil || file.added { + file.open = true + return file +} + +func (c *connection) openFile(ctx context.Context, uri span.URI) *cmdFile { + file := c.Client.openFile(ctx, uri) + if file.err != nil { return file } - file.added = true + p := &protocol.DidOpenTextDocumentParams{ TextDocument: protocol.TextDocumentItem{ URI: protocol.URIFromSpanURI(uri), diff --git a/gopls/internal/lsp/cmd/definition.go b/gopls/internal/lsp/cmd/definition.go new file mode 100644 index 00000000000..952f43b5132 --- /dev/null +++ b/gopls/internal/lsp/cmd/definition.go @@ -0,0 +1,132 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "os" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// A Definition is the result of a 'definition' query. +type Definition struct { + Span span.Span `json:"span"` // span of the definition + Description string `json:"description"` // description of the denoted object +} + +// These constant is printed in the help, and then used in a test to verify the +// help is still valid. +// They refer to "Set" in "flag.FlagSet" from the DetailedHelp method below. +const ( + exampleLine = 44 + exampleColumn = 47 + exampleOffset = 1270 +) + +// definition implements the definition verb for gopls. +type definition struct { + app *Application + + JSON bool `flag:"json" help:"emit output in JSON format"` + MarkdownSupported bool `flag:"markdown" help:"support markdown in responses"` +} + +func (d *definition) Name() string { return "definition" } +func (d *definition) Parent() string { return d.app.Name() } +func (d *definition) Usage() string { return "[definition-flags] " } +func (d *definition) ShortHelp() string { return "show declaration of selected identifier" } +func (d *definition) DetailedHelp(f *flag.FlagSet) { + fmt.Fprintf(f.Output(), ` +Example: show the definition of the identifier at syntax at offset %[1]v in this file (flag.FlagSet): + + $ gopls definition internal/lsp/cmd/definition.go:%[1]v:%[2]v + $ gopls definition internal/lsp/cmd/definition.go:#%[3]v + +definition-flags: +`, exampleLine, exampleColumn, exampleOffset) + printFlagDefaults(f) +} + +// Run performs the definition query as specified by args and prints the +// results to stdout. +func (d *definition) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("definition expects 1 argument") + } + // Plaintext makes more sense for the command line. + opts := d.app.options + d.app.options = func(o *source.Options) { + if opts != nil { + opts(o) + } + o.PreferredContentFormat = protocol.PlainText + if d.MarkdownSupported { + o.PreferredContentFormat = protocol.Markdown + } + } + conn, err := d.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + p := protocol.DefinitionParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + locs, err := conn.Definition(ctx, &p) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + + if len(locs) == 0 { + return fmt.Errorf("%v: not an identifier", from) + } + q := protocol.HoverParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + hover, err := conn.Hover(ctx, &q) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + if hover == nil { + return fmt.Errorf("%v: not an identifier", from) + } + file = conn.openFile(ctx, fileURI(locs[0].URI)) + if file.err != nil { + return fmt.Errorf("%v: %v", from, file.err) + } + definition, err := file.mapper.LocationSpan(locs[0]) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + description := strings.TrimSpace(hover.Contents.Value) + result := &Definition{ + Span: definition, + Description: description, + } + if d.JSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", "\t") + return enc.Encode(result) + } + fmt.Printf("%v: defined here as %s", result.Span, result.Description) + return nil +} diff --git a/gopls/internal/lsp/cmd/folding_range.go b/gopls/internal/lsp/cmd/folding_range.go new file mode 100644 index 00000000000..68d93a3fb7e --- /dev/null +++ b/gopls/internal/lsp/cmd/folding_range.go @@ -0,0 +1,73 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// foldingRanges implements the folding_ranges verb for gopls +type foldingRanges struct { + app *Application +} + +func (r *foldingRanges) Name() string { return "folding_ranges" } +func (r *foldingRanges) Parent() string { return r.app.Name() } +func (r *foldingRanges) Usage() string { return "" } +func (r *foldingRanges) ShortHelp() string { return "display selected file's folding ranges" } +func (r *foldingRanges) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ gopls folding_ranges helper/helper.go +`) + printFlagDefaults(f) +} + +func (r *foldingRanges) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("folding_ranges expects 1 argument (file)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + + p := protocol.FoldingRangeParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(from.URI()), + }, + } + + ranges, err := conn.FoldingRange(ctx, &p) + if err != nil { + return err + } + + for _, r := range ranges { + fmt.Printf("%v:%v-%v:%v\n", + r.StartLine+1, + r.StartCharacter+1, + r.EndLine+1, + r.EndCharacter+1, + ) + } + + return nil +} diff --git a/gopls/internal/lsp/cmd/format.go b/gopls/internal/lsp/cmd/format.go new file mode 100644 index 00000000000..31dfb172ece --- /dev/null +++ b/gopls/internal/lsp/cmd/format.go @@ -0,0 +1,109 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/diff" +) + +// format implements the format verb for gopls. +type format struct { + Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` + Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` + List bool `flag:"l,list" help:"list files whose formatting differs from gofmt's"` + + app *Application +} + +func (c *format) Name() string { return "format" } +func (c *format) Parent() string { return c.app.Name() } +func (c *format) Usage() string { return "[format-flags] " } +func (c *format) ShortHelp() string { return "format the code according to the go standard" } +func (c *format) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +The arguments supplied may be simple file names, or ranges within files. + +Example: reformat this file: + + $ gopls format -w internal/lsp/cmd/check.go + +format-flags: +`) + printFlagDefaults(f) +} + +// Run performs the check on the files specified by args and prints the +// results to stdout. +func (c *format) Run(ctx context.Context, args ...string) error { + if len(args) == 0 { + // no files, so no results + return nil + } + // now we ready to kick things off + conn, err := c.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + for _, arg := range args { + spn := span.Parse(arg) + file := conn.openFile(ctx, spn.URI()) + if file.err != nil { + return file.err + } + filename := spn.URI().Filename() + loc, err := file.mapper.SpanLocation(spn) + if err != nil { + return err + } + if loc.Range.Start != loc.Range.End { + return fmt.Errorf("only full file formatting supported") + } + p := protocol.DocumentFormattingParams{ + TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, + } + edits, err := conn.Formatting(ctx, &p) + if err != nil { + return fmt.Errorf("%v: %v", spn, err) + } + formatted, sedits, err := source.ApplyProtocolEdits(file.mapper, edits) + if err != nil { + return fmt.Errorf("%v: %v", spn, err) + } + printIt := true + if c.List { + printIt = false + if len(edits) > 0 { + fmt.Println(filename) + } + } + if c.Write { + printIt = false + if len(edits) > 0 { + ioutil.WriteFile(filename, []byte(formatted), 0644) + } + } + if c.Diff { + printIt = false + unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) + if err != nil { + return err + } + fmt.Print(unified) + } + if printIt { + fmt.Print(formatted) + } + } + return nil +} diff --git a/internal/lsp/cmd/help_test.go b/gopls/internal/lsp/cmd/help_test.go similarity index 77% rename from internal/lsp/cmd/help_test.go rename to gopls/internal/lsp/cmd/help_test.go index 536d19dc219..6bd3c8c501f 100644 --- a/internal/lsp/cmd/help_test.go +++ b/gopls/internal/lsp/cmd/help_test.go @@ -12,7 +12,8 @@ import ( "path/filepath" "testing" - "golang.org/x/tools/internal/lsp/cmd" + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/cmd" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/tool" ) @@ -45,12 +46,12 @@ func TestHelpFiles(t *testing.T) { } return } - expect, err := ioutil.ReadFile(helpFile) - switch { - case err != nil: - t.Errorf("Missing help file %q", helpFile) - case !bytes.Equal(expect, got): - t.Errorf("Help file %q did not match, got:\n%q\nwant:\n%q", helpFile, string(got), string(expect)) + want, err := ioutil.ReadFile(helpFile) + if err != nil { + t.Fatalf("Missing help file %q", helpFile) + } + if diff := cmp.Diff(string(want), string(got)); diff != "" { + t.Errorf("Help file %q did not match, run with -update-help-files to fix (-want +got)\n%s", helpFile, diff) } }) } diff --git a/gopls/internal/lsp/cmd/highlight.go b/gopls/internal/lsp/cmd/highlight.go new file mode 100644 index 00000000000..60c04b2d46c --- /dev/null +++ b/gopls/internal/lsp/cmd/highlight.go @@ -0,0 +1,82 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// highlight implements the highlight verb for gopls. +type highlight struct { + app *Application +} + +func (r *highlight) Name() string { return "highlight" } +func (r *highlight) Parent() string { return r.app.Name() } +func (r *highlight) Usage() string { return "" } +func (r *highlight) ShortHelp() string { return "display selected identifier's highlights" } +func (r *highlight) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls highlight helper/helper.go:8:6 + $ gopls highlight helper/helper.go:#53 +`) + printFlagDefaults(f) +} + +func (r *highlight) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("highlight expects 1 argument (position)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + + p := protocol.DocumentHighlightParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + highlights, err := conn.DocumentHighlight(ctx, &p) + if err != nil { + return err + } + + var results []span.Span + for _, h := range highlights { + s, err := file.mapper.RangeSpan(h.Range) + if err != nil { + return err + } + results = append(results, s) + } + // Sort results to make tests deterministic since DocumentHighlight uses a map. + span.SortSpans(results) + + for _, s := range results { + fmt.Println(s) + } + return nil +} diff --git a/gopls/internal/lsp/cmd/implementation.go b/gopls/internal/lsp/cmd/implementation.go new file mode 100644 index 00000000000..bb5b1c24edb --- /dev/null +++ b/gopls/internal/lsp/cmd/implementation.go @@ -0,0 +1,84 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "sort" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// implementation implements the implementation verb for gopls +type implementation struct { + app *Application +} + +func (i *implementation) Name() string { return "implementation" } +func (i *implementation) Parent() string { return i.app.Name() } +func (i *implementation) Usage() string { return "" } +func (i *implementation) ShortHelp() string { return "display selected identifier's implementation" } +func (i *implementation) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls implementation helper/helper.go:8:6 + $ gopls implementation helper/helper.go:#53 +`) + printFlagDefaults(f) +} + +func (i *implementation) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("implementation expects 1 argument (position)") + } + + conn, err := i.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + + p := protocol.ImplementationParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + implementations, err := conn.Implementation(ctx, &p) + if err != nil { + return err + } + + var spans []string + for _, impl := range implementations { + f := conn.openFile(ctx, fileURI(impl.URI)) + span, err := f.mapper.LocationSpan(impl) + if err != nil { + return err + } + spans = append(spans, fmt.Sprint(span)) + } + sort.Strings(spans) + + for _, s := range spans { + fmt.Println(s) + } + + return nil +} diff --git a/gopls/internal/lsp/cmd/imports.go b/gopls/internal/lsp/cmd/imports.go new file mode 100644 index 00000000000..fadc8466834 --- /dev/null +++ b/gopls/internal/lsp/cmd/imports.go @@ -0,0 +1,104 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/tool" +) + +// imports implements the import verb for gopls. +type imports struct { + Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` + Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` + + app *Application +} + +func (t *imports) Name() string { return "imports" } +func (t *imports) Parent() string { return t.app.Name() } +func (t *imports) Usage() string { return "[imports-flags] " } +func (t *imports) ShortHelp() string { return "updates import statements" } +func (t *imports) DetailedHelp(f *flag.FlagSet) { + fmt.Fprintf(f.Output(), ` +Example: update imports statements in a file: + + $ gopls imports -w internal/lsp/cmd/check.go + +imports-flags: +`) + printFlagDefaults(f) +} + +// Run performs diagnostic checks on the file specified and either; +// - if -w is specified, updates the file in place; +// - if -d is specified, prints out unified diffs of the changes; or +// - otherwise, prints the new versions to stdout. +func (t *imports) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("imports expects 1 argument") + } + conn, err := t.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + uri := from.URI() + file := conn.openFile(ctx, uri) + if file.err != nil { + return file.err + } + actions, err := conn.CodeAction(ctx, &protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + var edits []protocol.TextEdit + for _, a := range actions { + if a.Title != "Organize Imports" { + continue + } + for _, c := range a.Edit.DocumentChanges { + if c.TextDocumentEdit != nil { + if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri { + edits = append(edits, c.TextDocumentEdit.Edits...) + } + } + } + } + newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits) + if err != nil { + return fmt.Errorf("%v: %v", edits, err) + } + filename := file.uri.Filename() + switch { + case t.Write: + if len(edits) > 0 { + ioutil.WriteFile(filename, []byte(newContent), 0644) + } + case t.Diff: + unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) + if err != nil { + return err + } + fmt.Print(unified) + default: + fmt.Print(string(newContent)) + } + return nil +} diff --git a/gopls/internal/lsp/cmd/info.go b/gopls/internal/lsp/cmd/info.go new file mode 100644 index 00000000000..68ef40ffb29 --- /dev/null +++ b/gopls/internal/lsp/cmd/info.go @@ -0,0 +1,246 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "net/url" + "os" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/browser" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/tool" +) + +// help implements the help command. +type help struct { + app *Application +} + +func (h *help) Name() string { return "help" } +func (h *help) Parent() string { return h.app.Name() } +func (h *help) Usage() string { return "" } +func (h *help) ShortHelp() string { return "print usage information for subcommands" } +func (h *help) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` + +Examples: +$ gopls help # main gopls help message +$ gopls help remote # help on 'remote' command +$ gopls help remote sessions # help on 'remote sessions' subcommand +`) + printFlagDefaults(f) +} + +// Run prints help information about a subcommand. +func (h *help) Run(ctx context.Context, args ...string) error { + find := func(cmds []tool.Application, name string) tool.Application { + for _, cmd := range cmds { + if cmd.Name() == name { + return cmd + } + } + return nil + } + + // Find the subcommand denoted by args (empty => h.app). + var cmd tool.Application = h.app + for i, arg := range args { + cmd = find(getSubcommands(cmd), arg) + if cmd == nil { + return tool.CommandLineErrorf( + "no such subcommand: %s", strings.Join(args[:i+1], " ")) + } + } + + // 'gopls help cmd subcmd' is equivalent to 'gopls cmd subcmd -h'. + // The flag package prints the usage information (defined by tool.Run) + // when it sees the -h flag. + fs := flag.NewFlagSet(cmd.Name(), flag.ExitOnError) + return tool.Run(ctx, fs, h.app, append(args[:len(args):len(args)], "-h")) +} + +// version implements the version command. +type version struct { + JSON bool `flag:"json" help:"outputs in json format."` + + app *Application +} + +func (v *version) Name() string { return "version" } +func (v *version) Parent() string { return v.app.Name() } +func (v *version) Usage() string { return "" } +func (v *version) ShortHelp() string { return "print the gopls version information" } +func (v *version) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ``) + printFlagDefaults(f) +} + +// Run prints version information to stdout. +func (v *version) Run(ctx context.Context, args ...string) error { + var mode = debug.PlainText + if v.JSON { + mode = debug.JSON + } + + return debug.PrintVersionInfo(ctx, os.Stdout, v.app.verbose(), mode) +} + +// bug implements the bug command. +type bug struct { + app *Application +} + +func (b *bug) Name() string { return "bug" } +func (b *bug) Parent() string { return b.app.Name() } +func (b *bug) Usage() string { return "" } +func (b *bug) ShortHelp() string { return "report a bug in gopls" } +func (b *bug) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ``) + printFlagDefaults(f) +} + +const goplsBugPrefix = "x/tools/gopls: " +const goplsBugHeader = `ATTENTION: Please answer these questions BEFORE submitting your issue. Thanks! + +#### What did you do? +If possible, provide a recipe for reproducing the error. +A complete runnable program is good. +A link on play.golang.org is better. +A failing unit test is the best. + +#### What did you expect to see? + + +#### What did you see instead? + + +` + +// Run collects some basic information and then prepares an issue ready to +// be reported. +func (b *bug) Run(ctx context.Context, args ...string) error { + buf := &bytes.Buffer{} + fmt.Fprint(buf, goplsBugHeader) + debug.PrintVersionInfo(ctx, buf, true, debug.Markdown) + body := buf.String() + title := strings.Join(args, " ") + if !strings.HasPrefix(title, goplsBugPrefix) { + title = goplsBugPrefix + title + } + if !browser.Open("https://github.com/golang/go/issues/new?title=" + url.QueryEscape(title) + "&body=" + url.QueryEscape(body)) { + fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n") + fmt.Print(body) + } + return nil +} + +type apiJSON struct { + app *Application +} + +func (j *apiJSON) Name() string { return "api-json" } +func (j *apiJSON) Parent() string { return j.app.Name() } +func (j *apiJSON) Usage() string { return "" } +func (j *apiJSON) ShortHelp() string { return "print json describing gopls API" } +func (j *apiJSON) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ``) + printFlagDefaults(f) +} + +func (j *apiJSON) Run(ctx context.Context, args ...string) error { + js, err := json.MarshalIndent(source.GeneratedAPIJSON, "", "\t") + if err != nil { + return err + } + fmt.Fprint(os.Stdout, string(js)) + return nil +} + +type licenses struct { + app *Application +} + +func (l *licenses) Name() string { return "licenses" } +func (l *licenses) Parent() string { return l.app.Name() } +func (l *licenses) Usage() string { return "" } +func (l *licenses) ShortHelp() string { return "print licenses of included software" } +func (l *licenses) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ``) + printFlagDefaults(f) +} + +const licensePreamble = ` +gopls is made available under the following BSD-style license: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +gopls implements the LSP specification, which is made available under the following license: + +Copyright (c) Microsoft Corporation + +All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +gopls also includes software made available under these licenses: +` + +func (l *licenses) Run(ctx context.Context, args ...string) error { + opts := source.DefaultOptions() + l.app.options(opts) + txt := licensePreamble + if opts.LicensesText == "" { + txt += "(development gopls, license information not available)" + } else { + txt += opts.LicensesText + } + fmt.Fprint(os.Stdout, txt) + return nil +} diff --git a/gopls/internal/lsp/cmd/links.go b/gopls/internal/lsp/cmd/links.go new file mode 100644 index 00000000000..b5413bba59f --- /dev/null +++ b/gopls/internal/lsp/cmd/links.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "os" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// links implements the links verb for gopls. +type links struct { + JSON bool `flag:"json" help:"emit document links in JSON format"` + + app *Application +} + +func (l *links) Name() string { return "links" } +func (l *links) Parent() string { return l.app.Name() } +func (l *links) Usage() string { return "[links-flags] " } +func (l *links) ShortHelp() string { return "list links in a file" } +func (l *links) DetailedHelp(f *flag.FlagSet) { + fmt.Fprintf(f.Output(), ` +Example: list links contained within a file: + + $ gopls links internal/lsp/cmd/check.go + +links-flags: +`) + printFlagDefaults(f) +} + +// Run finds all the links within a document +// - if -json is specified, outputs location range and uri +// - otherwise, prints the a list of unique links +func (l *links) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("links expects 1 argument") + } + conn, err := l.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + uri := from.URI() + file := conn.openFile(ctx, uri) + if file.err != nil { + return file.err + } + results, err := conn.DocumentLink(ctx, &protocol.DocumentLinkParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + if l.JSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", "\t") + return enc.Encode(results) + } + for _, v := range results { + fmt.Println(v.Target) + } + return nil +} diff --git a/gopls/internal/lsp/cmd/prepare_rename.go b/gopls/internal/lsp/cmd/prepare_rename.go new file mode 100644 index 00000000000..5e9d732fbf2 --- /dev/null +++ b/gopls/internal/lsp/cmd/prepare_rename.go @@ -0,0 +1,80 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "errors" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// prepareRename implements the prepare_rename verb for gopls. +type prepareRename struct { + app *Application +} + +func (r *prepareRename) Name() string { return "prepare_rename" } +func (r *prepareRename) Parent() string { return r.app.Name() } +func (r *prepareRename) Usage() string { return "" } +func (r *prepareRename) ShortHelp() string { return "test validity of a rename operation at location" } +func (r *prepareRename) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls prepare_rename helper/helper.go:8:6 + $ gopls prepare_rename helper/helper.go:#53 +`) + printFlagDefaults(f) +} + +// ErrInvalidRenamePosition is returned when prepareRename is run at a position that +// is not a candidate for renaming. +var ErrInvalidRenamePosition = errors.New("request is not valid at the given position") + +func (r *prepareRename) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("prepare_rename expects 1 argument (file)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + p := protocol.PrepareRenameParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + result, err := conn.PrepareRename(ctx, &p) + if err != nil { + return fmt.Errorf("prepare_rename failed: %w", err) + } + if result == nil { + return ErrInvalidRenamePosition + } + + s, err := file.mapper.RangeSpan(result.Range) + if err != nil { + return err + } + + fmt.Println(s) + return nil +} diff --git a/gopls/internal/lsp/cmd/references.go b/gopls/internal/lsp/cmd/references.go new file mode 100644 index 00000000000..6db5ce34e75 --- /dev/null +++ b/gopls/internal/lsp/cmd/references.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "sort" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// references implements the references verb for gopls +type references struct { + IncludeDeclaration bool `flag:"d,declaration" help:"include the declaration of the specified identifier in the results"` + + app *Application +} + +func (r *references) Name() string { return "references" } +func (r *references) Parent() string { return r.app.Name() } +func (r *references) Usage() string { return "[references-flags] " } +func (r *references) ShortHelp() string { return "display selected identifier's references" } +func (r *references) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls references helper/helper.go:8:6 + $ gopls references helper/helper.go:#53 + +references-flags: +`) + printFlagDefaults(f) +} + +func (r *references) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("references expects 1 argument (position)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + p := protocol.ReferenceParams{ + Context: protocol.ReferenceContext{ + IncludeDeclaration: r.IncludeDeclaration, + }, + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + locations, err := conn.References(ctx, &p) + if err != nil { + return err + } + var spans []string + for _, l := range locations { + f := conn.openFile(ctx, fileURI(l.URI)) + // convert location to span for user-friendly 1-indexed line + // and column numbers + span, err := f.mapper.LocationSpan(l) + if err != nil { + return err + } + spans = append(spans, fmt.Sprint(span)) + } + + sort.Strings(spans) + for _, s := range spans { + fmt.Println(s) + } + return nil +} diff --git a/internal/lsp/cmd/remote.go b/gopls/internal/lsp/cmd/remote.go similarity index 97% rename from internal/lsp/cmd/remote.go rename to gopls/internal/lsp/cmd/remote.go index 0f4c7216444..684981cfff8 100644 --- a/internal/lsp/cmd/remote.go +++ b/gopls/internal/lsp/cmd/remote.go @@ -13,8 +13,8 @@ import ( "log" "os" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/lsprpc" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" ) type remote struct { diff --git a/gopls/internal/lsp/cmd/rename.go b/gopls/internal/lsp/cmd/rename.go new file mode 100644 index 00000000000..5a6677d5112 --- /dev/null +++ b/gopls/internal/lsp/cmd/rename.go @@ -0,0 +1,130 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/tool" +) + +// rename implements the rename verb for gopls. +type rename struct { + Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` + Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` + Preserve bool `flag:"preserve" help:"preserve original files"` + + app *Application +} + +func (r *rename) Name() string { return "rename" } +func (r *rename) Parent() string { return r.app.Name() } +func (r *rename) Usage() string { return "[rename-flags] " } +func (r *rename) ShortHelp() string { return "rename selected identifier" } +func (r *rename) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-based location (:line:column or :#position) of the thing to change + $ gopls rename helper/helper.go:8:6 Foo + $ gopls rename helper/helper.go:#53 Foo + +rename-flags: +`) + printFlagDefaults(f) +} + +// Run renames the specified identifier and either; +// - if -w is specified, updates the file(s) in place; +// - if -d is specified, prints out unified diffs of the changes; or +// - otherwise, prints the new versions to stdout. +func (r *rename) Run(ctx context.Context, args ...string) error { + if len(args) != 2 { + return tool.CommandLineErrorf("definition expects 2 arguments (position, new name)") + } + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + p := protocol.RenameParams{ + TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, + Position: loc.Range.Start, + NewName: args[1], + } + edit, err := conn.Rename(ctx, &p) + if err != nil { + return err + } + var orderedURIs []string + edits := map[span.URI][]protocol.TextEdit{} + for _, c := range edit.DocumentChanges { + if c.TextDocumentEdit != nil { + uri := fileURI(c.TextDocumentEdit.TextDocument.URI) + edits[uri] = append(edits[uri], c.TextDocumentEdit.Edits...) + orderedURIs = append(orderedURIs, string(uri)) + } + } + sort.Strings(orderedURIs) + changeCount := len(orderedURIs) + + for _, u := range orderedURIs { + uri := span.URIFromURI(u) + cmdFile := conn.openFile(ctx, uri) + filename := cmdFile.uri.Filename() + + newContent, renameEdits, err := source.ApplyProtocolEdits(cmdFile.mapper, edits[uri]) + if err != nil { + return fmt.Errorf("%v: %v", edits, err) + } + + switch { + case r.Write: + fmt.Fprintln(os.Stderr, filename) + if r.Preserve { + if err := os.Rename(filename, filename+".orig"); err != nil { + return fmt.Errorf("%v: %v", edits, err) + } + } + ioutil.WriteFile(filename, []byte(newContent), 0644) + case r.Diff: + unified, err := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits) + if err != nil { + return err + } + fmt.Print(unified) + default: + if len(orderedURIs) > 1 { + fmt.Printf("%s:\n", filepath.Base(filename)) + } + fmt.Print(string(newContent)) + if changeCount > 1 { // if this wasn't last change, print newline + fmt.Println() + } + changeCount -= 1 + } + } + return nil +} diff --git a/internal/lsp/cmd/semantictokens.go b/gopls/internal/lsp/cmd/semantictokens.go similarity index 94% rename from internal/lsp/cmd/semantictokens.go rename to gopls/internal/lsp/cmd/semantictokens.go index 7dbb7f93c61..6747e468707 100644 --- a/internal/lsp/cmd/semantictokens.go +++ b/gopls/internal/lsp/cmd/semantictokens.go @@ -16,10 +16,10 @@ import ( "os" "unicode/utf8" - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" ) // generate semantic tokens and interpolate them in the file @@ -39,7 +39,7 @@ import ( // 0-based: the first line is line 0, the first character of a line // is character 0, and characters are counted as UTF-16 code points // gopls (and Go error messages): -// 1-based: the first line is line1, the first chararcter of a line +// 1-based: the first line is line1, the first character of a line // is character 0, and characters are counted as bytes // internal (as used in marks, and lines:=bytes.Split(buf, '\n')) // 0-based: lines and character positions are 1 less than in @@ -49,7 +49,7 @@ type semtok struct { app *Application } -var colmap *protocol.ColumnMapper +var colmap *protocol.Mapper func (c *semtok) Name() string { return "semtok" } func (c *semtok) Parent() string { return c.app.Name() } @@ -82,7 +82,7 @@ func (c *semtok) Run(ctx context.Context, args ...string) error { } defer conn.terminate(ctx) uri := span.URIFromPath(args[0]) - file := conn.AddFile(ctx, uri) + file := conn.openFile(ctx, uri) if file.err != nil { return file.err } @@ -117,7 +117,7 @@ func (c *semtok) Run(ctx context.Context, args ...string) error { // can't happen; just parsed this file return fmt.Errorf("can't find %s in fset", args[0]) } - colmap = protocol.NewColumnMapper(uri, buf) + colmap = protocol.NewMapper(uri, buf) err = decorate(file.uri.Filename(), resp.Data) if err != nil { return err diff --git a/gopls/internal/lsp/cmd/serve.go b/gopls/internal/lsp/cmd/serve.go new file mode 100644 index 00000000000..44d4b1d1d6b --- /dev/null +++ b/gopls/internal/lsp/cmd/serve.go @@ -0,0 +1,130 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "log" + "os" + "time" + + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/fakenet" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/tool" +) + +// Serve is a struct that exposes the configurable parts of the LSP server as +// flags, in the right form for tool.Main to consume. +type Serve struct { + Logfile string `flag:"logfile" help:"filename to log to. if value is \"auto\", then logging to a default output file is enabled"` + Mode string `flag:"mode" help:"no effect"` + Port int `flag:"port" help:"port on which to run gopls for debugging purposes"` + Address string `flag:"listen" help:"address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used."` + IdleTimeout time.Duration `flag:"listen.timeout" help:"when used with -listen, shut down the server when there are no connected clients for this duration"` + Trace bool `flag:"rpc.trace" help:"print the full rpc trace in lsp inspector format"` + Debug string `flag:"debug" help:"serve debug information on the supplied address"` + + RemoteListenTimeout time.Duration `flag:"remote.listen.timeout" help:"when used with -remote=auto, the -listen.timeout value used to start the daemon"` + RemoteDebug string `flag:"remote.debug" help:"when used with -remote=auto, the -debug value used to start the daemon"` + RemoteLogfile string `flag:"remote.logfile" help:"when used with -remote=auto, the -logfile value used to start the daemon"` + + app *Application +} + +func (s *Serve) Name() string { return "serve" } +func (s *Serve) Parent() string { return s.app.Name() } +func (s *Serve) Usage() string { return "[server-flags]" } +func (s *Serve) ShortHelp() string { + return "run a server for Go code using the Language Server Protocol" +} +func (s *Serve) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` gopls [flags] [server-flags] + +The server communicates using JSONRPC2 on stdin and stdout, and is intended to be run directly as +a child of an editor process. + +server-flags: +`) + printFlagDefaults(f) +} + +func (s *Serve) remoteArgs(network, address string) []string { + args := []string{"serve", + "-listen", fmt.Sprintf(`%s;%s`, network, address), + } + if s.RemoteDebug != "" { + args = append(args, "-debug", s.RemoteDebug) + } + if s.RemoteListenTimeout != 0 { + args = append(args, "-listen.timeout", s.RemoteListenTimeout.String()) + } + if s.RemoteLogfile != "" { + args = append(args, "-logfile", s.RemoteLogfile) + } + return args +} + +// Run configures a server based on the flags, and then runs it. +// It blocks until the server shuts down. +func (s *Serve) Run(ctx context.Context, args ...string) error { + if len(args) > 0 { + return tool.CommandLineErrorf("server does not take arguments, got %v", args) + } + + di := debug.GetInstance(ctx) + isDaemon := s.Address != "" || s.Port != 0 + if di != nil { + closeLog, err := di.SetLogFile(s.Logfile, isDaemon) + if err != nil { + return err + } + defer closeLog() + di.ServerAddress = s.Address + di.MonitorMemory(ctx) + di.Serve(ctx, s.Debug) + } + var ss jsonrpc2.StreamServer + if s.app.Remote != "" { + var err error + ss, err = lsprpc.NewForwarder(s.app.Remote, s.remoteArgs) + if err != nil { + return fmt.Errorf("creating forwarder: %w", err) + } + } else { + ss = lsprpc.NewStreamServer(cache.New(nil, nil), isDaemon, s.app.options) + } + + var network, addr string + if s.Address != "" { + network, addr = lsprpc.ParseAddr(s.Address) + } + if s.Port != 0 { + network = "tcp" + addr = fmt.Sprintf(":%v", s.Port) + } + if addr != "" { + log.Printf("Gopls daemon: listening on %s network, address %s...", network, addr) + defer log.Printf("Gopls daemon: exiting") + return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout) + } + stream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout)) + if s.Trace && di != nil { + stream = protocol.LoggingStream(stream, di.LogWriter) + } + conn := jsonrpc2.NewConn(stream) + err := ss.ServeStream(ctx, conn) + if errors.Is(err, io.EOF) { + return nil + } + return err +} diff --git a/gopls/internal/lsp/cmd/signature.go b/gopls/internal/lsp/cmd/signature.go new file mode 100644 index 00000000000..4d47cd2d4bc --- /dev/null +++ b/gopls/internal/lsp/cmd/signature.go @@ -0,0 +1,88 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// signature implements the signature verb for gopls +type signature struct { + app *Application +} + +func (r *signature) Name() string { return "signature" } +func (r *signature) Parent() string { return r.app.Name() } +func (r *signature) Usage() string { return "" } +func (r *signature) ShortHelp() string { return "display selected identifier's signature" } +func (r *signature) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ # 1-indexed location (:line:column or :#offset) of the target identifier + $ gopls signature helper/helper.go:8:6 + $ gopls signature helper/helper.go:#53 +`) + printFlagDefaults(f) +} + +func (r *signature) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("signature expects 1 argument (position)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + file := conn.openFile(ctx, from.URI()) + if file.err != nil { + return file.err + } + + loc, err := file.mapper.SpanLocation(from) + if err != nil { + return err + } + + p := protocol.SignatureHelpParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + + s, err := conn.SignatureHelp(ctx, &p) + if err != nil { + return err + } + + if s == nil || len(s.Signatures) == 0 { + return tool.CommandLineErrorf("%v: not a function", from) + } + + // there is only ever one possible signature, + // see toProtocolSignatureHelp in lsp/signature_help.go + signature := s.Signatures[0] + fmt.Printf("%s\n", signature.Label) + switch x := signature.Documentation.Value.(type) { + case string: + if x != "" { + fmt.Printf("\n%s\n", x) + } + case protocol.MarkupContent: + if x.Value != "" { + fmt.Printf("\n%s\n", x.Value) + } + } + + return nil +} diff --git a/internal/lsp/cmd/subcommands.go b/gopls/internal/lsp/cmd/subcommands.go similarity index 100% rename from internal/lsp/cmd/subcommands.go rename to gopls/internal/lsp/cmd/subcommands.go diff --git a/gopls/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go new file mode 100644 index 00000000000..b96849d3781 --- /dev/null +++ b/gopls/internal/lsp/cmd/suggested_fix.go @@ -0,0 +1,166 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/tool" +) + +// suggestedFix implements the fix verb for gopls. +type suggestedFix struct { + Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` + Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` + All bool `flag:"a,all" help:"apply all fixes, not just preferred fixes"` + + app *Application +} + +func (s *suggestedFix) Name() string { return "fix" } +func (s *suggestedFix) Parent() string { return s.app.Name() } +func (s *suggestedFix) Usage() string { return "[fix-flags] " } +func (s *suggestedFix) ShortHelp() string { return "apply suggested fixes" } +func (s *suggestedFix) DetailedHelp(f *flag.FlagSet) { + fmt.Fprintf(f.Output(), ` +Example: apply suggested fixes for this file + $ gopls fix -w internal/lsp/cmd/check.go + +fix-flags: +`) + printFlagDefaults(f) +} + +// Run performs diagnostic checks on the file specified and either; +// - if -w is specified, updates the file in place; +// - if -d is specified, prints out unified diffs of the changes; or +// - otherwise, prints the new versions to stdout. +func (s *suggestedFix) Run(ctx context.Context, args ...string) error { + if len(args) < 1 { + return tool.CommandLineErrorf("fix expects at least 1 argument") + } + conn, err := s.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + uri := from.URI() + file := conn.openFile(ctx, uri) + if file.err != nil { + return file.err + } + + if err := conn.diagnoseFiles(ctx, []span.URI{uri}); err != nil { + return err + } + conn.Client.filesMu.Lock() + defer conn.Client.filesMu.Unlock() + + codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix} + if len(args) > 1 { + codeActionKinds = []protocol.CodeActionKind{} + for _, k := range args[1:] { + codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k)) + } + } + + rng, err := file.mapper.SpanRange(from) + if err != nil { + return err + } + p := protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Context: protocol.CodeActionContext{ + Only: codeActionKinds, + Diagnostics: file.diagnostics, + }, + Range: rng, + } + actions, err := conn.CodeAction(ctx, &p) + if err != nil { + return fmt.Errorf("%v: %v", from, err) + } + var edits []protocol.TextEdit + for _, a := range actions { + if a.Command != nil { + return fmt.Errorf("ExecuteCommand is not yet supported on the command line") + } + if !a.IsPreferred && !s.All { + continue + } + if !from.HasPosition() { + for _, c := range a.Edit.DocumentChanges { + if c.TextDocumentEdit != nil { + if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri { + edits = append(edits, c.TextDocumentEdit.Edits...) + } + } + } + continue + } + // If the span passed in has a position, then we need to find + // the codeaction that has the same range as the passed in span. + for _, diag := range a.Diagnostics { + spn, err := file.mapper.RangeSpan(diag.Range) + if err != nil { + continue + } + if span.ComparePoint(from.Start(), spn.Start()) == 0 { + for _, c := range a.Edit.DocumentChanges { + if c.TextDocumentEdit != nil { + if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri { + edits = append(edits, c.TextDocumentEdit.Edits...) + } + } + } + break + } + } + + // If suggested fix is not a diagnostic, still must collect edits. + if len(a.Diagnostics) == 0 { + for _, c := range a.Edit.DocumentChanges { + if c.TextDocumentEdit != nil { + if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri { + edits = append(edits, c.TextDocumentEdit.Edits...) + } + } + } + } + } + + newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits) + if err != nil { + return fmt.Errorf("%v: %v", edits, err) + } + + filename := file.uri.Filename() + switch { + case s.Write: + if len(edits) > 0 { + ioutil.WriteFile(filename, []byte(newContent), 0644) + } + case s.Diff: + diffs, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) + if err != nil { + return err + } + fmt.Print(diffs) + default: + fmt.Print(string(newContent)) + } + return nil +} diff --git a/gopls/internal/lsp/cmd/symbols.go b/gopls/internal/lsp/cmd/symbols.go new file mode 100644 index 00000000000..3ecdff8011c --- /dev/null +++ b/gopls/internal/lsp/cmd/symbols.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "sort" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/tool" +) + +// symbols implements the symbols verb for gopls +type symbols struct { + app *Application +} + +func (r *symbols) Name() string { return "symbols" } +func (r *symbols) Parent() string { return r.app.Name() } +func (r *symbols) Usage() string { return "" } +func (r *symbols) ShortHelp() string { return "display selected file's symbols" } +func (r *symbols) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + $ gopls symbols helper/helper.go +`) + printFlagDefaults(f) +} +func (r *symbols) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("symbols expects 1 argument (position)") + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + from := span.Parse(args[0]) + p := protocol.DocumentSymbolParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(from.URI()), + }, + } + symbols, err := conn.DocumentSymbol(ctx, &p) + if err != nil { + return err + } + for _, s := range symbols { + if m, ok := s.(map[string]interface{}); ok { + s, err = mapToSymbol(m) + if err != nil { + return err + } + } + switch t := s.(type) { + case protocol.DocumentSymbol: + printDocumentSymbol(t) + case protocol.SymbolInformation: + printSymbolInformation(t) + } + } + return nil +} + +func mapToSymbol(m map[string]interface{}) (interface{}, error) { + b, err := json.Marshal(m) + if err != nil { + return nil, err + } + + if _, ok := m["selectionRange"]; ok { + var s protocol.DocumentSymbol + if err := json.Unmarshal(b, &s); err != nil { + return nil, err + } + return s, nil + } + + var s protocol.SymbolInformation + if err := json.Unmarshal(b, &s); err != nil { + return nil, err + } + return s, nil +} + +func printDocumentSymbol(s protocol.DocumentSymbol) { + fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.SelectionRange)) + // Sort children for consistency + sort.Slice(s.Children, func(i, j int) bool { + return s.Children[i].Name < s.Children[j].Name + }) + for _, c := range s.Children { + fmt.Printf("\t%s %s %s\n", c.Name, c.Kind, positionToString(c.SelectionRange)) + } +} + +func printSymbolInformation(s protocol.SymbolInformation) { + fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.Location.Range)) +} + +func positionToString(r protocol.Range) string { + return fmt.Sprintf("%v:%v-%v:%v", + r.Start.Line+1, + r.Start.Character+1, + r.End.Line+1, + r.End.Character+1, + ) +} diff --git a/gopls/internal/lsp/cmd/test/cmdtest.go b/gopls/internal/lsp/cmd/test/cmdtest.go new file mode 100644 index 00000000000..7f8a13b762d --- /dev/null +++ b/gopls/internal/lsp/cmd/test/cmdtest.go @@ -0,0 +1,6 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmdtest contains the test suite for the command line behavior of gopls. +package cmdtest diff --git a/gopls/internal/lsp/cmd/test/integration_test.go b/gopls/internal/lsp/cmd/test/integration_test.go new file mode 100644 index 00000000000..956fb59059b --- /dev/null +++ b/gopls/internal/lsp/cmd/test/integration_test.go @@ -0,0 +1,898 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package cmdtest + +// This file defines integration tests of each gopls subcommand that +// fork+exec the command in a separate process. +// +// (Rather than execute 'go build gopls' during the test, we reproduce +// the main entrypoint in the test executable.) +// +// The purpose of this test is to exercise client-side logic such as +// argument parsing and formatting of LSP RPC responses, not server +// behavior; see lsp_test for that. +// +// All tests run in parallel. +// +// TODO(adonovan): +// - Use markers to represent positions in the input and in assertions. +// - Coverage of cross-cutting things like cwd, enviro, span parsing, etc. +// - Subcommands that accept -write and -diff flags should implement +// them consistently wrt the default behavior; factor their tests. +// - Add missing test for 'vulncheck' subcommand. +// - Add tests for client-only commands: serve, bug, help, api-json, licenses. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/gopls/internal/lsp/cmd" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/internal/tool" + "golang.org/x/tools/txtar" +) + +// TestVersion tests the 'version' subcommand (../info.go). +func TestVersion(t *testing.T) { + t.Parallel() + + tree := writeTree(t, "") + + // There's not much we can robustly assert about the actual version. + const want = debug.Version // e.g. "master" + + // basic + { + res := gopls(t, tree, "version") + res.checkExit(true) + res.checkStdout(want) + } + + // -json flag + { + res := gopls(t, tree, "version", "-json") + res.checkExit(true) + var v debug.ServerVersion + if res.toJSON(&v) { + if v.Version != want { + t.Errorf("expected Version %q, got %q (%v)", want, v.Version, res) + } + } + } +} + +// TestCheck tests the 'check' subcommand (../check.go). +func TestCheck(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +import "fmt" +var _ = fmt.Sprintf("%s", 123) + +-- b.go -- +package a +import "fmt" +var _ = fmt.Sprintf("%d", "123") +`) + + // no files + { + res := gopls(t, tree, "check") + res.checkExit(true) + if res.stdout != "" { + t.Errorf("unexpected output: %v", res) + } + } + + // one file + { + res := gopls(t, tree, "check", "./a.go") + res.checkExit(true) + res.checkStdout("fmt.Sprintf format %s has arg 123 of wrong type int") + } + + // two files + { + res := gopls(t, tree, "check", "./a.go", "./b.go") + res.checkExit(true) + res.checkStdout(`a.go:.* fmt.Sprintf format %s has arg 123 of wrong type int`) + res.checkStdout(`b.go:.* fmt.Sprintf format %d has arg "123" of wrong type string`) + } +} + +// TestCallHierarchy tests the 'call_hierarchy' subcommand (../call_hierarchy.go). +func TestCallHierarchy(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func f() {} +func g() { + f() +} +func h() { + f() + f() +} +`) + // missing position + { + res := gopls(t, tree, "call_hierarchy") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // wrong place + { + res := gopls(t, tree, "call_hierarchy", "a.go:1") + res.checkExit(false) + res.checkStderr("identifier not found") + } + // f is called once from g and twice from h. + { + res := gopls(t, tree, "call_hierarchy", "a.go:2:6") + res.checkExit(true) + // We use regexp '.' as an OS-agnostic path separator. + res.checkStdout("ranges 7:2-3, 8:2-3 in ..a.go from/to function h in ..a.go:6:6-7") + res.checkStdout("ranges 4:2-3 in ..a.go from/to function g in ..a.go:3:6-7") + res.checkStdout("identifier: function f in ..a.go:2:6-7") + } +} + +// TestDefinition tests the 'definition' subcommand (../definition.go). +func TestDefinition(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +import "fmt" +func f() { + fmt.Println() +} +func g() { + f() +} +`) + // missing position + { + res := gopls(t, tree, "definition") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // intra-package + { + res := gopls(t, tree, "definition", "a.go:7:2") // "f()" + res.checkExit(true) + res.checkStdout("a.go:3:6-7: defined here as func f") + } + // cross-package + { + res := gopls(t, tree, "definition", "a.go:4:7") // "Println" + res.checkExit(true) + res.checkStdout("print.go.* defined here as func fmt.Println") + res.checkStdout("Println formats using the default formats for its operands") + } + // -json and -markdown + { + res := gopls(t, tree, "definition", "-json", "-markdown", "a.go:4:7") + res.checkExit(true) + var defn cmd.Definition + if res.toJSON(&defn) { + if !strings.HasPrefix(defn.Description, "```go\nfunc fmt.Println") { + t.Errorf("Description does not start with markdown code block. Got: %s", defn.Description) + } + } + } +} + +// TestFoldingRanges tests the 'folding_ranges' subcommand (../folding_range.go). +func TestFoldingRanges(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func f(x int) { + // hello +} +`) + // missing filename + { + res := gopls(t, tree, "folding_ranges") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // success + { + res := gopls(t, tree, "folding_ranges", "a.go") + res.checkExit(true) + res.checkStdout("2:8-2:13") // params (x int) + res.checkStdout("2:16-4:1") // body { ... } + } +} + +// TestFormat tests the 'format' subcommand (../format.go). +func TestFormat(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- a.go -- +package a ; func f ( ) { } +`) + const want = `package a + +func f() {} +` + + // no files => nop + { + res := gopls(t, tree, "format") + res.checkExit(true) + } + // default => print formatted result + { + res := gopls(t, tree, "format", "a.go") + res.checkExit(true) + if res.stdout != want { + t.Errorf("format: got <<%s>>, want <<%s>>", res.stdout, want) + } + } + // start/end position not supported (unless equal to start/end of file) + { + res := gopls(t, tree, "format", "a.go:1-2") + res.checkExit(false) + res.checkStderr("only full file formatting supported") + } + // -list: show only file names + { + res := gopls(t, tree, "format", "-list", "a.go") + res.checkExit(true) + res.checkStdout("a.go") + } + // -diff prints a unified diff + { + res := gopls(t, tree, "format", "-diff", "a.go") + res.checkExit(true) + // We omit the filenames as they vary by OS. + want := ` +-package a ; func f ( ) { } ++package a ++ ++func f() {} +` + res.checkStdout(regexp.QuoteMeta(want)) + } + // -write updates the file + { + res := gopls(t, tree, "format", "-write", "a.go") + res.checkExit(true) + res.checkStdout("^$") // empty + checkContent(t, filepath.Join(tree, "a.go"), want) + } +} + +// TestHighlight tests the 'highlight' subcommand (../highlight.go). +func TestHighlight(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- a.go -- +package a +import "fmt" +func f() { + fmt.Println() + fmt.Println() +} +`) + + // no arguments + { + res := gopls(t, tree, "highlight") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // all occurrences of Println + { + res := gopls(t, tree, "highlight", "a.go:4:7") + res.checkExit(true) + res.checkStdout("a.go:4:6-13") + res.checkStdout("a.go:5:6-13") + } +} + +// TestImplementations tests the 'implementation' subcommand (../implementation.go). +func TestImplementations(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- a.go -- +package a +import "fmt" +type T int +func (T) String() string { return "" } +`) + + // no arguments + { + res := gopls(t, tree, "implementation") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // T.String + { + res := gopls(t, tree, "implementation", "a.go:4:10") + res.checkExit(true) + // TODO(adonovan): extract and check the content of the reported ranges? + // We use regexp '.' as an OS-agnostic path separator. + res.checkStdout("fmt.print.go:") // fmt.Stringer.String + res.checkStdout("runtime.error.go:") // runtime.stringer.String + } +} + +// TestImports tests the 'imports' subcommand (../imports.go). +func TestImports(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- a.go -- +package a +func _() { + fmt.Println() +} +`) + + want := ` +package a + +import "fmt" +func _() { + fmt.Println() +} +`[1:] + + // no arguments + { + res := gopls(t, tree, "imports") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // default: print with imports + { + res := gopls(t, tree, "imports", "a.go") + res.checkExit(true) + if res.stdout != want { + t.Errorf("format: got <<%s>>, want <<%s>>", res.stdout, want) + } + } + // -diff: show a unified diff + { + res := gopls(t, tree, "imports", "-diff", "a.go") + res.checkExit(true) + res.checkStdout(regexp.QuoteMeta(`+import "fmt"`)) + } + // -write: update file + { + res := gopls(t, tree, "imports", "-write", "a.go") + res.checkExit(true) + checkContent(t, filepath.Join(tree, "a.go"), want) + } +} + +// TestLinks tests the 'links' subcommand (../links.go). +func TestLinks(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- a.go -- +// Link in package doc: https://pkg.go.dev/ +package a + +// Link in internal comment: https://go.dev/cl + +// Doc comment link: https://blog.go.dev/ +func f() {} +`) + // no arguments + { + res := gopls(t, tree, "links") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // success + { + res := gopls(t, tree, "links", "a.go") + res.checkExit(true) + res.checkStdout("https://go.dev/cl") + res.checkStdout("https://pkg.go.dev") + res.checkStdout("https://blog.go.dev/") + } + // -json + { + res := gopls(t, tree, "links", "-json", "a.go") + res.checkExit(true) + res.checkStdout("https://pkg.go.dev") + res.checkStdout("https://go.dev/cl") + res.checkStdout("https://blog.go.dev/") // at 5:21-5:41 + var links []protocol.DocumentLink + if res.toJSON(&links) { + // Check just one of the three locations. + if got, want := fmt.Sprint(links[2].Range), "5:21-5:41"; got != want { + t.Errorf("wrong link location: got %v, want %v", got, want) + } + } + } +} + +// TestReferences tests the 'references' subcommand (../references.go). +func TestReferences(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +import "fmt" +func f() { + fmt.Println() +} + +-- b.go -- +package a +import "fmt" +func g() { + fmt.Println() +} +`) + // no arguments + { + res := gopls(t, tree, "references") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // fmt.Println + { + res := gopls(t, tree, "references", "a.go:4:10") + res.checkExit(true) + res.checkStdout("a.go:4:6-13") + res.checkStdout("b.go:4:6-13") + } +} + +// TestSignature tests the 'signature' subcommand (../signature.go). +func TestSignature(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +import "fmt" +func f() { + fmt.Println(123) +} +`) + // no arguments + { + res := gopls(t, tree, "signature") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // at 123 inside fmt.Println() call + { + res := gopls(t, tree, "signature", "a.go:4:15") + res.checkExit(true) + res.checkStdout("Println\\(a ...") + res.checkStdout("Println formats using the default formats...") + } +} + +// TestPrepareRename tests the 'prepare_rename' subcommand (../prepare_rename.go). +func TestPrepareRename(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func oldname() {} +`) + // no arguments + { + res := gopls(t, tree, "prepare_rename") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // in 'package' keyword + { + res := gopls(t, tree, "prepare_rename", "a.go:1:3") + res.checkExit(false) + res.checkStderr("request is not valid at the given position") + } + // in 'package' identifier (not supported by client) + { + res := gopls(t, tree, "prepare_rename", "a.go:1:9") + res.checkExit(false) + res.checkStderr("can't rename package") + } + // in func oldname + { + res := gopls(t, tree, "prepare_rename", "a.go:2:9") + res.checkExit(true) + res.checkStdout("a.go:2:6-13") // all of "oldname" + } +} + +// TestRename tests the 'rename' subcommand (../rename.go). +func TestRename(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func oldname() {} +`) + // no arguments + { + res := gopls(t, tree, "rename") + res.checkExit(false) + res.checkStderr("expects 2 arguments") + } + // missing newname + { + res := gopls(t, tree, "rename", "a.go:1:3") + res.checkExit(false) + res.checkStderr("expects 2 arguments") + } + // in 'package' keyword + { + res := gopls(t, tree, "rename", "a.go:1:3", "newname") + res.checkExit(false) + res.checkStderr("no object found") + } + // in 'package' identifier + { + res := gopls(t, tree, "rename", "a.go:1:9", "newname") + res.checkExit(false) + res.checkStderr(`cannot rename package: module path .* same as the package path, so .* no effect`) + } + // success, func oldname (and -diff) + { + res := gopls(t, tree, "rename", "-diff", "a.go:2:9", "newname") + res.checkExit(true) + res.checkStdout(regexp.QuoteMeta("-func oldname() {}")) + res.checkStdout(regexp.QuoteMeta("+func newname() {}")) + } +} + +// TestSymbols tests the 'symbols' subcommand (../symbols.go). +func TestSymbols(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func f() +var v int +const c = 0 +`) + // no files + { + res := gopls(t, tree, "symbols") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // success + { + res := gopls(t, tree, "symbols", "a.go:123:456") // (line/col ignored) + res.checkExit(true) + res.checkStdout("f Function 2:6-2:7") + res.checkStdout("v Variable 3:5-3:6") + res.checkStdout("c Constant 4:7-4:8") + } +} + +// TestSemtok tests the 'semtok' subcommand (../semantictokens.go). +func TestSemtok(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func f() +var v int +const c = 0 +`) + // no files + { + res := gopls(t, tree, "semtok") + res.checkExit(false) + res.checkStderr("expected one file name") + } + // success + { + res := gopls(t, tree, "semtok", "a.go") + res.checkExit(true) + got := res.stdout + want := ` +/*⇒7,keyword,[]*/package /*⇒1,namespace,[]*/a +/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/f() +/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/v /*⇒3,type,[defaultLibrary]*/int +/*⇒5,keyword,[]*/const /*⇒1,variable,[definition readonly]*/c = /*⇒1,number,[]*/0 +`[1:] + if got != want { + t.Errorf("semtok: got <<%s>>, want <<%s>>", got, want) + } + } +} + +// TestFix tests the 'fix' subcommand (../suggested_fix.go). +func TestFix(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +var _ error = T(0) +type T int +func f() (int, string) { return } +`) + want := ` +package a +var _ error = T(0) +type T int +func f() (int, string) { return 0, "" } +`[1:] + + // no arguments + { + res := gopls(t, tree, "fix") + res.checkExit(false) + res.checkStderr("expects at least 1 argument") + } + // success (-a enables fillreturns) + { + res := gopls(t, tree, "fix", "-a", "a.go") + res.checkExit(true) + got := res.stdout + if got != want { + t.Errorf("fix: got <<%s>>, want <<%s>>", got, want) + } + } + // TODO(adonovan): more tests: + // - -write, -diff: factor with imports, format, rename. + // - without -all flag + // - args[2:] is an optional list of protocol.CodeActionKind enum values. + // - a span argument with a range causes filtering. +} + +// TestWorkspaceSymbol tests the 'workspace_symbol' subcommand (../workspace_symbol.go). +func TestWorkspaceSymbol(t *testing.T) { + t.Parallel() + + tree := writeTree(t, ` +-- go.mod -- +module example.com +go 1.18 + +-- a.go -- +package a +func someFunctionName() +`) + // no files + { + res := gopls(t, tree, "workspace_symbol") + res.checkExit(false) + res.checkStderr("expects 1 argument") + } + // success + { + res := gopls(t, tree, "workspace_symbol", "meFun") + res.checkExit(true) + res.checkStdout("a.go:2:6-22 someFunctionName Function") + } +} + +// -- test framework -- + +func TestMain(m *testing.M) { + switch os.Getenv("ENTRYPOINT") { + case "goplsMain": + goplsMain() + default: + os.Exit(m.Run()) + } +} + +// This function is a stand-in for gopls.main in ../../../../main.go. +func goplsMain() { + bug.PanicOnBugs = true // (not in the production command) + tool.Main(context.Background(), cmd.New("gopls", "", nil, hooks.Options), os.Args[1:]) +} + +// writeTree extracts a txtar archive into a new directory and returns its path. +func writeTree(t *testing.T, archive string) string { + root := t.TempDir() + + // This unfortunate step is required because gopls output + // expands symbolic links it its input file names (arguably it + // should not), and on macOS the temp dir is in /var -> private/var. + root, err := filepath.EvalSymlinks(root) + if err != nil { + t.Fatal(err) + } + + for _, f := range txtar.Parse([]byte(archive)).Files { + filename := filepath.Join(root, f.Name) + if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filename, f.Data, 0666); err != nil { + t.Fatal(err) + } + } + return root +} + +// gopls executes gopls in a child process. +func gopls(t *testing.T, dir string, args ...string) *result { + testenv.NeedsTool(t, "go") + + // Catch inadvertent use of dir=".", which would make + // the ReplaceAll below unpredictable. + if !filepath.IsAbs(dir) { + t.Fatalf("dir is not absolute: %s", dir) + } + + cmd := exec.Command(os.Args[0], args...) + cmd.Env = append(os.Environ(), "ENTRYPOINT=goplsMain") + cmd.Dir = dir + cmd.Stdout = new(bytes.Buffer) + cmd.Stderr = new(bytes.Buffer) + + cmdErr := cmd.Run() + + stdout := strings.ReplaceAll(fmt.Sprint(cmd.Stdout), dir, ".") + stderr := strings.ReplaceAll(fmt.Sprint(cmd.Stderr), dir, ".") + exitcode := 0 + if cmdErr != nil { + if exitErr, ok := cmdErr.(*exec.ExitError); ok { + exitcode = exitErr.ExitCode() + } else { + stderr = cmdErr.Error() // (execve failure) + exitcode = -1 + } + } + res := &result{ + t: t, + command: "gopls " + strings.Join(args, " "), + exitcode: exitcode, + stdout: stdout, + stderr: stderr, + } + if false { + t.Log(res) + } + return res +} + +// A result holds the result of a gopls invocation, and provides assertion helpers. +type result struct { + t *testing.T + command string + exitcode int + stdout, stderr string +} + +func (res *result) String() string { + return fmt.Sprintf("%s: exit=%d stdout=<<%s>> stderr=<<%s>>", + res.command, res.exitcode, res.stdout, res.stderr) +} + +// checkExit asserts that gopls returned the expected exit code. +func (res *result) checkExit(success bool) { + res.t.Helper() + if (res.exitcode == 0) != success { + res.t.Errorf("%s: exited with code %d, want success: %t (%s)", + res.command, res.exitcode, success, res) + } +} + +// checkStdout asserts that the gopls standard output matches the pattern. +func (res *result) checkStdout(pattern string) { + res.t.Helper() + res.checkOutput(pattern, "stdout", res.stdout) +} + +// checkStderr asserts that the gopls standard error matches the pattern. +func (res *result) checkStderr(pattern string) { + res.t.Helper() + res.checkOutput(pattern, "stderr", res.stderr) +} + +func (res *result) checkOutput(pattern, name, content string) { + res.t.Helper() + if match, err := regexp.MatchString(pattern, content); err != nil { + res.t.Errorf("invalid regexp: %v", err) + } else if !match { + res.t.Errorf("%s: %s does not match [%s]; got <<%s>>", + res.command, name, pattern, content) + } +} + +// toJSON decodes res.stdout as JSON into to *ptr and reports its success. +func (res *result) toJSON(ptr interface{}) bool { + if err := json.Unmarshal([]byte(res.stdout), ptr); err != nil { + res.t.Errorf("invalid JSON %v", err) + return false + } + return true +} + +// checkContent checks that the contents of the file are as expected. +func checkContent(t *testing.T, filename, want string) { + data, err := os.ReadFile(filename) + if err != nil { + t.Error(err) + return + } + if got := string(data); got != want { + t.Errorf("content of %s is <<%s>>, want <<%s>>", filename, got, want) + } +} diff --git a/internal/lsp/cmd/usage/api-json.hlp b/gopls/internal/lsp/cmd/usage/api-json.hlp similarity index 100% rename from internal/lsp/cmd/usage/api-json.hlp rename to gopls/internal/lsp/cmd/usage/api-json.hlp diff --git a/internal/lsp/cmd/usage/bug.hlp b/gopls/internal/lsp/cmd/usage/bug.hlp similarity index 100% rename from internal/lsp/cmd/usage/bug.hlp rename to gopls/internal/lsp/cmd/usage/bug.hlp diff --git a/internal/lsp/cmd/usage/call_hierarchy.hlp b/gopls/internal/lsp/cmd/usage/call_hierarchy.hlp similarity index 100% rename from internal/lsp/cmd/usage/call_hierarchy.hlp rename to gopls/internal/lsp/cmd/usage/call_hierarchy.hlp diff --git a/internal/lsp/cmd/usage/check.hlp b/gopls/internal/lsp/cmd/usage/check.hlp similarity index 100% rename from internal/lsp/cmd/usage/check.hlp rename to gopls/internal/lsp/cmd/usage/check.hlp diff --git a/internal/lsp/cmd/usage/definition.hlp b/gopls/internal/lsp/cmd/usage/definition.hlp similarity index 100% rename from internal/lsp/cmd/usage/definition.hlp rename to gopls/internal/lsp/cmd/usage/definition.hlp diff --git a/internal/lsp/cmd/usage/fix.hlp b/gopls/internal/lsp/cmd/usage/fix.hlp similarity index 100% rename from internal/lsp/cmd/usage/fix.hlp rename to gopls/internal/lsp/cmd/usage/fix.hlp diff --git a/internal/lsp/cmd/usage/folding_ranges.hlp b/gopls/internal/lsp/cmd/usage/folding_ranges.hlp similarity index 100% rename from internal/lsp/cmd/usage/folding_ranges.hlp rename to gopls/internal/lsp/cmd/usage/folding_ranges.hlp diff --git a/internal/lsp/cmd/usage/format.hlp b/gopls/internal/lsp/cmd/usage/format.hlp similarity index 100% rename from internal/lsp/cmd/usage/format.hlp rename to gopls/internal/lsp/cmd/usage/format.hlp diff --git a/internal/lsp/cmd/usage/help.hlp b/gopls/internal/lsp/cmd/usage/help.hlp similarity index 100% rename from internal/lsp/cmd/usage/help.hlp rename to gopls/internal/lsp/cmd/usage/help.hlp diff --git a/internal/lsp/cmd/usage/highlight.hlp b/gopls/internal/lsp/cmd/usage/highlight.hlp similarity index 100% rename from internal/lsp/cmd/usage/highlight.hlp rename to gopls/internal/lsp/cmd/usage/highlight.hlp diff --git a/internal/lsp/cmd/usage/implementation.hlp b/gopls/internal/lsp/cmd/usage/implementation.hlp similarity index 100% rename from internal/lsp/cmd/usage/implementation.hlp rename to gopls/internal/lsp/cmd/usage/implementation.hlp diff --git a/internal/lsp/cmd/usage/imports.hlp b/gopls/internal/lsp/cmd/usage/imports.hlp similarity index 100% rename from internal/lsp/cmd/usage/imports.hlp rename to gopls/internal/lsp/cmd/usage/imports.hlp diff --git a/internal/lsp/cmd/usage/inspect.hlp b/gopls/internal/lsp/cmd/usage/inspect.hlp similarity index 100% rename from internal/lsp/cmd/usage/inspect.hlp rename to gopls/internal/lsp/cmd/usage/inspect.hlp diff --git a/internal/lsp/cmd/usage/licenses.hlp b/gopls/internal/lsp/cmd/usage/licenses.hlp similarity index 100% rename from internal/lsp/cmd/usage/licenses.hlp rename to gopls/internal/lsp/cmd/usage/licenses.hlp diff --git a/internal/lsp/cmd/usage/links.hlp b/gopls/internal/lsp/cmd/usage/links.hlp similarity index 100% rename from internal/lsp/cmd/usage/links.hlp rename to gopls/internal/lsp/cmd/usage/links.hlp diff --git a/internal/lsp/cmd/usage/prepare_rename.hlp b/gopls/internal/lsp/cmd/usage/prepare_rename.hlp similarity index 100% rename from internal/lsp/cmd/usage/prepare_rename.hlp rename to gopls/internal/lsp/cmd/usage/prepare_rename.hlp diff --git a/internal/lsp/cmd/usage/references.hlp b/gopls/internal/lsp/cmd/usage/references.hlp similarity index 100% rename from internal/lsp/cmd/usage/references.hlp rename to gopls/internal/lsp/cmd/usage/references.hlp diff --git a/internal/lsp/cmd/usage/remote.hlp b/gopls/internal/lsp/cmd/usage/remote.hlp similarity index 100% rename from internal/lsp/cmd/usage/remote.hlp rename to gopls/internal/lsp/cmd/usage/remote.hlp diff --git a/internal/lsp/cmd/usage/rename.hlp b/gopls/internal/lsp/cmd/usage/rename.hlp similarity index 100% rename from internal/lsp/cmd/usage/rename.hlp rename to gopls/internal/lsp/cmd/usage/rename.hlp diff --git a/internal/lsp/cmd/usage/semtok.hlp b/gopls/internal/lsp/cmd/usage/semtok.hlp similarity index 100% rename from internal/lsp/cmd/usage/semtok.hlp rename to gopls/internal/lsp/cmd/usage/semtok.hlp diff --git a/internal/lsp/cmd/usage/serve.hlp b/gopls/internal/lsp/cmd/usage/serve.hlp similarity index 100% rename from internal/lsp/cmd/usage/serve.hlp rename to gopls/internal/lsp/cmd/usage/serve.hlp diff --git a/internal/lsp/cmd/usage/signature.hlp b/gopls/internal/lsp/cmd/usage/signature.hlp similarity index 100% rename from internal/lsp/cmd/usage/signature.hlp rename to gopls/internal/lsp/cmd/usage/signature.hlp diff --git a/internal/lsp/cmd/usage/symbols.hlp b/gopls/internal/lsp/cmd/usage/symbols.hlp similarity index 100% rename from internal/lsp/cmd/usage/symbols.hlp rename to gopls/internal/lsp/cmd/usage/symbols.hlp diff --git a/internal/lsp/cmd/usage/usage.hlp b/gopls/internal/lsp/cmd/usage/usage.hlp similarity index 97% rename from internal/lsp/cmd/usage/usage.hlp rename to gopls/internal/lsp/cmd/usage/usage.hlp index eaa05c5fa3a..404750b7d38 100644 --- a/internal/lsp/cmd/usage/usage.hlp +++ b/gopls/internal/lsp/cmd/usage/usage.hlp @@ -37,7 +37,6 @@ Features signature display selected identifier's signature fix apply suggested fixes symbols display selected file's symbols - workspace manage the gopls workspace (experimental: under development) workspace_symbol search symbols in workspace vulncheck run experimental vulncheck analysis (experimental: under development) diff --git a/internal/lsp/cmd/usage/version.hlp b/gopls/internal/lsp/cmd/usage/version.hlp similarity index 100% rename from internal/lsp/cmd/usage/version.hlp rename to gopls/internal/lsp/cmd/usage/version.hlp diff --git a/gopls/internal/lsp/cmd/usage/vulncheck.hlp b/gopls/internal/lsp/cmd/usage/vulncheck.hlp new file mode 100644 index 00000000000..4fbe573e22a --- /dev/null +++ b/gopls/internal/lsp/cmd/usage/vulncheck.hlp @@ -0,0 +1,17 @@ +run experimental vulncheck analysis (experimental: under development) + +Usage: + gopls [flags] vulncheck + + WARNING: this command is experimental. + + By default, the command outputs a JSON-encoded + golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult + message. + Example: + $ gopls vulncheck + + -config + If true, the command reads a JSON-encoded package load configuration from stdin + -summary + If true, outputs a JSON-encoded govulnchecklib.Summary JSON diff --git a/internal/lsp/cmd/usage/workspace_symbol.hlp b/gopls/internal/lsp/cmd/usage/workspace_symbol.hlp similarity index 100% rename from internal/lsp/cmd/usage/workspace_symbol.hlp rename to gopls/internal/lsp/cmd/usage/workspace_symbol.hlp diff --git a/gopls/internal/lsp/cmd/vulncheck.go b/gopls/internal/lsp/cmd/vulncheck.go new file mode 100644 index 00000000000..5c851b66e78 --- /dev/null +++ b/gopls/internal/lsp/cmd/vulncheck.go @@ -0,0 +1,84 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "os" + + "golang.org/x/tools/go/packages" + vulnchecklib "golang.org/x/tools/gopls/internal/vulncheck" + "golang.org/x/tools/internal/tool" +) + +// vulncheck implements the vulncheck command. +type vulncheck struct { + Config bool `flag:"config" help:"If true, the command reads a JSON-encoded package load configuration from stdin"` + AsSummary bool `flag:"summary" help:"If true, outputs a JSON-encoded govulnchecklib.Summary JSON"` + app *Application +} + +type pkgLoadConfig struct { + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // If Tests is set, the loader includes related test packages. + Tests bool +} + +// TODO(hyangah): document pkgLoadConfig + +func (v *vulncheck) Name() string { return "vulncheck" } +func (v *vulncheck) Parent() string { return v.app.Name() } +func (v *vulncheck) Usage() string { return "" } +func (v *vulncheck) ShortHelp() string { + return "run experimental vulncheck analysis (experimental: under development)" +} +func (v *vulncheck) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` + WARNING: this command is experimental. + + By default, the command outputs a JSON-encoded + golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult + message. + Example: + $ gopls vulncheck + +`) + printFlagDefaults(f) +} + +func (v *vulncheck) Run(ctx context.Context, args ...string) error { + if vulnchecklib.Main == nil { + return fmt.Errorf("vulncheck command is available only in gopls compiled with go1.18 or newer") + } + + // TODO(hyangah): what's wrong with allowing multiple targets? + if len(args) > 1 { + return tool.CommandLineErrorf("vulncheck accepts at most one package pattern") + } + var cfg pkgLoadConfig + if v.Config { + if err := json.NewDecoder(os.Stdin).Decode(&cfg); err != nil { + return tool.CommandLineErrorf("failed to parse cfg: %v", err) + } + } + loadCfg := packages.Config{ + Context: ctx, + Tests: cfg.Tests, + BuildFlags: cfg.BuildFlags, + // inherit the current process's cwd and env. + } + + if err := vulnchecklib.Main(loadCfg, args...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + return nil +} diff --git a/gopls/internal/lsp/cmd/workspace_symbol.go b/gopls/internal/lsp/cmd/workspace_symbol.go new file mode 100644 index 00000000000..0c7160af399 --- /dev/null +++ b/gopls/internal/lsp/cmd/workspace_symbol.go @@ -0,0 +1,85 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmd + +import ( + "context" + "flag" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/tool" +) + +// workspaceSymbol implements the workspace_symbol verb for gopls. +type workspaceSymbol struct { + Matcher string `flag:"matcher" help:"specifies the type of matcher: fuzzy, caseSensitive, or caseInsensitive.\nThe default is caseInsensitive."` + + app *Application +} + +func (r *workspaceSymbol) Name() string { return "workspace_symbol" } +func (r *workspaceSymbol) Parent() string { return r.app.Name() } +func (r *workspaceSymbol) Usage() string { return "[workspace_symbol-flags] " } +func (r *workspaceSymbol) ShortHelp() string { return "search symbols in workspace" } +func (r *workspaceSymbol) DetailedHelp(f *flag.FlagSet) { + fmt.Fprint(f.Output(), ` +Example: + + $ gopls workspace_symbol -matcher fuzzy 'wsymbols' + +workspace_symbol-flags: +`) + printFlagDefaults(f) +} + +func (r *workspaceSymbol) Run(ctx context.Context, args ...string) error { + if len(args) != 1 { + return tool.CommandLineErrorf("workspace_symbol expects 1 argument") + } + + opts := r.app.options + r.app.options = func(o *source.Options) { + if opts != nil { + opts(o) + } + switch r.Matcher { + case "fuzzy": + o.SymbolMatcher = source.SymbolFuzzy + case "caseSensitive": + o.SymbolMatcher = source.SymbolCaseSensitive + case "fastfuzzy": + o.SymbolMatcher = source.SymbolFastFuzzy + default: + o.SymbolMatcher = source.SymbolCaseInsensitive + } + } + + conn, err := r.app.connect(ctx) + if err != nil { + return err + } + defer conn.terminate(ctx) + + p := protocol.WorkspaceSymbolParams{ + Query: args[0], + } + + symbols, err := conn.Symbol(ctx, &p) + if err != nil { + return err + } + for _, s := range symbols { + f := conn.openFile(ctx, fileURI(s.Location.URI)) + span, err := f.mapper.LocationSpan(s.Location) + if err != nil { + return err + } + fmt.Printf("%s %s %s\n", span, s.Name, s.Kind) + } + + return nil +} diff --git a/internal/lsp/code_action.go b/gopls/internal/lsp/code_action.go similarity index 79% rename from internal/lsp/code_action.go rename to gopls/internal/lsp/code_action.go index 9d78e3c9ac9..ef9f921ec2f 100644 --- a/internal/lsp/code_action.go +++ b/gopls/internal/lsp/code_action.go @@ -10,14 +10,14 @@ import ( "sort" "strings" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" ) func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) { @@ -70,18 +70,41 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara switch kind { case source.Mod: if diagnostics := params.Context.Diagnostics; len(diagnostics) > 0 { - diags, err := mod.DiagnosticsForMod(ctx, snapshot, fh) + diags, err := mod.ModDiagnostics(ctx, snapshot, fh) if source.IsNonFatalGoModError(err) { return nil, nil } if err != nil { return nil, err } - quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, diags) + udiags, err := mod.ModUpgradeDiagnostics(ctx, snapshot, fh) + if err != nil { + return nil, err + } + quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, append(diags, udiags...)) if err != nil { return nil, err } codeActions = append(codeActions, quickFixes...) + + vdiags, err := mod.ModVulnerabilityDiagnostics(ctx, snapshot, fh) + if err != nil { + return nil, err + } + // Group vulnerabilities by location and then limit which code actions we return + // for each location. + m := make(map[protocol.Range][]*source.Diagnostic) + for _, v := range vdiags { + m[v.Range] = append(m[v.Range], v) + } + for _, sdiags := range m { + quickFixes, err = codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, sdiags) + if err != nil { + return nil, err + } + quickFixes = mod.SelectUpgradeCodeActions(quickFixes) + codeActions = append(codeActions, quickFixes...) + } } case source.Go: // Don't suggest fixes for generated files, since they are generally @@ -132,20 +155,23 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara if ctx.Err() != nil { return nil, ctx.Err() } - pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckFull, source.WidestPackage) + + // Type-check the package and also run analysis, + // then combine their diagnostics. + pkg, _, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.TypecheckFull, source.NarrowestPackage) if err != nil { return nil, err } - - pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg) + pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, uri) if err != nil { return nil, err } - analysisDiags, err := source.Analyze(ctx, snapshot, pkg, true) + analysisDiags, err := source.Analyze(ctx, snapshot, pkg.Metadata().ID, true) if err != nil { return nil, err } - fileDiags := append(pkgDiagnostics[uri], analysisDiags[uri]...) + var fileDiags []*source.Diagnostic + source.CombineDiagnostics(pkgDiags, analysisDiags[uri], &fileDiags, &fileDiags) // Split diagnostics into fixes, which must match incoming diagnostics, // and non-fixes, which must match the requested range. Build actions @@ -189,7 +215,7 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara } if wanted[protocol.RefactorExtract] { - fixes, err := extractionFixes(ctx, snapshot, pkg, uri, params.Range) + fixes, err := extractionFixes(ctx, snapshot, uri, params.Range) if err != nil { return nil, err } @@ -257,6 +283,12 @@ func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic if ident == fix.IdentName { results = append(results, diagnostic) } + // "undefined: X" may be an unresolved import at Go 1.20+. + case strings.HasPrefix(diagnostic.Message, "undefined: "): + ident := strings.TrimPrefix(diagnostic.Message, "undefined: ") + if ident == fix.IdentName { + results = append(results, diagnostic) + } // "could not import: X" may be an invalid import. case strings.HasPrefix(diagnostic.Message, "could not import: "): ident := strings.TrimPrefix(diagnostic.Message, "could not import: ") @@ -276,7 +308,7 @@ func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic return results } -func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.Package, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) { +func extractionFixes(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) { if rng.Start == rng.End { return nil, nil } @@ -284,17 +316,17 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P if err != nil { return nil, err } - _, pgf, err := source.GetParsedFile(ctx, snapshot, fh, source.NarrowestPackage) + pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) if err != nil { return nil, fmt.Errorf("getting file for Identifier: %w", err) } - srng, err := pgf.Mapper.RangeToSpanRange(rng) + start, end, err := pgf.RangePos(rng) if err != nil { return nil, err } puri := protocol.URIFromSpanURI(uri) var commands []protocol.Command - if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok { + if _, ok, methodOk, _ := source.CanExtractFunction(pgf.Tok, start, end, pgf.Src, pgf.File); ok { cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{ URI: puri, Fix: source.ExtractFunction, @@ -316,7 +348,7 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P commands = append(commands, cmd) } } - if _, _, ok, _ := source.CanExtractVariable(srng, pgf.File); ok { + if _, _, ok, _ := source.CanExtractVariable(start, end, pgf.File); ok { cmd, err := command.NewApplyFixCommand("Extract variable", command.ApplyFixArgs{ URI: puri, Fix: source.ExtractVariable, @@ -338,16 +370,18 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P return actions, nil } -func documentChanges(fh source.VersionedFileHandle, edits []protocol.TextEdit) []protocol.TextDocumentEdit { - return []protocol.TextDocumentEdit{ +func documentChanges(fh source.FileHandle, edits []protocol.TextEdit) []protocol.DocumentChanges { + return []protocol.DocumentChanges{ { - TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ - Version: fh.Version(), - TextDocumentIdentifier: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(fh.URI()), + TextDocumentEdit: &protocol.TextDocumentEdit{ + TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ + Version: fh.Version(), + TextDocumentIdentifier: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(fh.URI()), + }, }, + Edits: edits, }, - Edits: edits, }, } } @@ -378,20 +412,22 @@ func codeActionsMatchingDiagnostics(ctx context.Context, snapshot source.Snapsho func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd *source.Diagnostic, pd *protocol.Diagnostic) ([]protocol.CodeAction, error) { var actions []protocol.CodeAction for _, fix := range sd.SuggestedFixes { - var changes []protocol.TextDocumentEdit + var changes []protocol.DocumentChanges for uri, edits := range fix.Edits { - fh, err := snapshot.GetVersionedFile(ctx, uri) + fh, err := snapshot.GetFile(ctx, uri) if err != nil { return nil, err } - changes = append(changes, protocol.TextDocumentEdit{ - TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ - Version: fh.Version(), - TextDocumentIdentifier: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), + changes = append(changes, protocol.DocumentChanges{ + TextDocumentEdit: &protocol.TextDocumentEdit{ + TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ + Version: fh.Version(), + TextDocumentIdentifier: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(fh.URI()), + }, }, + Edits: edits, }, - Edits: edits, }) } action := protocol.CodeAction{ @@ -411,7 +447,8 @@ func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd } func sameDiagnostic(pd protocol.Diagnostic, sd *source.Diagnostic) bool { - return pd.Message == sd.Message && protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source) + return pd.Message == strings.TrimSpace(sd.Message) && // extra space may have been trimmed when converting to protocol.Diagnostic + protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source) } func goTest(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) { diff --git a/gopls/internal/lsp/code_lens.go b/gopls/internal/lsp/code_lens.go new file mode 100644 index 00000000000..f554e798c3c --- /dev/null +++ b/gopls/internal/lsp/code_lens.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "fmt" + "sort" + + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + var lenses map[command.Command]source.LensFunc + switch snapshot.View().FileKind(fh) { + case source.Mod: + lenses = mod.LensFuncs() + case source.Go: + lenses = source.LensFuncs() + default: + // Unsupported file kind for a code lens. + return nil, nil + } + var result []protocol.CodeLens + for cmd, lf := range lenses { + if !snapshot.View().Options().Codelenses[string(cmd)] { + continue + } + added, err := lf(ctx, snapshot, fh) + // Code lens is called on every keystroke, so we should just operate in + // a best-effort mode, ignoring errors. + if err != nil { + event.Error(ctx, fmt.Sprintf("code lens %s failed", cmd), err) + continue + } + result = append(result, added...) + } + sort.Slice(result, func(i, j int) bool { + a, b := result[i], result[j] + if cmp := protocol.CompareRange(a.Range, b.Range); cmp != 0 { + return cmp < 0 + } + return a.Command.Command < b.Command.Command + }) + return result, nil +} diff --git a/gopls/internal/lsp/command.go b/gopls/internal/lsp/command.go new file mode 100644 index 00000000000..cb3065ff385 --- /dev/null +++ b/gopls/internal/lsp/command.go @@ -0,0 +1,945 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "time" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/progress" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/gopls/internal/vulncheck" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/xcontext" +) + +func (s *Server) executeCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) { + var found bool + for _, name := range s.session.Options().SupportedCommands { + if name == params.Command { + found = true + break + } + } + if !found { + return nil, fmt.Errorf("%s is not a supported command", params.Command) + } + + handler := &commandHandler{ + s: s, + params: params, + } + return command.Dispatch(ctx, params, handler) +} + +type commandHandler struct { + s *Server + params *protocol.ExecuteCommandParams +} + +// commandConfig configures common command set-up and execution. +type commandConfig struct { + async bool // whether to run the command asynchronously. Async commands can only return errors. + requireSave bool // whether all files must be saved for the command to work + progress string // title to use for progress reporting. If empty, no progress will be reported. + forURI protocol.DocumentURI // URI to resolve to a snapshot. If unset, snapshot will be nil. +} + +// commandDeps is evaluated from a commandConfig. Note that not all fields may +// be populated, depending on which configuration is set. See comments in-line +// for details. +type commandDeps struct { + snapshot source.Snapshot // present if cfg.forURI was set + fh source.FileHandle // present if cfg.forURI was set + work *progress.WorkDone // present cfg.progress was set +} + +type commandFunc func(context.Context, commandDeps) error + +// run performs command setup for command execution, and invokes the given run +// function. If cfg.async is set, run executes the given func in a separate +// goroutine, and returns as soon as setup is complete and the goroutine is +// scheduled. +// +// Invariant: if the resulting error is non-nil, the given run func will +// (eventually) be executed exactly once. +func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run commandFunc) (err error) { + if cfg.requireSave { + var unsaved []string + for _, overlay := range c.s.session.Overlays() { + if !overlay.Saved() { + unsaved = append(unsaved, overlay.URI().Filename()) + } + } + if len(unsaved) > 0 { + return fmt.Errorf("All files must be saved first (unsaved: %v).", unsaved) + } + } + var deps commandDeps + if cfg.forURI != "" { + var ok bool + var release func() + deps.snapshot, deps.fh, ok, release, err = c.s.beginFileRequest(ctx, cfg.forURI, source.UnknownKind) + defer release() + if !ok { + if err != nil { + return err + } + return fmt.Errorf("invalid file URL: %v", cfg.forURI) + } + } + ctx, cancel := context.WithCancel(xcontext.Detach(ctx)) + if cfg.progress != "" { + deps.work = c.s.progress.Start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel) + } + runcmd := func() error { + defer cancel() + err := run(ctx, deps) + if deps.work != nil { + switch { + case errors.Is(err, context.Canceled): + deps.work.End(ctx, "canceled") + case err != nil: + event.Error(ctx, "command error", err) + deps.work.End(ctx, "failed") + default: + deps.work.End(ctx, "completed") + } + } + return err + } + if cfg.async { + go func() { + if err := runcmd(); err != nil { + if showMessageErr := c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Type: protocol.Error, + Message: err.Error(), + }); showMessageErr != nil { + event.Error(ctx, fmt.Sprintf("failed to show message: %q", err.Error()), showMessageErr) + } + } + }() + return nil + } + return runcmd() +} + +func (c *commandHandler) ApplyFix(ctx context.Context, args command.ApplyFixArgs) error { + return c.run(ctx, commandConfig{ + // Note: no progress here. Applying fixes should be quick. + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + edits, err := source.ApplyFix(ctx, args.Fix, deps.snapshot, deps.fh, args.Range) + if err != nil { + return err + } + var changes []protocol.DocumentChanges + for _, edit := range edits { + changes = append(changes, protocol.DocumentChanges{ + TextDocumentEdit: &edit, + }) + } + r, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ + Edit: protocol.WorkspaceEdit{ + DocumentChanges: changes, + }, + }) + if err != nil { + return err + } + if !r.Applied { + return errors.New(r.FailureReason) + } + return nil + }) +} + +func (c *commandHandler) RegenerateCgo(ctx context.Context, args command.URIArg) error { + return c.run(ctx, commandConfig{ + progress: "Regenerating Cgo", + }, func(ctx context.Context, deps commandDeps) error { + mod := source.FileModification{ + URI: args.URI.SpanURI(), + Action: source.InvalidateMetadata, + } + return c.s.didModifyFiles(ctx, []source.FileModification{mod}, FromRegenerateCgo) + }) +} + +func (c *commandHandler) CheckUpgrades(ctx context.Context, args command.CheckUpgradesArgs) error { + return c.run(ctx, commandConfig{ + forURI: args.URI, + progress: "Checking for upgrades", + }, func(ctx context.Context, deps commandDeps) error { + upgrades, err := c.s.getUpgrades(ctx, deps.snapshot, args.URI.SpanURI(), args.Modules) + if err != nil { + return err + } + deps.snapshot.View().RegisterModuleUpgrades(args.URI.SpanURI(), upgrades) + // Re-diagnose the snapshot to publish the new module diagnostics. + c.s.diagnoseSnapshot(deps.snapshot, nil, false) + return nil + }) +} + +func (c *commandHandler) AddDependency(ctx context.Context, args command.DependencyArgs) error { + return c.GoGetModule(ctx, args) +} + +func (c *commandHandler) UpgradeDependency(ctx context.Context, args command.DependencyArgs) error { + return c.GoGetModule(ctx, args) +} + +func (c *commandHandler) ResetGoModDiagnostics(ctx context.Context, args command.ResetGoModDiagnosticsArgs) error { + return c.run(ctx, commandConfig{ + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + // Clear all diagnostics coming from the upgrade check source and vulncheck. + // This will clear the diagnostics in all go.mod files, but they + // will be re-calculated when the snapshot is diagnosed again. + if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.UpgradeNotification) { + deps.snapshot.View().ClearModuleUpgrades(args.URI.SpanURI()) + c.s.clearDiagnosticSource(modCheckUpgradesSource) + } + + if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.Govulncheck) { + deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), nil) + c.s.clearDiagnosticSource(modVulncheckSource) + } + + // Re-diagnose the snapshot to remove the diagnostics. + c.s.diagnoseSnapshot(deps.snapshot, nil, false) + return nil + }) +} + +func (c *commandHandler) GoGetModule(ctx context.Context, args command.DependencyArgs) error { + return c.run(ctx, commandConfig{ + progress: "Running go get", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + return runGoGetModule(invoke, args.AddRequire, args.GoCmdArgs) + }) + }) +} + +// TODO(rFindley): UpdateGoSum, Tidy, and Vendor could probably all be one command. +func (c *commandHandler) UpdateGoSum(ctx context.Context, args command.URIArgs) error { + return c.run(ctx, commandConfig{ + progress: "Updating go.sum", + }, func(ctx context.Context, deps commandDeps) error { + for _, uri := range args.URIs { + snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind) + defer release() + if !ok { + return err + } + if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + _, err := invoke("list", "all") + return err + }); err != nil { + return err + } + } + return nil + }) +} + +func (c *commandHandler) Tidy(ctx context.Context, args command.URIArgs) error { + return c.run(ctx, commandConfig{ + requireSave: true, + progress: "Running go mod tidy", + }, func(ctx context.Context, deps commandDeps) error { + for _, uri := range args.URIs { + snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind) + defer release() + if !ok { + return err + } + if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + _, err := invoke("mod", "tidy") + return err + }); err != nil { + return err + } + } + return nil + }) +} + +func (c *commandHandler) Vendor(ctx context.Context, args command.URIArg) error { + return c.run(ctx, commandConfig{ + requireSave: true, + progress: "Running go mod vendor", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + // Use RunGoCommandPiped here so that we don't compete with any other go + // command invocations. go mod vendor deletes modules.txt before recreating + // it, and therefore can run into file locking issues on Windows if that + // file is in use by another process, such as go list. + // + // If golang/go#44119 is resolved, go mod vendor will instead modify + // modules.txt in-place. In that case we could theoretically allow this + // command to run concurrently. + err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{ + Verb: "mod", + Args: []string{"vendor"}, + WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()), + }, &bytes.Buffer{}, &bytes.Buffer{}) + return err + }) +} + +func (c *commandHandler) EditGoDirective(ctx context.Context, args command.EditGoDirectiveArgs) error { + return c.run(ctx, commandConfig{ + requireSave: true, // if go.mod isn't saved it could cause a problem + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, args.URI, source.UnknownKind) + defer release() + if !ok { + return err + } + if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + _, err := invoke("mod", "edit", "-go", args.Version) + return err + }); err != nil { + return err + } + return nil + }) +} + +func (c *commandHandler) RemoveDependency(ctx context.Context, args command.RemoveDependencyArgs) error { + return c.run(ctx, commandConfig{ + progress: "Removing dependency", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + // If the module is tidied apart from the one unused diagnostic, we can + // run `go get module@none`, and then run `go mod tidy`. Otherwise, we + // must make textual edits. + // TODO(rstambler): In Go 1.17+, we will be able to use the go command + // without checking if the module is tidy. + if args.OnlyDiagnostic { + return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + if err := runGoGetModule(invoke, false, []string{args.ModulePath + "@none"}); err != nil { + return err + } + _, err := invoke("mod", "tidy") + return err + }) + } + pm, err := deps.snapshot.ParseMod(ctx, deps.fh) + if err != nil { + return err + } + edits, err := dropDependency(deps.snapshot, pm, args.ModulePath) + if err != nil { + return err + } + response, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ + Edit: protocol.WorkspaceEdit{ + DocumentChanges: []protocol.DocumentChanges{ + { + TextDocumentEdit: &protocol.TextDocumentEdit{ + TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ + Version: deps.fh.Version(), + TextDocumentIdentifier: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(deps.fh.URI()), + }, + }, + Edits: edits, + }, + }, + }, + }, + }) + if err != nil { + return err + } + if !response.Applied { + return fmt.Errorf("edits not applied because of %s", response.FailureReason) + } + return nil + }) +} + +// dropDependency returns the edits to remove the given require from the go.mod +// file. +func dropDependency(snapshot source.Snapshot, pm *source.ParsedModule, modulePath string) ([]protocol.TextEdit, error) { + // We need a private copy of the parsed go.mod file, since we're going to + // modify it. + copied, err := modfile.Parse("", pm.Mapper.Content, nil) + if err != nil { + return nil, err + } + if err := copied.DropRequire(modulePath); err != nil { + return nil, err + } + copied.Cleanup() + newContent, err := copied.Format() + if err != nil { + return nil, err + } + // Calculate the edits to be made due to the change. + diff := snapshot.View().Options().ComputeEdits(string(pm.Mapper.Content), string(newContent)) + return source.ToProtocolEdits(pm.Mapper, diff) +} + +func (c *commandHandler) Test(ctx context.Context, uri protocol.DocumentURI, tests, benchmarks []string) error { + return c.RunTests(ctx, command.RunTestsArgs{ + URI: uri, + Tests: tests, + Benchmarks: benchmarks, + }) +} + +func (c *commandHandler) RunTests(ctx context.Context, args command.RunTestsArgs) error { + return c.run(ctx, commandConfig{ + async: true, + progress: "Running go test", + requireSave: true, + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + if err := c.runTests(ctx, deps.snapshot, deps.work, args.URI, args.Tests, args.Benchmarks); err != nil { + return fmt.Errorf("running tests failed: %w", err) + } + return nil + }) +} + +func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error { + // TODO: fix the error reporting when this runs async. + metas, err := snapshot.MetadataForFile(ctx, uri.SpanURI()) + if err != nil { + return err + } + metas = source.RemoveIntermediateTestVariants(metas) + if len(metas) == 0 { + return fmt.Errorf("package could not be found for file: %s", uri.SpanURI().Filename()) + } + pkgPath := string(metas[0].ForTest) + + // create output + buf := &bytes.Buffer{} + ew := progress.NewEventWriter(ctx, "test") + out := io.MultiWriter(ew, progress.NewWorkDoneWriter(ctx, work), buf) + + // Run `go test -run Func` on each test. + var failedTests int + for _, funcName := range tests { + inv := &gocommand.Invocation{ + Verb: "test", + Args: []string{pkgPath, "-v", "-count=1", "-run", fmt.Sprintf("^%s$", funcName)}, + WorkingDir: filepath.Dir(uri.SpanURI().Filename()), + } + if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil { + if errors.Is(err, context.Canceled) { + return err + } + failedTests++ + } + } + + // Run `go test -run=^$ -bench Func` on each test. + var failedBenchmarks int + for _, funcName := range benchmarks { + inv := &gocommand.Invocation{ + Verb: "test", + Args: []string{pkgPath, "-v", "-run=^$", "-bench", fmt.Sprintf("^%s$", funcName)}, + WorkingDir: filepath.Dir(uri.SpanURI().Filename()), + } + if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil { + if errors.Is(err, context.Canceled) { + return err + } + failedBenchmarks++ + } + } + + var title string + if len(tests) > 0 && len(benchmarks) > 0 { + title = "tests and benchmarks" + } else if len(tests) > 0 { + title = "tests" + } else if len(benchmarks) > 0 { + title = "benchmarks" + } else { + return errors.New("No functions were provided") + } + message := fmt.Sprintf("all %s passed", title) + if failedTests > 0 && failedBenchmarks > 0 { + message = fmt.Sprintf("%d / %d tests failed and %d / %d benchmarks failed", failedTests, len(tests), failedBenchmarks, len(benchmarks)) + } else if failedTests > 0 { + message = fmt.Sprintf("%d / %d tests failed", failedTests, len(tests)) + } else if failedBenchmarks > 0 { + message = fmt.Sprintf("%d / %d benchmarks failed", failedBenchmarks, len(benchmarks)) + } + if failedTests > 0 || failedBenchmarks > 0 { + message += "\n" + buf.String() + } + + return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Type: protocol.Info, + Message: message, + }) +} + +func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs) error { + title := "Running go generate ." + if args.Recursive { + title = "Running go generate ./..." + } + return c.run(ctx, commandConfig{ + requireSave: true, + progress: title, + forURI: args.Dir, + }, func(ctx context.Context, deps commandDeps) error { + er := progress.NewEventWriter(ctx, "generate") + + pattern := "." + if args.Recursive { + pattern = "./..." + } + inv := &gocommand.Invocation{ + Verb: "generate", + Args: []string{"-x", pattern}, + WorkingDir: args.Dir.SpanURI().Filename(), + } + stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work)) + if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil { + return err + } + return nil + }) +} + +func (c *commandHandler) GoGetPackage(ctx context.Context, args command.GoGetPackageArgs) error { + return c.run(ctx, commandConfig{ + forURI: args.URI, + progress: "Running go get", + }, func(ctx context.Context, deps commandDeps) error { + // Run on a throwaway go.mod, otherwise it'll write to the real one. + stdout, err := deps.snapshot.RunGoCommandDirect(ctx, source.WriteTemporaryModFile|source.AllowNetwork, &gocommand.Invocation{ + Verb: "list", + Args: []string{"-f", "{{.Module.Path}}@{{.Module.Version}}", args.Pkg}, + WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()), + }) + if err != nil { + return err + } + ver := strings.TrimSpace(stdout.String()) + return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { + if args.AddRequire { + if err := addModuleRequire(invoke, []string{ver}); err != nil { + return err + } + } + _, err := invoke(append([]string{"get", "-d"}, args.Pkg)...) + return err + }) + }) +} + +func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Snapshot, uri span.URI, run func(invoke func(...string) (*bytes.Buffer, error)) error) error { + tmpModfile, newModBytes, newSumBytes, err := snapshot.RunGoCommands(ctx, true, filepath.Dir(uri.Filename()), run) + if err != nil { + return err + } + if !tmpModfile { + return nil + } + modURI := snapshot.GoModForFile(uri) + sumURI := span.URIFromPath(strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum") + modEdits, err := applyFileEdits(ctx, snapshot, modURI, newModBytes) + if err != nil { + return err + } + sumEdits, err := applyFileEdits(ctx, snapshot, sumURI, newSumBytes) + if err != nil { + return err + } + changes := append(sumEdits, modEdits...) + if len(changes) == 0 { + return nil + } + var documentChanges []protocol.DocumentChanges + for _, change := range changes { + documentChanges = append(documentChanges, protocol.DocumentChanges{ + TextDocumentEdit: &change, + }) + } + response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ + Edit: protocol.WorkspaceEdit{ + DocumentChanges: documentChanges, + }, + }) + if err != nil { + return err + } + if !response.Applied { + return fmt.Errorf("edits not applied because of %s", response.FailureReason) + } + return nil +} + +func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + oldContent, err := fh.Read() + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if bytes.Equal(oldContent, newContent) { + return nil, nil + } + + // Sending a workspace edit to a closed file causes VS Code to open the + // file and leave it unsaved. We would rather apply the changes directly, + // especially to go.sum, which should be mostly invisible to the user. + if !snapshot.IsOpen(uri) { + err := ioutil.WriteFile(uri.Filename(), newContent, 0666) + return nil, err + } + + m := protocol.NewMapper(fh.URI(), oldContent) + diff := snapshot.View().Options().ComputeEdits(string(oldContent), string(newContent)) + edits, err := source.ToProtocolEdits(m, diff) + if err != nil { + return nil, err + } + return []protocol.TextDocumentEdit{{ + TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ + Version: fh.Version(), + TextDocumentIdentifier: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }, + Edits: edits, + }}, nil +} + +func runGoGetModule(invoke func(...string) (*bytes.Buffer, error), addRequire bool, args []string) error { + if addRequire { + if err := addModuleRequire(invoke, args); err != nil { + return err + } + } + _, err := invoke(append([]string{"get", "-d"}, args...)...) + return err +} + +func addModuleRequire(invoke func(...string) (*bytes.Buffer, error), args []string) error { + // Using go get to create a new dependency results in an + // `// indirect` comment we may not want. The only way to avoid it + // is to add the require as direct first. Then we can use go get to + // update go.sum and tidy up. + _, err := invoke(append([]string{"mod", "edit", "-require"}, args...)...) + return err +} + +func (s *Server) getUpgrades(ctx context.Context, snapshot source.Snapshot, uri span.URI, modules []string) (map[string]string, error) { + stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{ + Verb: "list", + Args: append([]string{"-m", "-u", "-json"}, modules...), + WorkingDir: filepath.Dir(uri.Filename()), + ModFlag: "readonly", + }) + if err != nil { + return nil, err + } + + upgrades := map[string]string{} + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Update == nil { + continue + } + upgrades[mod.Path] = mod.Update.Version + } + return upgrades, nil +} + +func (c *commandHandler) GCDetails(ctx context.Context, uri protocol.DocumentURI) error { + return c.ToggleGCDetails(ctx, command.URIArg{URI: uri}) +} + +func (c *commandHandler) ToggleGCDetails(ctx context.Context, args command.URIArg) error { + return c.run(ctx, commandConfig{ + requireSave: true, + progress: "Toggling GC Details", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + metas, err := deps.snapshot.MetadataForFile(ctx, deps.fh.URI()) + if err != nil { + return err + } + id := metas[0].ID // 0 => narrowest package + c.s.gcOptimizationDetailsMu.Lock() + if _, ok := c.s.gcOptimizationDetails[id]; ok { + delete(c.s.gcOptimizationDetails, id) + c.s.clearDiagnosticSource(gcDetailsSource) + } else { + c.s.gcOptimizationDetails[id] = struct{}{} + } + c.s.gcOptimizationDetailsMu.Unlock() + c.s.diagnoseSnapshot(deps.snapshot, nil, false) + return nil + }) +} + +func (c *commandHandler) ListKnownPackages(ctx context.Context, args command.URIArg) (command.ListKnownPackagesResult, error) { + var result command.ListKnownPackagesResult + err := c.run(ctx, commandConfig{ + progress: "Listing packages", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + pkgs, err := source.KnownPackagePaths(ctx, deps.snapshot, deps.fh) + for _, pkg := range pkgs { + result.Packages = append(result.Packages, string(pkg)) + } + return err + }) + return result, err +} + +func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (command.ListImportsResult, error) { + var result command.ListImportsResult + err := c.run(ctx, commandConfig{ + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + fh, err := deps.snapshot.GetFile(ctx, args.URI.SpanURI()) + if err != nil { + return err + } + pgf, err := deps.snapshot.ParseGo(ctx, fh, source.ParseHeader) + if err != nil { + return err + } + for _, group := range astutil.Imports(deps.snapshot.FileSet(), pgf.File) { + for _, imp := range group { + if imp.Path == nil { + continue + } + var name string + if imp.Name != nil { + name = imp.Name.Name + } + result.Imports = append(result.Imports, command.FileImport{ + Path: string(source.UnquoteImportPath(imp)), + Name: name, + }) + } + } + metas, err := deps.snapshot.MetadataForFile(ctx, args.URI.SpanURI()) + if err != nil { + return err // e.g. cancelled + } + if len(metas) == 0 { + return fmt.Errorf("no package containing %v", args.URI.SpanURI()) + } + for pkgPath := range metas[0].DepsByPkgPath { // 0 => narrowest package + result.PackageImports = append(result.PackageImports, + command.PackageImport{Path: string(pkgPath)}) + } + sort.Slice(result.PackageImports, func(i, j int) bool { + return result.PackageImports[i].Path < result.PackageImports[j].Path + }) + return nil + }) + return result, err +} + +func (c *commandHandler) AddImport(ctx context.Context, args command.AddImportArgs) error { + return c.run(ctx, commandConfig{ + progress: "Adding import", + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + edits, err := source.AddImport(ctx, deps.snapshot, deps.fh, args.ImportPath) + if err != nil { + return fmt.Errorf("could not add import: %v", err) + } + if _, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ + Edit: protocol.WorkspaceEdit{ + DocumentChanges: documentChanges(deps.fh, edits), + }, + }); err != nil { + return fmt.Errorf("could not apply import edits: %v", err) + } + return nil + }) +} + +func (c *commandHandler) StartDebugging(ctx context.Context, args command.DebuggingArgs) (result command.DebuggingResult, _ error) { + addr := args.Addr + if addr == "" { + addr = "localhost:0" + } + di := debug.GetInstance(ctx) + if di == nil { + return result, errors.New("internal error: server has no debugging instance") + } + listenedAddr, err := di.Serve(ctx, addr) + if err != nil { + return result, fmt.Errorf("starting debug server: %w", err) + } + result.URLs = []string{"http://" + listenedAddr} + return result, nil +} + +// Copy of pkgLoadConfig defined in internal/lsp/cmd/vulncheck.go +// TODO(hyangah): decide where to define this. +type pkgLoadConfig struct { + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // If Tests is set, the loader includes related test packages. + Tests bool +} + +func (c *commandHandler) FetchVulncheckResult(ctx context.Context, arg command.URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error) { + ret := map[protocol.DocumentURI]*govulncheck.Result{} + err := c.run(ctx, commandConfig{forURI: arg.URI}, func(ctx context.Context, deps commandDeps) error { + if deps.snapshot.View().Options().Vulncheck == source.ModeVulncheckImports { + for _, modfile := range deps.snapshot.ModFiles() { + res, err := deps.snapshot.ModVuln(ctx, modfile) + if err != nil { + return err + } + ret[protocol.URIFromSpanURI(modfile)] = res + } + } + // Overwrite if there is any govulncheck-based result. + for modfile, result := range deps.snapshot.View().Vulnerabilities() { + ret[protocol.URIFromSpanURI(modfile)] = result + } + return nil + }) + return ret, err +} + +func (c *commandHandler) RunGovulncheck(ctx context.Context, args command.VulncheckArgs) (command.RunVulncheckResult, error) { + if args.URI == "" { + return command.RunVulncheckResult{}, errors.New("VulncheckArgs is missing URI field") + } + + // Return the workdone token so that clients can identify when this + // vulncheck invocation is complete. + // + // Since the run function executes asynchronously, we use a channel to + // synchronize the start of the run and return the token. + tokenChan := make(chan protocol.ProgressToken, 1) + err := c.run(ctx, commandConfig{ + async: true, // need to be async to be cancellable + progress: "govulncheck", + requireSave: true, + forURI: args.URI, + }, func(ctx context.Context, deps commandDeps) error { + tokenChan <- deps.work.Token() + + view := deps.snapshot.View() + opts := view.Options() + // quickly test if gopls is compiled to support govulncheck + // by checking vulncheck.Main. Alternatively, we can continue and + // let the `gopls vulncheck` command fail. This is lighter-weight. + if vulncheck.Main == nil { + return errors.New("vulncheck feature is not available") + } + + cmd := exec.CommandContext(ctx, os.Args[0], "vulncheck", "-config", args.Pattern) + cmd.Dir = filepath.Dir(args.URI.SpanURI().Filename()) + + var viewEnv []string + if e := opts.EnvSlice(); e != nil { + viewEnv = append(os.Environ(), e...) + } + cmd.Env = viewEnv + + // stdin: gopls vulncheck expects JSON-encoded configuration from STDIN when -config flag is set. + var stdin bytes.Buffer + cmd.Stdin = &stdin + + if err := json.NewEncoder(&stdin).Encode(pkgLoadConfig{ + BuildFlags: opts.BuildFlags, + // TODO(hyangah): add `tests` flag in command.VulncheckArgs + }); err != nil { + return fmt.Errorf("failed to pass package load config: %v", err) + } + + // stderr: stream gopls vulncheck's STDERR as progress reports + er := progress.NewEventWriter(ctx, "vulncheck") + stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work)) + cmd.Stderr = stderr + // TODO: can we stream stdout? + stdout, err := cmd.Output() + if err != nil { + return fmt.Errorf("failed to run govulncheck: %v", err) + } + + var result govulncheck.Result + if err := json.Unmarshal(stdout, &result); err != nil { + // TODO: for easy debugging, log the failed stdout somewhere? + return fmt.Errorf("failed to parse govulncheck output: %v", err) + } + result.Mode = govulncheck.ModeGovulncheck + result.AsOf = time.Now() + deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), &result) + + c.s.diagnoseSnapshot(deps.snapshot, nil, false) + vulns := result.Vulns + affecting := make([]string, 0, len(vulns)) + for _, v := range vulns { + if v.IsCalled() { + affecting = append(affecting, v.OSV.ID) + } + } + if len(affecting) == 0 { + return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Type: protocol.Info, + Message: "No vulnerabilities found", + }) + } + sort.Strings(affecting) + return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Type: protocol.Warning, + Message: fmt.Sprintf("Found %v", strings.Join(affecting, ", ")), + }) + }) + if err != nil { + return command.RunVulncheckResult{}, err + } + select { + case <-ctx.Done(): + return command.RunVulncheckResult{}, ctx.Err() + case token := <-tokenChan: + return command.RunVulncheckResult{Token: token}, nil + } +} diff --git a/internal/lsp/command/command_gen.go b/gopls/internal/lsp/command/command_gen.go similarity index 83% rename from internal/lsp/command/command_gen.go rename to gopls/internal/lsp/command/command_gen.go index 22cfeff5bad..b6aea98c15a 100644 --- a/internal/lsp/command/command_gen.go +++ b/gopls/internal/lsp/command/command_gen.go @@ -15,32 +15,33 @@ import ( "context" "fmt" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) const ( - AddDependency Command = "add_dependency" - AddImport Command = "add_import" - ApplyFix Command = "apply_fix" - CheckUpgrades Command = "check_upgrades" - EditGoDirective Command = "edit_go_directive" - GCDetails Command = "gc_details" - Generate Command = "generate" - GenerateGoplsMod Command = "generate_gopls_mod" - GoGetPackage Command = "go_get_package" - ListImports Command = "list_imports" - ListKnownPackages Command = "list_known_packages" - RegenerateCgo Command = "regenerate_cgo" - RemoveDependency Command = "remove_dependency" - RunTests Command = "run_tests" - RunVulncheckExp Command = "run_vulncheck_exp" - StartDebugging Command = "start_debugging" - Test Command = "test" - Tidy Command = "tidy" - ToggleGCDetails Command = "toggle_gc_details" - UpdateGoSum Command = "update_go_sum" - UpgradeDependency Command = "upgrade_dependency" - Vendor Command = "vendor" + AddDependency Command = "add_dependency" + AddImport Command = "add_import" + ApplyFix Command = "apply_fix" + CheckUpgrades Command = "check_upgrades" + EditGoDirective Command = "edit_go_directive" + FetchVulncheckResult Command = "fetch_vulncheck_result" + GCDetails Command = "gc_details" + Generate Command = "generate" + GoGetPackage Command = "go_get_package" + ListImports Command = "list_imports" + ListKnownPackages Command = "list_known_packages" + RegenerateCgo Command = "regenerate_cgo" + RemoveDependency Command = "remove_dependency" + ResetGoModDiagnostics Command = "reset_go_mod_diagnostics" + RunGovulncheck Command = "run_govulncheck" + RunTests Command = "run_tests" + StartDebugging Command = "start_debugging" + Test Command = "test" + Tidy Command = "tidy" + ToggleGCDetails Command = "toggle_gc_details" + UpdateGoSum Command = "update_go_sum" + UpgradeDependency Command = "upgrade_dependency" + Vendor Command = "vendor" ) var Commands = []Command{ @@ -49,16 +50,17 @@ var Commands = []Command{ ApplyFix, CheckUpgrades, EditGoDirective, + FetchVulncheckResult, GCDetails, Generate, - GenerateGoplsMod, GoGetPackage, ListImports, ListKnownPackages, RegenerateCgo, RemoveDependency, + ResetGoModDiagnostics, + RunGovulncheck, RunTests, - RunVulncheckExp, StartDebugging, Test, Tidy, @@ -100,6 +102,12 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte return nil, err } return nil, s.EditGoDirective(ctx, a0) + case "gopls.fetch_vulncheck_result": + var a0 URIArg + if err := UnmarshalArgs(params.Arguments, &a0); err != nil { + return nil, err + } + return s.FetchVulncheckResult(ctx, a0) case "gopls.gc_details": var a0 protocol.DocumentURI if err := UnmarshalArgs(params.Arguments, &a0); err != nil { @@ -112,12 +120,6 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte return nil, err } return nil, s.Generate(ctx, a0) - case "gopls.generate_gopls_mod": - var a0 URIArg - if err := UnmarshalArgs(params.Arguments, &a0); err != nil { - return nil, err - } - return nil, s.GenerateGoplsMod(ctx, a0) case "gopls.go_get_package": var a0 GoGetPackageArgs if err := UnmarshalArgs(params.Arguments, &a0); err != nil { @@ -148,18 +150,24 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte return nil, err } return nil, s.RemoveDependency(ctx, a0) - case "gopls.run_tests": - var a0 RunTestsArgs + case "gopls.reset_go_mod_diagnostics": + var a0 ResetGoModDiagnosticsArgs if err := UnmarshalArgs(params.Arguments, &a0); err != nil { return nil, err } - return nil, s.RunTests(ctx, a0) - case "gopls.run_vulncheck_exp": + return nil, s.ResetGoModDiagnostics(ctx, a0) + case "gopls.run_govulncheck": var a0 VulncheckArgs if err := UnmarshalArgs(params.Arguments, &a0); err != nil { return nil, err } - return s.RunVulncheckExp(ctx, a0) + return s.RunGovulncheck(ctx, a0) + case "gopls.run_tests": + var a0 RunTestsArgs + if err := UnmarshalArgs(params.Arguments, &a0); err != nil { + return nil, err + } + return nil, s.RunTests(ctx, a0) case "gopls.start_debugging": var a0 DebuggingArgs if err := UnmarshalArgs(params.Arguments, &a0); err != nil { @@ -268,38 +276,38 @@ func NewEditGoDirectiveCommand(title string, a0 EditGoDirectiveArgs) (protocol.C }, nil } -func NewGCDetailsCommand(title string, a0 protocol.DocumentURI) (protocol.Command, error) { +func NewFetchVulncheckResultCommand(title string, a0 URIArg) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { return protocol.Command{}, err } return protocol.Command{ Title: title, - Command: "gopls.gc_details", + Command: "gopls.fetch_vulncheck_result", Arguments: args, }, nil } -func NewGenerateCommand(title string, a0 GenerateArgs) (protocol.Command, error) { +func NewGCDetailsCommand(title string, a0 protocol.DocumentURI) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { return protocol.Command{}, err } return protocol.Command{ Title: title, - Command: "gopls.generate", + Command: "gopls.gc_details", Arguments: args, }, nil } -func NewGenerateGoplsModCommand(title string, a0 URIArg) (protocol.Command, error) { +func NewGenerateCommand(title string, a0 GenerateArgs) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { return protocol.Command{}, err } return protocol.Command{ Title: title, - Command: "gopls.generate_gopls_mod", + Command: "gopls.generate", Arguments: args, }, nil } @@ -364,26 +372,38 @@ func NewRemoveDependencyCommand(title string, a0 RemoveDependencyArgs) (protocol }, nil } -func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) { +func NewResetGoModDiagnosticsCommand(title string, a0 ResetGoModDiagnosticsArgs) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { return protocol.Command{}, err } return protocol.Command{ Title: title, - Command: "gopls.run_tests", + Command: "gopls.reset_go_mod_diagnostics", Arguments: args, }, nil } -func NewRunVulncheckExpCommand(title string, a0 VulncheckArgs) (protocol.Command, error) { +func NewRunGovulncheckCommand(title string, a0 VulncheckArgs) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { return protocol.Command{}, err } return protocol.Command{ Title: title, - Command: "gopls.run_vulncheck_exp", + Command: "gopls.run_govulncheck", + Arguments: args, + }, nil +} + +func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) { + args, err := MarshalArgs(a0) + if err != nil { + return protocol.Command{}, err + } + return protocol.Command{ + Title: title, + Command: "gopls.run_tests", Arguments: args, }, nil } diff --git a/internal/lsp/command/commandmeta/meta.go b/gopls/internal/lsp/command/commandmeta/meta.go similarity index 97% rename from internal/lsp/command/commandmeta/meta.go rename to gopls/internal/lsp/command/commandmeta/meta.go index a3a357df4b0..bf85c4faa9b 100644 --- a/internal/lsp/command/commandmeta/meta.go +++ b/gopls/internal/lsp/command/commandmeta/meta.go @@ -17,7 +17,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/command" ) type Command struct { @@ -52,7 +52,7 @@ func Load() (*packages.Package, []*Command, error) { Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps, BuildFlags: []string{"-tags=generate"}, }, - "golang.org/x/tools/internal/lsp/command", + "golang.org/x/tools/gopls/internal/lsp/command", ) if err != nil { return nil, nil, fmt.Errorf("packages.Load: %v", err) @@ -244,7 +244,7 @@ func findField(pkg *packages.Package, pos token.Pos) (*ast.Field, error) { fset := pkg.Fset var file *ast.File for _, f := range pkg.Syntax { - if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename { + if fset.File(f.Pos()).Name() == fset.File(pos).Name() { file = f break } diff --git a/internal/lsp/command/gen/gen.go b/gopls/internal/lsp/command/gen/gen.go similarity index 94% rename from internal/lsp/command/gen/gen.go rename to gopls/internal/lsp/command/gen/gen.go index 8f7a2d50313..29428699ee6 100644 --- a/internal/lsp/command/gen/gen.go +++ b/gopls/internal/lsp/command/gen/gen.go @@ -13,7 +13,7 @@ import ( "text/template" "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/command/commandmeta" + "golang.org/x/tools/gopls/internal/lsp/command/commandmeta" ) const src = `// Copyright 2021 The Go Authors. All rights reserved. @@ -109,10 +109,10 @@ func Generate() ([]byte, error) { Imports: map[string]bool{ "context": true, "fmt": true, - "golang.org/x/tools/internal/lsp/protocol": true, + "golang.org/x/tools/gopls/internal/lsp/protocol": true, }, } - const thispkg = "golang.org/x/tools/internal/lsp/command" + const thispkg = "golang.org/x/tools/gopls/internal/lsp/command" for _, c := range d.Commands { for _, arg := range c.Args { pth := pkgPath(arg.Type) diff --git a/gopls/internal/lsp/command/generate.go b/gopls/internal/lsp/command/generate.go new file mode 100644 index 00000000000..79ff49b0e33 --- /dev/null +++ b/gopls/internal/lsp/command/generate.go @@ -0,0 +1,25 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "golang.org/x/tools/gopls/internal/lsp/command/gen" +) + +func main() { + content, err := gen.Generate() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + ioutil.WriteFile("command_gen.go", content, 0644) +} diff --git a/internal/lsp/command/interface.go b/gopls/internal/lsp/command/interface.go similarity index 88% rename from internal/lsp/command/interface.go rename to gopls/internal/lsp/command/interface.go index 8e4b1056d32..965158aef13 100644 --- a/internal/lsp/command/interface.go +++ b/gopls/internal/lsp/command/interface.go @@ -17,7 +17,8 @@ package command import ( "context" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) // Interface defines the interface gopls exposes for the @@ -98,6 +99,11 @@ type Interface interface { // Removes a dependency from the go.mod file of a module. RemoveDependency(context.Context, RemoveDependencyArgs) error + // ResetGoModDiagnostics: Reset go.mod diagnostics + // + // Reset diagnostics in the go.mod file of a module. + ResetGoModDiagnostics(context.Context, ResetGoModDiagnosticsArgs) error + // GoGetPackage: go get a package // // Runs `go get` to fetch a package. @@ -115,11 +121,6 @@ type Interface interface { // Toggle the calculation of gc annotations. ToggleGCDetails(context.Context, URIArg) error - // GenerateGoplsMod: Generate gopls.mod - // - // (Re)generate the gopls.mod file for a workspace. - GenerateGoplsMod(context.Context, URIArg) error - // ListKnownPackages: List known packages // // Retrieve a list of packages that are importable from the given URI. @@ -144,10 +145,15 @@ type Interface interface { // address. StartDebugging(context.Context, DebuggingArgs) (DebuggingResult, error) - // RunVulncheckExp: Run vulncheck (experimental) + // RunGovulncheck: Run govulncheck. // // Run vulnerability check (`govulncheck`). - RunVulncheckExp(context.Context, VulncheckArgs) (VulncheckResult, error) + RunGovulncheck(context.Context, VulncheckArgs) (RunVulncheckResult, error) + + // FetchVulncheckResult: Get known vulncheck result + // + // Fetch the result of latest vulnerability check (`govulncheck`). + FetchVulncheckResult(context.Context, URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error) } type RunTestsArgs struct { @@ -267,21 +273,6 @@ type PackageImport struct { Path string } -type WorkspaceMetadataArgs struct { -} - -type WorkspaceMetadataResult struct { - // All workspaces for this session. - Workspaces []Workspace -} - -type Workspace struct { - // The workspace name. - Name string - // The workspace module directory. - ModuleDir string -} - type DebuggingArgs struct { // Optional: the address (including port) for the debug server to listen on. // If not provided, the debug server will bind to "localhost:0", and the @@ -313,15 +304,30 @@ type DebuggingResult struct { URLs []string } +type ResetGoModDiagnosticsArgs struct { + URIArg + + // Optional: source of the diagnostics to reset. + // If not set, all resettable go.mod diagnostics will be cleared. + DiagnosticSource string +} + type VulncheckArgs struct { - // Dir is the directory from which vulncheck will run from. - Dir protocol.DocumentURI + // Any document in the directory from which govulncheck will run. + URI protocol.DocumentURI // Package pattern. E.g. "", ".", "./...". Pattern string - // TODO: Flag []string (flags accepted by govulncheck, e.g., -tests) - // TODO: Format string (json, text) + // TODO: -tests +} + +// RunVulncheckResult holds the result of asynchronously starting the vulncheck +// command. +type RunVulncheckResult struct { + // Token holds the progress token for LSP workDone reporting of the vulncheck + // invocation. + Token protocol.ProgressToken } type VulncheckResult struct { @@ -347,6 +353,7 @@ type StackEntry struct { } // Vuln models an osv.Entry and representative call stacks. +// TODO: deprecate type Vuln struct { // ID is the vulnerability ID (osv.Entry.ID). // https://ossf.github.io/osv-schema/#id-modified-fields @@ -359,8 +366,10 @@ type Vuln struct { Aliases []string `json:",omitempty"` // Symbol is the name of the detected vulnerable function or method. + // Can be empty if the vulnerability exists in required modules, but no vulnerable symbols are used. Symbol string `json:",omitempty"` // PkgPath is the package path of the detected Symbol. + // Can be empty if the vulnerability exists in required modules, but no vulnerable packages are used. PkgPath string `json:",omitempty"` // ModPath is the module path corresponding to PkgPath. // TODO: how do we specify standard library's vulnerability? diff --git a/gopls/internal/lsp/command/interface_test.go b/gopls/internal/lsp/command/interface_test.go new file mode 100644 index 00000000000..e602293a19f --- /dev/null +++ b/gopls/internal/lsp/command/interface_test.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package command_test + +import ( + "io/ioutil" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/command/gen" + "golang.org/x/tools/internal/testenv" +) + +func TestGenerated(t *testing.T) { + testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code. + + onDisk, err := ioutil.ReadFile("command_gen.go") + if err != nil { + t.Fatal(err) + } + + generated, err := gen.Generate() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(string(generated), string(onDisk)); diff != "" { + t.Errorf("command_gen.go is stale -- regenerate (-generated +on disk)\n%s", diff) + } +} diff --git a/internal/lsp/command/util.go b/gopls/internal/lsp/command/util.go similarity index 100% rename from internal/lsp/command/util.go rename to gopls/internal/lsp/command/util.go diff --git a/gopls/internal/lsp/completion.go b/gopls/internal/lsp/completion.go new file mode 100644 index 00000000000..b2e50cc8074 --- /dev/null +++ b/gopls/internal/lsp/completion.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "fmt" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/completion" + "golang.org/x/tools/gopls/internal/lsp/template" + "golang.org/x/tools/gopls/internal/lsp/work" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" +) + +func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + var candidates []completion.CompletionItem + var surrounding *completion.Selection + switch snapshot.View().FileKind(fh) { + case source.Go: + candidates, surrounding, err = completion.Completion(ctx, snapshot, fh, params.Position, params.Context) + case source.Mod: + candidates, surrounding = nil, nil + case source.Work: + cl, err := work.Completion(ctx, snapshot, fh, params.Position) + if err != nil { + break + } + return cl, nil + case source.Tmpl: + var cl *protocol.CompletionList + cl, err = template.Completion(ctx, snapshot, fh, params.Position, params.Context) + if err != nil { + break // use common error handling, candidates==nil + } + return cl, nil + } + if err != nil { + event.Error(ctx, "no completions found", err, tag.Position.Of(params.Position)) + } + if candidates == nil { + return &protocol.CompletionList{ + IsIncomplete: true, + Items: []protocol.CompletionItem{}, + }, nil + } + + rng, err := surrounding.Range() + if err != nil { + return nil, err + } + + // When using deep completions/fuzzy matching, report results as incomplete so + // client fetches updated completions after every key stroke. + options := snapshot.View().Options() + incompleteResults := options.DeepCompletion || options.Matcher == source.Fuzzy + + items := toProtocolCompletionItems(candidates, rng, options) + + return &protocol.CompletionList{ + IsIncomplete: incompleteResults, + Items: items, + }, nil +} + +func toProtocolCompletionItems(candidates []completion.CompletionItem, rng protocol.Range, options *source.Options) []protocol.CompletionItem { + var ( + items = make([]protocol.CompletionItem, 0, len(candidates)) + numDeepCompletionsSeen int + ) + for i, candidate := range candidates { + // Limit the number of deep completions to not overwhelm the user in cases + // with dozens of deep completion matches. + if candidate.Depth > 0 { + if !options.DeepCompletion { + continue + } + if numDeepCompletionsSeen >= completion.MaxDeepCompletions { + continue + } + numDeepCompletionsSeen++ + } + insertText := candidate.InsertText + if options.InsertTextFormat == protocol.SnippetTextFormat { + insertText = candidate.Snippet() + } + + // This can happen if the client has snippets disabled but the + // candidate only supports snippet insertion. + if insertText == "" { + continue + } + + doc := &protocol.Or_CompletionItem_documentation{ + Value: protocol.MarkupContent{ + Kind: protocol.Markdown, + Value: source.CommentToMarkdown(candidate.Documentation), + }, + } + if options.PreferredContentFormat != protocol.Markdown { + doc.Value = candidate.Documentation + } + item := protocol.CompletionItem{ + Label: candidate.Label, + Detail: candidate.Detail, + Kind: candidate.Kind, + TextEdit: &protocol.TextEdit{ + NewText: insertText, + Range: rng, + }, + InsertTextFormat: options.InsertTextFormat, + AdditionalTextEdits: candidate.AdditionalTextEdits, + // This is a hack so that the client sorts completion results in the order + // according to their score. This can be removed upon the resolution of + // https://github.com/Microsoft/language-server-protocol/issues/348. + SortText: fmt.Sprintf("%05d", i), + + // Trim operators (VSCode doesn't like weird characters in + // filterText). + FilterText: strings.TrimLeft(candidate.InsertText, "&*"), + + Preselect: i == 0, + Documentation: doc, + Tags: candidate.Tags, + Deprecated: candidate.Deprecated, + } + items = append(items, item) + } + return items +} diff --git a/gopls/internal/lsp/completion_test.go b/gopls/internal/lsp/completion_test.go new file mode 100644 index 00000000000..cd3bcec992c --- /dev/null +++ b/gopls/internal/lsp/completion_test.go @@ -0,0 +1,176 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/completion" + "golang.org/x/tools/gopls/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/span" +) + +func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) { + opts.DeepCompletion = false + opts.Matcher = source.CaseInsensitive + opts.CompleteUnimported = false + opts.InsertTextFormat = protocol.SnippetTextFormat + opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal") + opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix") + }) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) + if diff := tests.DiffCompletionItems(want, got); diff != "" { + t.Errorf("mismatching completion items (-want +got):\n%s", diff) + } +} + +func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) { + list := r.callCompletion(t, src, func(opts *source.Options) { + opts.UsePlaceholders = placeholders + opts.DeepCompletion = true + opts.Matcher = source.Fuzzy + opts.CompleteUnimported = false + }) + got := tests.FindItem(list, *items[expected.CompletionItem]) + want := expected.PlainSnippet + if placeholders { + want = expected.PlaceholderSnippet + } + if diff := tests.DiffSnippets(want, got); diff != "" { + t.Errorf("%s", diff) + } +} + +func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) {}) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) + if diff := tests.CheckCompletionOrder(want, got, false); diff != "" { + t.Errorf("%s", diff) + } +} + +func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) { + opts.DeepCompletion = true + opts.Matcher = source.CaseInsensitive + opts.CompleteUnimported = false + }) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) + if diff := tests.DiffCompletionItems(want, got); diff != "" { + t.Errorf("mismatching completion items (-want +got):\n%s", diff) + } +} + +func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) { + opts.DeepCompletion = true + opts.Matcher = source.Fuzzy + opts.CompleteUnimported = false + }) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) + if diff := tests.DiffCompletionItems(want, got); diff != "" { + t.Errorf("mismatching completion items (-want +got):\n%s", diff) + } +} + +func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) { + opts.Matcher = source.CaseSensitive + opts.CompleteUnimported = false + }) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) + if diff := tests.DiffCompletionItems(want, got); diff != "" { + t.Errorf("mismatching completion items (-want +got):\n%s", diff) + } +} + +func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { + got := r.callCompletion(t, src, func(opts *source.Options) { + opts.DeepCompletion = true + opts.Matcher = source.Fuzzy + opts.CompleteUnimported = false + opts.LiteralCompletions = true + opts.ExperimentalPostfixCompletions = true + }) + want := expected(t, test, items) + if msg := tests.CheckCompletionOrder(want, got, true); msg != "" { + t.Errorf("%s", msg) + } +} + +func expected(t *testing.T, test tests.Completion, items tests.CompletionItems) []protocol.CompletionItem { + t.Helper() + + toProtocolCompletionItem := func(item *completion.CompletionItem) protocol.CompletionItem { + pItem := protocol.CompletionItem{ + Label: item.Label, + Kind: item.Kind, + Detail: item.Detail, + Documentation: &protocol.Or_CompletionItem_documentation{ + Value: item.Documentation, + }, + InsertText: item.InsertText, + TextEdit: &protocol.TextEdit{ + NewText: item.Snippet(), + }, + // Negate score so best score has lowest sort text like real API. + SortText: fmt.Sprint(-item.Score), + } + if pItem.InsertText == "" { + pItem.InsertText = pItem.Label + } + return pItem + } + + var want []protocol.CompletionItem + for _, pos := range test.CompletionItems { + want = append(want, toProtocolCompletionItem(items[pos])) + } + return want +} + +func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) []protocol.CompletionItem { + t.Helper() + + view, err := r.server.session.ViewOf(src.URI()) + if err != nil { + t.Fatal(err) + } + original := view.Options() + modified := view.Options().Clone() + options(modified) + view, err = r.server.session.SetViewOptions(r.ctx, view, modified) + if err != nil { + t.Error(err) + return nil + } + defer r.server.session.SetViewOptions(r.ctx, view, original) + + list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{ + TextDocumentPositionParams: protocol.TextDocumentPositionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(src.URI()), + }, + Position: protocol.Position{ + Line: uint32(src.Start().Line() - 1), + Character: uint32(src.Start().Column() - 1), + }, + }, + }) + if err != nil { + t.Fatal(err) + } + return list.Items +} diff --git a/internal/lsp/debounce.go b/gopls/internal/lsp/debounce.go similarity index 100% rename from internal/lsp/debounce.go rename to gopls/internal/lsp/debounce.go diff --git a/internal/lsp/debounce_test.go b/gopls/internal/lsp/debounce_test.go similarity index 100% rename from internal/lsp/debounce_test.go rename to gopls/internal/lsp/debounce_test.go diff --git a/internal/lsp/debug/buildinfo_go1.12.go b/gopls/internal/lsp/debug/buildinfo_go1.12.go similarity index 100% rename from internal/lsp/debug/buildinfo_go1.12.go rename to gopls/internal/lsp/debug/buildinfo_go1.12.go diff --git a/internal/lsp/debug/buildinfo_go1.18.go b/gopls/internal/lsp/debug/buildinfo_go1.18.go similarity index 100% rename from internal/lsp/debug/buildinfo_go1.18.go rename to gopls/internal/lsp/debug/buildinfo_go1.18.go diff --git a/gopls/internal/lsp/debug/info.go b/gopls/internal/lsp/debug/info.go new file mode 100644 index 00000000000..00752e6f9a3 --- /dev/null +++ b/gopls/internal/lsp/debug/info.go @@ -0,0 +1,254 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package debug exports debug information for gopls. +package debug + +import ( + "context" + "encoding/json" + "fmt" + "io" + "reflect" + "runtime" + "runtime/debug" + "sort" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/source" +) + +type PrintMode int + +const ( + PlainText = PrintMode(iota) + Markdown + HTML + JSON +) + +// Version is a manually-updated mechanism for tracking versions. +const Version = "master" + +// ServerVersion is the format used by gopls to report its version to the +// client. This format is structured so that the client can parse it easily. +type ServerVersion struct { + *BuildInfo + Version string +} + +// VersionInfo returns the build info for the gopls process. If it was not +// built in module mode, we return a GOPATH-specific message with the +// hardcoded version. +func VersionInfo() *ServerVersion { + if info, ok := readBuildInfo(); ok { + return getVersion(info) + } + buildInfo := &BuildInfo{} + // go1.17 or earlier, part of s.BuildInfo are embedded fields. + buildInfo.Path = "gopls, built in GOPATH mode" + buildInfo.GoVersion = runtime.Version() + return &ServerVersion{ + Version: Version, + BuildInfo: buildInfo, + } +} + +func getVersion(info *BuildInfo) *ServerVersion { + return &ServerVersion{ + Version: Version, + BuildInfo: info, + } +} + +// PrintServerInfo writes HTML debug info to w for the Instance. +func (i *Instance) PrintServerInfo(ctx context.Context, w io.Writer) { + section(w, HTML, "Server Instance", func() { + fmt.Fprintf(w, "Start time: %v\n", i.StartTime) + fmt.Fprintf(w, "LogFile: %s\n", i.Logfile) + fmt.Fprintf(w, "Working directory: %s\n", i.Workdir) + fmt.Fprintf(w, "Address: %s\n", i.ServerAddress) + fmt.Fprintf(w, "Debug address: %s\n", i.DebugAddress()) + }) + PrintVersionInfo(ctx, w, true, HTML) + section(w, HTML, "Command Line", func() { + fmt.Fprintf(w, "cmdline") + }) +} + +// PrintVersionInfo writes version information to w, using the output format +// specified by mode. verbose controls whether additional information is +// written, including section headers. +func PrintVersionInfo(_ context.Context, w io.Writer, verbose bool, mode PrintMode) error { + info := VersionInfo() + if mode == JSON { + return printVersionInfoJSON(w, info) + } + + if !verbose { + printBuildInfo(w, info, false, mode) + return nil + } + section(w, mode, "Build info", func() { + printBuildInfo(w, info, true, mode) + }) + return nil +} + +func printVersionInfoJSON(w io.Writer, info *ServerVersion) error { + js, err := json.MarshalIndent(info, "", "\t") + if err != nil { + return err + } + _, err = fmt.Fprint(w, string(js)) + return err +} + +func section(w io.Writer, mode PrintMode, title string, body func()) { + switch mode { + case PlainText: + fmt.Fprintln(w, title) + fmt.Fprintln(w, strings.Repeat("-", len(title))) + body() + case Markdown: + fmt.Fprintf(w, "#### %s\n\n```\n", title) + body() + fmt.Fprintf(w, "```\n") + case HTML: + fmt.Fprintf(w, "

    %s

    \n
    \n", title)
    +		body()
    +		fmt.Fprint(w, "
    \n") + } +} + +func printBuildInfo(w io.Writer, info *ServerVersion, verbose bool, mode PrintMode) { + fmt.Fprintf(w, "%v %v\n", info.Path, Version) + printModuleInfo(w, info.Main, mode) + if !verbose { + return + } + for _, dep := range info.Deps { + printModuleInfo(w, *dep, mode) + } + fmt.Fprintf(w, "go: %v\n", info.GoVersion) +} + +func printModuleInfo(w io.Writer, m debug.Module, _ PrintMode) { + fmt.Fprintf(w, " %s@%s", m.Path, m.Version) + if m.Sum != "" { + fmt.Fprintf(w, " %s", m.Sum) + } + if m.Replace != nil { + fmt.Fprintf(w, " => %v", m.Replace.Path) + } + fmt.Fprintf(w, "\n") +} + +type field struct { + index []int +} + +var fields []field + +// find all the options. The presumption is that the Options are nested structs +// and that pointers don't need to be dereferenced +func swalk(t reflect.Type, ix []int, indent string) { + switch t.Kind() { + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + ixx := append(append([]int{}, ix...), i) + swalk(fld.Type, ixx, indent+". ") + } + default: + // everything is either a struct or a field (that's an assumption about Options) + fields = append(fields, field{ix}) + } +} + +type sessionOption struct { + Name string + Type string + Current string + Default string +} + +func showOptions(o *source.Options) []sessionOption { + var out []sessionOption + t := reflect.TypeOf(*o) + swalk(t, []int{}, "") + v := reflect.ValueOf(*o) + do := reflect.ValueOf(*source.DefaultOptions()) + for _, f := range fields { + val := v.FieldByIndex(f.index) + def := do.FieldByIndex(f.index) + tx := t.FieldByIndex(f.index) + is := strVal(val) + was := strVal(def) + out = append(out, sessionOption{ + Name: tx.Name, + Type: tx.Type.String(), + Current: is, + Default: was, + }) + } + sort.Slice(out, func(i, j int) bool { + rd := out[i].Current == out[i].Default + ld := out[j].Current == out[j].Default + if rd != ld { + return ld + } + return out[i].Name < out[j].Name + }) + return out +} + +func strVal(val reflect.Value) string { + switch val.Kind() { + case reflect.Bool: + return fmt.Sprintf("%v", val.Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", val.Interface()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", val.Interface()) + case reflect.Uintptr, reflect.UnsafePointer: + return fmt.Sprintf("0x%x", val.Pointer()) + case reflect.Complex64, reflect.Complex128: + return fmt.Sprintf("%v", val.Complex()) + case reflect.Array, reflect.Slice: + ans := []string{} + for i := 0; i < val.Len(); i++ { + ans = append(ans, strVal(val.Index(i))) + } + sort.Strings(ans) + return fmt.Sprintf("%v", ans) + case reflect.Chan, reflect.Func, reflect.Ptr: + return val.Kind().String() + case reflect.Struct: + var x source.Analyzer + if val.Type() != reflect.TypeOf(x) { + return val.Kind().String() + } + // this is sort of ugly, but usable + str := val.FieldByName("Analyzer").Elem().FieldByName("Doc").String() + ix := strings.Index(str, "\n") + if ix == -1 { + ix = len(str) + } + return str[:ix] + case reflect.String: + return fmt.Sprintf("%q", val.Interface()) + case reflect.Map: + ans := []string{} + iter := val.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + ans = append(ans, fmt.Sprintf("%s:%s, ", strVal(k), strVal(v))) + } + sort.Strings(ans) + return fmt.Sprintf("%v", ans) + } + return fmt.Sprintf("??%s??", val.Type()) +} diff --git a/internal/lsp/debug/info_test.go b/gopls/internal/lsp/debug/info_test.go similarity index 100% rename from internal/lsp/debug/info_test.go rename to gopls/internal/lsp/debug/info_test.go diff --git a/internal/lsp/debug/log/log.go b/gopls/internal/lsp/debug/log/log.go similarity index 95% rename from internal/lsp/debug/log/log.go rename to gopls/internal/lsp/debug/log/log.go index 44638f8a582..e3eaa106f7e 100644 --- a/internal/lsp/debug/log/log.go +++ b/gopls/internal/lsp/debug/log/log.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/debug/tag" + "golang.org/x/tools/internal/event/tag" ) // Level parameterizes log severity. diff --git a/internal/lsp/debug/metrics.go b/gopls/internal/lsp/debug/metrics.go similarity index 97% rename from internal/lsp/debug/metrics.go rename to gopls/internal/lsp/debug/metrics.go index 8efc1d495e0..c8da803d6b1 100644 --- a/internal/lsp/debug/metrics.go +++ b/gopls/internal/lsp/debug/metrics.go @@ -7,7 +7,7 @@ package debug import ( "golang.org/x/tools/internal/event/export/metric" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/debug/tag" + "golang.org/x/tools/internal/event/tag" ) var ( diff --git a/internal/lsp/debug/rpc.go b/gopls/internal/lsp/debug/rpc.go similarity index 99% rename from internal/lsp/debug/rpc.go rename to gopls/internal/lsp/debug/rpc.go index 033ee3797fb..5610021479c 100644 --- a/internal/lsp/debug/rpc.go +++ b/gopls/internal/lsp/debug/rpc.go @@ -17,7 +17,7 @@ import ( "golang.org/x/tools/internal/event/core" "golang.org/x/tools/internal/event/export" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/debug/tag" + "golang.org/x/tools/internal/event/tag" ) var RPCTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` diff --git a/gopls/internal/lsp/debug/serve.go b/gopls/internal/lsp/debug/serve.go new file mode 100644 index 00000000000..e79e88c3f6d --- /dev/null +++ b/gopls/internal/lsp/debug/serve.go @@ -0,0 +1,914 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package debug + +import ( + "archive/zip" + "bytes" + "context" + "errors" + "fmt" + "html/template" + "io" + stdlog "log" + "net" + "net/http" + "net/http/pprof" + "os" + "path" + "path/filepath" + "runtime" + rpprof "runtime/pprof" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug/log" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/export" + "golang.org/x/tools/internal/event/export/metric" + "golang.org/x/tools/internal/event/export/ocagent" + "golang.org/x/tools/internal/event/export/prometheus" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" +) + +type contextKeyType int + +const ( + instanceKey contextKeyType = iota + traceKey +) + +// An Instance holds all debug information associated with a gopls instance. +type Instance struct { + Logfile string + StartTime time.Time + ServerAddress string + Workdir string + OCAgentConfig string + + LogWriter io.Writer + + exporter event.Exporter + + ocagent *ocagent.Exporter + prometheus *prometheus.Exporter + rpcs *Rpcs + traces *traces + State *State + + serveMu sync.Mutex + debugAddress string + listenedDebugAddress string +} + +// State holds debugging information related to the server state. +type State struct { + mu sync.Mutex + clients []*Client + servers []*Server +} + +func (st *State) Bugs() []bug.Bug { + return bug.List() +} + +// Caches returns the set of Cache objects currently being served. +func (st *State) Caches() []*cache.Cache { + var caches []*cache.Cache + seen := make(map[string]struct{}) + for _, client := range st.Clients() { + cache := client.Session.Cache() + if _, found := seen[cache.ID()]; found { + continue + } + seen[cache.ID()] = struct{}{} + caches = append(caches, cache) + } + return caches +} + +// Cache returns the Cache that matches the supplied id. +func (st *State) Cache(id string) *cache.Cache { + for _, c := range st.Caches() { + if c.ID() == id { + return c + } + } + return nil +} + +// Sessions returns the set of Session objects currently being served. +func (st *State) Sessions() []*cache.Session { + var sessions []*cache.Session + for _, client := range st.Clients() { + sessions = append(sessions, client.Session) + } + return sessions +} + +// Session returns the Session that matches the supplied id. +func (st *State) Session(id string) *cache.Session { + for _, s := range st.Sessions() { + if s.ID() == id { + return s + } + } + return nil +} + +// Views returns the set of View objects currently being served. +func (st *State) Views() []*cache.View { + var views []*cache.View + for _, s := range st.Sessions() { + views = append(views, s.Views()...) + } + return views +} + +// View returns the View that matches the supplied id. +func (st *State) View(id string) *cache.View { + for _, v := range st.Views() { + if v.ID() == id { + return v + } + } + return nil +} + +// Clients returns the set of Clients currently being served. +func (st *State) Clients() []*Client { + st.mu.Lock() + defer st.mu.Unlock() + clients := make([]*Client, len(st.clients)) + copy(clients, st.clients) + return clients +} + +// Client returns the Client matching the supplied id. +func (st *State) Client(id string) *Client { + for _, c := range st.Clients() { + if c.Session.ID() == id { + return c + } + } + return nil +} + +// Servers returns the set of Servers the instance is currently connected to. +func (st *State) Servers() []*Server { + st.mu.Lock() + defer st.mu.Unlock() + servers := make([]*Server, len(st.servers)) + copy(servers, st.servers) + return servers +} + +// A Client is an incoming connection from a remote client. +type Client struct { + Session *cache.Session + DebugAddress string + Logfile string + GoplsPath string + ServerID string + Service protocol.Server +} + +// A Server is an outgoing connection to a remote LSP server. +type Server struct { + ID string + DebugAddress string + Logfile string + GoplsPath string + ClientID string +} + +// addClient adds a client to the set being served. +func (st *State) addClient(session *cache.Session) { + st.mu.Lock() + defer st.mu.Unlock() + st.clients = append(st.clients, &Client{Session: session}) +} + +// dropClient removes a client from the set being served. +func (st *State) dropClient(session *cache.Session) { + st.mu.Lock() + defer st.mu.Unlock() + for i, c := range st.clients { + if c.Session == session { + copy(st.clients[i:], st.clients[i+1:]) + st.clients[len(st.clients)-1] = nil + st.clients = st.clients[:len(st.clients)-1] + return + } + } +} + +// updateServer updates a server to the set being queried. In practice, there should +// be at most one remote server. +func (st *State) updateServer(server *Server) { + st.mu.Lock() + defer st.mu.Unlock() + for i, existing := range st.servers { + if existing.ID == server.ID { + // Replace, rather than mutate, to avoid a race. + newServers := make([]*Server, len(st.servers)) + copy(newServers, st.servers[:i]) + newServers[i] = server + copy(newServers[i+1:], st.servers[i+1:]) + st.servers = newServers + return + } + } + st.servers = append(st.servers, server) +} + +// dropServer drops a server from the set being queried. +func (st *State) dropServer(id string) { + st.mu.Lock() + defer st.mu.Unlock() + for i, s := range st.servers { + if s.ID == id { + copy(st.servers[i:], st.servers[i+1:]) + st.servers[len(st.servers)-1] = nil + st.servers = st.servers[:len(st.servers)-1] + return + } + } +} + +// an http.ResponseWriter that filters writes +type filterResponse struct { + w http.ResponseWriter + edit func([]byte) []byte +} + +func (c filterResponse) Header() http.Header { + return c.w.Header() +} + +func (c filterResponse) Write(buf []byte) (int, error) { + ans := c.edit(buf) + return c.w.Write(ans) +} + +func (c filterResponse) WriteHeader(n int) { + c.w.WriteHeader(n) +} + +// replace annoying nuls by spaces +func cmdline(w http.ResponseWriter, r *http.Request) { + fake := filterResponse{ + w: w, + edit: func(buf []byte) []byte { + return bytes.ReplaceAll(buf, []byte{0}, []byte{' '}) + }, + } + pprof.Cmdline(fake, r) +} + +func (i *Instance) getCache(r *http.Request) interface{} { + return i.State.Cache(path.Base(r.URL.Path)) +} + +func (i *Instance) getSession(r *http.Request) interface{} { + return i.State.Session(path.Base(r.URL.Path)) +} + +func (i *Instance) getClient(r *http.Request) interface{} { + return i.State.Client(path.Base(r.URL.Path)) +} + +func (i *Instance) getServer(r *http.Request) interface{} { + i.State.mu.Lock() + defer i.State.mu.Unlock() + id := path.Base(r.URL.Path) + for _, s := range i.State.servers { + if s.ID == id { + return s + } + } + return nil +} + +func (i *Instance) getView(r *http.Request) interface{} { + return i.State.View(path.Base(r.URL.Path)) +} + +func (i *Instance) getFile(r *http.Request) interface{} { + identifier := path.Base(r.URL.Path) + sid := path.Base(path.Dir(r.URL.Path)) + s := i.State.Session(sid) + if s == nil { + return nil + } + for _, o := range s.Overlays() { + // TODO(adonovan): understand and document this comparison. + if o.FileIdentity().Hash.String() == identifier { + return o + } + } + return nil +} + +func (i *Instance) getInfo(r *http.Request) interface{} { + buf := &bytes.Buffer{} + i.PrintServerInfo(r.Context(), buf) + return template.HTML(buf.String()) +} + +func (i *Instance) AddService(s protocol.Server, session *cache.Session) { + for _, c := range i.State.clients { + if c.Session == session { + c.Service = s + return + } + } + stdlog.Printf("unable to find a Client to add the protocol.Server to") +} + +func getMemory(_ *http.Request) interface{} { + var m runtime.MemStats + runtime.ReadMemStats(&m) + return m +} + +func init() { + event.SetExporter(makeGlobalExporter(os.Stderr)) +} + +func GetInstance(ctx context.Context) *Instance { + if ctx == nil { + return nil + } + v := ctx.Value(instanceKey) + if v == nil { + return nil + } + return v.(*Instance) +} + +// WithInstance creates debug instance ready for use using the supplied +// configuration and stores it in the returned context. +func WithInstance(ctx context.Context, workdir, agent string) context.Context { + i := &Instance{ + StartTime: time.Now(), + Workdir: workdir, + OCAgentConfig: agent, + } + i.LogWriter = os.Stderr + ocConfig := ocagent.Discover() + //TODO: we should not need to adjust the discovered configuration + ocConfig.Address = i.OCAgentConfig + i.ocagent = ocagent.Connect(ocConfig) + i.prometheus = prometheus.New() + i.rpcs = &Rpcs{} + i.traces = &traces{} + i.State = &State{} + i.exporter = makeInstanceExporter(i) + return context.WithValue(ctx, instanceKey, i) +} + +// SetLogFile sets the logfile for use with this instance. +func (i *Instance) SetLogFile(logfile string, isDaemon bool) (func(), error) { + // TODO: probably a better solution for deferring closure to the caller would + // be for the debug instance to itself be closed, but this fixes the + // immediate bug of logs not being captured. + closeLog := func() {} + if logfile != "" { + if logfile == "auto" { + if isDaemon { + logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-daemon-%d.log", os.Getpid())) + } else { + logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid())) + } + } + f, err := os.Create(logfile) + if err != nil { + return nil, fmt.Errorf("unable to create log file: %w", err) + } + closeLog = func() { + defer f.Close() + } + stdlog.SetOutput(io.MultiWriter(os.Stderr, f)) + i.LogWriter = f + } + i.Logfile = logfile + return closeLog, nil +} + +// Serve starts and runs a debug server in the background on the given addr. +// It also logs the port the server starts on, to allow for :0 auto assigned +// ports. +func (i *Instance) Serve(ctx context.Context, addr string) (string, error) { + stdlog.SetFlags(stdlog.Lshortfile) + if addr == "" { + return "", nil + } + i.serveMu.Lock() + defer i.serveMu.Unlock() + + if i.listenedDebugAddress != "" { + // Already serving. Return the bound address. + return i.listenedDebugAddress, nil + } + + i.debugAddress = addr + listener, err := net.Listen("tcp", i.debugAddress) + if err != nil { + return "", err + } + i.listenedDebugAddress = listener.Addr().String() + + port := listener.Addr().(*net.TCPAddr).Port + if strings.HasSuffix(i.debugAddress, ":0") { + stdlog.Printf("debug server listening at http://localhost:%d", port) + } + event.Log(ctx, "Debug serving", tag.Port.Of(port)) + go func() { + mux := http.NewServeMux() + mux.HandleFunc("/", render(MainTmpl, func(*http.Request) interface{} { return i })) + mux.HandleFunc("/debug/", render(DebugTmpl, nil)) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + if i.prometheus != nil { + mux.HandleFunc("/metrics/", i.prometheus.Serve) + } + if i.rpcs != nil { + mux.HandleFunc("/rpc/", render(RPCTmpl, i.rpcs.getData)) + } + if i.traces != nil { + mux.HandleFunc("/trace/", render(TraceTmpl, i.traces.getData)) + } + mux.HandleFunc("/cache/", render(CacheTmpl, i.getCache)) + mux.HandleFunc("/session/", render(SessionTmpl, i.getSession)) + mux.HandleFunc("/view/", render(ViewTmpl, i.getView)) + mux.HandleFunc("/client/", render(ClientTmpl, i.getClient)) + mux.HandleFunc("/server/", render(ServerTmpl, i.getServer)) + mux.HandleFunc("/file/", render(FileTmpl, i.getFile)) + mux.HandleFunc("/info", render(InfoTmpl, i.getInfo)) + mux.HandleFunc("/memory", render(MemoryTmpl, getMemory)) + + mux.HandleFunc("/_makeabug", func(w http.ResponseWriter, r *http.Request) { + bug.Report("bug here", nil) + http.Error(w, "made a bug", http.StatusOK) + }) + + if err := http.Serve(listener, mux); err != nil { + event.Error(ctx, "Debug server failed", err) + return + } + event.Log(ctx, "Debug server finished") + }() + return i.listenedDebugAddress, nil +} + +func (i *Instance) DebugAddress() string { + i.serveMu.Lock() + defer i.serveMu.Unlock() + return i.debugAddress +} + +func (i *Instance) ListenedDebugAddress() string { + i.serveMu.Lock() + defer i.serveMu.Unlock() + return i.listenedDebugAddress +} + +// MonitorMemory starts recording memory statistics each second. +func (i *Instance) MonitorMemory(ctx context.Context) { + tick := time.NewTicker(time.Second) + nextThresholdGiB := uint64(1) + go func() { + for { + <-tick.C + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + if mem.HeapAlloc < nextThresholdGiB*1<<30 { + continue + } + if err := i.writeMemoryDebug(nextThresholdGiB, true); err != nil { + event.Error(ctx, "writing memory debug info", err) + } + if err := i.writeMemoryDebug(nextThresholdGiB, false); err != nil { + event.Error(ctx, "writing memory debug info", err) + } + event.Log(ctx, fmt.Sprintf("Wrote memory usage debug info to %v", os.TempDir())) + nextThresholdGiB++ + } + }() +} + +func (i *Instance) writeMemoryDebug(threshold uint64, withNames bool) error { + suffix := "withnames" + if !withNames { + suffix = "nonames" + } + + filename := fmt.Sprintf("gopls.%d-%dGiB-%s.zip", os.Getpid(), threshold, suffix) + zipf, err := os.OpenFile(filepath.Join(os.TempDir(), filename), os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return err + } + zipw := zip.NewWriter(zipf) + + f, err := zipw.Create("heap.pb.gz") + if err != nil { + return err + } + if err := rpprof.Lookup("heap").WriteTo(f, 0); err != nil { + return err + } + + f, err = zipw.Create("goroutines.txt") + if err != nil { + return err + } + if err := rpprof.Lookup("goroutine").WriteTo(f, 1); err != nil { + return err + } + + for _, cache := range i.State.Caches() { + cf, err := zipw.Create(fmt.Sprintf("cache-%v.html", cache.ID())) + if err != nil { + return err + } + if _, err := cf.Write([]byte(cache.PackageStats(withNames))); err != nil { + return err + } + } + + if err := zipw.Close(); err != nil { + return err + } + return zipf.Close() +} + +func makeGlobalExporter(stderr io.Writer) event.Exporter { + p := export.Printer{} + var pMu sync.Mutex + return func(ctx context.Context, ev core.Event, lm label.Map) context.Context { + i := GetInstance(ctx) + + if event.IsLog(ev) { + // Don't log context cancellation errors. + if err := keys.Err.Get(ev); errors.Is(err, context.Canceled) { + return ctx + } + // Make sure any log messages without an instance go to stderr. + if i == nil { + pMu.Lock() + p.WriteEvent(stderr, ev, lm) + pMu.Unlock() + } + level := log.LabeledLevel(lm) + // Exclude trace logs from LSP logs. + if level < log.Trace { + ctx = protocol.LogEvent(ctx, ev, lm, messageType(level)) + } + } + if i == nil { + return ctx + } + return i.exporter(ctx, ev, lm) + } +} + +func messageType(l log.Level) protocol.MessageType { + switch l { + case log.Error: + return protocol.Error + case log.Warning: + return protocol.Warning + case log.Debug: + return protocol.Log + } + return protocol.Info +} + +func makeInstanceExporter(i *Instance) event.Exporter { + exporter := func(ctx context.Context, ev core.Event, lm label.Map) context.Context { + if i.ocagent != nil { + ctx = i.ocagent.ProcessEvent(ctx, ev, lm) + } + if i.prometheus != nil { + ctx = i.prometheus.ProcessEvent(ctx, ev, lm) + } + if i.rpcs != nil { + ctx = i.rpcs.ProcessEvent(ctx, ev, lm) + } + if i.traces != nil { + ctx = i.traces.ProcessEvent(ctx, ev, lm) + } + if event.IsLog(ev) { + if s := cache.KeyCreateSession.Get(ev); s != nil { + i.State.addClient(s) + } + if sid := tag.NewServer.Get(ev); sid != "" { + i.State.updateServer(&Server{ + ID: sid, + Logfile: tag.Logfile.Get(ev), + DebugAddress: tag.DebugAddress.Get(ev), + GoplsPath: tag.GoplsPath.Get(ev), + ClientID: tag.ClientID.Get(ev), + }) + } + if s := cache.KeyShutdownSession.Get(ev); s != nil { + i.State.dropClient(s) + } + if sid := tag.EndServer.Get(ev); sid != "" { + i.State.dropServer(sid) + } + if s := cache.KeyUpdateSession.Get(ev); s != nil { + if c := i.State.Client(s.ID()); c != nil { + c.DebugAddress = tag.DebugAddress.Get(ev) + c.Logfile = tag.Logfile.Get(ev) + c.ServerID = tag.ServerID.Get(ev) + c.GoplsPath = tag.GoplsPath.Get(ev) + } + } + } + return ctx + } + // StdTrace must be above export.Spans below (by convention, export + // middleware applies its wrapped exporter last). + exporter = StdTrace(exporter) + metrics := metric.Config{} + registerMetrics(&metrics) + exporter = metrics.Exporter(exporter) + exporter = export.Spans(exporter) + exporter = export.Labels(exporter) + return exporter +} + +type dataFunc func(*http.Request) interface{} + +func render(tmpl *template.Template, fun dataFunc) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + var data interface{} + if fun != nil { + data = fun(r) + } + if err := tmpl.Execute(w, data); err != nil { + event.Error(context.Background(), "", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } + } +} + +func commas(s string) string { + for i := len(s); i > 3; { + i -= 3 + s = s[:i] + "," + s[i:] + } + return s +} + +func fuint64(v uint64) string { + return commas(strconv.FormatUint(v, 10)) +} + +func fuint32(v uint32) string { + return commas(strconv.FormatUint(uint64(v), 10)) +} + +func fcontent(v []byte) string { + return string(v) +} + +var BaseTemplate = template.Must(template.New("").Parse(` + + +{{template "title" .}} + +{{block "head" .}}{{end}} + + +Main +Info +Memory +Metrics +RPC +Trace +
    +

    {{template "title" .}}

    +{{block "body" .}} +Unknown page +{{end}} + + + +{{define "cachelink"}}Cache {{.}}{{end}} +{{define "clientlink"}}Client {{.}}{{end}} +{{define "serverlink"}}Server {{.}}{{end}} +{{define "sessionlink"}}Session {{.}}{{end}} +{{define "viewlink"}}View {{.}}{{end}} +`)).Funcs(template.FuncMap{ + "fuint64": fuint64, + "fuint32": fuint32, + "fcontent": fcontent, + "localAddress": func(s string) string { + // Try to translate loopback addresses to localhost, both for cosmetics and + // because unspecified ipv6 addresses can break links on Windows. + // + // TODO(rfindley): In the future, it would be better not to assume the + // server is running on localhost, and instead construct this address using + // the remote host. + host, port, err := net.SplitHostPort(s) + if err != nil { + return s + } + ip := net.ParseIP(host) + if ip == nil { + return s + } + if ip.IsLoopback() || ip.IsUnspecified() { + return "localhost:" + port + } + return s + }, + "options": func(s *cache.Session) []sessionOption { + return showOptions(s.Options()) + }, +}) + +var MainTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}GoPls server information{{end}} +{{define "body"}} +

    Caches

    +
      {{range .State.Caches}}
    • {{template "cachelink" .ID}}
    • {{end}}
    +

    Sessions

    +
      {{range .State.Sessions}}
    • {{template "sessionlink" .ID}} from {{template "cachelink" .Cache.ID}}
    • {{end}}
    +

    Clients

    +
      {{range .State.Clients}}
    • {{template "clientlink" .Session.ID}}
    • {{end}}
    +

    Servers

    +
      {{range .State.Servers}}
    • {{template "serverlink" .ID}}
    • {{end}}
    +

    Bug reports

    +
    {{range .State.Bugs}}
    {{.Key}}
    {{.Description}}
    {{end}}
    +{{end}} +`)) + +var InfoTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}GoPls version information{{end}} +{{define "body"}} +{{.}} +{{end}} +`)) + +var MemoryTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}GoPls memory usage{{end}} +{{define "head"}}{{end}} +{{define "body"}} +

    Stats

    + + + + + + + + + + + + + + + + +
    Allocated bytes{{fuint64 .HeapAlloc}}
    Total allocated bytes{{fuint64 .TotalAlloc}}
    System bytes{{fuint64 .Sys}}
    Heap system bytes{{fuint64 .HeapSys}}
    Malloc calls{{fuint64 .Mallocs}}
    Frees{{fuint64 .Frees}}
    Idle heap bytes{{fuint64 .HeapIdle}}
    In use bytes{{fuint64 .HeapInuse}}
    Released to system bytes{{fuint64 .HeapReleased}}
    Heap object count{{fuint64 .HeapObjects}}
    Stack in use bytes{{fuint64 .StackInuse}}
    Stack from system bytes{{fuint64 .StackSys}}
    Bucket hash bytes{{fuint64 .BuckHashSys}}
    GC metadata bytes{{fuint64 .GCSys}}
    Off heap bytes{{fuint64 .OtherSys}}
    +

    By size

    + + +{{range .BySize}}{{end}} +
    SizeMallocsFrees
    {{fuint32 .Size}}{{fuint64 .Mallocs}}{{fuint64 .Frees}}
    +{{end}} +`)) + +var DebugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}GoPls Debug pages{{end}} +{{define "body"}} +Profiling +{{end}} +`)) + +var CacheTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}Cache {{.ID}}{{end}} +{{define "body"}} +

    memoize.Store entries

    +
      {{range $k,$v := .MemStats}}
    • {{$k}} - {{$v}}
    • {{end}}
    +

    Per-package usage - not accurate, for guidance only

    +{{.PackageStats true}} +{{end}} +`)) + +var ClientTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}Client {{.Session.ID}}{{end}} +{{define "body"}} +Using session: {{template "sessionlink" .Session.ID}}
    +{{if .DebugAddress}}Debug this client at: {{localAddress .DebugAddress}}
    {{end}} +Logfile: {{.Logfile}}
    +Gopls Path: {{.GoplsPath}}
    +

    Diagnostics

    +{{/*Service: []protocol.Server; each server has map[uri]fileReports; + each fileReport: map[diagnosticSoure]diagnosticReport + diagnosticSource is one of 5 source + diagnosticReport: snapshotID and map[hash]*source.Diagnostic + sourceDiagnostic: struct { + Range protocol.Range + Message string + Source string + Code string + CodeHref string + Severity protocol.DiagnosticSeverity + Tags []protocol.DiagnosticTag + + Related []RelatedInformation + } + RelatedInformation: struct { + URI span.URI + Range protocol.Range + Message string + } + */}} +
      {{range $k, $v := .Service.Diagnostics}}
    • {{$k}}:
        {{range $v}}
      1. {{.}}
      2. {{end}}
    • {{end}}
    +{{end}} +`)) + +var ServerTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}Server {{.ID}}{{end}} +{{define "body"}} +{{if .DebugAddress}}Debug this server at: {{localAddress .DebugAddress}}
    {{end}} +Logfile: {{.Logfile}}
    +Gopls Path: {{.GoplsPath}}
    +{{end}} +`)) + +var SessionTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}Session {{.ID}}{{end}} +{{define "body"}} +From: {{template "cachelink" .Cache.ID}}
    +

    Views

    +
      {{range .Views}}
    • {{.Name}} is {{template "viewlink" .ID}} in {{.Folder}}
    • {{end}}
    +

    Overlays

    +{{$session := .}} + +

    Options

    +{{range options .}} +

    {{.Name}} {{.Type}}

    +

    default: {{.Default}}

    +{{if ne .Default .Current}}

    current: {{.Current}}

    {{end}} +{{end}} +{{end}} +`)) + +var ViewTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}View {{.ID}}{{end}} +{{define "body"}} +Name: {{.Name}}
    +Folder: {{.Folder}}
    +

    Environment

    +
      {{range .Options.Env}}
    • {{.}}
    • {{end}}
    +{{end}} +`)) + +var FileTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` +{{define "title"}}Overlay {{.FileIdentity.Hash}}{{end}} +{{define "body"}} +{{with .}} + URI: {{.URI}}
    + Identifier: {{.FileIdentity.Hash}}
    + Version: {{.Version}}
    + Kind: {{.Kind}}
    +{{end}} +

    Contents

    +
    {{fcontent .Read}}
    +{{end}} +`)) diff --git a/internal/lsp/debug/trace.go b/gopls/internal/lsp/debug/trace.go similarity index 93% rename from internal/lsp/debug/trace.go rename to gopls/internal/lsp/debug/trace.go index ca612867a5d..bb402cfaa8f 100644 --- a/internal/lsp/debug/trace.go +++ b/gopls/internal/lsp/debug/trace.go @@ -119,8 +119,6 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string { } func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context { - t.mu.Lock() - defer t.mu.Unlock() span := export.GetSpan(ctx) if span == nil { return ctx @@ -128,11 +126,8 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) switch { case event.IsStart(ev): - if t.sets == nil { - t.sets = make(map[string]*traceSet) - t.unfinished = make(map[export.SpanContext]*traceData) - } - // just starting, add it to the unfinished map + // Just starting: add it to the unfinished map. + // Allocate before the critical section. td := &traceData{ TraceID: span.ID.TraceID, SpanID: span.ID.SpanID, @@ -141,6 +136,13 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) Start: span.Start().At(), Tags: renderLabels(span.Start()), } + + t.mu.Lock() + defer t.mu.Unlock() + if t.sets == nil { + t.sets = make(map[string]*traceSet) + t.unfinished = make(map[export.SpanContext]*traceData) + } t.unfinished[span.ID] = td // and wire up parents if we have them if !span.ParentID.IsValid() { @@ -155,7 +157,19 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) parent.Children = append(parent.Children, td) case event.IsEnd(ev): - // finishing, must be already in the map + // Finishing: must be already in the map. + // Allocate events before the critical section. + events := span.Events() + tdEvents := make([]traceEvent, len(events)) + for i, event := range events { + tdEvents[i] = traceEvent{ + Time: event.At(), + Tags: renderLabels(event), + } + } + + t.mu.Lock() + defer t.mu.Unlock() td, found := t.unfinished[span.ID] if !found { return ctx // if this happens we are in a bad place @@ -164,14 +178,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) td.Finish = span.Finish().At() td.Duration = span.Finish().At().Sub(span.Start().At()) - events := span.Events() - td.Events = make([]traceEvent, len(events)) - for i, event := range events { - td.Events[i] = traceEvent{ - Time: event.At(), - Tags: renderLabels(event), - } - } + td.Events = tdEvents set, ok := t.sets[span.Name] if !ok { diff --git a/gopls/internal/lsp/definition.go b/gopls/internal/lsp/definition.go new file mode 100644 index 00000000000..6259d4dbb84 --- /dev/null +++ b/gopls/internal/lsp/definition.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "errors" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" +) + +func (s *Server) definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) { + // TODO(rfindley): definition requests should be multiplexed across all views. + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + switch kind := snapshot.View().FileKind(fh); kind { + case source.Tmpl: + return template.Definition(snapshot, fh, params.Position) + case source.Go: + // Partial support for jumping from linkname directive (position at 2nd argument). + locations, err := source.LinknameDefinition(ctx, snapshot, fh, params.Position) + if !errors.Is(err, source.ErrNoLinkname) { + return locations, err + } + return source.Definition(ctx, snapshot, fh, params.Position) + default: + return nil, fmt.Errorf("can't find definitions for file type %s", kind) + } +} + +func (s *Server) typeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) { + // TODO(rfindley): type definition requests should be multiplexed across all views. + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + switch kind := snapshot.View().FileKind(fh); kind { + case source.Go: + return source.TypeDefinition(ctx, snapshot, fh, params.Position) + default: + return nil, fmt.Errorf("can't find type definitions for file type %s", kind) + } +} diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go new file mode 100644 index 00000000000..e473bcaa227 --- /dev/null +++ b/gopls/internal/lsp/diagnostics.go @@ -0,0 +1,780 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/gopls/internal/lsp/debug/log" + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" + "golang.org/x/tools/gopls/internal/lsp/work" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/xcontext" +) + +// diagnosticSource differentiates different sources of diagnostics. +type diagnosticSource int + +const ( + modSource diagnosticSource = iota + gcDetailsSource + analysisSource + typeCheckSource + orphanedSource + workSource + modCheckUpgradesSource + modVulncheckSource // source.Govulncheck + source.Vulncheck +) + +// A diagnosticReport holds results for a single diagnostic source. +type diagnosticReport struct { + snapshotID source.GlobalSnapshotID // global snapshot ID on which the report was computed + publishedHash string // last published hash for this (URI, source) + diags map[string]*source.Diagnostic +} + +// fileReports holds a collection of diagnostic reports for a single file, as +// well as the hash of the last published set of diagnostics. +type fileReports struct { + // publishedSnapshotID is the last snapshot ID for which we have "published" + // diagnostics (though the publishDiagnostics notification may not have + // actually been sent, if nothing changed). + // + // Specifically, publishedSnapshotID is updated to a later snapshot ID when + // we either: + // (1) publish diagnostics for the file for a snapshot, or + // (2) determine that published diagnostics are valid for a new snapshot. + // + // Notably publishedSnapshotID may not match the snapshot id on individual reports in + // the reports map: + // - we may have published partial diagnostics from only a subset of + // diagnostic sources for which new results have been computed, or + // - we may have started computing reports for an even new snapshot, but not + // yet published. + // + // This prevents gopls from publishing stale diagnostics. + publishedSnapshotID source.GlobalSnapshotID + + // publishedHash is a hash of the latest diagnostics published for the file. + publishedHash string + + // If set, mustPublish marks diagnostics as needing publication, independent + // of whether their publishedHash has changed. + mustPublish bool + + // The last stored diagnostics for each diagnostic source. + reports map[diagnosticSource]diagnosticReport +} + +func (d diagnosticSource) String() string { + switch d { + case modSource: + return "FromSource" + case gcDetailsSource: + return "FromGCDetails" + case analysisSource: + return "FromAnalysis" + case typeCheckSource: + return "FromTypeChecking" + case orphanedSource: + return "FromOrphans" + case workSource: + return "FromGoWork" + case modCheckUpgradesSource: + return "FromCheckForUpgrades" + case modVulncheckSource: + return "FromModVulncheck" + default: + return fmt.Sprintf("From?%d?", d) + } +} + +// hashDiagnostics computes a hash to identify diags. +func hashDiagnostics(diags ...*source.Diagnostic) string { + source.SortDiagnostics(diags) + h := sha256.New() + for _, d := range diags { + for _, t := range d.Tags { + fmt.Fprintf(h, "%s", t) + } + for _, r := range d.Related { + fmt.Fprintf(h, "%s%s%s", r.URI, r.Message, r.Range) + } + fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source) + } + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (s *Server) diagnoseDetached(snapshot source.Snapshot) { + ctx := snapshot.BackgroundContext() + ctx = xcontext.Detach(ctx) + s.diagnose(ctx, snapshot, false) + s.publishDiagnostics(ctx, true, snapshot) +} + +func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onDisk bool) { + var diagnosticWG sync.WaitGroup + for snapshot, uris := range snapshots { + diagnosticWG.Add(1) + go func(snapshot source.Snapshot, uris []span.URI) { + defer diagnosticWG.Done() + s.diagnoseSnapshot(snapshot, uris, onDisk) + }(snapshot, uris) + } + diagnosticWG.Wait() +} + +func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) { + ctx := snapshot.BackgroundContext() + ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", source.SnapshotLabels(snapshot)...) + defer done() + + delay := snapshot.View().Options().DiagnosticsDelay + if delay > 0 { + // 2-phase diagnostics. + // + // The first phase just parses and type-checks (but + // does not analyze) packages directly affected by + // file modifications. + // + // The second phase runs analysis on the entire snapshot, + // and is debounced by the configured delay. + s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk) + s.publishDiagnostics(ctx, false, snapshot) + + // We debounce diagnostics separately for each view, using the snapshot + // local ID as logical ordering. + // + // TODO(rfindley): it would be cleaner to simply put the diagnostic + // debouncer on the view, and remove the "key" argument to debouncing. + if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.SequenceID(), time.After(delay)); ok { + s.diagnose(ctx, snapshot, false) + s.publishDiagnostics(ctx, true, snapshot) + } + return + } + + // Ignore possible workspace configuration warnings in the normal flow. + s.diagnose(ctx, snapshot, false) + s.publishDiagnostics(ctx, true, snapshot) +} + +func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) { + ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", source.SnapshotLabels(snapshot)...) + defer done() + + // TODO(adonovan): safety: refactor so that group.Go is called + // in a second loop, so that if we should later add an early + // return to the first loop, we don't leak goroutines. + var group errgroup.Group + seen := make(map[*source.Metadata]bool) + for _, uri := range uris { + // If the change is only on-disk and the file is not open, don't + // directly request its package. It may not be a workspace package. + if onDisk && !snapshot.IsOpen(uri) { + continue + } + // If the file is not known to the snapshot (e.g., if it was deleted), + // don't diagnose it. + if snapshot.FindFile(uri) == nil { + continue + } + + // Don't request type-checking for builtin.go: it's not a real package. + if snapshot.IsBuiltin(ctx, uri) { + continue + } + + // Find all packages that include this file and diagnose them in parallel. + metas, err := snapshot.MetadataForFile(ctx, uri) + if err != nil { + // TODO(findleyr): we should probably do something with the error here, + // but as of now this can fail repeatedly if load fails, so can be too + // noisy to log (and we'll handle things later in the slow pass). + continue + } + for _, m := range metas { + if m.IsIntermediateTestVariant() { + continue + } + if !seen[m] { + seen[m] = true + m := m + group.Go(func() error { + s.diagnosePkg(ctx, snapshot, m, false) + return nil // error result is ignored + }) + } + } + } + group.Wait() // ignore error +} + +// diagnose is a helper function for running diagnostics with a given context. +// Do not call it directly. forceAnalysis is only true for testing purposes. +func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAnalysis bool) { + ctx, done := event.Start(ctx, "Server.diagnose", source.SnapshotLabels(snapshot)...) + defer done() + + // Wait for a free diagnostics slot. + // TODO(adonovan): opt: shouldn't it be the analysis implementation's + // job to de-dup and limit resource consumption? In any case this + // this function spends most its time waiting for awaitLoaded, at + // least initially. + select { + case <-ctx.Done(): + return + case s.diagnosticsSema <- struct{}{}: + } + defer func() { + <-s.diagnosticsSema + }() + + // common code for dispatching diagnostics + store := func(dsource diagnosticSource, operation string, diagsByFile map[span.URI][]*source.Diagnostic, err error, merge bool) { + if err != nil { + event.Error(ctx, "warning: while "+operation, err, source.SnapshotLabels(snapshot)...) + } + for uri, diags := range diagsByFile { + if uri == "" { + event.Error(ctx, "missing URI while "+operation, fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename())) + continue + } + s.storeDiagnostics(snapshot, uri, dsource, diags, merge) + } + } + + // Diagnose go.mod upgrades. + upgradeReports, upgradeErr := mod.UpgradeDiagnostics(ctx, snapshot) + if ctx.Err() != nil { + log.Trace.Log(ctx, "diagnose cancelled") + return + } + store(modCheckUpgradesSource, "diagnosing go.mod upgrades", upgradeReports, upgradeErr, true) + + // Diagnose go.work file. + workReports, workErr := work.Diagnostics(ctx, snapshot) + if ctx.Err() != nil { + log.Trace.Log(ctx, "diagnose cancelled") + return + } + store(workSource, "diagnosing go.work file", workReports, workErr, true) + + // Diagnose go.mod file. + // (This step demands type checking of all active packages: + // the bottleneck in the startup sequence for a big workspace.) + modReports, modErr := mod.Diagnostics(ctx, snapshot) + if ctx.Err() != nil { + log.Trace.Log(ctx, "diagnose cancelled") + return + } + store(modSource, "diagnosing go.mod file", modReports, modErr, true) + + // Diagnose vulnerabilities. + vulnReports, vulnErr := mod.VulnerabilityDiagnostics(ctx, snapshot) + if ctx.Err() != nil { + log.Trace.Log(ctx, "diagnose cancelled") + return + } + store(modVulncheckSource, "diagnosing vulnerabilities", vulnReports, vulnErr, false) + + activeMetas, activeErr := snapshot.ActiveMetadata(ctx) + if s.shouldIgnoreError(ctx, snapshot, activeErr) { + return + } + criticalErr := snapshot.GetCriticalError(ctx) + if ctx.Err() != nil { // must check ctx after GetCriticalError + return + } + + // Show the error as a progress error report so that it appears in the + // status bar. If a client doesn't support progress reports, the error + // will still be shown as a ShowMessage. If there is no error, any running + // error progress reports will be closed. + s.showCriticalErrorStatus(ctx, snapshot, criticalErr) + + // Diagnose template (.tmpl) files. + for _, f := range snapshot.Templates() { + diags := template.Diagnose(f) + s.storeDiagnostics(snapshot, f.URI(), typeCheckSource, diags, true) + } + + // If there are no workspace packages, there is nothing to diagnose and + // there are no orphaned files. + if len(activeMetas) == 0 { + return + } + + // Run go/analysis diagnosis of packages in parallel. + // TODO(adonovan): opt: it may be more efficient to + // have diagnosePkg take a set of packages. + // + // TODO(adonovan): opt: since the new analysis driver does its + // own type checking, we could strength-reduce pkg to + // PackageID and get this step started as soon as the set of + // active package IDs are known, without waiting for them to load. + var ( + wg sync.WaitGroup + seen = map[span.URI]struct{}{} + ) + for _, m := range activeMetas { + for _, uri := range m.CompiledGoFiles { + seen[uri] = struct{}{} + } + + wg.Add(1) + go func(m *source.Metadata) { + defer wg.Done() + s.diagnosePkg(ctx, snapshot, m, forceAnalysis) + }(m) + } + wg.Wait() + + // Orphaned files. + // Confirm that every opened file belongs to a package (if any exist in + // the workspace). Otherwise, add a diagnostic to the file. + for _, o := range s.session.Overlays() { + if _, ok := seen[o.URI()]; ok { + continue + } + diagnostic := s.checkForOrphanedFile(ctx, snapshot, o) + if diagnostic == nil { + continue + } + s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic}, true) + } +} + +func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, m *source.Metadata, alwaysAnalyze bool) { + ctx, done := event.Start(ctx, "Server.diagnosePkg", append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...) + defer done() + enableDiagnostics := false + includeAnalysis := alwaysAnalyze // only run analyses for packages with open files + for _, uri := range m.CompiledGoFiles { + enableDiagnostics = enableDiagnostics || !snapshot.IgnoredFile(uri) + includeAnalysis = includeAnalysis || snapshot.IsOpen(uri) + } + // Don't show any diagnostics on ignored files. + if !enableDiagnostics { + return + } + + pkgs, err := snapshot.TypeCheck(ctx, source.TypecheckFull, m.ID) + if err != nil { + event.Error(ctx, "warning: typecheck failed", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...) + return + } + pkg := pkgs[0] + + // Get diagnostics from analysis framework. + // This includes type-error analyzers, which suggest fixes to compiler errors. + var analysisDiags map[span.URI][]*source.Diagnostic + if includeAnalysis { + diags, err := source.Analyze(ctx, snapshot, m.ID, false) + if err != nil { + event.Error(ctx, "warning: analyzing package", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...) + return + } + analysisDiags = diags + } + + // For each file, update the server's diagnostics state. + for _, cgf := range pkg.CompiledGoFiles() { + // builtin.go exists only for documentation purposes and + // is not valid Go code. Don't report distracting errors. + if snapshot.IsBuiltin(ctx, cgf.URI) { + continue + } + + pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, cgf.URI) + if err != nil { + event.Error(ctx, "warning: getting package diagnostics", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...) + return + } + + var tdiags, adiags []*source.Diagnostic + source.CombineDiagnostics(pkgDiags, analysisDiags[cgf.URI], &tdiags, &adiags) + s.storeDiagnostics(snapshot, cgf.URI, typeCheckSource, tdiags, true) + s.storeDiagnostics(snapshot, cgf.URI, analysisSource, adiags, true) + } + + // If gc optimization details are requested, add them to the + // diagnostic reports. + s.gcOptimizationDetailsMu.Lock() + _, enableGCDetails := s.gcOptimizationDetails[m.ID] + s.gcOptimizationDetailsMu.Unlock() + if enableGCDetails { + gcReports, err := source.GCOptimizationDetails(ctx, snapshot, m) + if err != nil { + event.Error(ctx, "warning: gc details", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...) + } + s.gcOptimizationDetailsMu.Lock() + _, enableGCDetails := s.gcOptimizationDetails[m.ID] + + // NOTE(golang/go#44826): hold the gcOptimizationDetails lock, and re-check + // whether gc optimization details are enabled, while storing gc_details + // results. This ensures that the toggling of GC details and clearing of + // diagnostics does not race with storing the results here. + if enableGCDetails { + for uri, diags := range gcReports { + fh := snapshot.FindFile(uri) + // Don't publish gc details for unsaved buffers, since the underlying + // logic operates on the file on disk. + if fh == nil || !fh.Saved() { + continue + } + s.storeDiagnostics(snapshot, uri, gcDetailsSource, diags, true) + } + } + s.gcOptimizationDetailsMu.Unlock() + } +} + +// mustPublishDiagnostics marks the uri as needing publication, independent of +// whether the published contents have changed. +// +// This can be used for ensuring gopls publishes diagnostics after certain file +// events. +func (s *Server) mustPublishDiagnostics(uri span.URI) { + s.diagnosticsMu.Lock() + defer s.diagnosticsMu.Unlock() + + if s.diagnostics[uri] == nil { + s.diagnostics[uri] = &fileReports{ + publishedHash: hashDiagnostics(), // Hash for 0 diagnostics. + reports: map[diagnosticSource]diagnosticReport{}, + } + } + s.diagnostics[uri].mustPublish = true +} + +// storeDiagnostics stores results from a single diagnostic source. If merge is +// true, it merges results into any existing results for this snapshot. +// +// TODO(hyangah): investigate whether we can unconditionally overwrite previous report.diags +// with the new diags and eliminate the need for the `merge` flag. +func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic, merge bool) { + // Safeguard: ensure that the file actually exists in the snapshot + // (see golang.org/issues/38602). + fh := snapshot.FindFile(uri) + if fh == nil { + return + } + + s.diagnosticsMu.Lock() + defer s.diagnosticsMu.Unlock() + if s.diagnostics[uri] == nil { + s.diagnostics[uri] = &fileReports{ + publishedHash: hashDiagnostics(), // Hash for 0 diagnostics. + reports: map[diagnosticSource]diagnosticReport{}, + } + } + report := s.diagnostics[uri].reports[dsource] + // Don't set obsolete diagnostics. + if report.snapshotID > snapshot.GlobalID() { + return + } + if report.diags == nil || report.snapshotID != snapshot.GlobalID() || !merge { + report.diags = map[string]*source.Diagnostic{} + } + report.snapshotID = snapshot.GlobalID() + for _, d := range diags { + report.diags[hashDiagnostics(d)] = d + } + s.diagnostics[uri].reports[dsource] = report +} + +// clearDiagnosticSource clears all diagnostics for a given source type. It is +// necessary for cases where diagnostics have been invalidated by something +// other than a snapshot change, for example when gc_details is toggled. +func (s *Server) clearDiagnosticSource(dsource diagnosticSource) { + s.diagnosticsMu.Lock() + defer s.diagnosticsMu.Unlock() + for _, reports := range s.diagnostics { + delete(reports.reports, dsource) + } +} + +const WorkspaceLoadFailure = "Error loading workspace" + +// showCriticalErrorStatus shows the error as a progress report. +// If the error is nil, it clears any existing error progress report. +func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Snapshot, err *source.CriticalError) { + s.criticalErrorStatusMu.Lock() + defer s.criticalErrorStatusMu.Unlock() + + // Remove all newlines so that the error message can be formatted in a + // status bar. + var errMsg string + if err != nil { + event.Error(ctx, "errors loading workspace", err.MainError, source.SnapshotLabels(snapshot)...) + for _, d := range err.Diagnostics { + s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d}, true) + } + errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ") + } + + if s.criticalErrorStatus == nil { + if errMsg != "" { + s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil) + } + return + } + + // If an error is already shown to the user, update it or mark it as + // resolved. + if errMsg == "" { + s.criticalErrorStatus.End(ctx, "Done.") + s.criticalErrorStatus = nil + } else { + s.criticalErrorStatus.Report(ctx, errMsg, 0) + } +} + +// checkForOrphanedFile checks that the given URIs can be mapped to packages. +// If they cannot and the workspace is not otherwise unloaded, it also surfaces +// a warning, suggesting that the user check the file for build tags. +func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) *source.Diagnostic { + // TODO(rfindley): this function may fail to produce a diagnostic for a + // variety of reasons, some of which should probably not be ignored. For + // example, should this function be tolerant of the case where fh does not + // exist, or does not have a package name? + // + // It would be better to panic or report a bug in several of the cases below, + // so that we can move toward guaranteeing we show the user a meaningful + // error whenever it makes sense. + if snapshot.View().FileKind(fh) != source.Go { + return nil + } + // builtin files won't have a package, but they are never orphaned. + if snapshot.IsBuiltin(ctx, fh.URI()) { + return nil + } + + // This call has the effect of inserting fh into snapshot.files, + // where for better or worse (actually: just worse) it influences + // the sets of open, known, and orphaned files. + snapshot.GetFile(ctx, fh.URI()) + + metas, _ := snapshot.MetadataForFile(ctx, fh.URI()) + if len(metas) > 0 || ctx.Err() != nil { + return nil // no package, or cancelled + } + // Inv: file does not belong to a package we know about. + pgf, err := snapshot.ParseGo(ctx, fh, source.ParseHeader) + if err != nil { + return nil + } + if !pgf.File.Name.Pos().IsValid() { + return nil + } + rng, err := pgf.NodeRange(pgf.File.Name) + if err != nil { + return nil + } + // If the file no longer has a name ending in .go, this diagnostic is wrong + if filepath.Ext(fh.URI().Filename()) != ".go" { + return nil + } + // TODO(rstambler): We should be able to parse the build tags in the + // file and show a more specific error message. For now, put the diagnostic + // on the package declaration. + return &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityWarning, + Source: source.ListError, + Message: fmt.Sprintf(`No packages found for open file %s: %v. +If this file contains build tags, try adding "-tags=" to your gopls "buildFlags" configuration (see (https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string). +Otherwise, see the troubleshooting guidelines for help investigating (https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md). +`, fh.URI().Filename(), err), + } +} + +// publishDiagnostics collects and publishes any unpublished diagnostic reports. +func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) { + ctx, done := event.Start(ctx, "Server.publishDiagnostics", source.SnapshotLabels(snapshot)...) + defer done() + + s.diagnosticsMu.Lock() + defer s.diagnosticsMu.Unlock() + + for uri, r := range s.diagnostics { + // Global snapshot IDs are monotonic, so we use them to enforce an ordering + // for diagnostics. + // + // If we've already delivered diagnostics for a future snapshot for this + // file, do not deliver them. See golang/go#42837 for an example of why + // this is necessary. + // + // TODO(rfindley): even using a global snapshot ID, this mechanism is + // potentially racy: elsewhere in the code (e.g. invalidateContent) we + // allow for multiple views track a given file. In this case, we should + // either only report diagnostics for snapshots from the "best" view of a + // URI, or somehow merge diagnostics from multiple views. + if r.publishedSnapshotID > snapshot.GlobalID() { + continue + } + + anyReportsChanged := false + reportHashes := map[diagnosticSource]string{} + var diags []*source.Diagnostic + for dsource, report := range r.reports { + if report.snapshotID != snapshot.GlobalID() { + continue + } + var reportDiags []*source.Diagnostic + for _, d := range report.diags { + diags = append(diags, d) + reportDiags = append(reportDiags, d) + } + hash := hashDiagnostics(reportDiags...) + if hash != report.publishedHash { + anyReportsChanged = true + } + reportHashes[dsource] = hash + } + + if !final && !anyReportsChanged { + // Don't invalidate existing reports on the client if we haven't got any + // new information. + continue + } + + source.SortDiagnostics(diags) + hash := hashDiagnostics(diags...) + if hash == r.publishedHash && !r.mustPublish { + // Update snapshotID to be the latest snapshot for which this diagnostic + // hash is valid. + r.publishedSnapshotID = snapshot.GlobalID() + continue + } + var version int32 + if fh := snapshot.FindFile(uri); fh != nil { // file may have been deleted + version = fh.Version() + } + if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{ + Diagnostics: toProtocolDiagnostics(diags), + URI: protocol.URIFromSpanURI(uri), + Version: version, + }); err == nil { + r.publishedHash = hash + r.mustPublish = false // diagnostics have been successfully published + r.publishedSnapshotID = snapshot.GlobalID() + for dsource, hash := range reportHashes { + report := r.reports[dsource] + report.publishedHash = hash + r.reports[dsource] = report + } + } else { + if ctx.Err() != nil { + // Publish may have failed due to a cancelled context. + log.Trace.Log(ctx, "publish cancelled") + return + } + event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri)) + } + } +} + +func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnostic { + reports := []protocol.Diagnostic{} + for _, diag := range diagnostics { + related := make([]protocol.DiagnosticRelatedInformation, 0, len(diag.Related)) + for _, rel := range diag.Related { + related = append(related, protocol.DiagnosticRelatedInformation{ + Location: protocol.Location{ + URI: protocol.URIFromSpanURI(rel.URI), + Range: rel.Range, + }, + Message: rel.Message, + }) + } + pdiag := protocol.Diagnostic{ + // diag.Message might start with \n or \t + Message: strings.TrimSpace(diag.Message), + Range: diag.Range, + Severity: diag.Severity, + Source: string(diag.Source), + Tags: diag.Tags, + RelatedInformation: related, + } + if diag.Code != "" { + pdiag.Code = diag.Code + } + if diag.CodeHref != "" { + pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref} + } + reports = append(reports, pdiag) + } + return reports +} + +func (s *Server) shouldIgnoreError(ctx context.Context, snapshot source.Snapshot, err error) bool { + if err == nil { // if there is no error at all + return false + } + if errors.Is(err, context.Canceled) { + return true + } + // If the folder has no Go code in it, we shouldn't spam the user with a warning. + var hasGo bool + _ = filepath.Walk(snapshot.View().Folder().Filename(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !strings.HasSuffix(info.Name(), ".go") { + return nil + } + hasGo = true + return errors.New("done") + }) + return !hasGo +} + +// Diagnostics formattedfor the debug server +// (all the relevant fields of Server are private) +// (The alternative is to export them) +func (s *Server) Diagnostics() map[string][]string { + ans := make(map[string][]string) + s.diagnosticsMu.Lock() + defer s.diagnosticsMu.Unlock() + for k, v := range s.diagnostics { + fn := k.Filename() + for typ, d := range v.reports { + if len(d.diags) == 0 { + continue + } + for _, dx := range d.diags { + ans[fn] = append(ans[fn], auxStr(dx, d, typ)) + } + } + } + return ans +} + +func auxStr(v *source.Diagnostic, d diagnosticReport, typ diagnosticSource) string { + // Tags? RelatedInformation? + msg := fmt.Sprintf("(%s)%q(source:%q,code:%q,severity:%s,snapshot:%d,type:%s)", + v.Range, v.Message, v.Source, v.Code, v.Severity, d.snapshotID, typ) + for _, r := range v.Related { + msg += fmt.Sprintf(" [%s:%s,%q]", r.URI.Filename(), r.Range, r.Message) + } + return msg +} diff --git a/gopls/internal/lsp/fake/client.go b/gopls/internal/lsp/fake/client.go new file mode 100644 index 00000000000..d6c886f179d --- /dev/null +++ b/gopls/internal/lsp/fake/client.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "context" + "encoding/json" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/glob" + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +// ClientHooks are called to handle the corresponding client LSP method. +type ClientHooks struct { + OnLogMessage func(context.Context, *protocol.LogMessageParams) error + OnDiagnostics func(context.Context, *protocol.PublishDiagnosticsParams) error + OnWorkDoneProgressCreate func(context.Context, *protocol.WorkDoneProgressCreateParams) error + OnProgress func(context.Context, *protocol.ProgressParams) error + OnShowMessage func(context.Context, *protocol.ShowMessageParams) error + OnShowMessageRequest func(context.Context, *protocol.ShowMessageRequestParams) error + OnRegistration func(context.Context, *protocol.RegistrationParams) error + OnUnregistration func(context.Context, *protocol.UnregistrationParams) error +} + +// Client is an adapter that converts an *Editor into an LSP Client. It mosly +// delegates functionality to hooks that can be configured by tests. +type Client struct { + editor *Editor + hooks ClientHooks +} + +func (c *Client) CodeLensRefresh(context.Context) error { return nil } + +func (c *Client) InlayHintRefresh(context.Context) error { return nil } + +func (c *Client) DiagnosticRefresh(context.Context) error { return nil } + +func (c *Client) InlineValueRefresh(context.Context) error { return nil } + +func (c *Client) SemanticTokensRefresh(context.Context) error { return nil } + +func (c *Client) LogTrace(context.Context, *protocol.LogTraceParams) error { return nil } + +func (c *Client) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) error { + if c.hooks.OnShowMessage != nil { + return c.hooks.OnShowMessage(ctx, params) + } + return nil +} + +func (c *Client) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) { + if c.hooks.OnShowMessageRequest != nil { + if err := c.hooks.OnShowMessageRequest(ctx, params); err != nil { + return nil, err + } + } + if len(params.Actions) == 0 || len(params.Actions) > 1 { + return nil, fmt.Errorf("fake editor cannot handle multiple action items") + } + return ¶ms.Actions[0], nil +} + +func (c *Client) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error { + if c.hooks.OnLogMessage != nil { + return c.hooks.OnLogMessage(ctx, params) + } + return nil +} + +func (c *Client) Event(ctx context.Context, event *interface{}) error { + return nil +} + +func (c *Client) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) error { + if c.hooks.OnDiagnostics != nil { + return c.hooks.OnDiagnostics(ctx, params) + } + return nil +} + +func (c *Client) WorkspaceFolders(context.Context) ([]protocol.WorkspaceFolder, error) { + return []protocol.WorkspaceFolder{}, nil +} + +func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) { + results := make([]interface{}, len(p.Items)) + for i, item := range p.Items { + if item.Section == "gopls" { + c.editor.mu.Lock() + results[i] = c.editor.settingsLocked() + c.editor.mu.Unlock() + } + } + return results, nil +} + +func (c *Client) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) error { + if c.hooks.OnRegistration != nil { + if err := c.hooks.OnRegistration(ctx, params); err != nil { + return err + } + } + // Update file watching patterns. + // + // TODO(rfindley): We could verify more here, like verify that the + // registration ID is distinct, and that the capability is not currently + // registered. + for _, registration := range params.Registrations { + if registration.Method == "workspace/didChangeWatchedFiles" { + // Marshal and unmarshal to interpret RegisterOptions as + // DidChangeWatchedFilesRegistrationOptions. + raw, err := json.Marshal(registration.RegisterOptions) + if err != nil { + return fmt.Errorf("marshaling registration options: %v", err) + } + var opts protocol.DidChangeWatchedFilesRegistrationOptions + if err := json.Unmarshal(raw, &opts); err != nil { + return fmt.Errorf("unmarshaling registration options: %v", err) + } + var globs []*glob.Glob + for _, watcher := range opts.Watchers { + // TODO(rfindley): honor the watch kind. + g, err := glob.Parse(watcher.GlobPattern) + if err != nil { + return fmt.Errorf("error parsing glob pattern %q: %v", watcher.GlobPattern, err) + } + globs = append(globs, g) + } + c.editor.mu.Lock() + c.editor.watchPatterns = globs + c.editor.mu.Unlock() + } + } + return nil +} + +func (c *Client) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) error { + if c.hooks.OnUnregistration != nil { + return c.hooks.OnUnregistration(ctx, params) + } + return nil +} + +func (c *Client) Progress(ctx context.Context, params *protocol.ProgressParams) error { + if c.hooks.OnProgress != nil { + return c.hooks.OnProgress(ctx, params) + } + return nil +} + +func (c *Client) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error { + if c.hooks.OnWorkDoneProgressCreate != nil { + return c.hooks.OnWorkDoneProgressCreate(ctx, params) + } + return nil +} + +func (c *Client) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) { + return nil, nil +} + +// ApplyEdit applies edits sent from the server. +func (c *Client) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) { + if len(params.Edit.Changes) != 0 { + return &protocol.ApplyWorkspaceEditResult{FailureReason: "Edit.Changes is unsupported"}, nil + } + for _, change := range params.Edit.DocumentChanges { + if err := c.editor.applyDocumentChange(ctx, change); err != nil { + return nil, err + } + } + return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil +} diff --git a/internal/lsp/fake/doc.go b/gopls/internal/lsp/fake/doc.go similarity index 100% rename from internal/lsp/fake/doc.go rename to gopls/internal/lsp/fake/doc.go diff --git a/gopls/internal/lsp/fake/edit.go b/gopls/internal/lsp/fake/edit.go new file mode 100644 index 00000000000..5fd65b0c855 --- /dev/null +++ b/gopls/internal/lsp/fake/edit.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/diff" +) + +// NewEdit creates an edit replacing all content between the 0-based +// (startLine, startColumn) and (endLine, endColumn) with text. +// +// Columns measure UTF-16 codes. +func NewEdit(startLine, startColumn, endLine, endColumn uint32, text string) protocol.TextEdit { + return protocol.TextEdit{ + Range: protocol.Range{ + Start: protocol.Position{Line: startLine, Character: startColumn}, + End: protocol.Position{Line: endLine, Character: endColumn}, + }, + NewText: text, + } +} + +func EditToChangeEvent(e protocol.TextEdit) protocol.TextDocumentContentChangeEvent { + var rng protocol.Range = e.Range + return protocol.TextDocumentContentChangeEvent{ + Range: &rng, + Text: e.NewText, + } +} + +// applyEdits applies the edits to a file with the specified lines, +// and returns a new slice containing the lines of the patched file. +// It is a wrapper around diff.Apply; see that function for preconditions. +func applyEdits(mapper *protocol.Mapper, edits []protocol.TextEdit, windowsLineEndings bool) ([]byte, error) { + // Convert fake.Edits to diff.Edits + diffEdits := make([]diff.Edit, len(edits)) + for i, edit := range edits { + start, end, err := mapper.RangeOffsets(edit.Range) + if err != nil { + return nil, err + } + diffEdits[i] = diff.Edit{ + Start: start, + End: end, + New: edit.NewText, + } + } + + patchedString, err := diff.Apply(string(mapper.Content), diffEdits) + if err != nil { + return nil, err + } + patched := []byte(patchedString) + if windowsLineEndings { + patched = toWindowsLineEndings(patched) + } + return patched, nil +} diff --git a/gopls/internal/lsp/fake/edit_test.go b/gopls/internal/lsp/fake/edit_test.go new file mode 100644 index 00000000000..97e2c73e42d --- /dev/null +++ b/gopls/internal/lsp/fake/edit_test.go @@ -0,0 +1,96 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +func TestApplyEdits(t *testing.T) { + tests := []struct { + label string + content string + edits []protocol.TextEdit + want string + wantErr bool + }{ + { + label: "empty content", + }, + { + label: "empty edit", + content: "hello", + edits: []protocol.TextEdit{}, + want: "hello", + }, + { + label: "unicode edit", + content: "hello, ę—„ęœ¬čŖž", + edits: []protocol.TextEdit{ + NewEdit(0, 7, 0, 10, "world"), + }, + want: "hello, world", + }, + { + label: "range edit", + content: "ABC\nDEF\nGHI\nJKL", + edits: []protocol.TextEdit{ + NewEdit(1, 1, 2, 3, "12\n345"), + }, + want: "ABC\nD12\n345\nJKL", + }, + { + label: "regression test for issue #57627", + content: "go 1.18\nuse moda/a", + edits: []protocol.TextEdit{ + NewEdit(1, 0, 1, 0, "\n"), + NewEdit(2, 0, 2, 0, "\n"), + }, + want: "go 1.18\n\nuse moda/a\n", + }, + { + label: "end before start", + content: "ABC\nDEF\nGHI\nJKL", + edits: []protocol.TextEdit{ + NewEdit(2, 3, 1, 1, "12\n345"), + }, + wantErr: true, + }, + { + label: "out of bounds line", + content: "ABC\nDEF\nGHI\nJKL", + edits: []protocol.TextEdit{ + NewEdit(1, 1, 4, 3, "12\n345"), + }, + wantErr: true, + }, + { + label: "out of bounds column", + content: "ABC\nDEF\nGHI\nJKL", + edits: []protocol.TextEdit{ + NewEdit(1, 4, 2, 3, "12\n345"), + }, + wantErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.label, func(t *testing.T) { + got, err := applyEdits(protocol.NewMapper("", []byte(test.content)), test.edits, false) + if (err != nil) != test.wantErr { + t.Errorf("got err %v, want error: %t", err, test.wantErr) + } + if err != nil { + return + } + if got := string(got); got != test.want { + t.Errorf("got %q, want %q", got, test.want) + } + }) + } +} diff --git a/gopls/internal/lsp/fake/editor.go b/gopls/internal/lsp/fake/editor.go new file mode 100644 index 00000000000..84d7fb85fda --- /dev/null +++ b/gopls/internal/lsp/fake/editor.go @@ -0,0 +1,1461 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/glob" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" + "golang.org/x/tools/internal/xcontext" +) + +// Editor is a fake editor client. It keeps track of client state and can be +// used for writing LSP tests. +type Editor struct { + + // Server, client, and sandbox are concurrency safe and written only + // at construction time, so do not require synchronization. + Server protocol.Server + cancelConn func() + serverConn jsonrpc2.Conn + client *Client + sandbox *Sandbox + defaultEnv map[string]string + + // TODO(adonovan): buffers should be keyed by protocol.DocumentURI. + mu sync.Mutex + config EditorConfig // editor configuration + buffers map[string]buffer // open buffers (relative path -> buffer content) + serverCapabilities protocol.ServerCapabilities // capabilities / options + watchPatterns []*glob.Glob // glob patterns to watch + + // Call metrics for the purpose of expectations. This is done in an ad-hoc + // manner for now. Perhaps in the future we should do something more + // systematic. Guarded with a separate mutex as calls may need to be accessed + // asynchronously via callbacks into the Editor. + callsMu sync.Mutex + calls CallCounts +} + +// CallCounts tracks the number of protocol notifications of different types. +type CallCounts struct { + DidOpen, DidChange, DidSave, DidChangeWatchedFiles, DidClose uint64 +} + +// buffer holds information about an open buffer in the editor. +type buffer struct { + version int // monotonic version; incremented on edits + path string // relative path in the workspace + mapper *protocol.Mapper // buffer content + dirty bool // if true, content is unsaved (TODO(rfindley): rename this field) +} + +func (b buffer) text() string { + return string(b.mapper.Content) +} + +// EditorConfig configures the editor's LSP session. This is similar to +// source.UserOptions, but we use a separate type here so that we expose only +// that configuration which we support. +// +// The zero value for EditorConfig should correspond to its defaults. +type EditorConfig struct { + // Env holds environment variables to apply on top of the default editor + // environment. When applying these variables, the special string + // $SANDBOX_WORKDIR is replaced by the absolute path to the sandbox working + // directory. + Env map[string]string + + // WorkspaceFolders is the workspace folders to configure on the LSP server, + // relative to the sandbox workdir. + // + // As a special case, if WorkspaceFolders is nil the editor defaults to + // configuring a single workspace folder corresponding to the workdir root. + // To explicitly send no workspace folders, use an empty (non-nil) slice. + WorkspaceFolders []string + + // Whether to edit files with windows line endings. + WindowsLineEndings bool + + // Map of language ID -> regexp to match, used to set the file type of new + // buffers. Applied as an overlay on top of the following defaults: + // "go" -> ".*\.go" + // "go.mod" -> "go\.mod" + // "go.sum" -> "go\.sum" + // "gotmpl" -> ".*tmpl" + FileAssociations map[string]string + + // Settings holds user-provided configuration for the LSP server. + Settings map[string]interface{} +} + +// NewEditor Creates a new Editor. +func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor { + return &Editor{ + buffers: make(map[string]buffer), + sandbox: sandbox, + defaultEnv: sandbox.GoEnv(), + config: config, + } +} + +// Connect configures the editor to communicate with an LSP server on conn. It +// is not concurrency safe, and should be called at most once, before using the +// editor. +// +// It returns the editor, so that it may be called as follows: +// +// editor, err := NewEditor(s).Connect(ctx, conn, hooks) +func (e *Editor) Connect(ctx context.Context, connector servertest.Connector, hooks ClientHooks) (*Editor, error) { + bgCtx, cancelConn := context.WithCancel(xcontext.Detach(ctx)) + conn := connector.Connect(bgCtx) + e.cancelConn = cancelConn + + e.serverConn = conn + e.Server = protocol.ServerDispatcher(conn) + e.client = &Client{editor: e, hooks: hooks} + conn.Go(bgCtx, + protocol.Handlers( + protocol.ClientHandler(e.client, + jsonrpc2.MethodNotFound))) + + if err := e.initialize(ctx); err != nil { + return nil, err + } + e.sandbox.Workdir.AddWatcher(e.onFileChanges) + return e, nil +} + +func (e *Editor) Stats() CallCounts { + e.callsMu.Lock() + defer e.callsMu.Unlock() + return e.calls +} + +// Shutdown issues the 'shutdown' LSP notification. +func (e *Editor) Shutdown(ctx context.Context) error { + if e.Server != nil { + if err := e.Server.Shutdown(ctx); err != nil { + return fmt.Errorf("Shutdown: %w", err) + } + } + return nil +} + +// Exit issues the 'exit' LSP notification. +func (e *Editor) Exit(ctx context.Context) error { + if e.Server != nil { + // Not all LSP clients issue the exit RPC, but we do so here to ensure that + // we gracefully handle it on multi-session servers. + if err := e.Server.Exit(ctx); err != nil { + return fmt.Errorf("Exit: %w", err) + } + } + return nil +} + +// Close issues the shutdown and exit sequence an editor should. +func (e *Editor) Close(ctx context.Context) error { + if err := e.Shutdown(ctx); err != nil { + return err + } + if err := e.Exit(ctx); err != nil { + return err + } + defer func() { + e.cancelConn() + }() + + // called close on the editor should result in the connection closing + select { + case <-e.serverConn.Done(): + // connection closed itself + return nil + case <-ctx.Done(): + return fmt.Errorf("connection not closed: %w", ctx.Err()) + } +} + +// Client returns the LSP client for this editor. +func (e *Editor) Client() *Client { + return e.client +} + +// settingsLocked builds the settings map for use in LSP settings RPCs. +// +// e.mu must be held while calling this function. +func (e *Editor) settingsLocked() map[string]interface{} { + env := make(map[string]string) + for k, v := range e.defaultEnv { + env[k] = v + } + for k, v := range e.config.Env { + env[k] = v + } + for k, v := range env { + v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) + env[k] = v + } + + settings := map[string]interface{}{ + "env": env, + + // Use verbose progress reporting so that regtests can assert on + // asynchronous operations being completed (such as diagnosing a snapshot). + "verboseWorkDoneProgress": true, + + // Set a generous completion budget, so that tests don't flake because + // completions are too slow. + "completionBudget": "10s", + + // Shorten the diagnostic delay to speed up test execution (else we'd add + // the default delay to each assertion about diagnostics) + "diagnosticsDelay": "10ms", + } + + for k, v := range e.config.Settings { + if k == "env" { + panic("must not provide env via the EditorConfig.Settings field: use the EditorConfig.Env field instead") + } + settings[k] = v + } + + return settings +} + +func (e *Editor) initialize(ctx context.Context) error { + params := &protocol.ParamInitialize{} + params.ClientInfo.Name = "fakeclient" + params.ClientInfo.Version = "v1.0.0" + e.mu.Lock() + params.WorkspaceFolders = e.makeWorkspaceFoldersLocked() + params.InitializationOptions = e.settingsLocked() + e.mu.Unlock() + params.Capabilities.Workspace.Configuration = true + params.Capabilities.Window.WorkDoneProgress = true + + // TODO: set client capabilities + params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated} + + params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true + params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true + // copied from lsp/semantic.go to avoid import cycle in tests + params.Capabilities.TextDocument.SemanticTokens.TokenTypes = []string{ + "namespace", "type", "class", "enum", "interface", + "struct", "typeParameter", "parameter", "variable", "property", "enumMember", + "event", "function", "method", "macro", "keyword", "modifier", "comment", + "string", "number", "regexp", "operator", + } + params.Capabilities.TextDocument.SemanticTokens.TokenModifiers = []string{ + "declaration", "definition", "readonly", "static", + "deprecated", "abstract", "async", "modification", "documentation", "defaultLibrary", + } + + // This is a bit of a hack, since the fake editor doesn't actually support + // watching changed files that match a specific glob pattern. However, the + // editor does send didChangeWatchedFiles notifications, so set this to + // true. + params.Capabilities.Workspace.DidChangeWatchedFiles.DynamicRegistration = true + params.Capabilities.Workspace.WorkspaceEdit = &protocol.WorkspaceEditClientCapabilities{ + ResourceOperations: []protocol.ResourceOperationKind{ + "rename", + }, + } + + trace := protocol.TraceValues("messages") + params.Trace = &trace + // TODO: support workspace folders. + if e.Server != nil { + resp, err := e.Server.Initialize(ctx, params) + if err != nil { + return fmt.Errorf("initialize: %w", err) + } + e.mu.Lock() + e.serverCapabilities = resp.Capabilities + e.mu.Unlock() + + if err := e.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil { + return fmt.Errorf("initialized: %w", err) + } + } + // TODO: await initial configuration here, or expect gopls to manage that? + return nil +} + +// makeWorkspaceFoldersLocked creates a slice of workspace folders to use for +// this editing session, based on the editor configuration. +// +// e.mu must be held while calling this function. +func (e *Editor) makeWorkspaceFoldersLocked() (folders []protocol.WorkspaceFolder) { + paths := e.config.WorkspaceFolders + if len(paths) == 0 { + paths = append(paths, string(e.sandbox.Workdir.RelativeTo)) + } + + for _, path := range paths { + uri := string(e.sandbox.Workdir.URI(path)) + folders = append(folders, protocol.WorkspaceFolder{ + URI: uri, + Name: filepath.Base(uri), + }) + } + + return folders +} + +// onFileChanges is registered to be called by the Workdir on any writes that +// go through the Workdir API. It is called synchronously by the Workdir. +func (e *Editor) onFileChanges(ctx context.Context, evts []protocol.FileEvent) { + if e.Server == nil { + return + } + + // e may be locked when onFileChanges is called, but it is important that we + // synchronously increment this counter so that we can subsequently assert on + // the number of expected DidChangeWatchedFiles calls. + e.callsMu.Lock() + e.calls.DidChangeWatchedFiles++ + e.callsMu.Unlock() + + // Since e may be locked, we must run this mutation asynchronously. + go func() { + e.mu.Lock() + defer e.mu.Unlock() + for _, evt := range evts { + // Always send an on-disk change, even for events that seem useless + // because they're shadowed by an open buffer. + path := e.sandbox.Workdir.URIToPath(evt.URI) + if buf, ok := e.buffers[path]; ok { + // Following VS Code, don't honor deletions or changes to dirty buffers. + if buf.dirty || evt.Type == protocol.Deleted { + continue + } + + content, err := e.sandbox.Workdir.ReadFile(path) + if err != nil { + continue // A race with some other operation. + } + // No need to update if the buffer content hasn't changed. + if string(content) == buf.text() { + continue + } + // During shutdown, this call will fail. Ignore the error. + _ = e.setBufferContentLocked(ctx, path, false, content, nil) + } + } + var matchedEvts []protocol.FileEvent + for _, evt := range evts { + filename := filepath.ToSlash(evt.URI.SpanURI().Filename()) + for _, g := range e.watchPatterns { + if g.Match(filename) { + matchedEvts = append(matchedEvts, evt) + break + } + } + } + + // TODO(rfindley): don't send notifications while locked. + e.Server.DidChangeWatchedFiles(ctx, &protocol.DidChangeWatchedFilesParams{ + Changes: matchedEvts, + }) + }() +} + +// OpenFile creates a buffer for the given workdir-relative file. +// +// If the file is already open, it is a no-op. +func (e *Editor) OpenFile(ctx context.Context, path string) error { + if e.HasBuffer(path) { + return nil + } + content, err := e.sandbox.Workdir.ReadFile(path) + if err != nil { + return err + } + if e.Config().WindowsLineEndings { + content = toWindowsLineEndings(content) + } + return e.createBuffer(ctx, path, false, content) +} + +// toWindowsLineEndings checks whether content has windows line endings. +// +// If so, it returns content unmodified. If not, it returns a new byte slice modified to use CRLF line endings. +func toWindowsLineEndings(content []byte) []byte { + abnormal := false + for i, b := range content { + if b == '\n' && (i == 0 || content[i-1] != '\r') { + abnormal = true + break + } + } + if !abnormal { + return content + } + var buf bytes.Buffer + for i, b := range content { + if b == '\n' && (i == 0 || content[i-1] != '\r') { + buf.WriteByte('\r') + } + buf.WriteByte(b) + } + return buf.Bytes() +} + +// CreateBuffer creates a new unsaved buffer corresponding to the workdir path, +// containing the given textual content. +func (e *Editor) CreateBuffer(ctx context.Context, path, content string) error { + return e.createBuffer(ctx, path, true, []byte(content)) +} + +func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content []byte) error { + e.mu.Lock() + + if _, ok := e.buffers[path]; ok { + e.mu.Unlock() + return fmt.Errorf("buffer %q already exists", path) + } + + uri := e.sandbox.Workdir.URI(path).SpanURI() + buf := buffer{ + version: 1, + path: path, + mapper: protocol.NewMapper(uri, content), + dirty: dirty, + } + e.buffers[path] = buf + + item := e.textDocumentItem(buf) + e.mu.Unlock() + + return e.sendDidOpen(ctx, item) +} + +// textDocumentItem builds a protocol.TextDocumentItem for the given buffer. +// +// Precondition: e.mu must be held. +func (e *Editor) textDocumentItem(buf buffer) protocol.TextDocumentItem { + return protocol.TextDocumentItem{ + URI: e.sandbox.Workdir.URI(buf.path), + LanguageID: languageID(buf.path, e.config.FileAssociations), + Version: int32(buf.version), + Text: buf.text(), + } +} + +func (e *Editor) sendDidOpen(ctx context.Context, item protocol.TextDocumentItem) error { + if e.Server != nil { + if err := e.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{ + TextDocument: item, + }); err != nil { + return fmt.Errorf("DidOpen: %w", err) + } + e.callsMu.Lock() + e.calls.DidOpen++ + e.callsMu.Unlock() + } + return nil +} + +var defaultFileAssociations = map[string]*regexp.Regexp{ + "go": regexp.MustCompile(`^.*\.go$`), // '$' is important: don't match .gotmpl! + "go.mod": regexp.MustCompile(`^go\.mod$`), + "go.sum": regexp.MustCompile(`^go(\.work)?\.sum$`), + "go.work": regexp.MustCompile(`^go\.work$`), + "gotmpl": regexp.MustCompile(`^.*tmpl$`), +} + +// languageID returns the language identifier for the path p given the user +// configured fileAssociations. +func languageID(p string, fileAssociations map[string]string) string { + base := path.Base(p) + for lang, re := range fileAssociations { + re := regexp.MustCompile(re) + if re.MatchString(base) { + return lang + } + } + for lang, re := range defaultFileAssociations { + if re.MatchString(base) { + return lang + } + } + return "" +} + +// CloseBuffer removes the current buffer (regardless of whether it is saved). +func (e *Editor) CloseBuffer(ctx context.Context, path string) error { + e.mu.Lock() + _, ok := e.buffers[path] + if !ok { + e.mu.Unlock() + return ErrUnknownBuffer + } + delete(e.buffers, path) + e.mu.Unlock() + + return e.sendDidClose(ctx, e.TextDocumentIdentifier(path)) +} + +func (e *Editor) sendDidClose(ctx context.Context, doc protocol.TextDocumentIdentifier) error { + if e.Server != nil { + if err := e.Server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{ + TextDocument: doc, + }); err != nil { + return fmt.Errorf("DidClose: %w", err) + } + e.callsMu.Lock() + e.calls.DidClose++ + e.callsMu.Unlock() + } + return nil +} + +func (e *Editor) TextDocumentIdentifier(path string) protocol.TextDocumentIdentifier { + return protocol.TextDocumentIdentifier{ + URI: e.sandbox.Workdir.URI(path), + } +} + +// SaveBuffer writes the content of the buffer specified by the given path to +// the filesystem. +func (e *Editor) SaveBuffer(ctx context.Context, path string) error { + if err := e.OrganizeImports(ctx, path); err != nil { + return fmt.Errorf("organizing imports before save: %w", err) + } + if err := e.FormatBuffer(ctx, path); err != nil { + return fmt.Errorf("formatting before save: %w", err) + } + return e.SaveBufferWithoutActions(ctx, path) +} + +func (e *Editor) SaveBufferWithoutActions(ctx context.Context, path string) error { + e.mu.Lock() + defer e.mu.Unlock() + buf, ok := e.buffers[path] + if !ok { + return fmt.Errorf(fmt.Sprintf("unknown buffer: %q", path)) + } + content := buf.text() + includeText := false + syncOptions, ok := e.serverCapabilities.TextDocumentSync.(protocol.TextDocumentSyncOptions) + if ok { + includeText = syncOptions.Save.IncludeText + } + + docID := e.TextDocumentIdentifier(buf.path) + if e.Server != nil { + if err := e.Server.WillSave(ctx, &protocol.WillSaveTextDocumentParams{ + TextDocument: docID, + Reason: protocol.Manual, + }); err != nil { + return fmt.Errorf("WillSave: %w", err) + } + } + if err := e.sandbox.Workdir.WriteFile(ctx, path, content); err != nil { + return fmt.Errorf("writing %q: %w", path, err) + } + + buf.dirty = false + e.buffers[path] = buf + + if e.Server != nil { + params := &protocol.DidSaveTextDocumentParams{ + TextDocument: docID, + } + if includeText { + params.Text = &content + } + if err := e.Server.DidSave(ctx, params); err != nil { + return fmt.Errorf("DidSave: %w", err) + } + e.callsMu.Lock() + e.calls.DidSave++ + e.callsMu.Unlock() + } + return nil +} + +// ErrNoMatch is returned if a regexp search fails. +var ( + ErrNoMatch = errors.New("no match") + ErrUnknownBuffer = errors.New("unknown buffer") +) + +// regexpLocation returns the location of the first occurrence of either re +// or its singular subgroup. It returns ErrNoMatch if the regexp doesn't match. +func regexpLocation(mapper *protocol.Mapper, re string) (protocol.Location, error) { + var start, end int + rec, err := regexp.Compile(re) + if err != nil { + return protocol.Location{}, err + } + indexes := rec.FindSubmatchIndex(mapper.Content) + if indexes == nil { + return protocol.Location{}, ErrNoMatch + } + switch len(indexes) { + case 2: + // no subgroups: return the range of the regexp expression + start, end = indexes[0], indexes[1] + case 4: + // one subgroup: return its range + start, end = indexes[2], indexes[3] + default: + return protocol.Location{}, fmt.Errorf("invalid search regexp %q: expect either 0 or 1 subgroups, got %d", re, len(indexes)/2-1) + } + return mapper.OffsetLocation(start, end) +} + +// RegexpSearch returns the Location of the first match for re in the buffer +// bufName. For convenience, RegexpSearch supports the following two modes: +// 1. If re has no subgroups, return the position of the match for re itself. +// 2. If re has one subgroup, return the position of the first subgroup. +// +// It returns an error re is invalid, has more than one subgroup, or doesn't +// match the buffer. +func (e *Editor) RegexpSearch(bufName, re string) (protocol.Location, error) { + e.mu.Lock() + buf, ok := e.buffers[bufName] + e.mu.Unlock() + if !ok { + return protocol.Location{}, ErrUnknownBuffer + } + return regexpLocation(buf.mapper, re) +} + +// RegexpReplace edits the buffer corresponding to path by replacing the first +// instance of re, or its first subgroup, with the replace text. See +// RegexpSearch for more explanation of these two modes. +// It returns an error if re is invalid, has more than one subgroup, or doesn't +// match the buffer. +func (e *Editor) RegexpReplace(ctx context.Context, path, re, replace string) error { + e.mu.Lock() + defer e.mu.Unlock() + buf, ok := e.buffers[path] + if !ok { + return ErrUnknownBuffer + } + loc, err := regexpLocation(buf.mapper, re) + if err != nil { + return err + } + edits := []protocol.TextEdit{{ + Range: loc.Range, + NewText: replace, + }} + patched, err := applyEdits(buf.mapper, edits, e.config.WindowsLineEndings) + if err != nil { + return fmt.Errorf("editing %q: %v", path, err) + } + return e.setBufferContentLocked(ctx, path, true, patched, edits) +} + +// EditBuffer applies the given test edits to the buffer identified by path. +func (e *Editor) EditBuffer(ctx context.Context, path string, edits []protocol.TextEdit) error { + e.mu.Lock() + defer e.mu.Unlock() + return e.editBufferLocked(ctx, path, edits) +} + +func (e *Editor) SetBufferContent(ctx context.Context, path, content string) error { + e.mu.Lock() + defer e.mu.Unlock() + return e.setBufferContentLocked(ctx, path, true, []byte(content), nil) +} + +// HasBuffer reports whether the file name is open in the editor. +func (e *Editor) HasBuffer(name string) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, ok := e.buffers[name] + return ok +} + +// BufferText returns the content of the buffer with the given name, or "" if +// the file at that path is not open. The second return value reports whether +// the file is open. +func (e *Editor) BufferText(name string) (string, bool) { + e.mu.Lock() + defer e.mu.Unlock() + buf, ok := e.buffers[name] + if !ok { + return "", false + } + return buf.text(), true +} + +// Mapper returns the protocol.Mapper for the given buffer name. +// +// If there is no open buffer with that name, it returns nil. +func (e *Editor) Mapper(name string) *protocol.Mapper { + e.mu.Lock() + defer e.mu.Unlock() + buf, ok := e.buffers[name] + if !ok { + return nil + } + return buf.mapper +} + +// BufferVersion returns the current version of the buffer corresponding to +// name (or 0 if it is not being edited). +func (e *Editor) BufferVersion(name string) int { + e.mu.Lock() + defer e.mu.Unlock() + return e.buffers[name].version +} + +func (e *Editor) editBufferLocked(ctx context.Context, path string, edits []protocol.TextEdit) error { + buf, ok := e.buffers[path] + if !ok { + return fmt.Errorf("unknown buffer %q", path) + } + content, err := applyEdits(buf.mapper, edits, e.config.WindowsLineEndings) + if err != nil { + return fmt.Errorf("editing %q: %v; edits:\n%v", path, err, edits) + } + return e.setBufferContentLocked(ctx, path, true, content, edits) +} + +func (e *Editor) setBufferContentLocked(ctx context.Context, path string, dirty bool, content []byte, fromEdits []protocol.TextEdit) error { + buf, ok := e.buffers[path] + if !ok { + return fmt.Errorf("unknown buffer %q", path) + } + buf.mapper = protocol.NewMapper(buf.mapper.URI, content) + buf.version++ + buf.dirty = dirty + e.buffers[path] = buf + // A simple heuristic: if there is only one edit, send it incrementally. + // Otherwise, send the entire content. + var evts []protocol.TextDocumentContentChangeEvent + if len(fromEdits) == 1 { + evts = append(evts, EditToChangeEvent(fromEdits[0])) + } else { + evts = append(evts, protocol.TextDocumentContentChangeEvent{ + Text: buf.text(), + }) + } + params := &protocol.DidChangeTextDocumentParams{ + TextDocument: protocol.VersionedTextDocumentIdentifier{ + Version: int32(buf.version), + TextDocumentIdentifier: e.TextDocumentIdentifier(buf.path), + }, + ContentChanges: evts, + } + if e.Server != nil { + if err := e.Server.DidChange(ctx, params); err != nil { + return fmt.Errorf("DidChange: %w", err) + } + e.callsMu.Lock() + e.calls.DidChange++ + e.callsMu.Unlock() + } + return nil +} + +// GoToDefinition jumps to the definition of the symbol at the given position +// in an open buffer. It returns the location of the resulting jump. +// +// TODO(rfindley): rename to "Definition", to be consistent with LSP +// terminology. +func (e *Editor) GoToDefinition(ctx context.Context, loc protocol.Location) (protocol.Location, error) { + if err := e.checkBufferLocation(loc); err != nil { + return protocol.Location{}, err + } + params := &protocol.DefinitionParams{} + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start + + resp, err := e.Server.Definition(ctx, params) + if err != nil { + return protocol.Location{}, fmt.Errorf("definition: %w", err) + } + return e.extractFirstLocation(ctx, resp) +} + +// GoToTypeDefinition jumps to the type definition of the symbol at the given location +// in an open buffer. +func (e *Editor) GoToTypeDefinition(ctx context.Context, loc protocol.Location) (protocol.Location, error) { + if err := e.checkBufferLocation(loc); err != nil { + return protocol.Location{}, err + } + params := &protocol.TypeDefinitionParams{} + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start + + resp, err := e.Server.TypeDefinition(ctx, params) + if err != nil { + return protocol.Location{}, fmt.Errorf("type definition: %w", err) + } + return e.extractFirstLocation(ctx, resp) +} + +// extractFirstLocation returns the first location. +// It opens the file if needed. +func (e *Editor) extractFirstLocation(ctx context.Context, locs []protocol.Location) (protocol.Location, error) { + if len(locs) == 0 { + return protocol.Location{}, nil + } + + newPath := e.sandbox.Workdir.URIToPath(locs[0].URI) + if !e.HasBuffer(newPath) { + if err := e.OpenFile(ctx, newPath); err != nil { + return protocol.Location{}, fmt.Errorf("OpenFile: %w", err) + } + } + return locs[0], nil +} + +// Symbol performs a workspace symbol search using query +func (e *Editor) Symbol(ctx context.Context, query string) ([]protocol.SymbolInformation, error) { + params := &protocol.WorkspaceSymbolParams{Query: query} + return e.Server.Symbol(ctx, params) +} + +// OrganizeImports requests and performs the source.organizeImports codeAction. +func (e *Editor) OrganizeImports(ctx context.Context, path string) error { + loc := protocol.Location{URI: e.sandbox.Workdir.URI(path)} // zero Range => whole file + _, err := e.applyCodeActions(ctx, loc, nil, protocol.SourceOrganizeImports) + return err +} + +// RefactorRewrite requests and performs the source.refactorRewrite codeAction. +func (e *Editor) RefactorRewrite(ctx context.Context, loc protocol.Location) error { + applied, err := e.applyCodeActions(ctx, loc, nil, protocol.RefactorRewrite) + if err != nil { + return err + } + if applied == 0 { + return fmt.Errorf("no refactorings were applied") + } + return nil +} + +// ApplyQuickFixes requests and performs the quickfix codeAction. +func (e *Editor) ApplyQuickFixes(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) error { + applied, err := e.applyCodeActions(ctx, loc, diagnostics, protocol.SourceFixAll, protocol.QuickFix) + if applied == 0 { + return fmt.Errorf("no quick fixes were applied") + } + return err +} + +// ApplyCodeAction applies the given code action. +func (e *Editor) ApplyCodeAction(ctx context.Context, action protocol.CodeAction) error { + for _, change := range action.Edit.DocumentChanges { + if change.TextDocumentEdit != nil { + path := e.sandbox.Workdir.URIToPath(change.TextDocumentEdit.TextDocument.URI) + if int32(e.buffers[path].version) != change.TextDocumentEdit.TextDocument.Version { + // Skip edits for old versions. + continue + } + if err := e.EditBuffer(ctx, path, change.TextDocumentEdit.Edits); err != nil { + return fmt.Errorf("editing buffer %q: %w", path, err) + } + } + } + // Execute any commands. The specification says that commands are + // executed after edits are applied. + if action.Command != nil { + if _, err := e.ExecuteCommand(ctx, &protocol.ExecuteCommandParams{ + Command: action.Command.Command, + Arguments: action.Command.Arguments, + }); err != nil { + return err + } + } + // Some commands may edit files on disk. + return e.sandbox.Workdir.CheckForFileChanges(ctx) +} + +// GetQuickFixes returns the available quick fix code actions. +func (e *Editor) GetQuickFixes(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) { + return e.getCodeActions(ctx, loc, diagnostics, protocol.QuickFix, protocol.SourceFixAll) +} + +func (e *Editor) applyCodeActions(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) (int, error) { + actions, err := e.getCodeActions(ctx, loc, diagnostics, only...) + if err != nil { + return 0, err + } + applied := 0 + for _, action := range actions { + if action.Title == "" { + return 0, fmt.Errorf("empty title for code action") + } + var match bool + for _, o := range only { + if action.Kind == o { + match = true + break + } + } + if !match { + continue + } + applied++ + if err := e.ApplyCodeAction(ctx, action); err != nil { + return 0, err + } + } + return applied, nil +} + +func (e *Editor) getCodeActions(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) ([]protocol.CodeAction, error) { + if e.Server == nil { + return nil, nil + } + params := &protocol.CodeActionParams{} + params.TextDocument.URI = loc.URI + params.Context.Only = only + params.Range = loc.Range // may be zero => whole file + if diagnostics != nil { + params.Context.Diagnostics = diagnostics + } + return e.Server.CodeAction(ctx, params) +} + +func (e *Editor) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) { + if e.Server == nil { + return nil, nil + } + var match bool + // Ensure that this command was actually listed as a supported command. + for _, command := range e.serverCapabilities.ExecuteCommandProvider.Commands { + if command == params.Command { + match = true + break + } + } + if !match { + return nil, fmt.Errorf("unsupported command %q", params.Command) + } + result, err := e.Server.ExecuteCommand(ctx, params) + if err != nil { + return nil, err + } + // Some commands use the go command, which writes directly to disk. + // For convenience, check for those changes. + if err := e.sandbox.Workdir.CheckForFileChanges(ctx); err != nil { + return nil, err + } + return result, nil +} + +// FormatBuffer gofmts a Go file. +func (e *Editor) FormatBuffer(ctx context.Context, path string) error { + if e.Server == nil { + return nil + } + e.mu.Lock() + version := e.buffers[path].version + e.mu.Unlock() + params := &protocol.DocumentFormattingParams{} + params.TextDocument.URI = e.sandbox.Workdir.URI(path) + edits, err := e.Server.Formatting(ctx, params) + if err != nil { + return fmt.Errorf("textDocument/formatting: %w", err) + } + e.mu.Lock() + defer e.mu.Unlock() + if versionAfter := e.buffers[path].version; versionAfter != version { + return fmt.Errorf("before receipt of formatting edits, buffer version changed from %d to %d", version, versionAfter) + } + if len(edits) == 0 { + return nil + } + return e.editBufferLocked(ctx, path, edits) +} + +func (e *Editor) checkBufferLocation(loc protocol.Location) error { + e.mu.Lock() + defer e.mu.Unlock() + path := e.sandbox.Workdir.URIToPath(loc.URI) + buf, ok := e.buffers[path] + if !ok { + return fmt.Errorf("buffer %q is not open", path) + } + + _, _, err := buf.mapper.RangeOffsets(loc.Range) + return err +} + +// RunGenerate runs `go generate` non-recursively in the workdir-relative dir +// path. It does not report any resulting file changes as a watched file +// change, so must be followed by a call to Workdir.CheckForFileChanges once +// the generate command has completed. +// TODO(rFindley): this shouldn't be necessary anymore. Delete it. +func (e *Editor) RunGenerate(ctx context.Context, dir string) error { + if e.Server == nil { + return nil + } + absDir := e.sandbox.Workdir.AbsPath(dir) + cmd, err := command.NewGenerateCommand("", command.GenerateArgs{ + Dir: protocol.URIFromSpanURI(span.URIFromPath(absDir)), + Recursive: false, + }) + if err != nil { + return err + } + params := &protocol.ExecuteCommandParams{ + Command: cmd.Command, + Arguments: cmd.Arguments, + } + if _, err := e.ExecuteCommand(ctx, params); err != nil { + return fmt.Errorf("running generate: %v", err) + } + // Unfortunately we can't simply poll the workdir for file changes here, + // because server-side command may not have completed. In regtests, we can + // Await this state change, but here we must delegate that responsibility to + // the caller. + return nil +} + +// CodeLens executes a codelens request on the server. +func (e *Editor) CodeLens(ctx context.Context, path string) ([]protocol.CodeLens, error) { + if e.Server == nil { + return nil, nil + } + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.CodeLensParams{ + TextDocument: e.TextDocumentIdentifier(path), + } + lens, err := e.Server.CodeLens(ctx, params) + if err != nil { + return nil, err + } + return lens, nil +} + +// Completion executes a completion request on the server. +func (e *Editor) Completion(ctx context.Context, loc protocol.Location) (*protocol.CompletionList, error) { + if e.Server == nil { + return nil, nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.CompletionParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + completions, err := e.Server.Completion(ctx, params) + if err != nil { + return nil, err + } + return completions, nil +} + +// AcceptCompletion accepts a completion for the given item at the given +// position. +func (e *Editor) AcceptCompletion(ctx context.Context, loc protocol.Location, item protocol.CompletionItem) error { + if e.Server == nil { + return nil + } + e.mu.Lock() + defer e.mu.Unlock() + path := e.sandbox.Workdir.URIToPath(loc.URI) + _, ok := e.buffers[path] + if !ok { + return fmt.Errorf("buffer %q is not open", path) + } + return e.editBufferLocked(ctx, path, append([]protocol.TextEdit{ + *item.TextEdit, + }, item.AdditionalTextEdits...)) +} + +// Symbols executes a workspace/symbols request on the server. +func (e *Editor) Symbols(ctx context.Context, sym string) ([]protocol.SymbolInformation, error) { + if e.Server == nil { + return nil, nil + } + params := &protocol.WorkspaceSymbolParams{Query: sym} + ans, err := e.Server.Symbol(ctx, params) + return ans, err +} + +// CodeLens executes a codelens request on the server. +func (e *Editor) InlayHint(ctx context.Context, path string) ([]protocol.InlayHint, error) { + if e.Server == nil { + return nil, nil + } + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.InlayHintParams{ + TextDocument: e.TextDocumentIdentifier(path), + } + hints, err := e.Server.InlayHint(ctx, params) + if err != nil { + return nil, err + } + return hints, nil +} + +// References returns references to the object at loc, as returned by +// the connected LSP server. If no server is connected, it returns (nil, nil). +func (e *Editor) References(ctx context.Context, loc protocol.Location) ([]protocol.Location, error) { + if e.Server == nil { + return nil, nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.ReferenceParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + Context: protocol.ReferenceContext{ + IncludeDeclaration: true, + }, + } + locations, err := e.Server.References(ctx, params) + if err != nil { + return nil, err + } + return locations, nil +} + +// Rename performs a rename of the object at loc to newName, using the +// connected LSP server. If no server is connected, it returns nil. +func (e *Editor) Rename(ctx context.Context, loc protocol.Location, newName string) error { + if e.Server == nil { + return nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + + // Verify that PrepareRename succeeds. + prepareParams := &protocol.PrepareRenameParams{} + prepareParams.TextDocument = e.TextDocumentIdentifier(path) + prepareParams.Position = loc.Range.Start + if _, err := e.Server.PrepareRename(ctx, prepareParams); err != nil { + return fmt.Errorf("preparing rename: %v", err) + } + + params := &protocol.RenameParams{ + TextDocument: e.TextDocumentIdentifier(path), + Position: loc.Range.Start, + NewName: newName, + } + wsEdits, err := e.Server.Rename(ctx, params) + if err != nil { + return err + } + for _, change := range wsEdits.DocumentChanges { + if err := e.applyDocumentChange(ctx, change); err != nil { + return err + } + } + return nil +} + +// Implementations returns implementations for the object at loc, as +// returned by the connected LSP server. If no server is connected, it returns +// (nil, nil). +func (e *Editor) Implementations(ctx context.Context, loc protocol.Location) ([]protocol.Location, error) { + if e.Server == nil { + return nil, nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.ImplementationParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + return e.Server.Implementation(ctx, params) +} + +func (e *Editor) SignatureHelp(ctx context.Context, loc protocol.Location) (*protocol.SignatureHelp, error) { + if e.Server == nil { + return nil, nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.SignatureHelpParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + return e.Server.SignatureHelp(ctx, params) +} + +func (e *Editor) RenameFile(ctx context.Context, oldPath, newPath string) error { + closed, opened, err := e.renameBuffers(ctx, oldPath, newPath) + if err != nil { + return err + } + + for _, c := range closed { + if err := e.sendDidClose(ctx, c); err != nil { + return err + } + } + for _, o := range opened { + if err := e.sendDidOpen(ctx, o); err != nil { + return err + } + } + + // Finally, perform the renaming on disk. + if err := e.sandbox.Workdir.RenameFile(ctx, oldPath, newPath); err != nil { + return fmt.Errorf("renaming sandbox file: %w", err) + } + return nil +} + +// renameBuffers renames in-memory buffers affected by the renaming of +// oldPath->newPath, returning the resulting text documents that must be closed +// and opened over the LSP. +func (e *Editor) renameBuffers(ctx context.Context, oldPath, newPath string) (closed []protocol.TextDocumentIdentifier, opened []protocol.TextDocumentItem, _ error) { + e.mu.Lock() + defer e.mu.Unlock() + + // In case either oldPath or newPath is absolute, convert to absolute paths + // before checking for containment. + oldAbs := e.sandbox.Workdir.AbsPath(oldPath) + newAbs := e.sandbox.Workdir.AbsPath(newPath) + + // Collect buffers that are affected by the given file or directory renaming. + buffersToRename := make(map[string]string) // old path -> new path + + for path := range e.buffers { + abs := e.sandbox.Workdir.AbsPath(path) + if oldAbs == abs || source.InDir(oldAbs, abs) { + rel, err := filepath.Rel(oldAbs, abs) + if err != nil { + return nil, nil, fmt.Errorf("filepath.Rel(%q, %q): %v", oldAbs, abs, err) + } + nabs := filepath.Join(newAbs, rel) + newPath := e.sandbox.Workdir.RelPath(nabs) + buffersToRename[path] = newPath + } + } + + // Update buffers, and build protocol changes. + for old, new := range buffersToRename { + buf := e.buffers[old] + delete(e.buffers, old) + buf.version = 1 + buf.path = new + e.buffers[new] = buf + + closed = append(closed, e.TextDocumentIdentifier(old)) + opened = append(opened, e.textDocumentItem(buf)) + } + + return closed, opened, nil +} + +func (e *Editor) applyDocumentChange(ctx context.Context, change protocol.DocumentChanges) error { + if change.RenameFile != nil { + oldPath := e.sandbox.Workdir.URIToPath(change.RenameFile.OldURI) + newPath := e.sandbox.Workdir.URIToPath(change.RenameFile.NewURI) + + return e.RenameFile(ctx, oldPath, newPath) + } + if change.TextDocumentEdit != nil { + return e.applyTextDocumentEdit(ctx, *change.TextDocumentEdit) + } + panic("Internal error: one of RenameFile or TextDocumentEdit must be set") +} + +func (e *Editor) applyTextDocumentEdit(ctx context.Context, change protocol.TextDocumentEdit) error { + path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI) + if ver := int32(e.BufferVersion(path)); ver != change.TextDocument.Version { + return fmt.Errorf("buffer versions for %q do not match: have %d, editing %d", path, ver, change.TextDocument.Version) + } + if !e.HasBuffer(path) { + err := e.OpenFile(ctx, path) + if os.IsNotExist(err) { + // TODO: it's unclear if this is correct. Here we create the buffer (with + // version 1), then apply edits. Perhaps we should apply the edits before + // sending the didOpen notification. + e.CreateBuffer(ctx, path, "") + err = nil + } + if err != nil { + return err + } + } + return e.EditBuffer(ctx, path, change.Edits) +} + +// Config returns the current editor configuration. +func (e *Editor) Config() EditorConfig { + e.mu.Lock() + defer e.mu.Unlock() + return e.config +} + +// ChangeConfiguration sets the new editor configuration, and if applicable +// sends a didChangeConfiguration notification. +// +// An error is returned if the change notification failed to send. +func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig) error { + e.mu.Lock() + e.config = newConfig + e.mu.Unlock() // don't hold e.mu during server calls + if e.Server != nil { + var params protocol.DidChangeConfigurationParams // empty: gopls ignores the Settings field + if err := e.Server.DidChangeConfiguration(ctx, ¶ms); err != nil { + return err + } + } + return nil +} + +// ChangeWorkspaceFolders sets the new workspace folders, and sends a +// didChangeWorkspaceFolders notification to the server. +// +// The given folders must all be unique. +func (e *Editor) ChangeWorkspaceFolders(ctx context.Context, folders []string) error { + // capture existing folders so that we can compute the change. + e.mu.Lock() + oldFolders := e.makeWorkspaceFoldersLocked() + e.config.WorkspaceFolders = folders + newFolders := e.makeWorkspaceFoldersLocked() + e.mu.Unlock() + + if e.Server == nil { + return nil + } + + var params protocol.DidChangeWorkspaceFoldersParams + + // Keep track of old workspace folders that must be removed. + toRemove := make(map[protocol.URI]protocol.WorkspaceFolder) + for _, folder := range oldFolders { + toRemove[folder.URI] = folder + } + + // Sanity check: if we see a folder twice the algorithm below doesn't work, + // so track seen folders to ensure that we panic in that case. + seen := make(map[protocol.URI]protocol.WorkspaceFolder) + for _, folder := range newFolders { + if _, ok := seen[folder.URI]; ok { + panic(fmt.Sprintf("folder %s seen twice", folder.URI)) + } + + // If this folder already exists, we don't want to remove it. + // Otherwise, we need to add it. + if _, ok := toRemove[folder.URI]; ok { + delete(toRemove, folder.URI) + } else { + params.Event.Added = append(params.Event.Added, folder) + } + } + + for _, v := range toRemove { + params.Event.Removed = append(params.Event.Removed, v) + } + + return e.Server.DidChangeWorkspaceFolders(ctx, ¶ms) +} + +// CodeAction executes a codeAction request on the server. +// If loc.Range is zero, the whole file is implied. +func (e *Editor) CodeAction(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) { + if e.Server == nil { + return nil, nil + } + path := e.sandbox.Workdir.URIToPath(loc.URI) + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.CodeActionParams{ + TextDocument: e.TextDocumentIdentifier(path), + Context: protocol.CodeActionContext{ + Diagnostics: diagnostics, + }, + Range: loc.Range, // may be zero + } + lens, err := e.Server.CodeAction(ctx, params) + if err != nil { + return nil, err + } + return lens, nil +} + +// Hover triggers a hover at the given position in an open buffer. +func (e *Editor) Hover(ctx context.Context, loc protocol.Location) (*protocol.MarkupContent, protocol.Location, error) { + if err := e.checkBufferLocation(loc); err != nil { + return nil, protocol.Location{}, err + } + params := &protocol.HoverParams{} + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start + + resp, err := e.Server.Hover(ctx, params) + if err != nil { + return nil, protocol.Location{}, fmt.Errorf("hover: %w", err) + } + if resp == nil { + return nil, protocol.Location{}, nil + } + return &resp.Contents, protocol.Location{URI: loc.URI, Range: resp.Range}, nil +} + +func (e *Editor) DocumentLink(ctx context.Context, path string) ([]protocol.DocumentLink, error) { + if e.Server == nil { + return nil, nil + } + params := &protocol.DocumentLinkParams{} + params.TextDocument.URI = e.sandbox.Workdir.URI(path) + return e.Server.DocumentLink(ctx, params) +} + +func (e *Editor) DocumentHighlight(ctx context.Context, loc protocol.Location) ([]protocol.DocumentHighlight, error) { + if e.Server == nil { + return nil, nil + } + if err := e.checkBufferLocation(loc); err != nil { + return nil, err + } + params := &protocol.DocumentHighlightParams{} + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start + + return e.Server.DocumentHighlight(ctx, params) +} diff --git a/gopls/internal/lsp/fake/editor_test.go b/gopls/internal/lsp/fake/editor_test.go new file mode 100644 index 00000000000..cc8a14744d2 --- /dev/null +++ b/gopls/internal/lsp/fake/editor_test.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "context" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +const exampleProgram = ` +-- go.mod -- +go 1.12 +-- main.go -- +package main + +import "fmt" + +func main() { + fmt.Println("Hello World.") +} +` + +func TestClientEditing(t *testing.T) { + ws, err := NewSandbox(&SandboxConfig{Files: UnpackTxt(exampleProgram)}) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + ctx := context.Background() + editor := NewEditor(ws, EditorConfig{}) + if err := editor.OpenFile(ctx, "main.go"); err != nil { + t.Fatal(err) + } + if err := editor.EditBuffer(ctx, "main.go", []protocol.TextEdit{ + { + Range: protocol.Range{ + Start: protocol.Position{Line: 5, Character: 14}, + End: protocol.Position{Line: 5, Character: 26}, + }, + NewText: "Hola, mundo.", + }, + }); err != nil { + t.Fatal(err) + } + got := editor.buffers["main.go"].text() + want := `package main + +import "fmt" + +func main() { + fmt.Println("Hola, mundo.") +} +` + if got != want { + t.Errorf("got text %q, want %q", got, want) + } +} diff --git a/internal/lsp/fake/proxy.go b/gopls/internal/lsp/fake/proxy.go similarity index 100% rename from internal/lsp/fake/proxy.go rename to gopls/internal/lsp/fake/proxy.go diff --git a/internal/lsp/fake/sandbox.go b/gopls/internal/lsp/fake/sandbox.go similarity index 87% rename from internal/lsp/fake/sandbox.go rename to gopls/internal/lsp/fake/sandbox.go index b4395646bc6..018bace3b30 100644 --- a/internal/lsp/fake/sandbox.go +++ b/gopls/internal/lsp/fake/sandbox.go @@ -14,6 +14,7 @@ import ( "strings" "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/robustio" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/txtar" ) @@ -21,10 +22,11 @@ import ( // Sandbox holds a collection of temporary resources to use for working with Go // code in tests. type Sandbox struct { - gopath string - rootdir string - goproxy string - Workdir *Workdir + gopath string + rootdir string + goproxy string + Workdir *Workdir + goCommandRunner gocommand.Runner } // SandboxConfig controls the behavior of a test sandbox. The zero value @@ -68,6 +70,10 @@ type SandboxConfig struct { // If rootDir is non-empty, it will be used as the root of temporary // directories created for the sandbox. Otherwise, a new temporary directory // will be used as root. +// +// TODO(rfindley): the sandbox abstraction doesn't seem to carry its weight. +// Sandboxes should be composed out of their building-blocks, rather than via a +// monolithic configuration. func NewSandbox(config *SandboxConfig) (_ *Sandbox, err error) { if config == nil { config = new(SandboxConfig) @@ -146,7 +152,7 @@ func Tempdir(files map[string][]byte) (string, error) { return "", err } for name, data := range files { - if err := WriteFileData(name, data, RelativeTo(dir)); err != nil { + if err := writeFileData(name, data, RelativeTo(dir)); err != nil { return "", fmt.Errorf("writing to tempdir: %w", err) } } @@ -157,6 +163,9 @@ func UnpackTxt(txt string) map[string][]byte { dataMap := make(map[string][]byte) archive := txtar.Parse([]byte(txt)) for _, f := range archive.Files { + if _, ok := dataMap[f.Name]; ok { + panic(fmt.Sprintf("found file %q twice", f.Name)) + } dataMap[f.Name] = f.Data } return dataMap @@ -220,30 +229,36 @@ func (sb *Sandbox) GoEnv() map[string]string { return vars } -// RunGoCommand executes a go command in the sandbox. If checkForFileChanges is -// true, the sandbox scans the working directory and emits file change events -// for any file changes it finds. -func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []string, checkForFileChanges bool) error { +// goCommandInvocation returns a new gocommand.Invocation initialized with the +// sandbox environment variables and working directory. +func (sb *Sandbox) goCommandInvocation() gocommand.Invocation { var vars []string for k, v := range sb.GoEnv() { vars = append(vars, fmt.Sprintf("%s=%s", k, v)) } inv := gocommand.Invocation{ - Verb: verb, - Args: args, - Env: vars, + Env: vars, } - // Use the provided directory for the working directory, if available. // sb.Workdir may be nil if we exited the constructor with errors (we call // Close to clean up any partial state from the constructor, which calls // RunGoCommand). + if sb.Workdir != nil { + inv.WorkingDir = string(sb.Workdir.RelativeTo) + } + return inv +} + +// RunGoCommand executes a go command in the sandbox. If checkForFileChanges is +// true, the sandbox scans the working directory and emits file change events +// for any file changes it finds. +func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []string, checkForFileChanges bool) error { + inv := sb.goCommandInvocation() + inv.Verb = verb + inv.Args = args if dir != "" { inv.WorkingDir = sb.Workdir.AbsPath(dir) - } else if sb.Workdir != nil { - inv.WorkingDir = string(sb.Workdir.RelativeTo) } - gocmdRunner := &gocommand.Runner{} - stdout, stderr, _, err := gocmdRunner.RunRaw(ctx, inv) + stdout, stderr, _, err := sb.goCommandRunner.RunRaw(ctx, inv) if err != nil { return fmt.Errorf("go command failed (stdout: %s) (stderr: %s): %v", stdout.String(), stderr.String(), err) } @@ -260,13 +275,20 @@ func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []st return nil } +// GoVersion checks the version of the go command. +// It returns the X in Go 1.X. +func (sb *Sandbox) GoVersion(ctx context.Context) (int, error) { + inv := sb.goCommandInvocation() + return gocommand.GoVersion(ctx, inv, &sb.goCommandRunner) +} + // Close removes all state associated with the sandbox. func (sb *Sandbox) Close() error { var goCleanErr error if sb.gopath != "" { goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false) } - err := os.RemoveAll(sb.rootdir) + err := robustio.RemoveAll(sb.rootdir) if err != nil || goCleanErr != nil { return fmt.Errorf("error(s) cleaning sandbox: cleaning modcache: %v; removing files: %v", goCleanErr, err) } diff --git a/gopls/internal/lsp/fake/workdir.go b/gopls/internal/lsp/fake/workdir.go new file mode 100644 index 00000000000..97d70b9cafb --- /dev/null +++ b/gopls/internal/lsp/fake/workdir.go @@ -0,0 +1,441 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/robustio" +) + +// RelativeTo is a helper for operations relative to a given directory. +type RelativeTo string + +// AbsPath returns an absolute filesystem path for the workdir-relative path. +func (r RelativeTo) AbsPath(path string) string { + fp := filepath.FromSlash(path) + if filepath.IsAbs(fp) { + return fp + } + return filepath.Join(string(r), filepath.FromSlash(path)) +} + +// RelPath returns a '/'-encoded path relative to the working directory (or an +// absolute path if the file is outside of workdir) +func (r RelativeTo) RelPath(fp string) string { + root := string(r) + if rel, err := filepath.Rel(root, fp); err == nil && !strings.HasPrefix(rel, "..") { + return filepath.ToSlash(rel) + } + return filepath.ToSlash(fp) +} + +// writeFileData writes content to the relative path, replacing the special +// token $SANDBOX_WORKDIR with the relative root given by rel. It does not +// trigger any file events. +func writeFileData(path string, content []byte, rel RelativeTo) error { + content = bytes.ReplaceAll(content, []byte("$SANDBOX_WORKDIR"), []byte(rel)) + fp := rel.AbsPath(path) + if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil { + return fmt.Errorf("creating nested directory: %w", err) + } + backoff := 1 * time.Millisecond + for { + err := ioutil.WriteFile(fp, []byte(content), 0644) + if err != nil { + // This lock file violation is not handled by the robustio package, as it + // indicates a real race condition that could be avoided. + if isWindowsErrLockViolation(err) { + time.Sleep(backoff) + backoff *= 2 + continue + } + return fmt.Errorf("writing %q: %w", path, err) + } + return nil + } +} + +// isWindowsErrLockViolation reports whether err is ERROR_LOCK_VIOLATION +// on Windows. +var isWindowsErrLockViolation = func(err error) bool { return false } + +// Workdir is a temporary working directory for tests. It exposes file +// operations in terms of relative paths, and fakes file watching by triggering +// events on file operations. +type Workdir struct { + RelativeTo + + watcherMu sync.Mutex + watchers []func(context.Context, []protocol.FileEvent) + + fileMu sync.Mutex + // File identities we know about, for the purpose of detecting changes. + // + // Since files is only used for detecting _changes_, we are tolerant of + // fileIDs that may have hash and mtime coming from different states of the + // file: if either are out of sync, then the next poll should detect a + // discrepancy. It is OK if we detect too many changes, but not OK if we miss + // changes. + // + // For that matter, this mechanism for detecting changes can still be flaky + // on platforms where mtime is very coarse (such as older versions of WSL). + // It would be much better to use a proper fs event library, but we can't + // currently import those into x/tools. + // + // TODO(golang/go#52284): replace this polling mechanism with a + // cross-platform library for filesystem notifications. + files map[string]fileID +} + +// fileID is a file identity for the purposes of detecting on-disk +// modifications. +type fileID struct { + hash string + mtime time.Time +} + +// NewWorkdir writes the txtar-encoded file data in txt to dir, and returns a +// Workir for operating on these files using +func NewWorkdir(dir string) *Workdir { + return &Workdir{RelativeTo: RelativeTo(dir)} +} + +func hashFile(data []byte) string { + return fmt.Sprintf("%x", sha256.Sum256(data)) +} + +func (w *Workdir) writeInitialFiles(files map[string][]byte) error { + w.files = map[string]fileID{} + for name, data := range files { + if err := writeFileData(name, data, w.RelativeTo); err != nil { + return fmt.Errorf("writing to workdir: %w", err) + } + fp := w.AbsPath(name) + + // We need the mtime of the file just written for the purposes of tracking + // file identity. Calling Stat here could theoretically return an mtime + // that is inconsistent with the file contents represented by the hash, but + // since we "own" this file we assume that the mtime is correct. + // + // Furthermore, see the documentation for Workdir.files for why mismatches + // between identifiers are considered to be benign. + fi, err := os.Stat(fp) + if err != nil { + return fmt.Errorf("reading file info: %v", err) + } + + w.files[name] = fileID{ + hash: hashFile(data), + mtime: fi.ModTime(), + } + } + return nil +} + +// RootURI returns the root URI for this working directory of this scratch +// environment. +func (w *Workdir) RootURI() protocol.DocumentURI { + return toURI(string(w.RelativeTo)) +} + +// AddWatcher registers the given func to be called on any file change. +func (w *Workdir) AddWatcher(watcher func(context.Context, []protocol.FileEvent)) { + w.watcherMu.Lock() + w.watchers = append(w.watchers, watcher) + w.watcherMu.Unlock() +} + +// URI returns the URI to a the workdir-relative path. +func (w *Workdir) URI(path string) protocol.DocumentURI { + return toURI(w.AbsPath(path)) +} + +// URIToPath converts a uri to a workdir-relative path (or an absolute path, +// if the uri is outside of the workdir). +func (w *Workdir) URIToPath(uri protocol.DocumentURI) string { + fp := uri.SpanURI().Filename() + return w.RelPath(fp) +} + +func toURI(fp string) protocol.DocumentURI { + return protocol.DocumentURI(span.URIFromPath(fp)) +} + +// ReadFile reads a text file specified by a workdir-relative path. +func (w *Workdir) ReadFile(path string) ([]byte, error) { + backoff := 1 * time.Millisecond + for { + b, err := ioutil.ReadFile(w.AbsPath(path)) + if err != nil { + if runtime.GOOS == "plan9" && strings.HasSuffix(err.Error(), " exclusive use file already open") { + // Plan 9 enforces exclusive access to locked files. + // Give the owner time to unlock it and retry. + time.Sleep(backoff) + backoff *= 2 + continue + } + return nil, err + } + return b, nil + } +} + +// RegexpSearch searches the file corresponding to path for the first position +// matching re. +func (w *Workdir) RegexpSearch(path string, re string) (protocol.Location, error) { + content, err := w.ReadFile(path) + if err != nil { + return protocol.Location{}, err + } + mapper := protocol.NewMapper(w.URI(path).SpanURI(), content) + return regexpLocation(mapper, re) +} + +// RemoveFile removes a workdir-relative file path and notifies watchers of the +// change. +func (w *Workdir) RemoveFile(ctx context.Context, path string) error { + fp := w.AbsPath(path) + if err := robustio.RemoveAll(fp); err != nil { + return fmt.Errorf("removing %q: %w", path, err) + } + + return w.CheckForFileChanges(ctx) +} + +// WriteFiles writes the text file content to workdir-relative paths and +// notifies watchers of the changes. +func (w *Workdir) WriteFiles(ctx context.Context, files map[string]string) error { + for path, content := range files { + fp := w.AbsPath(path) + _, err := os.Stat(fp) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("checking if %q exists: %w", path, err) + } + if err := writeFileData(path, []byte(content), w.RelativeTo); err != nil { + return err + } + } + return w.CheckForFileChanges(ctx) +} + +// WriteFile writes text file content to a workdir-relative path and notifies +// watchers of the change. +func (w *Workdir) WriteFile(ctx context.Context, path, content string) error { + return w.WriteFiles(ctx, map[string]string{path: content}) +} + +func (w *Workdir) fileEvent(path string, changeType protocol.FileChangeType) protocol.FileEvent { + return protocol.FileEvent{ + URI: w.URI(path), + Type: changeType, + } +} + +// RenameFile performs an on disk-renaming of the workdir-relative oldPath to +// workdir-relative newPath, and notifies watchers of the changes. +// +// oldPath must either be a regular file or in the same directory as newPath. +func (w *Workdir) RenameFile(ctx context.Context, oldPath, newPath string) error { + oldAbs := w.AbsPath(oldPath) + newAbs := w.AbsPath(newPath) + + // For os.Rename, ā€œOS-specific restrictions may apply when oldpath and newpath + // are in different directories.ā€ If that applies here, we may fall back to + // ReadFile, WriteFile, and RemoveFile to perform the rename non-atomically. + // + // However, the fallback path only works for regular files: renaming a + // directory would be much more complex and isn't needed for our tests. + fallbackOk := false + if filepath.Dir(oldAbs) != filepath.Dir(newAbs) { + fi, err := os.Stat(oldAbs) + if err == nil && !fi.Mode().IsRegular() { + return &os.PathError{ + Op: "RenameFile", + Path: oldPath, + Err: fmt.Errorf("%w: file is not regular and not in the same directory as %s", os.ErrInvalid, newPath), + } + } + fallbackOk = true + } + + var renameErr error + const debugFallback = false + if fallbackOk && debugFallback { + renameErr = fmt.Errorf("%w: debugging fallback path", os.ErrInvalid) + } else { + renameErr = robustio.Rename(oldAbs, newAbs) + } + if renameErr != nil { + if !fallbackOk { + return renameErr // The OS-specific Rename restrictions do not apply. + } + + content, err := w.ReadFile(oldPath) + if err != nil { + // If we can't even read the file, the error from Rename may be accurate. + return renameErr + } + fi, err := os.Stat(newAbs) + if err == nil { + if fi.IsDir() { + // ā€œIf newpath already exists and is not a directory, Rename replaces it.ā€ + // But if it is a directory, maybe not? + return renameErr + } + // On most platforms, Rename replaces the named file with a new file, + // rather than overwriting the existing file it in place. Mimic that + // behavior here. + if err := robustio.RemoveAll(newAbs); err != nil { + // Maybe we don't have permission to replace newPath? + return renameErr + } + } else if !os.IsNotExist(err) { + // If the destination path already exists or there is some problem with it, + // the error from Rename may be accurate. + return renameErr + } + if writeErr := writeFileData(newPath, []byte(content), w.RelativeTo); writeErr != nil { + // At this point we have tried to actually write the file. + // If it still doesn't exist, assume that the error from Rename was accurate: + // for example, maybe we don't have permission to create the new path. + // Otherwise, return the error from the write, which may indicate some + // other problem (such as a full disk). + if _, statErr := os.Stat(newAbs); !os.IsNotExist(statErr) { + return writeErr + } + return renameErr + } + if err := robustio.RemoveAll(oldAbs); err != nil { + // If we failed to remove the old file, that may explain the Rename error too. + // Make a best effort to back out the write to the new path. + robustio.RemoveAll(newAbs) + return renameErr + } + } + + return w.CheckForFileChanges(ctx) +} + +// ListFiles returns a new sorted list of the relative paths of files in dir, +// recursively. +func (w *Workdir) ListFiles(dir string) ([]string, error) { + m, err := w.listFiles(dir) + if err != nil { + return nil, err + } + + var paths []string + for p := range m { + paths = append(paths, p) + } + sort.Strings(paths) + return paths, nil +} + +// listFiles lists files in the given directory, returning a map of relative +// path to contents and modification time. +func (w *Workdir) listFiles(dir string) (map[string]fileID, error) { + files := make(map[string]fileID) + absDir := w.AbsPath(dir) + if err := filepath.Walk(absDir, func(fp string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + path := w.RelPath(fp) + + data, err := ioutil.ReadFile(fp) + if err != nil { + return err + } + // The content returned by ioutil.ReadFile could be inconsistent with + // info.ModTime(), due to a subsequent modification. See the documentation + // for w.files for why we consider this to be benign. + files[path] = fileID{ + hash: hashFile(data), + mtime: info.ModTime(), + } + return nil + }); err != nil { + return nil, err + } + return files, nil +} + +// CheckForFileChanges walks the working directory and checks for any files +// that have changed since the last poll. +func (w *Workdir) CheckForFileChanges(ctx context.Context) error { + evts, err := w.pollFiles() + if err != nil { + return err + } + if len(evts) == 0 { + return nil + } + w.watcherMu.Lock() + watchers := make([]func(context.Context, []protocol.FileEvent), len(w.watchers)) + copy(watchers, w.watchers) + w.watcherMu.Unlock() + for _, w := range watchers { + w(ctx, evts) + } + return nil +} + +// pollFiles updates w.files and calculates FileEvents corresponding to file +// state changes since the last poll. It does not call sendEvents. +func (w *Workdir) pollFiles() ([]protocol.FileEvent, error) { + w.fileMu.Lock() + defer w.fileMu.Unlock() + + files, err := w.listFiles(".") + if err != nil { + return nil, err + } + var evts []protocol.FileEvent + // Check which files have been added or modified. + for path, id := range files { + oldID, ok := w.files[path] + delete(w.files, path) + var typ protocol.FileChangeType + switch { + case !ok: + typ = protocol.Created + case oldID != id: + typ = protocol.Changed + default: + continue + } + evts = append(evts, protocol.FileEvent{ + URI: w.URI(path), + Type: typ, + }) + } + // Any remaining files must have been deleted. + for path := range w.files { + evts = append(evts, protocol.FileEvent{ + URI: w.URI(path), + Type: protocol.Deleted, + }) + } + w.files = files + return evts, nil +} diff --git a/gopls/internal/lsp/fake/workdir_test.go b/gopls/internal/lsp/fake/workdir_test.go new file mode 100644 index 00000000000..d036658ef2d --- /dev/null +++ b/gopls/internal/lsp/fake/workdir_test.go @@ -0,0 +1,250 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "context" + "io/ioutil" + "os" + "sort" + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +const sharedData = ` +-- go.mod -- +go 1.12 +-- nested/README.md -- +Hello World! +` + +// newWorkdir sets up a temporary Workdir with the given txtar-encoded content. +// It also configures an eventBuffer to receive file event notifications. These +// notifications are sent synchronously for each operation, such that once a +// workdir file operation has returned the caller can expect that any relevant +// file notifications are present in the buffer. +// +// It is the caller's responsibility to call the returned cleanup function. +func newWorkdir(t *testing.T, txt string) (*Workdir, *eventBuffer, func()) { + t.Helper() + + tmpdir, err := ioutil.TempDir("", "goplstest-workdir-") + if err != nil { + t.Fatal(err) + } + wd := NewWorkdir(tmpdir) + if err := wd.writeInitialFiles(UnpackTxt(txt)); err != nil { + t.Fatal(err) + } + cleanup := func() { + if err := os.RemoveAll(tmpdir); err != nil { + t.Error(err) + } + } + + buf := new(eventBuffer) + wd.AddWatcher(buf.onEvents) + return wd, buf, cleanup +} + +// eventBuffer collects events from a file watcher. +type eventBuffer struct { + mu sync.Mutex + events []protocol.FileEvent +} + +// onEvents collects adds events to the buffer; to be used with Workdir.AddWatcher. +func (c *eventBuffer) onEvents(_ context.Context, events []protocol.FileEvent) { + c.mu.Lock() + defer c.mu.Unlock() + + c.events = append(c.events, events...) +} + +// take empties the buffer, returning its previous contents. +func (c *eventBuffer) take() []protocol.FileEvent { + c.mu.Lock() + defer c.mu.Unlock() + + evts := c.events + c.events = nil + return evts +} + +func TestWorkdir_ReadFile(t *testing.T) { + wd, _, cleanup := newWorkdir(t, sharedData) + defer cleanup() + + got, err := wd.ReadFile("nested/README.md") + if err != nil { + t.Fatal(err) + } + want := "Hello World!\n" + if got := string(got); got != want { + t.Errorf("reading workdir file, got %q, want %q", got, want) + } +} + +func TestWorkdir_WriteFile(t *testing.T) { + wd, events, cleanup := newWorkdir(t, sharedData) + defer cleanup() + ctx := context.Background() + + tests := []struct { + path string + wantType protocol.FileChangeType + }{ + {"data.txt", protocol.Created}, + {"nested/README.md", protocol.Changed}, + } + + for _, test := range tests { + if err := wd.WriteFile(ctx, test.path, "42"); err != nil { + t.Fatal(err) + } + es := events.take() + if got := len(es); got != 1 { + t.Fatalf("len(events) = %d, want 1", got) + } + path := wd.URIToPath(es[0].URI) + if path != test.path { + t.Errorf("event path = %q, want %q", path, test.path) + } + if es[0].Type != test.wantType { + t.Errorf("event type = %v, want %v", es[0].Type, test.wantType) + } + got, err := wd.ReadFile(test.path) + if err != nil { + t.Fatal(err) + } + want := "42" + if got := string(got); got != want { + t.Errorf("ws.ReadFile(%q) = %q, want %q", test.path, got, want) + } + } +} + +// Test for file notifications following file operations. +func TestWorkdir_FileWatching(t *testing.T) { + wd, events, cleanup := newWorkdir(t, "") + defer cleanup() + ctx := context.Background() + + must := func(err error) { + if err != nil { + t.Fatal(err) + } + } + + type changeMap map[string]protocol.FileChangeType + checkEvent := func(wantChanges changeMap) { + gotChanges := make(changeMap) + for _, e := range events.take() { + gotChanges[wd.URIToPath(e.URI)] = e.Type + } + if diff := cmp.Diff(wantChanges, gotChanges); diff != "" { + t.Errorf("mismatching file events (-want +got):\n%s", diff) + } + } + + must(wd.WriteFile(ctx, "foo.go", "package foo")) + checkEvent(changeMap{"foo.go": protocol.Created}) + + must(wd.RenameFile(ctx, "foo.go", "bar.go")) + checkEvent(changeMap{"foo.go": protocol.Deleted, "bar.go": protocol.Created}) + + must(wd.RemoveFile(ctx, "bar.go")) + checkEvent(changeMap{"bar.go": protocol.Deleted}) +} + +func TestWorkdir_ListFiles(t *testing.T) { + wd, _, cleanup := newWorkdir(t, sharedData) + defer cleanup() + + checkFiles := func(dir string, want []string) { + files, err := wd.listFiles(dir) + if err != nil { + t.Fatal(err) + } + sort.Strings(want) + var got []string + for p := range files { + got = append(got, p) + } + sort.Strings(got) + if len(got) != len(want) { + t.Fatalf("ListFiles(): len = %d, want %d; got=%v; want=%v", len(got), len(want), got, want) + } + for i, f := range got { + if f != want[i] { + t.Errorf("ListFiles()[%d] = %s, want %s", i, f, want[i]) + } + } + } + + checkFiles(".", []string{"go.mod", "nested/README.md"}) + checkFiles("nested", []string{"nested/README.md"}) +} + +func TestWorkdir_CheckForFileChanges(t *testing.T) { + t.Skip("broken on darwin-amd64-10_12") + wd, events, cleanup := newWorkdir(t, sharedData) + defer cleanup() + ctx := context.Background() + + checkChange := func(wantPath string, wantType protocol.FileChangeType) { + if err := wd.CheckForFileChanges(ctx); err != nil { + t.Fatal(err) + } + ev := events.take() + if len(ev) == 0 { + t.Fatal("no file events received") + } + gotEvt := ev[0] + gotPath := wd.URIToPath(gotEvt.URI) + // Only check relative path and Type + if gotPath != wantPath || gotEvt.Type != wantType { + t.Errorf("file events: got %v, want {Path: %s, Type: %v}", gotEvt, wantPath, wantType) + } + } + // Sleep some positive amount of time to ensure a distinct mtime. + if err := writeFileData("go.mod", []byte("module foo.test\n"), wd.RelativeTo); err != nil { + t.Fatal(err) + } + checkChange("go.mod", protocol.Changed) + if err := writeFileData("newFile", []byte("something"), wd.RelativeTo); err != nil { + t.Fatal(err) + } + checkChange("newFile", protocol.Created) + fp := wd.AbsPath("newFile") + if err := os.Remove(fp); err != nil { + t.Fatal(err) + } + checkChange("newFile", protocol.Deleted) +} + +func TestSplitModuleVersionPath(t *testing.T) { + tests := []struct { + path string + wantModule, wantVersion, wantSuffix string + }{ + {"foo.com@v1.2.3/bar", "foo.com", "v1.2.3", "bar"}, + {"foo.com/module@v1.2.3/bar", "foo.com/module", "v1.2.3", "bar"}, + {"foo.com@v1.2.3", "foo.com", "v1.2.3", ""}, + {"std@v1.14.0", "std", "v1.14.0", ""}, + {"another/module/path", "another/module/path", "", ""}, + } + + for _, test := range tests { + module, version, suffix := splitModuleVersionPath(test.path) + if module != test.wantModule || version != test.wantVersion || suffix != test.wantSuffix { + t.Errorf("splitModuleVersionPath(%q) =\n\t(%q, %q, %q)\nwant\n\t(%q, %q, %q)", + test.path, module, version, suffix, test.wantModule, test.wantVersion, test.wantSuffix) + } + } +} diff --git a/gopls/internal/lsp/fake/workdir_windows.go b/gopls/internal/lsp/fake/workdir_windows.go new file mode 100644 index 00000000000..4d4f0152764 --- /dev/null +++ b/gopls/internal/lsp/fake/workdir_windows.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fake + +import ( + "errors" + "syscall" +) + +func init() { + // constants copied from GOROOT/src/internal/syscall/windows/syscall_windows.go + const ( + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ) + + isWindowsErrLockViolation = func(err error) bool { + return errors.Is(err, ERROR_LOCK_VIOLATION) + } +} diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go new file mode 100644 index 00000000000..44485cb80ac --- /dev/null +++ b/gopls/internal/lsp/filecache/filecache.go @@ -0,0 +1,307 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The filecache package provides a file-based shared durable blob cache. +// +// The cache is a machine-global mapping from (kind string, key +// [32]byte) to []byte, where kind is an identifier describing the +// namespace or purpose (e.g. "analysis"), and key is a SHA-256 digest +// of the recipe of the value. (It need not be the digest of the value +// itself, so you can query the cache without knowing what value the +// recipe would produce.) +// +// The space budget of the cache can be controlled by [SetBudget]. +// Cache entries may be evicted at any time or in any order. +// +// The Get and Set operations are concurrency-safe. +package filecache + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "sync" + "sync/atomic" + "time" + + "golang.org/x/tools/internal/lockedfile" +) + +// Get retrieves from the cache and returns a newly allocated +// copy of the value most recently supplied to Set(kind, key), +// possibly by another process. +// Get returns ErrNotFound if the value was not found. +func Get(kind string, key [32]byte) ([]byte, error) { + name := filename(kind, key) + data, err := lockedfile.Read(name) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, ErrNotFound + } + return nil, err + } + + // Verify that the Write was complete + // by checking the recorded length. + if len(data) < 8 { + return nil, ErrNotFound // cache entry is incomplete + } + if length := binary.LittleEndian.Uint64(data); int(length) != len(data)-8 { + return nil, ErrNotFound // cache entry is incomplete (or too long!) + } + data = data[8:] + + // Update file time for use by LRU eviction. + // (This turns every read into a write operation. + // If this is a performance problem, we should + // touch the files aynchronously.) + // + // (Traditionally the access time would be updated + // automatically, but for efficiency most POSIX systems have + // for many years set the noatime mount option to avoid every + // open or read operation entailing a metadata write.) + now := time.Now() + if err := os.Chtimes(name, now, now); err != nil { + return nil, fmt.Errorf("failed to update access time: %w", err) + } + + return data, nil +} + +// ErrNotFound is the distinguished error +// returned by Get when the key is not found. +var ErrNotFound = fmt.Errorf("not found") + +// Set updates the value in the cache. +func Set(kind string, key [32]byte, value []byte) error { + name := filename(kind, key) + if err := os.MkdirAll(filepath.Dir(name), 0700); err != nil { + return err + } + + // In the unlikely event of a short write (e.g. ENOSPC) + // followed by process termination (e.g. a power cut), we + // don't want a reader to see a short file, so we record + // the expected length first and verify it in Get. + var length [8]byte + binary.LittleEndian.PutUint64(length[:], uint64(len(value))) + header := bytes.NewReader(length[:]) + payload := bytes.NewReader(value) + + // Windows doesn't support atomic rename--we tried MoveFile, + // MoveFileEx, ReplaceFileEx, and SetFileInformationByHandle + // of RenameFileInfo, all to no avail--so instead we use + // advisory file locking, which is only about 2x slower even + // on POSIX platforms with atomic rename. + return lockedfile.Write(name, io.MultiReader(header, payload), 0600) +} + +var budget int64 = 1e9 // 1GB + +// SetBudget sets a soft limit on disk usage of the cache (in bytes) +// and returns the previous value. Supplying a negative value queries +// the current value without changing it. +// +// If two gopls processes have different budgets, the one with the +// lower budget will collect garbage more actively, but both will +// observe the effect. +func SetBudget(new int64) (old int64) { + if new < 0 { + return atomic.LoadInt64(&budget) + } + return atomic.SwapInt64(&budget, new) +} + +// --- implementation ---- + +// filename returns the cache entry of the specified kind and key. +// +// A typical cache entry is a file name such as: +// +// $HOME/Library/Caches / gopls / VVVVVVVV / kind / KK / KKKK...KKKK +// +// The portions separated by spaces are as follows: +// - The user's preferred cache directory; the default value varies by OS. +// - The constant "gopls". +// - The "version", 32 bits of the digest of the gopls executable. +// - The kind or purpose of this cache subtree (e.g. "analysis"). +// - The first 8 bits of the key, to avoid huge directories. +// - The full 256 bits of the key. +// +// Once a file is written its contents are never modified, though it +// may be atomically replaced or removed. +// +// New versions of gopls are free to reorganize the contents of the +// version directory as needs evolve. But all versions of gopls must +// in perpetuity treat the "gopls" directory in a common fashion. +// +// In particular, each gopls process attempts to garbage collect +// the entire gopls directory so that newer binaries can clean up +// after older ones: in the development cycle especially, new +// new versions may be created frequently. +func filename(kind string, key [32]byte) string { + hex := fmt.Sprintf("%x", key) + return filepath.Join(getCacheDir(), kind, hex[:2], hex) +} + +// getCacheDir returns the persistent cache directory of all processes +// running this version of the gopls executable. +// +// It must incorporate the hash of the executable so that we needn't +// worry about incompatible changes to the file format or changes to +// the algorithm that produced the index. +func getCacheDir() string { + cacheDirOnce.Do(func() { + // Use user's preferred cache directory. + userDir := os.Getenv("GOPLS_CACHE") + if userDir == "" { + var err error + userDir, err = os.UserCacheDir() + if err != nil { + userDir = os.TempDir() + } + } + goplsDir := filepath.Join(userDir, "gopls") + + // UserCacheDir may return a nonexistent directory + // (in which case we must create it, which may fail), + // or it may return a non-writable directory, in + // which case we should ideally respect the user's express + // wishes (e.g. XDG_CACHE_HOME) and not write somewhere else. + // Sadly UserCacheDir doesn't currently let us distinguish + // such intent from accidental misconfiguraton such as HOME=/ + // in a CI builder. So, we check whether the gopls subdirectory + // can be created (or already exists) and not fall back to /tmp. + // See also https://github.com/golang/go/issues/57638. + if os.MkdirAll(goplsDir, 0700) != nil { + goplsDir = filepath.Join(os.TempDir(), "gopls") + } + + // Start the garbage collector. + go gc(goplsDir) + + // Compute the hash of this executable (~20ms) and create a subdirectory. + hash, err := hashExecutable() + if err != nil { + log.Fatalf("can't hash gopls executable: %v", err) + } + // Use only 32 bits of the digest to avoid unwieldy filenames. + // It's not an adversarial situation. + cacheDir = filepath.Join(goplsDir, fmt.Sprintf("%x", hash[:4])) + if err := os.MkdirAll(cacheDir, 0700); err != nil { + log.Fatalf("can't create cache: %v", err) + } + }) + return cacheDir +} + +var ( + cacheDirOnce sync.Once + cacheDir string // only accessed by getCacheDir +) + +func hashExecutable() (hash [32]byte, err error) { + exe, err := os.Executable() + if err != nil { + return hash, err + } + f, err := os.Open(exe) + if err != nil { + return hash, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return hash, fmt.Errorf("can't read executable: %w", err) + } + h.Sum(hash[:0]) + return hash, nil +} + +// gc runs forever, periodically deleting files from the gopls +// directory until the space budget is no longer exceeded, and also +// deleting files older than the maximum age, regardless of budget. +// +// One gopls process may delete garbage created by a different gopls +// process, possibly running a different version of gopls, possibly +// running concurrently. +func gc(goplsDir string) { + const period = 1 * time.Minute // period between collections + const statDelay = 100 * time.Microsecond // delay between stats to smooth out I/O + const maxAge = 5 * 24 * time.Hour // max time since last access before file is deleted + + // The macOS filesystem is strikingly slow, at least on some machines. + // /usr/bin/find achieves only about 25,000 stats per second + // at full speed (no pause between items), meaning a large + // cache may take several minutes to scan. + // We must ensure that short-lived processes (crucially, + // tests) are able to make progress sweeping garbage. + // + // (gopls' caches should never actually get this big in + // practise: the example mentioned above resulted from a bug + // that caused filecache to fail to delete any files.) + + const debug = false + + for { + // Enumerate all files in the cache. + type item struct { + path string + stat os.FileInfo + } + var files []item + var total int64 // bytes + _ = filepath.Walk(goplsDir, func(path string, stat os.FileInfo, err error) error { + // TODO(adonovan): opt: also collect empty directories, + // as they typically occupy around 1KB. + if err == nil && !stat.IsDir() { + // Unconditionally delete files we haven't used in ages. + // (We do this here, not in the second loop, so that we + // perform age-based collection even in short-lived processes.) + age := time.Since(stat.ModTime()) + if age > maxAge { + if debug { + log.Printf("age: deleting stale file %s (%dB, age %v)", + path, stat.Size(), age) + } + os.Remove(path) // ignore error + } else { + files = append(files, item{path, stat}) + total += stat.Size() + time.Sleep(statDelay) + } + } + return nil + }) + + // Sort oldest files first. + sort.Slice(files, func(i, j int) bool { + return files[i].stat.ModTime().Before(files[j].stat.ModTime()) + }) + + // Delete oldest files until we're under budget. + budget := atomic.LoadInt64(&budget) + for _, file := range files { + if total < budget { + break + } + if debug { + age := time.Since(file.stat.ModTime()) + log.Printf("budget: deleting stale file %s (%dB, age %v)", + file.path, file.stat.Size(), age) + } + os.Remove(file.path) // ignore error + total -= file.stat.Size() + } + + time.Sleep(period) + } +} diff --git a/gopls/internal/lsp/filecache/filecache_test.go b/gopls/internal/lsp/filecache/filecache_test.go new file mode 100644 index 00000000000..c96cb16eb99 --- /dev/null +++ b/gopls/internal/lsp/filecache/filecache_test.go @@ -0,0 +1,215 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filecache_test + +// This file defines tests of the API of the filecache package. +// +// Some properties (e.g. garbage collection) cannot be exercised +// through the API, so this test does not attempt to do so. + +import ( + "bytes" + cryptorand "crypto/rand" + "fmt" + "log" + mathrand "math/rand" + "os" + "os/exec" + "strconv" + "testing" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/gopls/internal/lsp/filecache" +) + +func TestBasics(t *testing.T) { + const kind = "TestBasics" + key := uniqueKey() // never used before + value := []byte("hello") + + // Get of a never-seen key returns not found. + if _, err := filecache.Get(kind, key); err != filecache.ErrNotFound { + t.Errorf("Get of random key returned err=%q, want not found", err) + } + + // Set of a never-seen key and a small value succeeds. + if err := filecache.Set(kind, key, value); err != nil { + t.Errorf("Set failed: %v", err) + } + + // Get of the key returns a copy of the value. + if got, err := filecache.Get(kind, key); err != nil { + t.Errorf("Get after Set failed: %v", err) + } else if string(got) != string(value) { + t.Errorf("Get after Set returned different value: got %q, want %q", got, value) + } + + // The kind is effectively part of the key. + if _, err := filecache.Get("different-kind", key); err != filecache.ErrNotFound { + t.Errorf("Get with wrong kind returned err=%q, want not found", err) + } +} + +// TestConcurrency exercises concurrent access to the same entry. +func TestConcurrency(t *testing.T) { + const kind = "TestConcurrency" + key := uniqueKey() + const N = 100 // concurrency level + + // Construct N distinct values, each larger + // than a typical 4KB OS file buffer page. + var values [N][8192]byte + for i := range values { + if _, err := mathrand.Read(values[i][:]); err != nil { + t.Fatalf("rand: %v", err) + } + } + + // get calls Get and verifies that the cache entry + // matches one of the values passed to Set. + get := func(mustBeFound bool) error { + got, err := filecache.Get(kind, key) + if err != nil { + if err == filecache.ErrNotFound && !mustBeFound { + return nil // not found + } + return err + } + for _, want := range values { + if bytes.Equal(want[:], got) { + return nil // a match + } + } + return fmt.Errorf("Get returned a value that was never Set") + } + + // Perform N concurrent calls to Set and Get. + // All sets must succeed. + // All gets must return nothing, or one of the Set values; + // there is no third possibility. + var group errgroup.Group + for i := range values { + i := i + group.Go(func() error { return filecache.Set(kind, key, values[i][:]) }) + group.Go(func() error { return get(false) }) + } + if err := group.Wait(); err != nil { + t.Fatal(err) + } + + // A final Get must report one of the values that was Set. + if err := get(true); err != nil { + t.Fatalf("final Get failed: %v", err) + } +} + +const ( + testIPCKind = "TestIPC" + testIPCValueA = "hello" + testIPCValueB = "world" +) + +// TestIPC exercises interprocess communication through the cache. +// It calls Set(A) in the parent, { Get(A); Set(B) } in the child +// process, then Get(B) in the parent. +func TestIPC(t *testing.T) { + keyA := uniqueKey() + keyB := uniqueKey() + value := []byte(testIPCValueA) + + // Set keyA. + if err := filecache.Set(testIPCKind, keyA, value); err != nil { + t.Fatalf("Set: %v", err) + } + + // Call ipcChild in a child process, + // passing it the keys in the environment + // (quoted, to avoid NUL termination of C strings). + // It will Get(A) then Set(B). + cmd := exec.Command(os.Args[0], os.Args[1:]...) + cmd.Env = append(os.Environ(), + "ENTRYPOINT=ipcChild", + fmt.Sprintf("KEYA=%q", keyA), + fmt.Sprintf("KEYB=%q", keyB)) + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } + + // Verify keyB. + got, err := filecache.Get(testIPCKind, keyB) + if err != nil { + t.Fatal(err) + } + if string(got) != "world" { + t.Fatalf("Get(keyB) = %q, want %q", got, "world") + } +} + +// We define our own main function so that portions of +// some tests can run in a separate (child) process. +func TestMain(m *testing.M) { + switch os.Getenv("ENTRYPOINT") { + case "ipcChild": + ipcChild() + default: + os.Exit(m.Run()) + } +} + +// ipcChild is the portion of TestIPC that runs in a child process. +func ipcChild() { + getenv := func(name string) (key [32]byte) { + s, _ := strconv.Unquote(os.Getenv(name)) + copy(key[:], []byte(s)) + return + } + + // Verify key A. + got, err := filecache.Get(testIPCKind, getenv("KEYA")) + if err != nil || string(got) != testIPCValueA { + log.Fatalf("child: Get(key) = %q, %v; want %q", got, err, testIPCValueA) + } + + // Set key B. + if err := filecache.Set(testIPCKind, getenv("KEYB"), []byte(testIPCValueB)); err != nil { + log.Fatalf("child: Set(keyB) failed: %v", err) + } +} + +// uniqueKey returns a key that has never been used before. +func uniqueKey() (key [32]byte) { + if _, err := cryptorand.Read(key[:]); err != nil { + log.Fatalf("rand: %v", err) + } + return +} + +func BenchmarkUncontendedGet(b *testing.B) { + const kind = "BenchmarkUncontendedGet" + key := uniqueKey() + + var value [8192]byte + if _, err := mathrand.Read(value[:]); err != nil { + b.Fatalf("rand: %v", err) + } + if err := filecache.Set(kind, key, value[:]); err != nil { + b.Fatal(err) + } + b.ResetTimer() + + var group errgroup.Group + group.SetLimit(50) + for i := 0; i < b.N; i++ { + group.Go(func() error { + _, err := filecache.Get(kind, key) + return err + }) + } + if err := group.Wait(); err != nil { + b.Fatal(err) + } +} diff --git a/gopls/internal/lsp/folding_range.go b/gopls/internal/lsp/folding_range.go new file mode 100644 index 00000000000..3a29ce9927b --- /dev/null +++ b/gopls/internal/lsp/folding_range.go @@ -0,0 +1,41 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +func (s *Server) foldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + + ranges, err := source.FoldingRange(ctx, snapshot, fh, snapshot.View().Options().LineFoldingOnly) + if err != nil { + return nil, err + } + return toProtocolFoldingRanges(ranges) +} + +func toProtocolFoldingRanges(ranges []*source.FoldingRangeInfo) ([]protocol.FoldingRange, error) { + result := make([]protocol.FoldingRange, 0, len(ranges)) + for _, info := range ranges { + rng := info.MappedRange.Range() + result = append(result, protocol.FoldingRange{ + StartLine: rng.Start.Line, + StartCharacter: rng.Start.Character, + EndLine: rng.End.Line, + EndCharacter: rng.End.Character, + Kind: string(info.Kind), + }) + } + return result, nil +} diff --git a/gopls/internal/lsp/format.go b/gopls/internal/lsp/format.go new file mode 100644 index 00000000000..773a4690e92 --- /dev/null +++ b/gopls/internal/lsp/format.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/work" +) + +func (s *Server) formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + switch snapshot.View().FileKind(fh) { + case source.Mod: + return mod.Format(ctx, snapshot, fh) + case source.Go: + return source.Format(ctx, snapshot, fh) + case source.Work: + return work.Format(ctx, snapshot, fh) + } + return nil, nil +} diff --git a/gopls/internal/lsp/general.go b/gopls/internal/lsp/general.go new file mode 100644 index 00000000000..d55e10384e2 --- /dev/null +++ b/gopls/internal/lsp/general.go @@ -0,0 +1,618 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/jsonrpc2" +) + +func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) { + s.stateMu.Lock() + if s.state >= serverInitializing { + defer s.stateMu.Unlock() + return nil, fmt.Errorf("%w: initialize called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state) + } + s.state = serverInitializing + s.stateMu.Unlock() + + // For uniqueness, use the gopls PID rather than params.ProcessID (the client + // pid). Some clients might start multiple gopls servers, though they + // probably shouldn't. + pid := os.Getpid() + s.tempDir = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.%s", pid, s.session.ID())) + err := os.Mkdir(s.tempDir, 0700) + if err != nil { + // MkdirTemp could fail due to permissions issues. This is a problem with + // the user's environment, but should not block gopls otherwise behaving. + // All usage of s.tempDir should be predicated on having a non-empty + // s.tempDir. + event.Error(ctx, "creating temp dir", err) + s.tempDir = "" + } + s.progress.SetSupportsWorkDoneProgress(params.Capabilities.Window.WorkDoneProgress) + + options := s.session.Options() + defer func() { s.session.SetOptions(options) }() + + if err := s.handleOptionResults(ctx, source.SetOptions(options, params.InitializationOptions)); err != nil { + return nil, err + } + options.ForClientCapabilities(params.Capabilities) + + if options.ShowBugReports { + // Report the next bug that occurs on the server. + bugCh := bug.Notify() + go func() { + b := <-bugCh + msg := &protocol.ShowMessageParams{ + Type: protocol.Error, + Message: fmt.Sprintf("A bug occurred on the server: %s\nLocation:%s", b.Description, b.Key), + } + if err := s.eventuallyShowMessage(context.Background(), msg); err != nil { + log.Printf("error showing bug: %v", err) + } + }() + } + + folders := params.WorkspaceFolders + if len(folders) == 0 { + if params.RootURI != "" { + folders = []protocol.WorkspaceFolder{{ + URI: string(params.RootURI), + Name: path.Base(params.RootURI.SpanURI().Filename()), + }} + } + } + for _, folder := range folders { + uri := span.URIFromURI(folder.URI) + if !uri.IsFile() { + continue + } + s.pendingFolders = append(s.pendingFolders, folder) + } + // gopls only supports URIs with a file:// scheme, so if we have no + // workspace folders with a supported scheme, fail to initialize. + if len(folders) > 0 && len(s.pendingFolders) == 0 { + return nil, fmt.Errorf("unsupported URI schemes: %v (gopls only supports file URIs)", folders) + } + + var codeActionProvider interface{} = true + if ca := params.Capabilities.TextDocument.CodeAction; len(ca.CodeActionLiteralSupport.CodeActionKind.ValueSet) > 0 { + // If the client has specified CodeActionLiteralSupport, + // send the code actions we support. + // + // Using CodeActionOptions is only valid if codeActionLiteralSupport is set. + codeActionProvider = &protocol.CodeActionOptions{ + CodeActionKinds: s.getSupportedCodeActions(), + } + } + var renameOpts interface{} = true + if r := params.Capabilities.TextDocument.Rename; r.PrepareSupport { + renameOpts = protocol.RenameOptions{ + PrepareProvider: r.PrepareSupport, + } + } + + versionInfo := debug.VersionInfo() + + // golang/go#45732: Warn users who've installed sergi/go-diff@v1.2.0, since + // it will corrupt the formatting of their files. + for _, dep := range versionInfo.Deps { + if dep.Path == "github.com/sergi/go-diff" && dep.Version == "v1.2.0" { + if err := s.eventuallyShowMessage(ctx, &protocol.ShowMessageParams{ + Message: `It looks like you have a bad gopls installation. +Please reinstall gopls by running 'GO111MODULE=on go install golang.org/x/tools/gopls@latest'. +See https://github.com/golang/go/issues/45732 for more information.`, + Type: protocol.Error, + }); err != nil { + return nil, err + } + } + } + + goplsVersion, err := json.Marshal(versionInfo) + if err != nil { + return nil, err + } + + return &protocol.InitializeResult{ + Capabilities: protocol.ServerCapabilities{ + CallHierarchyProvider: true, + CodeActionProvider: codeActionProvider, + CodeLensProvider: &protocol.CodeLensOptions{}, // must be non-nil to enable the code lens capability + CompletionProvider: protocol.CompletionOptions{ + TriggerCharacters: []string{"."}, + }, + DefinitionProvider: true, + TypeDefinitionProvider: true, + ImplementationProvider: true, + DocumentFormattingProvider: true, + DocumentSymbolProvider: true, + WorkspaceSymbolProvider: true, + ExecuteCommandProvider: protocol.ExecuteCommandOptions{ + Commands: options.SupportedCommands, + }, + FoldingRangeProvider: true, + HoverProvider: true, + DocumentHighlightProvider: true, + DocumentLinkProvider: protocol.DocumentLinkOptions{}, + InlayHintProvider: protocol.InlayHintOptions{}, + ReferencesProvider: true, + RenameProvider: renameOpts, + SelectionRangeProvider: protocol.SelectionRangeRegistrationOptions{}, + SemanticTokensProvider: protocol.SemanticTokensOptions{ + Range: true, + Full: true, + Legend: protocol.SemanticTokensLegend{ + TokenTypes: s.session.Options().SemanticTypes, + TokenModifiers: s.session.Options().SemanticMods, + }, + }, + SignatureHelpProvider: protocol.SignatureHelpOptions{ + TriggerCharacters: []string{"(", ","}, + }, + TextDocumentSync: &protocol.TextDocumentSyncOptions{ + Change: protocol.Incremental, + OpenClose: true, + Save: protocol.SaveOptions{ + IncludeText: false, + }, + }, + Workspace: protocol.Workspace6Gn{ + WorkspaceFolders: protocol.WorkspaceFolders5Gn{ + Supported: true, + ChangeNotifications: "workspace/didChangeWorkspaceFolders", + }, + }, + }, + ServerInfo: protocol.PServerInfoMsg_initialize{ + Name: "gopls", + Version: string(goplsVersion), + }, + }, nil +} + +func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error { + s.stateMu.Lock() + if s.state >= serverInitialized { + defer s.stateMu.Unlock() + return fmt.Errorf("%w: initialized called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state) + } + s.state = serverInitialized + s.stateMu.Unlock() + + for _, not := range s.notifications { + s.client.ShowMessage(ctx, not) + } + s.notifications = nil + + options := s.session.Options() + defer func() { s.session.SetOptions(options) }() + + if err := s.addFolders(ctx, s.pendingFolders); err != nil { + return err + } + s.pendingFolders = nil + s.checkViewGoVersions() + + var registrations []protocol.Registration + if options.ConfigurationSupported && options.DynamicConfigurationSupported { + registrations = append(registrations, protocol.Registration{ + ID: "workspace/didChangeConfiguration", + Method: "workspace/didChangeConfiguration", + }) + } + if len(registrations) > 0 { + if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ + Registrations: registrations, + }); err != nil { + return err + } + } + return nil +} + +// GoVersionTable maps Go versions to the gopls version in which support will +// be deprecated, and the final gopls version supporting them without warnings. +// Keep this in sync with gopls/README.md +// +// Must be sorted in ascending order of Go version. +// +// Mutable for testing. +var GoVersionTable = []GoVersionSupport{ + {12, "", "v0.7.5"}, + {15, "v0.11.0", "v0.9.5"}, +} + +// GoVersionSupport holds information about end-of-life Go version support. +type GoVersionSupport struct { + GoVersion int + DeprecatedVersion string // if unset, the version is already deprecated + InstallGoplsVersion string +} + +// OldestSupportedGoVersion is the last X in Go 1.X that this version of gopls +// supports. +func OldestSupportedGoVersion() int { + return GoVersionTable[len(GoVersionTable)-1].GoVersion + 1 +} + +// versionMessage returns the warning/error message to display if the user is +// on the given Go version, if any. The goVersion variable is the X in Go 1.X. +// +// If goVersion is invalid (< 0), it returns "", 0. +func versionMessage(goVersion int) (string, protocol.MessageType) { + if goVersion < 0 { + return "", 0 + } + + for _, v := range GoVersionTable { + if goVersion <= v.GoVersion { + var msgBuilder strings.Builder + + mType := protocol.Error + fmt.Fprintf(&msgBuilder, "Found Go version 1.%d", goVersion) + if v.DeprecatedVersion != "" { + // not deprecated yet, just a warning + fmt.Fprintf(&msgBuilder, ", which will be unsupported by gopls %s. ", v.DeprecatedVersion) + mType = protocol.Warning + } else { + fmt.Fprint(&msgBuilder, ", which is not supported by this version of gopls. ") + } + fmt.Fprintf(&msgBuilder, "Please upgrade to Go 1.%d or later and reinstall gopls. ", OldestSupportedGoVersion()) + fmt.Fprintf(&msgBuilder, "If you can't upgrade and want this message to go away, please install gopls %s. ", v.InstallGoplsVersion) + fmt.Fprint(&msgBuilder, "See https://go.dev/s/gopls-support-policy for more details.") + + return msgBuilder.String(), mType + } + } + return "", 0 +} + +// checkViewGoVersions checks whether any Go version used by a view is too old, +// raising a showMessage notification if so. +// +// It should be called after views change. +func (s *Server) checkViewGoVersions() { + oldestVersion := -1 + for _, view := range s.session.Views() { + viewVersion := view.GoVersion() + if oldestVersion == -1 || viewVersion < oldestVersion { + oldestVersion = viewVersion + } + } + + if msg, mType := versionMessage(oldestVersion); msg != "" { + s.eventuallyShowMessage(context.Background(), &protocol.ShowMessageParams{ + Type: mType, + Message: msg, + }) + } +} + +func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error { + originalViews := len(s.session.Views()) + viewErrors := make(map[span.URI]error) + + var ndiagnose sync.WaitGroup // number of unfinished diagnose calls + if s.session.Options().VerboseWorkDoneProgress { + work := s.progress.Start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil) + defer func() { + go func() { + ndiagnose.Wait() + work.End(ctx, "Done.") + }() + }() + } + // Only one view gets to have a workspace. + var nsnapshots sync.WaitGroup // number of unfinished snapshot initializations + for _, folder := range folders { + uri := span.URIFromURI(folder.URI) + // Ignore non-file URIs. + if !uri.IsFile() { + continue + } + work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil) + snapshot, release, err := s.addView(ctx, folder.Name, uri) + if err != nil { + if err == source.ErrViewExists { + continue + } + viewErrors[uri] = err + work.End(ctx, fmt.Sprintf("Error loading packages: %s", err)) + continue + } + // Inv: release() must be called once. + + // Initialize snapshot asynchronously. + initialized := make(chan struct{}) + nsnapshots.Add(1) + go func() { + snapshot.AwaitInitialized(ctx) + work.End(ctx, "Finished loading packages.") + nsnapshots.Done() + close(initialized) // signal + }() + + // Diagnose the newly created view asynchronously. + ndiagnose.Add(1) + go func() { + s.diagnoseDetached(snapshot) + <-initialized + release() + ndiagnose.Done() + }() + } + + // Wait for snapshots to be initialized so that all files are known. + // (We don't need to wait for diagnosis to finish.) + nsnapshots.Wait() + + // Register for file watching notifications, if they are supported. + if err := s.updateWatchedDirectories(ctx); err != nil { + event.Error(ctx, "failed to register for file watching notifications", err) + } + + if len(viewErrors) > 0 { + errMsg := fmt.Sprintf("Error loading workspace folders (expected %v, got %v)\n", len(folders), len(s.session.Views())-originalViews) + for uri, err := range viewErrors { + errMsg += fmt.Sprintf("failed to load view for %s: %v\n", uri, err) + } + return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Type: protocol.Error, + Message: errMsg, + }) + } + return nil +} + +// updateWatchedDirectories compares the current set of directories to watch +// with the previously registered set of directories. If the set of directories +// has changed, we unregister and re-register for file watching notifications. +// updatedSnapshots is the set of snapshots that have been updated. +func (s *Server) updateWatchedDirectories(ctx context.Context) error { + patterns := s.session.FileWatchingGlobPatterns(ctx) + + s.watchedGlobPatternsMu.Lock() + defer s.watchedGlobPatternsMu.Unlock() + + // Nothing to do if the set of workspace directories is unchanged. + if equalURISet(s.watchedGlobPatterns, patterns) { + return nil + } + + // If the set of directories to watch has changed, register the updates and + // unregister the previously watched directories. This ordering avoids a + // period where no files are being watched. Still, if a user makes on-disk + // changes before these updates are complete, we may miss them for the new + // directories. + prevID := s.watchRegistrationCount - 1 + if err := s.registerWatchedDirectoriesLocked(ctx, patterns); err != nil { + return err + } + if prevID >= 0 { + return s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{ + Unregisterations: []protocol.Unregistration{{ + ID: watchedFilesCapabilityID(prevID), + Method: "workspace/didChangeWatchedFiles", + }}, + }) + } + return nil +} + +func watchedFilesCapabilityID(id int) string { + return fmt.Sprintf("workspace/didChangeWatchedFiles-%d", id) +} + +func equalURISet(m1, m2 map[string]struct{}) bool { + if len(m1) != len(m2) { + return false + } + for k := range m1 { + _, ok := m2[k] + if !ok { + return false + } + } + return true +} + +// registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles +// registrations to the client and updates s.watchedDirectories. +func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns map[string]struct{}) error { + if !s.session.Options().DynamicWatchedFilesSupported { + return nil + } + for k := range s.watchedGlobPatterns { + delete(s.watchedGlobPatterns, k) + } + var watchers []protocol.FileSystemWatcher + for pattern := range patterns { + watchers = append(watchers, protocol.FileSystemWatcher{ + GlobPattern: pattern, + Kind: uint32(protocol.WatchChange + protocol.WatchDelete + protocol.WatchCreate), + }) + } + + if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ + Registrations: []protocol.Registration{{ + ID: watchedFilesCapabilityID(s.watchRegistrationCount), + Method: "workspace/didChangeWatchedFiles", + RegisterOptions: protocol.DidChangeWatchedFilesRegistrationOptions{ + Watchers: watchers, + }, + }}, + }); err != nil { + return err + } + s.watchRegistrationCount++ + + for k, v := range patterns { + s.watchedGlobPatterns[k] = v + } + return nil +} + +func (s *Server) fetchConfig(ctx context.Context, name string, folder span.URI, o *source.Options) error { + if !s.session.Options().ConfigurationSupported { + return nil + } + configs, err := s.client.Configuration(ctx, &protocol.ParamConfiguration{ + Items: []protocol.ConfigurationItem{{ + ScopeURI: string(folder), + Section: "gopls", + }}, + }, + ) + if err != nil { + return fmt.Errorf("failed to get workspace configuration from client (%s): %v", folder, err) + } + for _, config := range configs { + if err := s.handleOptionResults(ctx, source.SetOptions(o, config)); err != nil { + return err + } + } + return nil +} + +func (s *Server) eventuallyShowMessage(ctx context.Context, msg *protocol.ShowMessageParams) error { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state == serverInitialized { + return s.client.ShowMessage(ctx, msg) + } + s.notifications = append(s.notifications, msg) + return nil +} + +func (s *Server) handleOptionResults(ctx context.Context, results source.OptionResults) error { + var warnings, errors []string + for _, result := range results { + switch result.Error.(type) { + case nil: + // nothing to do + case *source.SoftError: + warnings = append(warnings, result.Error.Error()) + default: + errors = append(errors, result.Error.Error()) + } + } + + // Sort messages, but put errors first. + // + // Having stable content for the message allows clients to de-duplicate. This + // matters because we may send duplicate warnings for clients that support + // dynamic configuration: one for the initial settings, and then more for the + // individual view settings. + var msgs []string + msgType := protocol.Warning + if len(errors) > 0 { + msgType = protocol.Error + sort.Strings(errors) + msgs = append(msgs, errors...) + } + if len(warnings) > 0 { + sort.Strings(warnings) + msgs = append(msgs, warnings...) + } + + if len(msgs) > 0 { + // Settings + combined := "Invalid settings: " + strings.Join(msgs, "; ") + params := &protocol.ShowMessageParams{ + Type: msgType, + Message: combined, + } + return s.eventuallyShowMessage(ctx, params) + } + + return nil +} + +// beginFileRequest checks preconditions for a file-oriented request and routes +// it to a snapshot. +// We don't want to return errors for benign conditions like wrong file type, +// so callers should do if !ok { return err } rather than if err != nil. +// The returned cleanup function is non-nil even in case of false/error result. +func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI, expectKind source.FileKind) (source.Snapshot, source.FileHandle, bool, func(), error) { + uri := pURI.SpanURI() + if !uri.IsFile() { + // Not a file URI. Stop processing the request, but don't return an error. + return nil, nil, false, func() {}, nil + } + view, err := s.session.ViewOf(uri) + if err != nil { + return nil, nil, false, func() {}, err + } + snapshot, release, err := view.Snapshot() + if err != nil { + return nil, nil, false, func() {}, err + } + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + release() + return nil, nil, false, func() {}, err + } + if expectKind != source.UnknownKind && view.FileKind(fh) != expectKind { + // Wrong kind of file. Nothing to do. + release() + return nil, nil, false, func() {}, nil + } + return snapshot, fh, true, release, nil +} + +// shutdown implements the 'shutdown' LSP handler. It releases resources +// associated with the server and waits for all ongoing work to complete. +func (s *Server) shutdown(ctx context.Context) error { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state < serverInitialized { + event.Log(ctx, "server shutdown without initialization") + } + if s.state != serverShutDown { + // drop all the active views + s.session.Shutdown(ctx) + s.state = serverShutDown + if s.tempDir != "" { + if err := os.RemoveAll(s.tempDir); err != nil { + event.Error(ctx, "removing temp dir", err) + } + } + } + return nil +} + +func (s *Server) exit(ctx context.Context) error { + s.stateMu.Lock() + defer s.stateMu.Unlock() + + s.client.Close() + + if s.state != serverShutDown { + // TODO: We should be able to do better than this. + os.Exit(1) + } + // we don't terminate the process on a normal exit, we just allow it to + // close naturally if needed after the connection is closed. + return nil +} diff --git a/gopls/internal/lsp/general_test.go b/gopls/internal/lsp/general_test.go new file mode 100644 index 00000000000..a0312ba1b43 --- /dev/null +++ b/gopls/internal/lsp/general_test.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +func TestVersionMessage(t *testing.T) { + tests := []struct { + goVersion int + wantContains []string // string fragments that we expect to see + wantType protocol.MessageType + }{ + {-1, nil, 0}, + {12, []string{"1.12", "not supported", "upgrade to Go 1.16", "install gopls v0.7.5"}, protocol.Error}, + {13, []string{"1.13", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning}, + {15, []string{"1.15", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning}, + {16, nil, 0}, + } + + for _, test := range tests { + gotMsg, gotType := versionMessage(test.goVersion) + + if len(test.wantContains) == 0 && gotMsg != "" { + t.Errorf("versionMessage(%d) = %q, want \"\"", test.goVersion, gotMsg) + } + + for _, want := range test.wantContains { + if !strings.Contains(gotMsg, want) { + t.Errorf("versionMessage(%d) = %q, want containing %q", test.goVersion, gotMsg, want) + } + } + + if gotType != test.wantType { + t.Errorf("versionMessage(%d) = returned message type %d, want %d", test.goVersion, gotType, test.wantType) + } + } +} diff --git a/gopls/internal/lsp/glob/glob.go b/gopls/internal/lsp/glob/glob.go new file mode 100644 index 00000000000..a540ebefac5 --- /dev/null +++ b/gopls/internal/lsp/glob/glob.go @@ -0,0 +1,349 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package glob implements an LSP-compliant glob pattern matcher for testing. +package glob + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" +) + +// A Glob is an LSP-compliant glob pattern, as defined by the spec: +// https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#documentFilter +// +// NOTE: this implementation is currently only intended for testing. In order +// to make it production ready, we'd need to: +// - verify it against the VS Code implementation +// - add more tests +// - microbenchmark, likely avoiding the element interface +// - resolve the question of what is meant by "character". If it's a UTF-16 +// code (as we suspect) it'll be a bit more work. +// +// Quoting from the spec: +// Glob patterns can have the following syntax: +// - `*` to match one or more characters in a path segment +// - `?` to match on one character in a path segment +// - `**` to match any number of path segments, including none +// - `{}` to group sub patterns into an OR expression. (e.g. `**/*.{ts,js}` +// matches all TypeScript and JavaScript files) +// - `[]` to declare a range of characters to match in a path segment +// (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) +// - `[!...]` to negate a range of characters to match in a path segment +// (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but +// not `example.0`) +// +// Expanding on this: +// - '/' matches one or more literal slashes. +// - any other character matches itself literally. +type Glob struct { + elems []element // pattern elements +} + +// Parse builds a Glob for the given pattern, returning an error if the pattern +// is invalid. +func Parse(pattern string) (*Glob, error) { + g, _, err := parse(pattern, false) + return g, err +} + +func parse(pattern string, nested bool) (*Glob, string, error) { + g := new(Glob) + for len(pattern) > 0 { + switch pattern[0] { + case '/': + pattern = pattern[1:] + g.elems = append(g.elems, slash{}) + + case '*': + if len(pattern) > 1 && pattern[1] == '*' { + if (len(g.elems) > 0 && g.elems[len(g.elems)-1] != slash{}) || (len(pattern) > 2 && pattern[2] != '/') { + return nil, "", errors.New("** may only be adjacent to '/'") + } + pattern = pattern[2:] + g.elems = append(g.elems, starStar{}) + break + } + pattern = pattern[1:] + g.elems = append(g.elems, star{}) + + case '?': + pattern = pattern[1:] + g.elems = append(g.elems, anyChar{}) + + case '{': + var gs group + for pattern[0] != '}' { + pattern = pattern[1:] + g, pat, err := parse(pattern, true) + if err != nil { + return nil, "", err + } + if len(pat) == 0 { + return nil, "", errors.New("unmatched '{'") + } + pattern = pat + gs = append(gs, g) + } + pattern = pattern[1:] + g.elems = append(g.elems, gs) + + case '}', ',': + if nested { + return g, pattern, nil + } + pattern = g.parseLiteral(pattern, false) + + case '[': + pattern = pattern[1:] + if len(pattern) == 0 { + return nil, "", errBadRange + } + negate := false + if pattern[0] == '!' { + pattern = pattern[1:] + negate = true + } + low, sz, err := readRangeRune(pattern) + if err != nil { + return nil, "", err + } + pattern = pattern[sz:] + if len(pattern) == 0 || pattern[0] != '-' { + return nil, "", errBadRange + } + pattern = pattern[1:] + high, sz, err := readRangeRune(pattern) + if err != nil { + return nil, "", err + } + pattern = pattern[sz:] + if len(pattern) == 0 || pattern[0] != ']' { + return nil, "", errBadRange + } + pattern = pattern[1:] + g.elems = append(g.elems, charRange{negate, low, high}) + + default: + pattern = g.parseLiteral(pattern, nested) + } + } + return g, "", nil +} + +// helper for decoding a rune in range elements, e.g. [a-z] +func readRangeRune(input string) (rune, int, error) { + r, sz := utf8.DecodeRuneInString(input) + var err error + if r == utf8.RuneError { + // See the documentation for DecodeRuneInString. + switch sz { + case 0: + err = errBadRange + case 1: + err = errInvalidUTF8 + } + } + return r, sz, err +} + +var ( + errBadRange = errors.New("'[' patterns must be of the form [x-y]") + errInvalidUTF8 = errors.New("invalid UTF-8 encoding") +) + +func (g *Glob) parseLiteral(pattern string, nested bool) string { + var specialChars string + if nested { + specialChars = "*?{[/}," + } else { + specialChars = "*?{[/" + } + end := strings.IndexAny(pattern, specialChars) + if end == -1 { + end = len(pattern) + } + g.elems = append(g.elems, literal(pattern[:end])) + return pattern[end:] +} + +func (g *Glob) String() string { + var b strings.Builder + for _, e := range g.elems { + fmt.Fprint(&b, e) + } + return b.String() +} + +// element holds a glob pattern element, as defined below. +type element fmt.Stringer + +// element types. +type ( + slash struct{} // One or more '/' separators + literal string // string literal, not containing /, *, ?, {}, or [] + star struct{} // * + anyChar struct{} // ? + starStar struct{} // ** + group []*Glob // {foo, bar, ...} grouping + charRange struct { // [a-z] character range + negate bool + low, high rune + } +) + +func (s slash) String() string { return "/" } +func (l literal) String() string { return string(l) } +func (s star) String() string { return "*" } +func (a anyChar) String() string { return "?" } +func (s starStar) String() string { return "**" } +func (g group) String() string { + var parts []string + for _, g := range g { + parts = append(parts, g.String()) + } + return "{" + strings.Join(parts, ",") + "}" +} +func (r charRange) String() string { + return "[" + string(r.low) + "-" + string(r.high) + "]" +} + +// Match reports whether the input string matches the glob pattern. +func (g *Glob) Match(input string) bool { + return match(g.elems, input) +} + +func match(elems []element, input string) (ok bool) { + var elem interface{} + for len(elems) > 0 { + elem, elems = elems[0], elems[1:] + switch elem := elem.(type) { + case slash: + if len(input) == 0 || input[0] != '/' { + return false + } + for input[0] == '/' { + input = input[1:] + } + + case starStar: + // Special cases: + // - **/a matches "a" + // - **/ matches everything + // + // Note that if ** is followed by anything, it must be '/' (this is + // enforced by Parse). + if len(elems) > 0 { + elems = elems[1:] + } + + // A trailing ** matches anything. + if len(elems) == 0 { + return true + } + + // Backtracking: advance pattern segments until the remaining pattern + // elements match. + for len(input) != 0 { + if match(elems, input) { + return true + } + _, input = split(input) + } + return false + + case literal: + if !strings.HasPrefix(input, string(elem)) { + return false + } + input = input[len(elem):] + + case star: + var segInput string + segInput, input = split(input) + + elemEnd := len(elems) + for i, e := range elems { + if e == (slash{}) { + elemEnd = i + break + } + } + segElems := elems[:elemEnd] + elems = elems[elemEnd:] + + // A trailing * matches the entire segment. + if len(segElems) == 0 { + break + } + + // Backtracking: advance characters until remaining subpattern elements + // match. + matched := false + for i := range segInput { + if match(segElems, segInput[i:]) { + matched = true + break + } + } + if !matched { + return false + } + + case anyChar: + if len(input) == 0 || input[0] == '/' { + return false + } + input = input[1:] + + case group: + // Append remaining pattern elements to each group member looking for a + // match. + var branch []element + for _, m := range elem { + branch = branch[:0] + branch = append(branch, m.elems...) + branch = append(branch, elems...) + if match(branch, input) { + return true + } + } + return false + + case charRange: + if len(input) == 0 || input[0] == '/' { + return false + } + c, sz := utf8.DecodeRuneInString(input) + if c < elem.low || c > elem.high { + return false + } + input = input[sz:] + + default: + panic(fmt.Sprintf("segment type %T not implemented", elem)) + } + } + + return len(input) == 0 +} + +// split returns the portion before and after the first slash +// (or sequence of consecutive slashes). If there is no slash +// it returns (input, nil). +func split(input string) (first, rest string) { + i := strings.IndexByte(input, '/') + if i < 0 { + return input, "" + } + first = input[:i] + for j := i; j < len(input); j++ { + if input[j] != '/' { + return first, input[j:] + } + } + return first, "" +} diff --git a/gopls/internal/lsp/glob/glob_test.go b/gopls/internal/lsp/glob/glob_test.go new file mode 100644 index 00000000000..df602624d9c --- /dev/null +++ b/gopls/internal/lsp/glob/glob_test.go @@ -0,0 +1,118 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package glob_test + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/glob" +) + +func TestParseErrors(t *testing.T) { + tests := []string{ + "***", + "ab{c", + "[]", + "[a-]", + "ab{c{d}", + } + + for _, test := range tests { + _, err := glob.Parse(test) + if err == nil { + t.Errorf("Parse(%q) succeeded unexpectedly", test) + } + } +} + +func TestMatch(t *testing.T) { + tests := []struct { + pattern, input string + want bool + }{ + // Basic cases. + {"", "", true}, + {"", "a", false}, + {"", "/", false}, + {"abc", "abc", true}, + + // ** behavior + {"**", "abc", true}, + {"**/abc", "abc", true}, + {"**", "abc/def", true}, + {"{a/**/c,a/**/d}", "a/b/c", true}, + {"{a/**/c,a/**/d}", "a/b/c/d", true}, + {"{a/**/c,a/**/e}", "a/b/c/d", false}, + {"{a/**/c,a/**/e,a/**/d}", "a/b/c/d", true}, + {"{/a/**/c,a/**/e,a/**/d}", "a/b/c/d", true}, + {"{/a/**/c,a/**/e,a/**/d}", "/a/b/c/d", false}, + {"{/a/**/c,a/**/e,a/**/d}", "/a/b/c", true}, + {"{/a/**/e,a/**/e,a/**/d}", "/a/b/c", false}, + + // * and ? behavior + {"/*", "/a", true}, + {"*", "foo", true}, + {"*o", "foo", true}, + {"*o", "foox", false}, + {"f*o", "foo", true}, + {"f*o", "fo", true}, + {"fo?", "foo", true}, + {"fo?", "fox", true}, + {"fo?", "fooo", false}, + {"fo?", "fo", false}, + {"?", "a", true}, + {"?", "ab", false}, + {"?", "", false}, + {"*?", "", false}, + {"?b", "ab", true}, + {"?c", "ab", false}, + + // {} behavior + {"ab{c,d}e", "abce", true}, + {"ab{c,d}e", "abde", true}, + {"ab{c,d}e", "abxe", false}, + {"ab{c,d}e", "abe", false}, + {"{a,b}c", "ac", true}, + {"{a,b}c", "bc", true}, + {"{a,b}c", "ab", false}, + {"a{b,c}", "ab", true}, + {"a{b,c}", "ac", true}, + {"a{b,c}", "bc", false}, + {"ab{c{1,2},d}e", "abc1e", true}, + {"ab{c{1,2},d}e", "abde", true}, + {"ab{c{1,2},d}e", "abc1f", false}, + {"ab{c{1,2},d}e", "abce", false}, + {"ab{c[}-~]}d", "abc}d", true}, + {"ab{c[}-~]}d", "abc~d", true}, + {"ab{c[}-~],y}d", "abcxd", false}, + {"ab{c[}-~],y}d", "abyd", true}, + {"ab{c[}-~],y}d", "abd", false}, + {"{a/b/c,d/e/f}", "a/b/c", true}, + {"/ab{/c,d}e", "/ab/ce", true}, + {"/ab{/c,d}e", "/ab/cf", false}, + + // [-] behavior + {"[a-c]", "a", true}, + {"[a-c]", "b", true}, + {"[a-c]", "c", true}, + {"[a-c]", "d", false}, + {"[a-c]", " ", false}, + + // Realistic examples. + {"**/*.{ts,js}", "path/to/foo.ts", true}, + {"**/*.{ts,js}", "path/to/foo.js", true}, + {"**/*.{ts,js}", "path/to/foo.go", false}, + } + + for _, test := range tests { + g, err := glob.Parse(test.pattern) + if err != nil { + t.Fatalf("New(%q) failed unexpectedly: %v", test.pattern, err) + } + if got := g.Match(test.input); got != test.want { + t.Errorf("New(%q).Match(%q) = %t, want %t", test.pattern, test.input, got, test.want) + } + } +} diff --git a/internal/lsp/helper/README.md b/gopls/internal/lsp/helper/README.md similarity index 100% rename from internal/lsp/helper/README.md rename to gopls/internal/lsp/helper/README.md diff --git a/internal/lsp/helper/helper.go b/gopls/internal/lsp/helper/helper.go similarity index 99% rename from internal/lsp/helper/helper.go rename to gopls/internal/lsp/helper/helper.go index cadda0246be..391d75adef0 100644 --- a/internal/lsp/helper/helper.go +++ b/gopls/internal/lsp/helper/helper.go @@ -56,7 +56,7 @@ package lsp import ( "context" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) {{range $key, $v := .Stuff}} diff --git a/gopls/internal/lsp/highlight.go b/gopls/internal/lsp/highlight.go new file mode 100644 index 00000000000..290444ec962 --- /dev/null +++ b/gopls/internal/lsp/highlight.go @@ -0,0 +1,45 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" +) + +func (s *Server) documentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + + if snapshot.View().FileKind(fh) == source.Tmpl { + return template.Highlight(ctx, snapshot, fh, params.Position) + } + + rngs, err := source.Highlight(ctx, snapshot, fh, params.Position) + if err != nil { + event.Error(ctx, "no highlight", err, tag.URI.Of(params.TextDocument.URI)) + } + return toProtocolHighlight(rngs), nil +} + +func toProtocolHighlight(rngs []protocol.Range) []protocol.DocumentHighlight { + result := make([]protocol.DocumentHighlight, 0, len(rngs)) + kind := protocol.Text + for _, rng := range rngs { + result = append(result, protocol.DocumentHighlight{ + Kind: kind, + Range: rng, + }) + } + return result +} diff --git a/gopls/internal/lsp/hover.go b/gopls/internal/lsp/hover.go new file mode 100644 index 00000000000..2d1aae7d5b5 --- /dev/null +++ b/gopls/internal/lsp/hover.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/mod" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" + "golang.org/x/tools/gopls/internal/lsp/work" +) + +func (s *Server) hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + switch snapshot.View().FileKind(fh) { + case source.Mod: + return mod.Hover(ctx, snapshot, fh, params.Position) + case source.Go: + return source.Hover(ctx, snapshot, fh, params.Position) + case source.Tmpl: + return template.Hover(ctx, snapshot, fh, params.Position) + case source.Work: + return work.Hover(ctx, snapshot, fh, params.Position) + } + return nil, nil +} diff --git a/gopls/internal/lsp/implementation.go b/gopls/internal/lsp/implementation.go new file mode 100644 index 00000000000..0eb82652e9e --- /dev/null +++ b/gopls/internal/lsp/implementation.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +func (s *Server) implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + return source.Implementation(ctx, snapshot, fh, params.Position) +} diff --git a/gopls/internal/lsp/inlay_hint.go b/gopls/internal/lsp/inlay_hint.go new file mode 100644 index 00000000000..6aceecb0d33 --- /dev/null +++ b/gopls/internal/lsp/inlay_hint.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +func (s *Server) inlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + return source.InlayHint(ctx, snapshot, fh, params.Range) +} diff --git a/gopls/internal/lsp/link.go b/gopls/internal/lsp/link.go new file mode 100644 index 00000000000..2713715cd64 --- /dev/null +++ b/gopls/internal/lsp/link.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/token" + "net/url" + "regexp" + "strings" + "sync" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" +) + +func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLinkParams) (links []protocol.DocumentLink, err error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + switch snapshot.View().FileKind(fh) { + case source.Mod: + links, err = modLinks(ctx, snapshot, fh) + case source.Go: + links, err = goLinks(ctx, snapshot, fh) + } + // Don't return errors for document links. + if err != nil { + event.Error(ctx, "failed to compute document links", err, tag.URI.Of(fh.URI())) + return nil, nil + } + return links, nil +} + +func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + return nil, err + } + + var links []protocol.DocumentLink + for _, req := range pm.File.Require { + if req.Syntax == nil { + continue + } + // See golang/go#36998: don't link to modules matching GOPRIVATE. + if snapshot.View().IsGoPrivatePath(req.Mod.Path) { + continue + } + dep := []byte(req.Mod.Path) + start, end := req.Syntax.Start.Byte, req.Syntax.End.Byte + i := bytes.Index(pm.Mapper.Content[start:end], dep) + if i == -1 { + continue + } + // Shift the start position to the location of the + // dependency within the require statement. + target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "") + l, err := toProtocolLink(pm.Mapper, target, start+i, start+i+len(dep)) + if err != nil { + return nil, err + } + links = append(links, l) + } + // TODO(ridersofrohan): handle links for replace and exclude directives. + if syntax := pm.File.Syntax; syntax == nil { + return links, nil + } + + // Get all the links that are contained in the comments of the file. + urlRegexp := snapshot.View().Options().URLRegexp + for _, expr := range pm.File.Syntax.Stmt { + comments := expr.Comment() + if comments == nil { + continue + } + for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} { + for _, comment := range section { + l, err := findLinksInString(urlRegexp, comment.Token, comment.Start.Byte, pm.Mapper) + if err != nil { + return nil, err + } + links = append(links, l...) + } + } + } + return links, nil +} + +// goLinks returns the set of hyperlink annotations for the specified Go file. +func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) { + view := snapshot.View() + + pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) + if err != nil { + return nil, err + } + + var links []protocol.DocumentLink + + // Create links for import specs. + if view.Options().ImportShortcut.ShowLinks() { + + // If links are to pkg.go.dev, append module version suffixes. + // This requires the import map from the package metadata. Ignore errors. + var depsByImpPath map[source.ImportPath]source.PackageID + if strings.ToLower(view.Options().LinkTarget) == "pkg.go.dev" { + if metas, _ := snapshot.MetadataForFile(ctx, fh.URI()); len(metas) > 0 { + depsByImpPath = metas[0].DepsByImpPath // 0 => narrowest package + } + } + + for _, imp := range pgf.File.Imports { + importPath := source.UnquoteImportPath(imp) + if importPath == "" { + continue // bad import + } + // See golang/go#36998: don't link to modules matching GOPRIVATE. + if view.IsGoPrivatePath(string(importPath)) { + continue + } + + urlPath := string(importPath) + + // For pkg.go.dev, append module version suffix to package import path. + if m := snapshot.Metadata(depsByImpPath[importPath]); m != nil && m.Module != nil && m.Module.Path != "" && m.Module.Version != "" { + urlPath = strings.Replace(urlPath, m.Module.Path, m.Module.Path+"@"+m.Module.Version, 1) + } + + start, end, err := safetoken.Offsets(pgf.Tok, imp.Path.Pos(), imp.Path.End()) + if err != nil { + return nil, err + } + targetURL := source.BuildLink(view.Options().LinkTarget, urlPath, "") + // Account for the quotation marks in the positions. + l, err := toProtocolLink(pgf.Mapper, targetURL, start+len(`"`), end-len(`"`)) + if err != nil { + return nil, err + } + links = append(links, l) + } + } + + urlRegexp := snapshot.View().Options().URLRegexp + + // Gather links found in string literals. + var str []*ast.BasicLit + ast.Inspect(pgf.File, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.ImportSpec: + return false // don't process import strings again + case *ast.BasicLit: + if n.Kind == token.STRING { + str = append(str, n) + } + } + return true + }) + for _, s := range str { + strOffset, err := safetoken.Offset(pgf.Tok, s.Pos()) + if err != nil { + return nil, err + } + l, err := findLinksInString(urlRegexp, s.Value, strOffset, pgf.Mapper) + if err != nil { + return nil, err + } + links = append(links, l...) + } + + // Gather links found in comments. + for _, commentGroup := range pgf.File.Comments { + for _, comment := range commentGroup.List { + commentOffset, err := safetoken.Offset(pgf.Tok, comment.Pos()) + if err != nil { + return nil, err + } + l, err := findLinksInString(urlRegexp, comment.Text, commentOffset, pgf.Mapper) + if err != nil { + return nil, err + } + links = append(links, l...) + } + } + + return links, nil +} + +// acceptedSchemes controls the schemes that URLs must have to be shown to the +// user. Other schemes can't be opened by LSP clients, so linkifying them is +// distracting. See golang/go#43990. +var acceptedSchemes = map[string]bool{ + "http": true, + "https": true, +} + +// urlRegexp is the user-supplied regular expression to match URL. +// srcOffset is the start offset of 'src' within m's file. +func findLinksInString(urlRegexp *regexp.Regexp, src string, srcOffset int, m *protocol.Mapper) ([]protocol.DocumentLink, error) { + var links []protocol.DocumentLink + for _, index := range urlRegexp.FindAllIndex([]byte(src), -1) { + start, end := index[0], index[1] + link := src[start:end] + linkURL, err := url.Parse(link) + // Fallback: Linkify IP addresses as suggested in golang/go#18824. + if err != nil { + linkURL, err = url.Parse("//" + link) + // Not all potential links will be valid, so don't return this error. + if err != nil { + continue + } + } + // If the URL has no scheme, use https. + if linkURL.Scheme == "" { + linkURL.Scheme = "https" + } + if !acceptedSchemes[linkURL.Scheme] { + continue + } + + l, err := toProtocolLink(m, linkURL.String(), srcOffset+start, srcOffset+end) + if err != nil { + return nil, err + } + links = append(links, l) + } + // Handle golang/go#1234-style links. + r := getIssueRegexp() + for _, index := range r.FindAllIndex([]byte(src), -1) { + start, end := index[0], index[1] + matches := r.FindStringSubmatch(src) + if len(matches) < 4 { + continue + } + org, repo, number := matches[1], matches[2], matches[3] + targetURL := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number) + l, err := toProtocolLink(m, targetURL, srcOffset+start, srcOffset+end) + if err != nil { + return nil, err + } + links = append(links, l) + } + return links, nil +} + +func getIssueRegexp() *regexp.Regexp { + once.Do(func() { + issueRegexp = regexp.MustCompile(`(\w+)/([\w-]+)#([0-9]+)`) + }) + return issueRegexp +} + +var ( + once sync.Once + issueRegexp *regexp.Regexp +) + +func toProtocolLink(m *protocol.Mapper, targetURL string, start, end int) (protocol.DocumentLink, error) { + rng, err := m.OffsetRange(start, end) + if err != nil { + return protocol.DocumentLink{}, err + } + return protocol.DocumentLink{ + Range: rng, + Target: targetURL, + }, nil +} diff --git a/gopls/internal/lsp/lsp_test.go b/gopls/internal/lsp/lsp_test.go new file mode 100644 index 00000000000..6d15a46bd00 --- /dev/null +++ b/gopls/internal/lsp/lsp_test.go @@ -0,0 +1,1335 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/testenv" +) + +func TestMain(m *testing.M) { + bug.PanicOnBugs = true + testenv.ExitIfSmallMachine() + + // Set the global exporter to nil so that we don't log to stderr. This avoids + // a lot of misleading noise in test output. + // + // TODO(rfindley): investigate whether we can/should capture logs scoped to + // individual tests by passing in a context with a local exporter. + event.SetExporter(nil) + + os.Exit(m.Run()) +} + +// TestLSP runs the marker tests in files beneath testdata/ using +// implementations of each of the marker operations (e.g. @codelens) that +// make LSP RPCs (e.g. textDocument/codeLens) to a gopls server. +func TestLSP(t *testing.T) { + tests.RunTests(t, "testdata", true, testLSP) +} + +func testLSP(t *testing.T, datum *tests.Data) { + ctx := tests.Context(t) + + session := cache.NewSession(ctx, cache.New(nil, nil), nil) + options := source.DefaultOptions().Clone() + tests.DefaultOptions(options) + session.SetOptions(options) + options.SetEnvSlice(datum.Config.Env) + view, snapshot, release, err := session.NewView(ctx, datum.Config.Dir, span.URIFromPath(datum.Config.Dir), options) + if err != nil { + t.Fatal(err) + } + + defer session.RemoveView(view) + + // Enable type error analyses for tests. + // TODO(golang/go#38212): Delete this once they are enabled by default. + tests.EnableAllAnalyzers(options) + session.SetViewOptions(ctx, view, options) + + // Enable all inlay hints for tests. + tests.EnableAllInlayHints(options) + + // Only run the -modfile specific tests in module mode with Go 1.14 or above. + datum.ModfileFlagAvailable = len(snapshot.ModFiles()) > 0 && testenv.Go1Point() >= 14 + release() + + var modifications []source.FileModification + for filename, content := range datum.Config.Overlay { + if filepath.Ext(filename) != ".go" { + continue + } + modifications = append(modifications, source.FileModification{ + URI: span.URIFromPath(filename), + Action: source.Open, + Version: -1, + Text: content, + LanguageID: "go", + }) + } + if err := session.ModifyFiles(ctx, modifications); err != nil { + t.Fatal(err) + } + r := &runner{ + data: datum, + ctx: ctx, + normalizers: tests.CollectNormalizers(datum.Exported), + editRecv: make(chan map[span.URI]string, 1), + } + + r.server = NewServer(session, testClient{runner: r}) + tests.Run(t, r, datum) +} + +// runner implements tests.Tests by making LSP RPCs to a gopls server. +type runner struct { + server *Server + data *tests.Data + diagnostics map[span.URI][]*source.Diagnostic + ctx context.Context + normalizers []tests.Normalizer + editRecv chan map[span.URI]string +} + +// testClient stubs any client functions that may be called by LSP functions. +type testClient struct { + protocol.Client + runner *runner +} + +func (c testClient) Close() error { + return nil +} + +// Trivially implement PublishDiagnostics so that we can call +// server.publishReports below to de-dup sent diagnostics. +func (c testClient) PublishDiagnostics(context.Context, *protocol.PublishDiagnosticsParams) error { + return nil +} + +func (c testClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error { + return nil +} + +func (c testClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) { + res, err := applyTextDocumentEdits(c.runner, params.Edit.DocumentChanges) + if err != nil { + return nil, err + } + c.runner.editRecv <- res + return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil +} + +func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) { + mapper, err := r.data.Mapper(spn.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := mapper.SpanLocation(spn) + if err != nil { + t.Fatalf("failed for %v: %v", spn, err) + } + + params := &protocol.CallHierarchyPrepareParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + + items, err := r.server.PrepareCallHierarchy(r.ctx, params) + if err != nil { + t.Fatal(err) + } + if len(items) == 0 { + t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range) + } + + callLocation := protocol.Location{ + URI: items[0].URI, + Range: items[0].Range, + } + if callLocation != loc { + t.Fatalf("expected server.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation) + } + + incomingCalls, err := r.server.IncomingCalls(r.ctx, &protocol.CallHierarchyIncomingCallsParams{Item: items[0]}) + if err != nil { + t.Error(err) + } + var incomingCallItems []protocol.CallHierarchyItem + for _, item := range incomingCalls { + incomingCallItems = append(incomingCallItems, item.From) + } + msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls) + if msg != "" { + t.Error(fmt.Sprintf("incoming calls: %s", msg)) + } + + outgoingCalls, err := r.server.OutgoingCalls(r.ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: items[0]}) + if err != nil { + t.Error(err) + } + var outgoingCallItems []protocol.CallHierarchyItem + for _, item := range outgoingCalls { + outgoingCallItems = append(outgoingCallItems, item.To) + } + msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls) + if msg != "" { + t.Error(fmt.Sprintf("outgoing calls: %s", msg)) + } +} + +func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) { + if !strings.HasSuffix(uri.Filename(), "go.mod") { + return + } + got, err := r.server.codeLens(r.ctx, &protocol.CodeLensParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.DocumentURI(uri), + }, + }) + if err != nil { + t.Fatal(err) + } + if diff := tests.DiffCodeLens(uri, want, got); diff != "" { + t.Errorf("%s: %s", uri, diff) + } +} + +func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) { + // Get the diagnostics for this view if we have not done it before. + v := r.server.session.View(r.data.Config.Dir) + r.collectDiagnostics(v) + tests.CompareDiagnostics(t, uri, want, r.diagnostics[uri]) +} + +func (r *runner) FoldingRanges(t *testing.T, spn span.Span) { + uri := spn.URI() + view, err := r.server.session.ViewOf(uri) + if err != nil { + t.Fatal(err) + } + original := view.Options() + modified := original + defer r.server.session.SetViewOptions(r.ctx, view, original) + + for _, test := range []struct { + lineFoldingOnly bool + prefix string + }{ + {false, "foldingRange"}, + {true, "foldingRange-lineFolding"}, + } { + modified.LineFoldingOnly = test.lineFoldingOnly + view, err = r.server.session.SetViewOptions(r.ctx, view, modified) + if err != nil { + t.Error(err) + continue + } + ranges, err := r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + t.Error(err) + continue + } + r.foldingRanges(t, test.prefix, uri, ranges) + } +} + +func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, ranges []protocol.FoldingRange) { + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + // Fold all ranges. + nonOverlapping := nonOverlappingRanges(ranges) + for i, rngs := range nonOverlapping { + got, err := foldRanges(m, string(m.Content), rngs) + if err != nil { + t.Error(err) + continue + } + tag := fmt.Sprintf("%s-%d", prefix, i) + want := string(r.data.Golden(t, tag, uri.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + + if want != got { + t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got) + } + } + + // Filter by kind. + kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment} + for _, kind := range kinds { + var kindOnly []protocol.FoldingRange + for _, fRng := range ranges { + if fRng.Kind == string(kind) { + kindOnly = append(kindOnly, fRng) + } + } + + nonOverlapping := nonOverlappingRanges(kindOnly) + for i, rngs := range nonOverlapping { + got, err := foldRanges(m, string(m.Content), rngs) + if err != nil { + t.Error(err) + continue + } + tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i) + want := string(r.data.Golden(t, tag, uri.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + + if want != got { + t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got) + } + } + + } +} + +func nonOverlappingRanges(ranges []protocol.FoldingRange) (res [][]protocol.FoldingRange) { + for _, fRng := range ranges { + setNum := len(res) + for i := 0; i < len(res); i++ { + canInsert := true + for _, rng := range res[i] { + if conflict(rng, fRng) { + canInsert = false + break + } + } + if canInsert { + setNum = i + break + } + } + if setNum == len(res) { + res = append(res, []protocol.FoldingRange{}) + } + res[setNum] = append(res[setNum], fRng) + } + return res +} + +func conflict(a, b protocol.FoldingRange) bool { + // a start position is <= b start positions + return (a.StartLine < b.StartLine || (a.StartLine == b.StartLine && a.StartCharacter <= b.StartCharacter)) && + (a.EndLine > b.StartLine || (a.EndLine == b.StartLine && a.EndCharacter > b.StartCharacter)) +} + +func foldRanges(m *protocol.Mapper, contents string, ranges []protocol.FoldingRange) (string, error) { + foldedText := "<>" + res := contents + // Apply the edits from the end of the file forward + // to preserve the offsets + // TODO(adonovan): factor to use diff.ApplyEdits, which validates the input. + for i := len(ranges) - 1; i >= 0; i-- { + r := ranges[i] + start, err := m.PositionPoint(protocol.Position{Line: r.StartLine, Character: r.StartCharacter}) + if err != nil { + return "", err + } + end, err := m.PositionPoint(protocol.Position{Line: r.EndLine, Character: r.EndCharacter}) + if err != nil { + return "", err + } + res = res[:start.Offset()] + foldedText + res[end.Offset():] + } + return res, nil +} + +func (r *runner) Format(t *testing.T, spn span.Span) { + uri := spn.URI() + filename := uri.Filename() + gofmted := string(r.data.Golden(t, "gofmt", filename, func() ([]byte, error) { + cmd := exec.Command("gofmt", filename) + out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files + return out, nil + })) + + edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + if gofmted != "" { + t.Error(err) + } + return + } + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + got, _, err := source.ApplyProtocolEdits(m, edits) + if err != nil { + t.Error(err) + } + if diff := compare.Text(gofmted, got); diff != "" { + t.Errorf("format failed for %s (-want +got):\n%s", filename, diff) + } +} + +func (r *runner) SemanticTokens(t *testing.T, spn span.Span) { + uri := spn.URI() + filename := uri.Filename() + // this is called solely for coverage in semantic.go + _, err := r.server.semanticTokensFull(r.ctx, &protocol.SemanticTokensParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + t.Errorf("%v for %s", err, filename) + } + _, err = r.server.semanticTokensRange(r.ctx, &protocol.SemanticTokensRangeParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + // any legal range. Just to exercise the call. + Range: protocol.Range{ + Start: protocol.Position{ + Line: 0, + Character: 0, + }, + End: protocol.Position{ + Line: 2, + Character: 0, + }, + }, + }) + if err != nil { + t.Errorf("%v for Range %s", err, filename) + } +} + +func (r *runner) Import(t *testing.T, spn span.Span) { + // Invokes textDocument/codeAction and applies all the "goimports" edits. + + uri := spn.URI() + filename := uri.Filename() + actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + t.Fatal(err) + } + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + got := string(m.Content) + if len(actions) > 0 { + res, err := applyTextDocumentEdits(r, actions[0].Edit.DocumentChanges) + if err != nil { + t.Fatal(err) + } + got = res[uri] + } + want := string(r.data.Golden(t, "goimports", filename, func() ([]byte, error) { + return []byte(got), nil + })) + + if d := compare.Text(want, got); d != "" { + t.Errorf("import failed for %s:\n%s", filename, d) + } +} + +func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []tests.SuggestedFix, expectedActions int) { + uri := spn.URI() + view, err := r.server.session.ViewOf(uri) + if err != nil { + t.Fatal(err) + } + + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + rng, err := m.SpanRange(spn) + if err != nil { + t.Fatal(err) + } + // Get the diagnostics for this view if we have not done it before. + r.collectDiagnostics(view) + var diagnostics []protocol.Diagnostic + for _, d := range r.diagnostics[uri] { + // Compare the start positions rather than the entire range because + // some diagnostics have a range with the same start and end position (8:1-8:1). + // The current marker functionality prevents us from having a range of 0 length. + if protocol.ComparePosition(d.Range.Start, rng.Start) == 0 { + diagnostics = append(diagnostics, toProtocolDiagnostics([]*source.Diagnostic{d})...) + break + } + } + var codeActionKinds []protocol.CodeActionKind + for _, k := range actionKinds { + codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k.ActionKind)) + } + allActions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Range: rng, + Context: protocol.CodeActionContext{ + Only: codeActionKinds, + Diagnostics: diagnostics, + }, + }) + if err != nil { + t.Fatalf("CodeAction %s failed: %v", spn, err) + } + var actions []protocol.CodeAction + for _, action := range allActions { + for _, fix := range actionKinds { + if strings.Contains(action.Title, fix.Title) { + actions = append(actions, action) + break + } + } + + } + if len(actions) != expectedActions { + var summaries []string + for _, a := range actions { + summaries = append(summaries, fmt.Sprintf("%q (%s)", a.Title, a.Kind)) + } + t.Fatalf("CodeAction(...): got %d code actions (%v), want %d", len(actions), summaries, expectedActions) + } + action := actions[0] + var match bool + for _, k := range codeActionKinds { + if action.Kind == k { + match = true + break + } + } + if !match { + t.Fatalf("unexpected kind for code action %s, got %v, want one of %v", action.Title, action.Kind, codeActionKinds) + } + var res map[span.URI]string + if cmd := action.Command; cmd != nil { + _, err := r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ + Command: action.Command.Command, + Arguments: action.Command.Arguments, + }) + if err != nil { + t.Fatalf("error converting command %q to edits: %v", action.Command.Command, err) + } + res = <-r.editRecv + } else { + res, err = applyTextDocumentEdits(r, action.Edit.DocumentChanges) + if err != nil { + t.Fatal(err) + } + } + for u, got := range res { + want := string(r.data.Golden(t, "suggestedfix_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + if want != got { + t.Errorf("suggested fixes failed for %s:\n%s", u.Filename(), compare.Text(want, got)) + } + } +} + +func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) { + uri := start.URI() + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + spn := span.New(start.URI(), start.Start(), end.End()) + rng, err := m.SpanRange(spn) + if err != nil { + t.Fatal(err) + } + actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Range: rng, + Context: protocol.CodeActionContext{ + Only: []protocol.CodeActionKind{"refactor.extract"}, + }, + }) + if err != nil { + t.Fatal(err) + } + var actions []protocol.CodeAction + for _, action := range actionsRaw { + if action.Command.Title == "Extract function" { + actions = append(actions, action) + } + } + // Hack: We assume that we only get one code action per range. + // TODO(rstambler): Support multiple code actions per test. + if len(actions) == 0 || len(actions) > 1 { + t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions)) + } + _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ + Command: actions[0].Command.Command, + Arguments: actions[0].Command.Arguments, + }) + if err != nil { + t.Fatal(err) + } + res := <-r.editRecv + for u, got := range res { + want := string(r.data.Golden(t, "functionextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + if want != got { + t.Errorf("function extraction failed for %s:\n%s", u.Filename(), compare.Text(want, got)) + } + } +} + +func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) { + uri := start.URI() + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + spn := span.New(start.URI(), start.Start(), end.End()) + rng, err := m.SpanRange(spn) + if err != nil { + t.Fatal(err) + } + actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Range: rng, + Context: protocol.CodeActionContext{ + Only: []protocol.CodeActionKind{"refactor.extract"}, + }, + }) + if err != nil { + t.Fatal(err) + } + var actions []protocol.CodeAction + for _, action := range actionsRaw { + if action.Command.Title == "Extract method" { + actions = append(actions, action) + } + } + // Hack: We assume that we only get one matching code action per range. + // TODO(rstambler): Support multiple code actions per test. + if len(actions) == 0 || len(actions) > 1 { + t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions)) + } + _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ + Command: actions[0].Command.Command, + Arguments: actions[0].Command.Arguments, + }) + if err != nil { + t.Fatal(err) + } + res := <-r.editRecv + for u, got := range res { + want := string(r.data.Golden(t, "methodextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + if want != got { + t.Errorf("method extraction failed for %s:\n%s", u.Filename(), compare.Text(want, got)) + } + } +} + +// TODO(rfindley): This handler needs more work. The output is still a bit hard +// to read (range diffs do not format nicely), and it is too entangled with hover. +func (r *runner) Definition(t *testing.T, _ span.Span, d tests.Definition) { + sm, err := r.data.Mapper(d.Src.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := sm.SpanLocation(d.Src) + if err != nil { + t.Fatalf("failed for %v: %v", d.Src, err) + } + tdpp := protocol.LocationTextDocumentPositionParams(loc) + var got []protocol.Location + var hover *protocol.Hover + if d.IsType { + params := &protocol.TypeDefinitionParams{ + TextDocumentPositionParams: tdpp, + } + got, err = r.server.TypeDefinition(r.ctx, params) + } else { + params := &protocol.DefinitionParams{ + TextDocumentPositionParams: tdpp, + } + got, err = r.server.Definition(r.ctx, params) + if err != nil { + t.Fatalf("failed for %v: %+v", d.Src, err) + } + v := &protocol.HoverParams{ + TextDocumentPositionParams: tdpp, + } + hover, err = r.server.Hover(r.ctx, v) + } + if err != nil { + t.Fatalf("failed for %v: %v", d.Src, err) + } + dm, err := r.data.Mapper(d.Def.URI()) + if err != nil { + t.Fatal(err) + } + def, err := dm.SpanLocation(d.Def) + if err != nil { + t.Fatal(err) + } + if !d.OnlyHover { + want := []protocol.Location{def} + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("Definition(%s) mismatch (-want +got):\n%s", d.Src, diff) + } + } + didSomething := false + if hover != nil { + didSomething = true + tag := fmt.Sprintf("%s-hoverdef", d.Name) + want := string(r.data.Golden(t, tag, d.Src.URI().Filename(), func() ([]byte, error) { + return []byte(hover.Contents.Value), nil + })) + got := hover.Contents.Value + if diff := tests.DiffMarkdown(want, got); diff != "" { + t.Errorf("%s: markdown mismatch:\n%s", d.Src, diff) + } + } + if !d.OnlyHover { + didSomething = true + locURI := got[0].URI.SpanURI() + lm, err := r.data.Mapper(locURI) + if err != nil { + t.Fatal(err) + } + if def, err := lm.LocationSpan(got[0]); err != nil { + t.Fatalf("failed for %v: %v", got[0], err) + } else if def != d.Def { + t.Errorf("for %v got %v want %v", d.Src, def, d.Def) + } + } + if !didSomething { + t.Errorf("no tests ran for %s", d.Src.URI()) + } +} + +func (r *runner) Implementation(t *testing.T, spn span.Span, wantSpans []span.Span) { + sm, err := r.data.Mapper(spn.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := sm.SpanLocation(spn) + if err != nil { + t.Fatal(err) + } + gotImpls, err := r.server.Implementation(r.ctx, &protocol.ImplementationParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + }) + if err != nil { + t.Fatalf("Server.Implementation(%s): %v", spn, err) + } + gotLocs, err := tests.LocationsToSpans(r.data, gotImpls) + if err != nil { + t.Fatal(err) + } + sanitize := func(s string) string { + return strings.ReplaceAll(s, r.data.Config.Dir, "gopls/internal/lsp/testdata") + } + want := sanitize(tests.SortAndFormatSpans(wantSpans)) + got := sanitize(tests.SortAndFormatSpans(gotLocs)) + if got != want { + t.Errorf("implementations(%s):\n%s", sanitize(fmt.Sprint(spn)), diff.Unified("want", "got", want, got)) + } +} + +func (r *runner) Highlight(t *testing.T, src span.Span, spans []span.Span) { + m, err := r.data.Mapper(src.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := m.SpanLocation(src) + if err != nil { + t.Fatal(err) + } + params := &protocol.DocumentHighlightParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + highlights, err := r.server.DocumentHighlight(r.ctx, params) + if err != nil { + t.Fatalf("DocumentHighlight(%v) failed: %v", params, err) + } + var got []protocol.Range + for _, h := range highlights { + got = append(got, h.Range) + } + + var want []protocol.Range + for _, s := range spans { + rng, err := m.SpanRange(s) + if err != nil { + t.Fatalf("Mapper.SpanRange(%v) failed: %v", s, err) + } + want = append(want, rng) + } + + sortRanges := func(s []protocol.Range) { + sort.Slice(s, func(i, j int) bool { + return protocol.CompareRange(s[i], s[j]) < 0 + }) + } + + sortRanges(got) + sortRanges(want) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("DocumentHighlight(%v) mismatch (-want +got):\n%s", src, diff) + } +} + +func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) { + // This test is substantially the same as (*runner).References in source/source_test.go. + // TODO(adonovan): Factor (and remove fluff). Where should the common code live? + + sm, err := r.data.Mapper(src.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := sm.SpanLocation(src) + if err != nil { + t.Fatalf("failed for %v: %v", src, err) + } + for _, includeDeclaration := range []bool{true, false} { + t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) { + want := make(map[protocol.Location]bool) + for i, pos := range itemList { + // We don't want the first result if we aren't including the declaration. + // TODO(adonovan): don't assume a single declaration: + // there may be >1 if corresponding methods are considered. + if i == 0 && !includeDeclaration { + continue + } + m, err := r.data.Mapper(pos.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := m.SpanLocation(pos) + if err != nil { + t.Fatalf("failed for %v: %v", src, err) + } + want[loc] = true + } + params := &protocol.ReferenceParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + Context: protocol.ReferenceContext{ + IncludeDeclaration: includeDeclaration, + }, + } + got, err := r.server.References(r.ctx, params) + if err != nil { + t.Fatalf("failed for %v: %v", src, err) + } + + sanitize := func(s string) string { + // In practice, CONFIGDIR means "gopls/internal/lsp/testdata". + return strings.ReplaceAll(s, r.data.Config.Dir, "CONFIGDIR") + } + formatLocation := func(loc protocol.Location) string { + return fmt.Sprintf("%s:%d.%d-%d.%d", + sanitize(string(loc.URI)), + loc.Range.Start.Line+1, + loc.Range.Start.Character+1, + loc.Range.End.Line+1, + loc.Range.End.Character+1) + } + toSlice := func(set map[protocol.Location]bool) []protocol.Location { + // TODO(adonovan): use generic maps.Keys(), someday. + list := make([]protocol.Location, 0, len(set)) + for key := range set { + list = append(list, key) + } + return list + } + toString := func(locs []protocol.Location) string { + // TODO(adonovan): use generic JoinValues(locs, formatLocation). + strs := make([]string, len(locs)) + for i, loc := range locs { + strs[i] = formatLocation(loc) + } + sort.Strings(strs) + return strings.Join(strs, "\n") + } + gotStr := toString(got) + wantStr := toString(toSlice(want)) + if gotStr != wantStr { + t.Errorf("incorrect references (got %d, want %d) at %s:\n%s", + len(got), len(want), + formatLocation(loc), + diff.Unified("want", "got", wantStr, gotStr)) + } + }) + } +} + +func (r *runner) InlayHints(t *testing.T, spn span.Span) { + uri := spn.URI() + filename := uri.Filename() + + hints, err := r.server.InlayHint(r.ctx, &protocol.InlayHintParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + // TODO: add Range + }) + if err != nil { + t.Fatal(err) + } + + // Map inlay hints to text edits. + edits := make([]protocol.TextEdit, len(hints)) + for i, hint := range hints { + var paddingLeft, paddingRight string + if hint.PaddingLeft { + paddingLeft = " " + } + if hint.PaddingRight { + paddingRight = " " + } + edits[i] = protocol.TextEdit{ + Range: protocol.Range{Start: *hint.Position, End: *hint.Position}, + NewText: fmt.Sprintf("<%s%s%s>", paddingLeft, hint.Label[0].Value, paddingRight), + } + } + + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + got, _, err := source.ApplyProtocolEdits(m, edits) + if err != nil { + t.Error(err) + } + + withinlayHints := string(r.data.Golden(t, "inlayHint", filename, func() ([]byte, error) { + return []byte(got), nil + })) + + if withinlayHints != got { + t.Errorf("inlay hints failed for %s, expected:\n%v\ngot:\n%v", filename, withinlayHints, got) + } +} + +func (r *runner) Rename(t *testing.T, spn span.Span, newText string) { + tag := fmt.Sprintf("%s-rename", newText) + + uri := spn.URI() + filename := uri.Filename() + sm, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + loc, err := sm.SpanLocation(spn) + if err != nil { + t.Fatalf("failed for %v: %v", spn, err) + } + + wedit, err := r.server.Rename(r.ctx, &protocol.RenameParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Position: loc.Range.Start, + NewName: newText, + }) + if err != nil { + renamed := string(r.data.Golden(t, tag, filename, func() ([]byte, error) { + return []byte(err.Error()), nil + })) + if err.Error() != renamed { + t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v\n", newText, renamed, err) + } + return + } + res, err := applyTextDocumentEdits(r, wedit.DocumentChanges) + if err != nil { + t.Fatal(err) + } + var orderedURIs []string + for uri := range res { + orderedURIs = append(orderedURIs, string(uri)) + } + sort.Strings(orderedURIs) + + var got string + for i := 0; i < len(res); i++ { + if i != 0 { + got += "\n" + } + uri := span.URIFromURI(orderedURIs[i]) + if len(res) > 1 { + got += filepath.Base(uri.Filename()) + ":\n" + } + val := res[uri] + got += val + } + want := string(r.data.Golden(t, tag, filename, func() ([]byte, error) { + return []byte(got), nil + })) + if want != got { + t.Errorf("rename failed for %s:\n%s", newText, compare.Text(want, got)) + } +} + +func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) { + m, err := r.data.Mapper(src.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := m.SpanLocation(src) + if err != nil { + t.Fatalf("failed for %v: %v", src, err) + } + params := &protocol.PrepareRenameParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + got, err := r.server.PrepareRename(context.Background(), params) + if err != nil { + t.Errorf("prepare rename failed for %v: got error: %v", src, err) + return + } + + // TODO(rfindley): can we consolidate on a single representation for + // PrepareRename results, and use cmp.Diff here? + + // PrepareRename may fail with no error if there was no object found at the + // position. + if got == nil { + if want.Text != "" { // expected an ident. + t.Errorf("prepare rename failed for %v: got nil", src) + } + return + } + if got.Range.Start == got.Range.End { + // Special case for 0-length ranges. Marks can't specify a 0-length range, + // so just compare the start. + if got.Range.Start != want.Range.Start { + t.Errorf("prepare rename failed: incorrect point, got %v want %v", got.Range.Start, want.Range.Start) + } + } else { + if got.Range != want.Range { + t.Errorf("prepare rename failed: incorrect range got %v want %v", got.Range, want.Range) + } + } + if got.Placeholder != want.Text { + t.Errorf("prepare rename failed: incorrect text got %v want %v", got.Placeholder, want.Text) + } +} + +func applyTextDocumentEdits(r *runner, edits []protocol.DocumentChanges) (map[span.URI]string, error) { + res := map[span.URI]string{} + for _, docEdits := range edits { + if docEdits.TextDocumentEdit != nil { + uri := docEdits.TextDocumentEdit.TextDocument.URI.SpanURI() + var m *protocol.Mapper + // If we have already edited this file, we use the edited version (rather than the + // file in its original state) so that we preserve our initial changes. + if content, ok := res[uri]; ok { + m = protocol.NewMapper(uri, []byte(content)) + } else { + var err error + if m, err = r.data.Mapper(uri); err != nil { + return nil, err + } + } + patched, _, err := source.ApplyProtocolEdits(m, docEdits.TextDocumentEdit.Edits) + if err != nil { + return nil, err + } + res[uri] = patched + } + } + return res, nil +} + +func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) { + params := &protocol.DocumentSymbolParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + } + got, err := r.server.DocumentSymbol(r.ctx, params) + if err != nil { + t.Fatal(err) + } + + symbols := make([]protocol.DocumentSymbol, len(got)) + for i, s := range got { + s, ok := s.(protocol.DocumentSymbol) + if !ok { + t.Fatalf("%v: wanted []DocumentSymbols but got %v", uri, got) + } + symbols[i] = s + } + + // Sort by position to make it easier to find errors. + sortSymbols := func(s []protocol.DocumentSymbol) { + sort.Slice(s, func(i, j int) bool { + return protocol.CompareRange(s[i].SelectionRange, s[j].SelectionRange) < 0 + }) + } + sortSymbols(expectedSymbols) + sortSymbols(symbols) + + // Ignore 'Range' here as it is difficult (impossible?) to express + // multi-line ranges in the packagestest framework. + ignoreRange := cmpopts.IgnoreFields(protocol.DocumentSymbol{}, "Range") + if diff := cmp.Diff(expectedSymbols, symbols, ignoreRange); diff != "" { + t.Errorf("mismatching symbols (-want +got)\n%s", diff) + } +} + +func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { + matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ) + + original := r.server.session.Options() + modified := original + modified.SymbolMatcher = matcher + r.server.session.SetOptions(modified) + defer r.server.session.SetOptions(original) + + params := &protocol.WorkspaceSymbolParams{ + Query: query, + } + gotSymbols, err := r.server.Symbol(r.ctx, params) + if err != nil { + t.Fatal(err) + } + got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols) + if err != nil { + t.Fatal(err) + } + got = filepath.ToSlash(tests.Normalize(got, r.normalizers)) + want := string(r.data.Golden(t, fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) { + return []byte(got), nil + })) + if diff := compare.Text(want, got); diff != "" { + t.Error(diff) + } +} + +func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) { + m, err := r.data.Mapper(spn.URI()) + if err != nil { + t.Fatal(err) + } + loc, err := m.SpanLocation(spn) + if err != nil { + t.Fatalf("failed for %v: %v", loc, err) + } + params := &protocol.SignatureHelpParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + got, err := r.server.SignatureHelp(r.ctx, params) + if err != nil { + // Only fail if we got an error we did not expect. + if want != nil { + t.Fatal(err) + } + return + } + if want == nil { + if got != nil { + t.Errorf("expected no signature, got %v", got) + } + return + } + if got == nil { + t.Fatalf("expected %v, got nil", want) + } + if diff := tests.DiffSignatures(spn, want, got); diff != "" { + t.Error(diff) + } +} + +func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) { + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + got, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + }) + if err != nil { + t.Fatal(err) + } + if diff := tests.DiffLinks(m, wantLinks, got); diff != "" { + t.Error(diff) + } +} + +func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) { + cmd, err := command.NewListKnownPackagesCommand("List Known Packages", command.URIArg{ + URI: protocol.URIFromSpanURI(uri), + }) + if err != nil { + t.Fatal(err) + } + resp, err := r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{ + Command: cmd.Command, + Arguments: cmd.Arguments, + }) + if err != nil { + t.Fatal(err) + } + res := resp.(command.ListKnownPackagesResult) + var hasPkg bool + for _, p := range res.Packages { + if p == expectedImport { + hasPkg = true + break + } + } + if !hasPkg { + t.Fatalf("%s: got %v packages\nwant contains %q", command.ListKnownPackages, res.Packages, expectedImport) + } + cmd, err = command.NewAddImportCommand("Add Imports", command.AddImportArgs{ + URI: protocol.URIFromSpanURI(uri), + ImportPath: expectedImport, + }) + if err != nil { + t.Fatal(err) + } + _, err = r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{ + Command: cmd.Command, + Arguments: cmd.Arguments, + }) + if err != nil { + t.Fatal(err) + } + got := (<-r.editRecv)[uri] + want := r.data.Golden(t, "addimport", uri.Filename(), func() ([]byte, error) { + return []byte(got), nil + }) + if want == nil { + t.Fatalf("golden file %q not found", uri.Filename()) + } + if diff := compare.Text(got, string(want)); diff != "" { + t.Errorf("%s mismatch\n%s", command.AddImport, diff) + } +} + +func (r *runner) SelectionRanges(t *testing.T, spn span.Span) { + uri := spn.URI() + sm, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + loc, err := sm.SpanLocation(spn) + if err != nil { + t.Error(err) + } + + ranges, err := r.server.selectionRange(r.ctx, &protocol.SelectionRangeParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + Positions: []protocol.Position{loc.Range.Start}, + }) + if err != nil { + t.Fatal(err) + } + + sb := &strings.Builder{} + for i, path := range ranges { + fmt.Fprintf(sb, "Ranges %d: ", i) + rng := path + for { + s, e, err := sm.RangeOffsets(rng.Range) + if err != nil { + t.Error(err) + } + + var snippet string + if e-s < 30 { + snippet = string(sm.Content[s:e]) + } else { + snippet = string(sm.Content[s:s+15]) + "..." + string(sm.Content[e-15:e]) + } + + fmt.Fprintf(sb, "\n\t%v %q", rng.Range, strings.ReplaceAll(snippet, "\n", "\\n")) + + if rng.Parent == nil { + break + } + rng = *rng.Parent + } + sb.WriteRune('\n') + } + got := sb.String() + + testName := "selectionrange_" + tests.SpanName(spn) + want := r.data.Golden(t, testName, uri.Filename(), func() ([]byte, error) { + return []byte(got), nil + }) + if want == nil { + t.Fatalf("golden file %q not found", uri.Filename()) + } + if diff := compare.Text(got, string(want)); diff != "" { + t.Errorf("%s mismatch\n%s", testName, diff) + } +} + +func (r *runner) collectDiagnostics(view *cache.View) { + if r.diagnostics != nil { + return + } + r.diagnostics = make(map[span.URI][]*source.Diagnostic) + + snapshot, release, err := view.Snapshot() + if err != nil { + panic(err) + } + defer release() + + // Always run diagnostics with analysis. + r.server.diagnose(r.ctx, snapshot, true) + for uri, reports := range r.server.diagnostics { + for _, report := range reports.reports { + for _, d := range report.diags { + r.diagnostics[uri] = append(r.diagnostics[uri], d) + } + } + } +} diff --git a/internal/lsp/lsprpc/autostart_default.go b/gopls/internal/lsp/lsprpc/autostart_default.go similarity index 92% rename from internal/lsp/lsprpc/autostart_default.go rename to gopls/internal/lsp/lsprpc/autostart_default.go index 59a76dc2f9f..20b974728d9 100644 --- a/internal/lsp/lsprpc/autostart_default.go +++ b/gopls/internal/lsp/lsprpc/autostart_default.go @@ -24,7 +24,7 @@ func runRemote(cmd *exec.Cmd) error { return nil } -// autoNetworkAddress returns the default network and address for the +// autoNetworkAddressDefault returns the default network and address for the // automatically-started gopls remote. See autostart_posix.go for more // information. func autoNetworkAddressDefault(goplsPath, id string) (network string, address string) { diff --git a/internal/lsp/lsprpc/autostart_posix.go b/gopls/internal/lsp/lsprpc/autostart_posix.go similarity index 97% rename from internal/lsp/lsprpc/autostart_posix.go rename to gopls/internal/lsp/lsprpc/autostart_posix.go index 948d44fcedf..90cc72ddf10 100644 --- a/internal/lsp/lsprpc/autostart_posix.go +++ b/gopls/internal/lsp/lsprpc/autostart_posix.go @@ -33,7 +33,7 @@ func daemonizePosix(cmd *exec.Cmd) { } } -// autoNetworkAddress resolves an id on the 'auto' pseduo-network to a +// autoNetworkAddressPosix resolves an id on the 'auto' pseduo-network to a // real network and address. On unix, this uses unix domain sockets. func autoNetworkAddressPosix(goplsPath, id string) (network string, address string) { // Especially when doing local development or testing, it's important that diff --git a/internal/lsp/lsprpc/binder.go b/gopls/internal/lsp/lsprpc/binder.go similarity index 88% rename from internal/lsp/lsprpc/binder.go rename to gopls/internal/lsp/lsprpc/binder.go index aa2edb3309d..01e59f7bb62 100644 --- a/internal/lsp/lsprpc/binder.go +++ b/gopls/internal/lsp/lsprpc/binder.go @@ -9,17 +9,17 @@ import ( "encoding/json" "fmt" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/event" jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2" - "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/xcontext" ) // The BinderFunc type adapts a bind function to implement the jsonrpc2.Binder // interface. -type BinderFunc func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) +type BinderFunc func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions -func (f BinderFunc) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { +func (f BinderFunc) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { return f(ctx, conn) } @@ -39,7 +39,7 @@ func NewServerBinder(newServer ServerFunc) *ServerBinder { return &ServerBinder{newServer: newServer} } -func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { +func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { client := protocol.ClientDispatcherV2(conn) server := b.newServer(ctx, client) serverHandler := protocol.ServerHandlerV2(server) @@ -55,7 +55,7 @@ func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) ( return jsonrpc2_v2.ConnectionOptions{ Handler: wrapped, Preempter: preempter, - }, nil + } } type canceler struct { @@ -94,13 +94,19 @@ func NewForwardBinder(dialer jsonrpc2_v2.Dialer) *ForwardBinder { } } -func (b *ForwardBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (opts jsonrpc2_v2.ConnectionOptions, _ error) { +func (b *ForwardBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (opts jsonrpc2_v2.ConnectionOptions) { client := protocol.ClientDispatcherV2(conn) clientBinder := NewClientBinder(func(context.Context, protocol.Server) protocol.Client { return client }) + serverConn, err := jsonrpc2_v2.Dial(context.Background(), b.dialer, clientBinder) if err != nil { - return opts, err + return jsonrpc2_v2.ConnectionOptions{ + Handler: jsonrpc2_v2.HandlerFunc(func(context.Context, *jsonrpc2_v2.Request) (interface{}, error) { + return nil, fmt.Errorf("%w: %v", jsonrpc2_v2.ErrInternal, err) + }), + } } + if b.onBind != nil { b.onBind(serverConn) } @@ -118,7 +124,7 @@ func (b *ForwardBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) return jsonrpc2_v2.ConnectionOptions{ Handler: protocol.ServerHandlerV2(server), Preempter: preempter, - }, nil + } } // A ClientFunc is used to construct an LSP client for a given server. @@ -133,10 +139,10 @@ func NewClientBinder(newClient ClientFunc) *ClientBinder { return &ClientBinder{newClient} } -func (b *ClientBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { +func (b *ClientBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { server := protocol.ServerDispatcherV2(conn) client := b.newClient(ctx, server) return jsonrpc2_v2.ConnectionOptions{ Handler: protocol.ClientHandlerV2(client), - }, nil + } } diff --git a/internal/lsp/lsprpc/binder_test.go b/gopls/internal/lsp/lsprpc/binder_test.go similarity index 90% rename from internal/lsp/lsprpc/binder_test.go rename to gopls/internal/lsp/lsprpc/binder_test.go index f7dd830331c..3315c3eb775 100644 --- a/internal/lsp/lsprpc/binder_test.go +++ b/gopls/internal/lsp/lsprpc/binder_test.go @@ -11,23 +11,20 @@ import ( "testing" "time" + "golang.org/x/tools/gopls/internal/lsp/protocol" jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/lsprpc" + . "golang.org/x/tools/gopls/internal/lsp/lsprpc" ) type TestEnv struct { - Listeners []jsonrpc2_v2.Listener - Conns []*jsonrpc2_v2.Connection - Servers []*jsonrpc2_v2.Server + Conns []*jsonrpc2_v2.Connection + Servers []*jsonrpc2_v2.Server } func (e *TestEnv) Shutdown(t *testing.T) { - for _, l := range e.Listeners { - if err := l.Close(); err != nil { - t.Error(err) - } + for _, s := range e.Servers { + s.Shutdown() } for _, c := range e.Conns { if err := c.Close(); err != nil { @@ -46,11 +43,7 @@ func (e *TestEnv) serve(ctx context.Context, t *testing.T, server jsonrpc2_v2.Bi if err != nil { t.Fatal(err) } - e.Listeners = append(e.Listeners, l) - s, err := jsonrpc2_v2.Serve(ctx, l, server) - if err != nil { - t.Fatal(err) - } + s := jsonrpc2_v2.NewServer(ctx, l, server) e.Servers = append(e.Servers, s) return l, s } diff --git a/internal/lsp/lsprpc/commandinterceptor.go b/gopls/internal/lsp/lsprpc/commandinterceptor.go similarity index 86% rename from internal/lsp/lsprpc/commandinterceptor.go rename to gopls/internal/lsp/lsprpc/commandinterceptor.go index 5c36af759e1..607ee9c9e9f 100644 --- a/internal/lsp/lsprpc/commandinterceptor.go +++ b/gopls/internal/lsp/lsprpc/commandinterceptor.go @@ -8,8 +8,8 @@ import ( "context" "encoding/json" + "golang.org/x/tools/gopls/internal/lsp/protocol" jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2" - "golang.org/x/tools/internal/lsp/protocol" ) // HandlerMiddleware is a middleware that only modifies the jsonrpc2 handler. @@ -18,13 +18,10 @@ type HandlerMiddleware func(jsonrpc2_v2.Handler) jsonrpc2_v2.Handler // BindHandler transforms a HandlerMiddleware into a Middleware. func BindHandler(hmw HandlerMiddleware) Middleware { return Middleware(func(binder jsonrpc2_v2.Binder) jsonrpc2_v2.Binder { - return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { - opts, err := binder.Bind(ctx, conn) - if err != nil { - return opts, err - } + return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { + opts := binder.Bind(ctx, conn) opts.Handler = hmw(opts.Handler) - return opts, nil + return opts }) }) } diff --git a/internal/lsp/lsprpc/commandinterceptor_test.go b/gopls/internal/lsp/lsprpc/commandinterceptor_test.go similarity index 90% rename from internal/lsp/lsprpc/commandinterceptor_test.go rename to gopls/internal/lsp/lsprpc/commandinterceptor_test.go index 06550e8fa7d..555f15130cc 100644 --- a/internal/lsp/lsprpc/commandinterceptor_test.go +++ b/gopls/internal/lsp/lsprpc/commandinterceptor_test.go @@ -8,9 +8,9 @@ import ( "context" "testing" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/lsprpc" + . "golang.org/x/tools/gopls/internal/lsp/lsprpc" ) func TestCommandInterceptor(t *testing.T) { diff --git a/internal/lsp/lsprpc/dialer.go b/gopls/internal/lsp/lsprpc/dialer.go similarity index 100% rename from internal/lsp/lsprpc/dialer.go rename to gopls/internal/lsp/lsprpc/dialer.go diff --git a/internal/lsp/lsprpc/goenv.go b/gopls/internal/lsp/lsprpc/goenv.go similarity index 98% rename from internal/lsp/lsprpc/goenv.go rename to gopls/internal/lsp/lsprpc/goenv.go index f313724c875..c316ea07c70 100644 --- a/internal/lsp/lsprpc/goenv.go +++ b/gopls/internal/lsp/lsprpc/goenv.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) func GoEnvMiddleware() (Middleware, error) { diff --git a/internal/lsp/lsprpc/goenv_test.go b/gopls/internal/lsp/lsprpc/goenv_test.go similarity index 91% rename from internal/lsp/lsprpc/goenv_test.go rename to gopls/internal/lsp/lsprpc/goenv_test.go index cdfe23c9089..5edd64fbe78 100644 --- a/internal/lsp/lsprpc/goenv_test.go +++ b/gopls/internal/lsp/lsprpc/goenv_test.go @@ -8,10 +8,9 @@ import ( "context" "testing" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/gopls/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/lsprpc" + . "golang.org/x/tools/gopls/internal/lsp/lsprpc" ) type initServer struct { @@ -26,8 +25,6 @@ func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamIniti } func TestGoEnvMiddleware(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - ctx := context.Background() server := &initServer{} diff --git a/internal/lsp/lsprpc/lsprpc.go b/gopls/internal/lsp/lsprpc/lsprpc.go similarity index 94% rename from internal/lsp/lsprpc/lsprpc.go rename to gopls/internal/lsp/lsprpc/lsprpc.go index a85e7914219..6b02cf5aa65 100644 --- a/internal/lsp/lsprpc/lsprpc.go +++ b/gopls/internal/lsp/lsprpc/lsprpc.go @@ -19,14 +19,15 @@ import ( "sync/atomic" "time" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" ) // Unique identifiers for client/server. @@ -39,6 +40,9 @@ type StreamServer struct { // daemon controls whether or not to log new connections. daemon bool + // optionsOverrides is passed to newly created sessions. + optionsOverrides func(*source.Options) + // serverForTest may be set to a test fake for testing. serverForTest protocol.Server } @@ -46,17 +50,19 @@ type StreamServer struct { // NewStreamServer creates a StreamServer using the shared cache. If // withTelemetry is true, each session is instrumented with telemetry that // records RPC statistics. -func NewStreamServer(cache *cache.Cache, daemon bool) *StreamServer { - return &StreamServer{cache: cache, daemon: daemon} +func NewStreamServer(cache *cache.Cache, daemon bool, optionsFunc func(*source.Options)) *StreamServer { + return &StreamServer{cache: cache, daemon: daemon, optionsOverrides: optionsFunc} } func (s *StreamServer) Binder() *ServerBinder { newServer := func(ctx context.Context, client protocol.ClientCloser) protocol.Server { - session := s.cache.NewSession(ctx) + session := cache.NewSession(ctx, s.cache, s.optionsOverrides) server := s.serverForTest if server == nil { server = lsp.NewServer(session, client) - debug.GetInstance(ctx).AddService(server, session) + if instance := debug.GetInstance(ctx); instance != nil { + instance.AddService(server, session) + } } return server } @@ -67,11 +73,13 @@ func (s *StreamServer) Binder() *ServerBinder { // incoming streams using a new lsp server. func (s *StreamServer) ServeStream(ctx context.Context, conn jsonrpc2.Conn) error { client := protocol.ClientDispatcher(conn) - session := s.cache.NewSession(ctx) + session := cache.NewSession(ctx, s.cache, s.optionsOverrides) server := s.serverForTest if server == nil { server = lsp.NewServer(session, client) - debug.GetInstance(ctx).AddService(server, session) + if instance := debug.GetInstance(ctx); instance != nil { + instance.AddService(server, session) + } } // Clients may or may not send a shutdown message. Make sure the server is // shut down. @@ -234,7 +242,7 @@ func (f *Forwarder) ServeStream(ctx context.Context, clientConn jsonrpc2.Conn) e // TODO(rfindley): remove this handshaking in favor of middleware. func (f *Forwarder) handshake(ctx context.Context) { - // This call to os.Execuable is redundant, and will be eliminated by the + // This call to os.Executable is redundant, and will be eliminated by the // transition to the V2 API. goplsPath, err := os.Executable() if err != nil { diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/gopls/internal/lsp/lsprpc/lsprpc_test.go similarity index 91% rename from internal/lsp/lsprpc/lsprpc_test.go rename to gopls/internal/lsp/lsprpc/lsprpc_test.go index 795c887e4b4..0dc78e67d8a 100644 --- a/internal/lsp/lsprpc/lsprpc_test.go +++ b/gopls/internal/lsp/lsprpc/lsprpc_test.go @@ -12,14 +12,13 @@ import ( "testing" "time" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/jsonrpc2" "golang.org/x/tools/internal/jsonrpc2/servertest" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/testenv" ) type FakeClient struct { @@ -58,9 +57,9 @@ func TestClientLogging(t *testing.T) { client := FakeClient{Logs: make(chan string, 10)} ctx = debug.WithInstance(ctx, "", "") - ss := NewStreamServer(cache.New(nil), false) + ss := NewStreamServer(cache.New(nil, nil), false, nil) ss.serverForTest = server - ts := servertest.NewPipeServer(ctx, ss, nil) + ts := servertest.NewPipeServer(ss, nil) defer checkClose(t, ts.Close) cc := ts.Connect(ctx) cc.Go(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound)) @@ -121,16 +120,15 @@ func checkClose(t *testing.T, closer func() error) { func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) { t.Helper() serveCtx := debug.WithInstance(ctx, "", "") - ss := NewStreamServer(cache.New(nil), false) + ss := NewStreamServer(cache.New(nil, nil), false, nil) ss.serverForTest = s tsDirect := servertest.NewTCPServer(serveCtx, ss, nil) - forwarderCtx := debug.WithInstance(ctx, "", "") forwarder, err := NewForwarder("tcp;"+tsDirect.Addr, nil) if err != nil { t.Fatal(err) } - tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder, nil) + tsForwarded := servertest.NewPipeServer(forwarder, nil) return tsDirect, tsForwarded, func() { checkClose(t, tsDirect.Close) checkClose(t, tsForwarded.Close) @@ -217,24 +215,22 @@ func TestDebugInfoLifecycle(t *testing.T) { clientCtx := debug.WithInstance(baseCtx, "", "") serverCtx := debug.WithInstance(baseCtx, "", "") - cache := cache.New(nil) - ss := NewStreamServer(cache, false) + cache := cache.New(nil, nil) + ss := NewStreamServer(cache, false, nil) tsBackend := servertest.NewTCPServer(serverCtx, ss, nil) forwarder, err := NewForwarder("tcp;"+tsBackend.Addr, nil) if err != nil { t.Fatal(err) } - tsForwarder := servertest.NewPipeServer(clientCtx, forwarder, nil) + tsForwarder := servertest.NewPipeServer(forwarder, nil) - conn1 := tsForwarder.Connect(clientCtx) - ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{}) + ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, tsForwarder, fake.ClientHooks{}) if err != nil { t.Fatal(err) } defer ed1.Close(clientCtx) - conn2 := tsBackend.Connect(baseCtx) - ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2, fake.ClientHooks{}) + ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, tsBackend, fake.ClientHooks{}) if err != nil { t.Fatal(err) } @@ -292,10 +288,9 @@ func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamIniti } func TestEnvForwarding(t *testing.T) { - testenv.NeedsGo1Point(t, 13) + ctx := context.Background() + server := &initServer{} - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() _, tsForwarded, cleanup := setupForwarding(ctx, t, server) defer cleanup() diff --git a/internal/lsp/lsprpc/middleware.go b/gopls/internal/lsp/lsprpc/middleware.go similarity index 96% rename from internal/lsp/lsprpc/middleware.go rename to gopls/internal/lsp/lsprpc/middleware.go index f703217dd0b..50089cde7dc 100644 --- a/internal/lsp/lsprpc/middleware.go +++ b/gopls/internal/lsp/lsprpc/middleware.go @@ -62,11 +62,8 @@ func (h *Handshaker) Peers() []PeerInfo { // Middleware is a jsonrpc2 middleware function to augment connection binding // to handle the handshake method, and record disconnections. func (h *Handshaker) Middleware(inner jsonrpc2_v2.Binder) jsonrpc2_v2.Binder { - return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { - opts, err := inner.Bind(ctx, conn) - if err != nil { - return opts, err - } + return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { + opts := inner.Bind(ctx, conn) localID := h.nextID() info := &PeerInfo{ @@ -93,7 +90,7 @@ func (h *Handshaker) Middleware(inner jsonrpc2_v2.Binder) jsonrpc2_v2.Binder { // Record the dropped client. go h.cleanupAtDisconnect(conn, localID) - return opts, nil + return opts }) } diff --git a/internal/lsp/lsprpc/middleware_test.go b/gopls/internal/lsp/lsprpc/middleware_test.go similarity index 93% rename from internal/lsp/lsprpc/middleware_test.go rename to gopls/internal/lsp/lsprpc/middleware_test.go index a385f10037a..c528eae5c62 100644 --- a/internal/lsp/lsprpc/middleware_test.go +++ b/gopls/internal/lsp/lsprpc/middleware_test.go @@ -11,12 +11,12 @@ import ( "testing" "time" + . "golang.org/x/tools/gopls/internal/lsp/lsprpc" jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2" - . "golang.org/x/tools/internal/lsp/lsprpc" ) -var noopBinder = BinderFunc(func(context.Context, *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) { - return jsonrpc2_v2.ConnectionOptions{}, nil +var noopBinder = BinderFunc(func(context.Context, *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions { + return jsonrpc2_v2.ConnectionOptions{} }) func TestHandshakeMiddleware(t *testing.T) { diff --git a/gopls/internal/lsp/mod/code_lens.go b/gopls/internal/lsp/mod/code_lens.go new file mode 100644 index 00000000000..eb8d8b79d88 --- /dev/null +++ b/gopls/internal/lsp/mod/code_lens.go @@ -0,0 +1,191 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mod + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +// LensFuncs returns the supported lensFuncs for go.mod files. +func LensFuncs() map[command.Command]source.LensFunc { + return map[command.Command]source.LensFunc{ + command.UpgradeDependency: upgradeLenses, + command.Tidy: tidyLens, + command.Vendor: vendorLens, + command.RunGovulncheck: vulncheckLenses, + } +} + +func upgradeLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil || pm.File == nil { + return nil, err + } + uri := protocol.URIFromSpanURI(fh.URI()) + reset, err := command.NewResetGoModDiagnosticsCommand("Reset go.mod diagnostics", command.ResetGoModDiagnosticsArgs{URIArg: command.URIArg{URI: uri}}) + if err != nil { + return nil, err + } + // Put the `Reset go.mod diagnostics` codelens on the module statement. + modrng, err := moduleStmtRange(fh, pm) + if err != nil { + return nil, err + } + lenses := []protocol.CodeLens{{Range: modrng, Command: reset}} + if len(pm.File.Require) == 0 { + // Nothing to upgrade. + return lenses, nil + } + var requires []string + for _, req := range pm.File.Require { + requires = append(requires, req.Mod.Path) + } + checkUpgrade, err := command.NewCheckUpgradesCommand("Check for upgrades", command.CheckUpgradesArgs{ + URI: uri, + Modules: requires, + }) + if err != nil { + return nil, err + } + upgradeTransitive, err := command.NewUpgradeDependencyCommand("Upgrade transitive dependencies", command.DependencyArgs{ + URI: uri, + AddRequire: false, + GoCmdArgs: []string{"-d", "-u", "-t", "./..."}, + }) + if err != nil { + return nil, err + } + upgradeDirect, err := command.NewUpgradeDependencyCommand("Upgrade direct dependencies", command.DependencyArgs{ + URI: uri, + AddRequire: false, + GoCmdArgs: append([]string{"-d"}, requires...), + }) + if err != nil { + return nil, err + } + + // Put the upgrade code lenses above the first require block or statement. + rng, err := firstRequireRange(fh, pm) + if err != nil { + return nil, err + } + + return append(lenses, []protocol.CodeLens{ + {Range: rng, Command: checkUpgrade}, + {Range: rng, Command: upgradeTransitive}, + {Range: rng, Command: upgradeDirect}, + }...), nil +} + +func tidyLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil || pm.File == nil { + return nil, err + } + uri := protocol.URIFromSpanURI(fh.URI()) + cmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: []protocol.DocumentURI{uri}}) + if err != nil { + return nil, err + } + rng, err := moduleStmtRange(fh, pm) + if err != nil { + return nil, err + } + return []protocol.CodeLens{{ + Range: rng, + Command: cmd, + }}, nil +} + +func vendorLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil || pm.File == nil { + return nil, err + } + if len(pm.File.Require) == 0 { + // Nothing to vendor. + return nil, nil + } + rng, err := moduleStmtRange(fh, pm) + if err != nil { + return nil, err + } + title := "Create vendor directory" + uri := protocol.URIFromSpanURI(fh.URI()) + cmd, err := command.NewVendorCommand(title, command.URIArg{URI: uri}) + if err != nil { + return nil, err + } + // Change the message depending on whether or not the module already has a + // vendor directory. + vendorDir := filepath.Join(filepath.Dir(fh.URI().Filename()), "vendor") + if info, _ := os.Stat(vendorDir); info != nil && info.IsDir() { + title = "Sync vendor directory" + } + return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil +} + +func moduleStmtRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) { + if pm.File == nil || pm.File.Module == nil || pm.File.Module.Syntax == nil { + return protocol.Range{}, fmt.Errorf("no module statement in %s", fh.URI()) + } + syntax := pm.File.Module.Syntax + return pm.Mapper.OffsetRange(syntax.Start.Byte, syntax.End.Byte) +} + +// firstRequireRange returns the range for the first "require" in the given +// go.mod file. This is either a require block or an individual require line. +func firstRequireRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) { + if len(pm.File.Require) == 0 { + return protocol.Range{}, fmt.Errorf("no requires in the file %s", fh.URI()) + } + var start, end modfile.Position + for _, stmt := range pm.File.Syntax.Stmt { + if b, ok := stmt.(*modfile.LineBlock); ok && len(b.Token) == 1 && b.Token[0] == "require" { + start, end = b.Span() + break + } + } + + firstRequire := pm.File.Require[0].Syntax + if start.Byte == 0 || firstRequire.Start.Byte < start.Byte { + start, end = firstRequire.Start, firstRequire.End + } + return pm.Mapper.OffsetRange(start.Byte, end.Byte) +} + +func vulncheckLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil || pm.File == nil { + return nil, err + } + // Place the codelenses near the module statement. + // A module may not have the require block, + // but vulnerabilities can exist in standard libraries. + uri := protocol.URIFromSpanURI(fh.URI()) + rng, err := moduleStmtRange(fh, pm) + if err != nil { + return nil, err + } + + vulncheck, err := command.NewRunGovulncheckCommand("Run govulncheck", command.VulncheckArgs{ + URI: uri, + Pattern: "./...", + }) + if err != nil { + return nil, err + } + return []protocol.CodeLens{ + {Range: rng, Command: vulncheck}, + }, nil +} diff --git a/gopls/internal/lsp/mod/diagnostics.go b/gopls/internal/lsp/mod/diagnostics.go new file mode 100644 index 00000000000..ca2971dbcb9 --- /dev/null +++ b/gopls/internal/lsp/mod/diagnostics.go @@ -0,0 +1,565 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mod provides core features related to go.mod file +// handling for use by Go editors and tools. +package mod + +import ( + "context" + "fmt" + "sort" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/semver" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/vuln/osv" +) + +// Diagnostics returns diagnostics for the modules in the workspace. +// +// It waits for completion of type-checking of all active packages. +func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) { + ctx, done := event.Start(ctx, "mod.Diagnostics", source.SnapshotLabels(snapshot)...) + defer done() + + return collectDiagnostics(ctx, snapshot, ModDiagnostics) +} + +// UpgradeDiagnostics returns upgrade diagnostics for the modules in the +// workspace with known upgrades. +func UpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) { + ctx, done := event.Start(ctx, "mod.UpgradeDiagnostics", source.SnapshotLabels(snapshot)...) + defer done() + + return collectDiagnostics(ctx, snapshot, ModUpgradeDiagnostics) +} + +// VulnerabilityDiagnostics returns vulnerability diagnostics for the active modules in the +// workspace with known vulnerabilities. +func VulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) { + ctx, done := event.Start(ctx, "mod.VulnerabilityDiagnostics", source.SnapshotLabels(snapshot)...) + defer done() + + return collectDiagnostics(ctx, snapshot, ModVulnerabilityDiagnostics) +} + +func collectDiagnostics(ctx context.Context, snapshot source.Snapshot, diagFn func(context.Context, source.Snapshot, source.FileHandle) ([]*source.Diagnostic, error)) (map[span.URI][]*source.Diagnostic, error) { + reports := make(map[span.URI][]*source.Diagnostic) + for _, uri := range snapshot.ModFiles() { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + reports[fh.URI()] = []*source.Diagnostic{} + diagnostics, err := diagFn(ctx, snapshot, fh) + if err != nil { + return nil, err + } + for _, d := range diagnostics { + fh, err := snapshot.GetFile(ctx, d.URI) + if err != nil { + return nil, err + } + reports[fh.URI()] = append(reports[fh.URI()], d) + } + } + return reports, nil +} + +// ModDiagnostics waits for completion of type-checking of all active +// packages, then returns diagnostics from diagnosing the packages in +// the workspace and from tidying the go.mod file. +func ModDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (diagnostics []*source.Diagnostic, err error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + if pm == nil || len(pm.ParseErrors) == 0 { + return nil, err + } + return pm.ParseErrors, nil + } + + // Packages in the workspace can contribute diagnostics to go.mod files. + // TODO(rfindley): Try to avoid type checking all packages in the workspace here, + // for every go.mod file. If gc_details is enabled, it looks like this could lead to extra + // go command invocations (as gc details is not memoized). + active, err := snapshot.ActiveMetadata(ctx) + if err != nil && !source.IsNonFatalGoModError(err) { + event.Error(ctx, fmt.Sprintf("workspace packages: diagnosing %s", pm.URI), err) + } + if err == nil { + // Type-check packages in parallel and gather list/parse/type errors. + // (This may be the first operation after the initial metadata load + // to demand type-checking of all active packages.) + ids := make([]source.PackageID, len(active)) + for i, meta := range active { + ids[i] = meta.ID + } + pkgs, err := snapshot.TypeCheck(ctx, source.TypecheckFull, ids...) + if err != nil { + return nil, err + } + for _, pkg := range pkgs { + pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, fh.URI()) + if err != nil { + return nil, err + } + diagnostics = append(diagnostics, pkgDiags...) + } + } + + tidied, err := snapshot.ModTidy(ctx, pm) + if err != nil && !source.IsNonFatalGoModError(err) { + event.Error(ctx, fmt.Sprintf("tidy: diagnosing %s", pm.URI), err) + } + if err == nil { + for _, d := range tidied.Diagnostics { + if d.URI != fh.URI() { + continue + } + diagnostics = append(diagnostics, d) + } + } + return diagnostics, nil +} + +// ModUpgradeDiagnostics adds upgrade quick fixes for individual modules if the upgrades +// are recorded in the view. +func ModUpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (upgradeDiagnostics []*source.Diagnostic, err error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + // Don't return an error if there are parse error diagnostics to be shown, but also do not + // continue since we won't be able to show the upgrade diagnostics. + if pm != nil && len(pm.ParseErrors) != 0 { + return nil, nil + } + return nil, err + } + + upgrades := snapshot.View().ModuleUpgrades(fh.URI()) + for _, req := range pm.File.Require { + ver, ok := upgrades[req.Mod.Path] + if !ok || req.Mod.Version == ver { + continue + } + rng, err := pm.Mapper.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte) + if err != nil { + return nil, err + } + // Upgrade to the exact version we offer the user, not the most recent. + title := fmt.Sprintf("%s%v", upgradeCodeActionPrefix, ver) + cmd, err := command.NewUpgradeDependencyCommand(title, command.DependencyArgs{ + URI: protocol.URIFromSpanURI(fh.URI()), + AddRequire: false, + GoCmdArgs: []string{req.Mod.Path + "@" + ver}, + }) + if err != nil { + return nil, err + } + upgradeDiagnostics = append(upgradeDiagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityInformation, + Source: source.UpgradeNotification, + Message: fmt.Sprintf("%v can be upgraded", req.Mod.Path), + SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, + }) + } + + return upgradeDiagnostics, nil +} + +const upgradeCodeActionPrefix = "Upgrade to " + +// ModVulnerabilityDiagnostics adds diagnostics for vulnerabilities in individual modules +// if the vulnerability is recorded in the view. +func ModVulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (vulnDiagnostics []*source.Diagnostic, err error) { + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + // Don't return an error if there are parse error diagnostics to be shown, but also do not + // continue since we won't be able to show the vulnerability diagnostics. + if pm != nil && len(pm.ParseErrors) != 0 { + return nil, nil + } + return nil, err + } + + diagSource := source.Govulncheck + vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()] + if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports { + vs, err = snapshot.ModVuln(ctx, fh.URI()) + if err != nil { + return nil, err + } + diagSource = source.Vulncheck + } + if vs == nil || len(vs.Vulns) == 0 { + return nil, nil + } + + suggestRunOrResetGovulncheck, err := suggestGovulncheckAction(diagSource == source.Govulncheck, fh.URI()) + if err != nil { + // must not happen + return nil, err // TODO: bug report + } + type modVuln struct { + mod *govulncheck.Module + vuln *govulncheck.Vuln + } + vulnsByModule := make(map[string][]modVuln) + for _, vuln := range vs.Vulns { + for _, mod := range vuln.Modules { + vulnsByModule[mod.Path] = append(vulnsByModule[mod.Path], modVuln{mod, vuln}) + } + } + + for _, req := range pm.File.Require { + vulns := vulnsByModule[req.Mod.Path] + if len(vulns) == 0 { + continue + } + // note: req.Syntax is the line corresponding to 'require', which means + // req.Syntax.Start can point to the beginning of the "require" keyword + // for a single line require (e.g. "require golang.org/x/mod v0.0.0"). + start := req.Syntax.Start.Byte + if len(req.Syntax.Token) == 3 { + start += len("require ") + } + rng, err := pm.Mapper.OffsetRange(start, req.Syntax.End.Byte) + if err != nil { + return nil, err + } + // Map affecting vulns to 'warning' level diagnostics, + // others to 'info' level diagnostics. + // Fixes will include only the upgrades for warning level diagnostics. + var warningFixes, infoFixes []source.SuggestedFix + var warning, info []string + var relatedInfo []source.RelatedInformation + for _, mv := range vulns { + mod, vuln := mv.mod, mv.vuln + // It is possible that the source code was changed since the last + // govulncheck run and information in the `vulns` info is stale. + // For example, imagine that a user is in the middle of updating + // problematic modules detected by the govulncheck run by applying + // quick fixes. Stale diagnostics can be confusing and prevent the + // user from quickly locating the next module to fix. + // Ideally we should rerun the analysis with the updated module + // dependencies or any other code changes, but we are not yet + // in the position of automatically triggering the analysis + // (govulncheck can take a while). We also don't know exactly what + // part of source code was changed since `vulns` was computed. + // As a heuristic, we assume that a user upgrades the affecting + // module to the version with the fix or the latest one, and if the + // version in the require statement is equal to or higher than the + // fixed version, skip generating a diagnostic about the vulnerability. + // Eventually, the user has to rerun govulncheck. + if mod.FixedVersion != "" && semver.IsValid(req.Mod.Version) && semver.Compare(mod.FixedVersion, req.Mod.Version) <= 0 { + continue + } + if !vuln.IsCalled() { + info = append(info, vuln.OSV.ID) + } else { + warning = append(warning, vuln.OSV.ID) + relatedInfo = append(relatedInfo, listRelatedInfo(ctx, snapshot, vuln)...) + } + // Upgrade to the exact version we offer the user, not the most recent. + if fixedVersion := mod.FixedVersion; semver.IsValid(fixedVersion) && semver.Compare(req.Mod.Version, fixedVersion) < 0 { + cmd, err := getUpgradeCodeAction(fh, req, fixedVersion) + if err != nil { + return nil, err // TODO: bug report + } + sf := source.SuggestedFixFromCommand(cmd, protocol.QuickFix) + if !vuln.IsCalled() { + infoFixes = append(infoFixes, sf) + } else { + warningFixes = append(warningFixes, sf) + } + } + } + + if len(warning) == 0 && len(info) == 0 { + continue + } + // Add an upgrade for module@latest. + // TODO(suzmue): verify if latest is the same as fixedVersion. + latest, err := getUpgradeCodeAction(fh, req, "latest") + if err != nil { + return nil, err // TODO: bug report + } + sf := source.SuggestedFixFromCommand(latest, protocol.QuickFix) + if len(warningFixes) > 0 { + warningFixes = append(warningFixes, sf) + } + if len(infoFixes) > 0 { + infoFixes = append(infoFixes, sf) + } + + sort.Strings(warning) + sort.Strings(info) + + if len(warning) > 0 { + warningFixes = append(warningFixes, suggestRunOrResetGovulncheck) + vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityWarning, + Source: diagSource, + Message: getVulnMessage(req.Mod.Path, warning, true, diagSource == source.Govulncheck), + SuggestedFixes: warningFixes, + Related: relatedInfo, + }) + } + if len(info) > 0 { + infoFixes = append(infoFixes, suggestRunOrResetGovulncheck) + vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityInformation, + Source: diagSource, + Message: getVulnMessage(req.Mod.Path, info, false, diagSource == source.Govulncheck), + SuggestedFixes: infoFixes, + Related: relatedInfo, + }) + } + } + + // TODO(hyangah): place this diagnostic on the `go` directive or `toolchain` directive + // after https://go.dev/issue/57001. + const diagnoseStdLib = false + if diagnoseStdLib { + // Add standard library vulnerabilities. + stdlibVulns := vulnsByModule["stdlib"] + if len(stdlibVulns) == 0 { + return vulnDiagnostics, nil + } + + // Put the standard library diagnostic on the module declaration. + rng, err := pm.Mapper.OffsetRange(pm.File.Module.Syntax.Start.Byte, pm.File.Module.Syntax.End.Byte) + if err != nil { + return vulnDiagnostics, nil // TODO: bug report + } + + stdlib := stdlibVulns[0].mod.FoundVersion + var warning, info []string + var relatedInfo []source.RelatedInformation + for _, mv := range stdlibVulns { + vuln := mv.vuln + stdlib = mv.mod.FoundVersion + if !vuln.IsCalled() { + info = append(info, vuln.OSV.ID) + } else { + warning = append(warning, vuln.OSV.ID) + relatedInfo = append(relatedInfo, listRelatedInfo(ctx, snapshot, vuln)...) + } + } + if len(warning) > 0 { + fixes := []source.SuggestedFix{suggestRunOrResetGovulncheck} + vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityWarning, + Source: diagSource, + Message: getVulnMessage(stdlib, warning, true, diagSource == source.Govulncheck), + SuggestedFixes: fixes, + Related: relatedInfo, + }) + } + if len(info) > 0 { + fixes := []source.SuggestedFix{suggestRunOrResetGovulncheck} + vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityInformation, + Source: diagSource, + Message: getVulnMessage(stdlib, info, false, diagSource == source.Govulncheck), + SuggestedFixes: fixes, + Related: relatedInfo, + }) + } + } + + return vulnDiagnostics, nil +} + +// suggestGovulncheckAction returns a code action that suggests either run govulncheck +// for more accurate investigation (if the present vulncheck diagnostics are based on +// analysis less accurate than govulncheck) or reset the existing govulncheck result +// (if the present vulncheck diagnostics are already based on govulncheck run). +func suggestGovulncheckAction(fromGovulncheck bool, uri span.URI) (source.SuggestedFix, error) { + if fromGovulncheck { + resetVulncheck, err := command.NewResetGoModDiagnosticsCommand("Reset govulncheck result", command.ResetGoModDiagnosticsArgs{ + URIArg: command.URIArg{URI: protocol.DocumentURI(uri)}, + DiagnosticSource: string(source.Govulncheck), + }) + if err != nil { + return source.SuggestedFix{}, err + } + return source.SuggestedFixFromCommand(resetVulncheck, protocol.QuickFix), nil + } + vulncheck, err := command.NewRunGovulncheckCommand("Run govulncheck to verify", command.VulncheckArgs{ + URI: protocol.DocumentURI(uri), + Pattern: "./...", + }) + if err != nil { + return source.SuggestedFix{}, err + } + return source.SuggestedFixFromCommand(vulncheck, protocol.QuickFix), nil +} + +func getVulnMessage(mod string, vulns []string, used, fromGovulncheck bool) string { + var b strings.Builder + if used { + switch len(vulns) { + case 1: + fmt.Fprintf(&b, "%v has a vulnerability used in the code: %v.", mod, vulns[0]) + default: + fmt.Fprintf(&b, "%v has vulnerabilities used in the code: %v.", mod, strings.Join(vulns, ", ")) + } + } else { + if fromGovulncheck { + switch len(vulns) { + case 1: + fmt.Fprintf(&b, "%v has a vulnerability %v that is not used in the code.", mod, vulns[0]) + default: + fmt.Fprintf(&b, "%v has known vulnerabilities %v that are not used in the code.", mod, strings.Join(vulns, ", ")) + } + } else { + switch len(vulns) { + case 1: + fmt.Fprintf(&b, "%v has a vulnerability %v.", mod, vulns[0]) + default: + fmt.Fprintf(&b, "%v has known vulnerabilities %v.", mod, strings.Join(vulns, ", ")) + } + } + } + return b.String() +} + +func listRelatedInfo(ctx context.Context, snapshot source.Snapshot, vuln *govulncheck.Vuln) []source.RelatedInformation { + var ri []source.RelatedInformation + for _, m := range vuln.Modules { + for _, p := range m.Packages { + for _, c := range p.CallStacks { + if len(c.Frames) == 0 { + continue + } + entry := c.Frames[0] + pos := entry.Position + if pos.Filename == "" { + continue // token.Position Filename is an optional field. + } + uri := span.URIFromPath(pos.Filename) + startPos := protocol.Position{ + Line: uint32(pos.Line) - 1, + // We need to read the file contents to precisesly map + // token.Position (pos) to the UTF16-based column offset + // protocol.Position requires. That can be expensive. + // We need this related info to just help users to open + // the entry points of the callstack and once the file is + // open, we will compute the precise location based on the + // open file contents. So, use the beginning of the line + // as the position here instead of precise UTF16-based + // position computation. + Character: 0, + } + ri = append(ri, source.RelatedInformation{ + URI: uri, + Range: protocol.Range{ + Start: startPos, + End: startPos, + }, + Message: fmt.Sprintf("[%v] %v -> %v.%v", vuln.OSV.ID, entry.Name(), p.Path, c.Symbol), + }) + } + } + } + return ri +} + +func formatMessage(v *govulncheck.Vuln) string { + details := []byte(v.OSV.Details) + // Remove any new lines that are not preceded or followed by a new line. + for i, r := range details { + if r == '\n' && i > 0 && details[i-1] != '\n' && i+1 < len(details) && details[i+1] != '\n' { + details[i] = ' ' + } + } + return strings.TrimSpace(strings.Replace(string(details), "\n\n", "\n\n ", -1)) +} + +// href returns the url for the vulnerability information. +// Eventually we should retrieve the url embedded in the osv.Entry. +// While vuln.go.dev is under development, this always returns +// the page in pkg.go.dev. +func href(vuln *osv.Entry) string { + return fmt.Sprintf("https://pkg.go.dev/vuln/%s", vuln.ID) +} + +func getUpgradeCodeAction(fh source.FileHandle, req *modfile.Require, version string) (protocol.Command, error) { + cmd, err := command.NewUpgradeDependencyCommand(upgradeTitle(version), command.DependencyArgs{ + URI: protocol.URIFromSpanURI(fh.URI()), + AddRequire: false, + GoCmdArgs: []string{req.Mod.Path + "@" + version}, + }) + if err != nil { + return protocol.Command{}, err + } + return cmd, nil +} + +func upgradeTitle(fixedVersion string) string { + title := fmt.Sprintf("%s%v", upgradeCodeActionPrefix, fixedVersion) + return title +} + +// SelectUpgradeCodeActions takes a list of code actions for a required module +// and returns a more selective list of upgrade code actions, +// where the code actions have been deduped. Code actions unrelated to upgrade +// are deduplicated by the name. +func SelectUpgradeCodeActions(actions []protocol.CodeAction) []protocol.CodeAction { + if len(actions) <= 1 { + return actions // return early if no sorting necessary + } + var versionedUpgrade, latestUpgrade, resetAction protocol.CodeAction + var chosenVersionedUpgrade string + var selected []protocol.CodeAction + + seen := make(map[string]bool) + + for _, action := range actions { + if strings.HasPrefix(action.Title, upgradeCodeActionPrefix) { + if v := getUpgradeVersion(action); v == "latest" && latestUpgrade.Title == "" { + latestUpgrade = action + } else if versionedUpgrade.Title == "" || semver.Compare(v, chosenVersionedUpgrade) > 0 { + chosenVersionedUpgrade = v + versionedUpgrade = action + } + } else if strings.HasPrefix(action.Title, "Reset govulncheck") { + resetAction = action + } else if !seen[action.Command.Title] { + seen[action.Command.Title] = true + selected = append(selected, action) + } + } + if versionedUpgrade.Title != "" { + selected = append(selected, versionedUpgrade) + } + if latestUpgrade.Title != "" { + selected = append(selected, latestUpgrade) + } + if resetAction.Title != "" { + selected = append(selected, resetAction) + } + return selected +} + +func getUpgradeVersion(p protocol.CodeAction) string { + return strings.TrimPrefix(p.Title, upgradeCodeActionPrefix) +} diff --git a/gopls/internal/lsp/mod/format.go b/gopls/internal/lsp/mod/format.go new file mode 100644 index 00000000000..9c3942ee06d --- /dev/null +++ b/gopls/internal/lsp/mod/format.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mod + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) { + ctx, done := event.Start(ctx, "mod.Format") + defer done() + + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + return nil, err + } + formatted, err := pm.File.Format() + if err != nil { + return nil, err + } + // Calculate the edits to be made due to the change. + diffs := snapshot.View().Options().ComputeEdits(string(pm.Mapper.Content), string(formatted)) + return source.ToProtocolEdits(pm.Mapper, diffs) +} diff --git a/gopls/internal/lsp/mod/hover.go b/gopls/internal/lsp/mod/hover.go new file mode 100644 index 00000000000..fbd3c000013 --- /dev/null +++ b/gopls/internal/lsp/mod/hover.go @@ -0,0 +1,358 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mod + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/semver" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) { + var found bool + for _, uri := range snapshot.ModFiles() { + if fh.URI() == uri { + found = true + break + } + } + + // We only provide hover information for the view's go.mod files. + if !found { + return nil, nil + } + + ctx, done := event.Start(ctx, "mod.Hover") + defer done() + + // Get the position of the cursor. + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + return nil, fmt.Errorf("getting modfile handle: %w", err) + } + offset, err := pm.Mapper.PositionOffset(position) + if err != nil { + return nil, fmt.Errorf("computing cursor position: %w", err) + } + + // If the cursor position is on a module statement + if hover, ok := hoverOnModuleStatement(ctx, pm, offset, snapshot, fh); ok { + return hover, nil + } + return hoverOnRequireStatement(ctx, pm, offset, snapshot, fh) +} + +func hoverOnRequireStatement(ctx context.Context, pm *source.ParsedModule, offset int, snapshot source.Snapshot, fh source.FileHandle) (*protocol.Hover, error) { + // Confirm that the cursor is at the position of a require statement. + var req *modfile.Require + var startOffset, endOffset int + for _, r := range pm.File.Require { + dep := []byte(r.Mod.Path) + s, e := r.Syntax.Start.Byte, r.Syntax.End.Byte + i := bytes.Index(pm.Mapper.Content[s:e], dep) + if i == -1 { + continue + } + // Shift the start position to the location of the + // dependency within the require statement. + startOffset, endOffset = s+i, e + if startOffset <= offset && offset <= endOffset { + req = r + break + } + } + // TODO(hyangah): find position for info about vulnerabilities in Go + + // The cursor position is not on a require statement. + if req == nil { + return nil, nil + } + + // Get the vulnerability info. + fromGovulncheck := true + vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()] + if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports { + var err error + vs, err = snapshot.ModVuln(ctx, fh.URI()) + if err != nil { + return nil, err + } + fromGovulncheck = false + } + affecting, nonaffecting := lookupVulns(vs, req.Mod.Path, req.Mod.Version) + + // Get the `go mod why` results for the given file. + why, err := snapshot.ModWhy(ctx, fh) + if err != nil { + return nil, err + } + explanation, ok := why[req.Mod.Path] + if !ok { + return nil, nil + } + + // Get the range to highlight for the hover. + // TODO(hyangah): adjust the hover range to include the version number + // to match the diagnostics' range. + rng, err := pm.Mapper.OffsetRange(startOffset, endOffset) + if err != nil { + return nil, err + } + options := snapshot.View().Options() + isPrivate := snapshot.View().IsGoPrivatePath(req.Mod.Path) + header := formatHeader(req.Mod.Path, options) + explanation = formatExplanation(explanation, req, options, isPrivate) + vulns := formatVulnerabilities(req.Mod.Path, affecting, nonaffecting, options, fromGovulncheck) + + return &protocol.Hover{ + Contents: protocol.MarkupContent{ + Kind: options.PreferredContentFormat, + Value: header + vulns + explanation, + }, + Range: rng, + }, nil +} + +func hoverOnModuleStatement(ctx context.Context, pm *source.ParsedModule, offset int, snapshot source.Snapshot, fh source.FileHandle) (*protocol.Hover, bool) { + module := pm.File.Module + if module == nil { + return nil, false // no module stmt + } + if offset < module.Syntax.Start.Byte || offset > module.Syntax.End.Byte { + return nil, false // cursor not in module stmt + } + + rng, err := pm.Mapper.OffsetRange(module.Syntax.Start.Byte, module.Syntax.End.Byte) + if err != nil { + return nil, false + } + fromGovulncheck := true + vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()] + + if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports { + vs, err = snapshot.ModVuln(ctx, fh.URI()) + if err != nil { + return nil, false + } + fromGovulncheck = false + } + modpath := "stdlib" + goVersion := snapshot.View().GoVersionString() + affecting, nonaffecting := lookupVulns(vs, modpath, goVersion) + options := snapshot.View().Options() + vulns := formatVulnerabilities(modpath, affecting, nonaffecting, options, fromGovulncheck) + + return &protocol.Hover{ + Contents: protocol.MarkupContent{ + Kind: options.PreferredContentFormat, + Value: vulns, + }, + Range: rng, + }, true +} + +func formatHeader(modpath string, options *source.Options) string { + var b strings.Builder + // Write the heading as an H3. + b.WriteString("#### " + modpath) + if options.PreferredContentFormat == protocol.Markdown { + b.WriteString("\n\n") + } else { + b.WriteRune('\n') + } + return b.String() +} + +func lookupVulns(vulns *govulncheck.Result, modpath, version string) (affecting, nonaffecting []*govulncheck.Vuln) { + if vulns == nil { + return nil, nil + } + for _, vuln := range vulns.Vulns { + for _, mod := range vuln.Modules { + if mod.Path != modpath { + continue + } + // It is possible that the source code was changed since the last + // govulncheck run and information in the `vulns` info is stale. + // For example, imagine that a user is in the middle of updating + // problematic modules detected by the govulncheck run by applying + // quick fixes. Stale diagnostics can be confusing and prevent the + // user from quickly locating the next module to fix. + // Ideally we should rerun the analysis with the updated module + // dependencies or any other code changes, but we are not yet + // in the position of automatically triggering the analysis + // (govulncheck can take a while). We also don't know exactly what + // part of source code was changed since `vulns` was computed. + // As a heuristic, we assume that a user upgrades the affecting + // module to the version with the fix or the latest one, and if the + // version in the require statement is equal to or higher than the + // fixed version, skip the vulnerability information in the hover. + // Eventually, the user has to rerun govulncheck. + if mod.FixedVersion != "" && semver.IsValid(version) && semver.Compare(mod.FixedVersion, version) <= 0 { + continue + } + if vuln.IsCalled() { + affecting = append(affecting, vuln) + } else { + nonaffecting = append(nonaffecting, vuln) + } + } + } + sort.Slice(nonaffecting, func(i, j int) bool { return nonaffecting[i].OSV.ID < nonaffecting[j].OSV.ID }) + sort.Slice(affecting, func(i, j int) bool { return affecting[i].OSV.ID < affecting[j].OSV.ID }) + return affecting, nonaffecting +} + +func formatVulnerabilities(modPath string, affecting, nonaffecting []*govulncheck.Vuln, options *source.Options, fromGovulncheck bool) string { + if len(affecting) == 0 && len(nonaffecting) == 0 { + return "" + } + + // TODO(hyangah): can we use go templates to generate hover messages? + // Then, we can use a different template for markdown case. + useMarkdown := options.PreferredContentFormat == protocol.Markdown + + var b strings.Builder + + if len(affecting) > 0 { + // TODO(hyangah): make the message more eyecatching (icon/codicon/color) + if len(affecting) == 1 { + b.WriteString(fmt.Sprintf("\n**WARNING:** Found %d reachable vulnerability.\n", len(affecting))) + } else { + b.WriteString(fmt.Sprintf("\n**WARNING:** Found %d reachable vulnerabilities.\n", len(affecting))) + } + } + for _, v := range affecting { + fix := fixedVersionInfo(v, modPath) + pkgs := vulnerablePkgsInfo(v, modPath, useMarkdown) + + if useMarkdown { + fmt.Fprintf(&b, "- [**%v**](%v) %v%v%v\n", v.OSV.ID, href(v.OSV), formatMessage(v), pkgs, fix) + } else { + fmt.Fprintf(&b, " - [%v] %v (%v) %v%v\n", v.OSV.ID, formatMessage(v), href(v.OSV), pkgs, fix) + } + } + if len(nonaffecting) > 0 { + if fromGovulncheck { + fmt.Fprintf(&b, "\n**Note:** The project imports packages with known vulnerabilities, but does not call the vulnerable code.\n") + } else { + fmt.Fprintf(&b, "\n**Note:** The project imports packages with known vulnerabilities. Use `govulncheck` to check if the project uses vulnerable symbols.\n") + } + } + for _, v := range nonaffecting { + fix := fixedVersionInfo(v, modPath) + pkgs := vulnerablePkgsInfo(v, modPath, useMarkdown) + if useMarkdown { + fmt.Fprintf(&b, "- [%v](%v) %v%v%v\n", v.OSV.ID, href(v.OSV), formatMessage(v), pkgs, fix) + } else { + fmt.Fprintf(&b, " - [%v] %v (%v) %v%v\n", v.OSV.ID, formatMessage(v), href(v.OSV), pkgs, fix) + } + } + b.WriteString("\n") + return b.String() +} + +func vulnerablePkgsInfo(v *govulncheck.Vuln, modPath string, useMarkdown bool) string { + var b bytes.Buffer + for _, m := range v.Modules { + if m.Path != modPath { + continue + } + if c := len(m.Packages); c == 1 { + b.WriteString("\n Vulnerable package is:") + } else if c > 1 { + b.WriteString("\n Vulnerable packages are:") + } + for _, pkg := range m.Packages { + if useMarkdown { + b.WriteString("\n * `") + } else { + b.WriteString("\n ") + } + b.WriteString(pkg.Path) + if useMarkdown { + b.WriteString("`") + } + } + } + if b.Len() == 0 { + return "" + } + return b.String() +} +func fixedVersionInfo(v *govulncheck.Vuln, modPath string) string { + fix := "\n\n **No fix is available.**" + for _, m := range v.Modules { + if m.Path != modPath { + continue + } + if m.FixedVersion != "" { + fix = "\n\n Fixed in " + m.FixedVersion + "." + } + break + } + return fix +} + +func formatExplanation(text string, req *modfile.Require, options *source.Options, isPrivate bool) string { + text = strings.TrimSuffix(text, "\n") + splt := strings.Split(text, "\n") + length := len(splt) + + var b strings.Builder + + // If the explanation is 2 lines, then it is of the form: + // # golang.org/x/text/encoding + // (main module does not need package golang.org/x/text/encoding) + if length == 2 { + b.WriteString(splt[1]) + return b.String() + } + + imp := splt[length-1] // import path + reference := imp + // See golang/go#36998: don't link to modules matching GOPRIVATE. + if !isPrivate && options.PreferredContentFormat == protocol.Markdown { + target := imp + if strings.ToLower(options.LinkTarget) == "pkg.go.dev" { + target = strings.Replace(target, req.Mod.Path, req.Mod.String(), 1) + } + reference = fmt.Sprintf("[%s](%s)", imp, source.BuildLink(options.LinkTarget, target, "")) + } + b.WriteString("This module is necessary because " + reference + " is imported in") + + // If the explanation is 3 lines, then it is of the form: + // # golang.org/x/tools + // modtest + // golang.org/x/tools/go/packages + if length == 3 { + msg := fmt.Sprintf(" `%s`.", splt[1]) + b.WriteString(msg) + return b.String() + } + + // If the explanation is more than 3 lines, then it is of the form: + // # golang.org/x/text/language + // rsc.io/quote + // rsc.io/sampler + // golang.org/x/text/language + b.WriteString(":\n```text") + dash := "" + for _, imp := range splt[1 : length-1] { + dash += "-" + b.WriteString("\n" + dash + " " + imp) + } + b.WriteString("\n```") + return b.String() +} diff --git a/internal/lsp/mod/mod_test.go b/gopls/internal/lsp/mod/mod_test.go similarity index 84% rename from internal/lsp/mod/mod_test.go rename to gopls/internal/lsp/mod/mod_test.go index b2d257caeeb..eead8acfc76 100644 --- a/internal/lsp/mod/mod_test.go +++ b/gopls/internal/lsp/mod/mod_test.go @@ -10,10 +10,10 @@ import ( "path/filepath" "testing" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/testenv" ) @@ -23,11 +23,8 @@ func TestMain(m *testing.M) { } func TestModfileRemainsUnchanged(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - ctx := tests.Context(t) - cache := cache.New(nil) - session := cache.NewSession(ctx) + session := cache.NewSession(ctx, cache.New(nil, nil), nil) options := source.DefaultOptions().Clone() tests.DefaultOptions(options) options.TempModfile = true @@ -46,10 +43,10 @@ func TestModfileRemainsUnchanged(t *testing.T) { t.Fatal(err) } _, _, release, err := session.NewView(ctx, "diagnostics_test", span.URIFromPath(folder), options) - release() if err != nil { t.Fatal(err) } + release() after, err := ioutil.ReadFile(filepath.Join(folder, "go.mod")) if err != nil { t.Fatal(err) diff --git a/internal/lsp/mod/testdata/unchanged/go.mod b/gopls/internal/lsp/mod/testdata/unchanged/go.mod similarity index 100% rename from internal/lsp/mod/testdata/unchanged/go.mod rename to gopls/internal/lsp/mod/testdata/unchanged/go.mod diff --git a/internal/lsp/mod/testdata/unchanged/main.go b/gopls/internal/lsp/mod/testdata/unchanged/main.go similarity index 100% rename from internal/lsp/mod/testdata/unchanged/main.go rename to gopls/internal/lsp/mod/testdata/unchanged/main.go diff --git a/internal/lsp/progress/progress.go b/gopls/internal/lsp/progress/progress.go similarity index 95% rename from internal/lsp/progress/progress.go rename to gopls/internal/lsp/progress/progress.go index d6794cf338b..32ac91186a9 100644 --- a/internal/lsp/progress/progress.go +++ b/gopls/internal/lsp/progress/progress.go @@ -12,9 +12,9 @@ import ( "strings" "sync" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/internal/event/tag" "golang.org/x/tools/internal/xcontext" ) @@ -118,7 +118,7 @@ func (t *Tracker) Start(ctx context.Context, title, message string, token protoc }, }) if err != nil { - event.Error(ctx, "generate progress begin", err) + event.Error(ctx, "progress begin", err) } return wd } @@ -167,7 +167,7 @@ func (wd *WorkDone) doCancel() { } } -// report reports an update on WorkDone report back to the client. +// Report reports an update on WorkDone report back to the client. func (wd *WorkDone) Report(ctx context.Context, message string, percentage float64) { ctx = xcontext.Detach(ctx) // progress messages should not be cancelled if wd == nil { @@ -202,7 +202,7 @@ func (wd *WorkDone) Report(ctx context.Context, message string, percentage float } } -// end reports a workdone completion back to the client. +// End reports a workdone completion back to the client. func (wd *WorkDone) End(ctx context.Context, message string) { ctx = xcontext.Detach(ctx) // progress messages should not be cancelled if wd == nil { @@ -260,8 +260,8 @@ type WorkDoneWriter struct { wd *WorkDone } -func NewWorkDoneWriter(wd *WorkDone) *WorkDoneWriter { - return &WorkDoneWriter{wd: wd} +func NewWorkDoneWriter(ctx context.Context, wd *WorkDone) *WorkDoneWriter { + return &WorkDoneWriter{ctx: ctx, wd: wd} } func (wdw *WorkDoneWriter) Write(p []byte) (n int, err error) { diff --git a/internal/lsp/progress/progress_test.go b/gopls/internal/lsp/progress/progress_test.go similarity index 98% rename from internal/lsp/progress/progress_test.go rename to gopls/internal/lsp/progress/progress_test.go index 6e901d17e97..ef87eba121a 100644 --- a/internal/lsp/progress/progress_test.go +++ b/gopls/internal/lsp/progress/progress_test.go @@ -10,7 +10,7 @@ import ( "sync" "testing" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) type fakeClient struct { diff --git a/internal/lsp/protocol/codeactionkind.go b/gopls/internal/lsp/protocol/codeactionkind.go similarity index 100% rename from internal/lsp/protocol/codeactionkind.go rename to gopls/internal/lsp/protocol/codeactionkind.go diff --git a/internal/lsp/protocol/context.go b/gopls/internal/lsp/protocol/context.go similarity index 100% rename from internal/lsp/protocol/context.go rename to gopls/internal/lsp/protocol/context.go diff --git a/gopls/internal/lsp/protocol/doc.go b/gopls/internal/lsp/protocol/doc.go new file mode 100644 index 00000000000..4eb03a00751 --- /dev/null +++ b/gopls/internal/lsp/protocol/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protocol contains the structs that map directly to the +// request and response messages of the Language Server Protocol. +// +// It is a literal transcription, with unmodified comments, and only the changes +// required to make it go code. +// Names are uppercased to export them. +// All fields have JSON tags added to correct the names. +// Fields marked with a ? are also marked as "omitempty" +// Fields that are "|| null" are made pointers +// Fields that are string or number are left as string +// Fields that are type "number" are made float64 +package protocol diff --git a/internal/lsp/protocol/enums.go b/gopls/internal/lsp/protocol/enums.go similarity index 94% rename from internal/lsp/protocol/enums.go rename to gopls/internal/lsp/protocol/enums.go index 434808eeb18..82398e22189 100644 --- a/internal/lsp/protocol/enums.go +++ b/gopls/internal/lsp/protocol/enums.go @@ -10,7 +10,6 @@ import ( var ( namesTextDocumentSyncKind [int(Incremental) + 1]string - namesInitializeError [int(UnknownProtocolVersion) + 1]string namesMessageType [int(Log) + 1]string namesFileChangeType [int(Deleted) + 1]string namesWatchKind [int(WatchDelete) + 1]string @@ -29,8 +28,6 @@ func init() { namesTextDocumentSyncKind[int(Full)] = "Full" namesTextDocumentSyncKind[int(Incremental)] = "Incremental" - namesInitializeError[int(UnknownProtocolVersion)] = "UnknownProtocolVersion" - namesMessageType[int(Error)] = "Error" namesMessageType[int(Warning)] = "Warning" namesMessageType[int(Info)] = "Info" @@ -149,14 +146,6 @@ func ParseTextDocumentSyncKind(s string) TextDocumentSyncKind { return TextDocumentSyncKind(parseEnum(s, namesTextDocumentSyncKind[:])) } -func (e InitializeError) Format(f fmt.State, c rune) { - formatEnum(f, c, int(e), namesInitializeError[:], "InitializeError") -} - -func ParseInitializeError(s string) InitializeError { - return InitializeError(parseEnum(s, namesInitializeError[:])) -} - func (e MessageType) Format(f fmt.State, c rune) { formatEnum(f, c, int(e), namesMessageType[:], "MessageType") } @@ -173,10 +162,6 @@ func ParseFileChangeType(s string) FileChangeType { return FileChangeType(parseEnum(s, namesFileChangeType[:])) } -func (e WatchKind) Format(f fmt.State, c rune) { - formatEnum(f, c, int(e), namesWatchKind[:], "WatchKind") -} - func ParseWatchKind(s string) WatchKind { return WatchKind(parseEnum(s, namesWatchKind[:])) } diff --git a/gopls/internal/lsp/protocol/generate/README.md b/gopls/internal/lsp/protocol/generate/README.md new file mode 100644 index 00000000000..7c34c623ea2 --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/README.md @@ -0,0 +1,138 @@ +# LSP Support for gopls + +## The protocol + +The LSP protocol exchanges json-encoded messages between the client and the server. +(gopls is the server.) The messages are either Requests, which require Responses, or +Notifications, which generate no response. Each Request or Notification has a method name +such as "textDocument/hover" that indicates its meaning and determines which function in the server will handle it. +The protocol is described in a +[web page](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.18/specification/), +in words, and in a json file (metaModel.json) available either linked towards the bottom of the +web page, or in the vscode-languageserver-node repository. This code uses the latter so the +exact version can be tied to a githash. Download the repository with + +`git clone https://github.com/microsoft/vscode-languageserver-node.git` + +The specification has five sections +1. Requests, which describe the Request and Response types for request methods (e.g., *textDocument/didChange*), +2. Notifications, which describe the Request types for notification methods, +3. Structures, which describe named struct-like types, +4. TypeAliases, which describe type aliases, +5. Enumerations, which describe named constants. + +Requests and Notifications are tagged with a Method (e.g., `"textDocument/hover"`). +The specification does not specify the names of the functions that handle the messages. These +names are specified by the `methodNames` map. Enumerations generate Go `const`s, but +in Typescript they are scoped to namespaces, while in Go they are scoped to a package, so the Go names +may need to be modified to avoid name collisions. (See the `disambiguate` map, and its use.) + +Finally, the specified types are Typescript types, which are quite different from Go types. + +### Optionality +The specification can mark fields in structs as Optional. The client distinguishes between missing +fields and `null` fields in some cases. The Go translation for an optional type +should be making sure the field's value +can be `nil`, and adding the json tag `,omitempty`. The former condition would be satisfied by +adding `*` to the field's type if the type is not a reference type. + +### Types +The specification uses a number of different types, only a few of which correspond directly to Go types. +The specification's types are "base", "reference", "map", "literal", "stringLiteral", "tuple", "and", "or". +The "base" types correspond directly to Go types, although some Go types needs to be chosen for `URI` and `DocumentUri`. (The "base" types`RegExp`, `BooleanLiteral`, `NumericLiteral` never occur.) + +"reference" types are the struct-like types in the Structures section of the specification. The given +names are suitable for Go to use, except the code needs to change names like `_Initialze` to `XInitialize` so +they are exported for json marshaling and unmarshaling. + +"map" types are just like Go. (The key type in all of them is `DocumentUri`.) + +"stringLiteral" types are types whose type name and value are a single string. The chosen Go equivalent +is to make the type `string` and the value a constant. (The alternative would be to generate a new +named type, which seemed redundant.) + +"literal" types are like Go anonymous structs, so they have to be given a name. (All instances +of the remaining types have to be given names. One approach is to construct the name from the components +of the type, but this leads to misleading punning, and is unstable if components are added. The other approach +is to construct the name from the context of the definition, that is, from the types it is defined within. +For instance `Lit__InitializeParams_clientInfo` is the "literal" type at the +`clientInfo` field in the `_InitializeParams` +struct. Although this choice is sensitive to the ordering of the components, the code uses this approach, +presuming that reordering components is an unlikely protocol change.) + +"tuple" types are generated as Go structs. (There is only one, with two `uint32` fields.) + +"and" types are Go structs with embedded type names. (There is only one, `And_Param_workspace_configuration`.) + +"or" types are the most complicated. There are a lot of them and there is no simple Go equivalent. +They are defined as structs with a single `Value interface{}` field and custom json marshaling +and unmarshaling code. Users can assign anything to `Value` but the type will be checked, and +correctly marshaled, by the custom marshaling code. The unmarshaling code checks types, so `Value` +will have one of the permitted types. (`nil` is always allowed.) There are about 40 "or" types that +have a single non-null component, and these are converted to the component type. + +## Processing +The code parses the json specification file, and scans all the types. It assigns names, as described +above, to the types that are unnamed in the specification, and constructs Go equivalents as required. +(Most of this code is in typenames.go.) + +There are four output files. tsclient.go and tsserver.go contain the definition and implementation +of the `protocol.Client` and `protocol.Server` types and the code that dispatches on the Method +of the Request or Notification. tsjson.go contains the custom marshaling and unmarshaling code. +And tsprotocol.go contains the type and const definitions. + +### Accommodating gopls +As the code generates output, mostly in generateoutput.go and main.go, +it makes adjustments so that no changes are required to the existing Go code. +(Organizing the computation this way makes the code's structure simpler, but results in +a lot of unused types.) +There are three major classes of these adjustments, and leftover special cases. + +The first major +adjustment is to change generated type names to the ones gopls expects. Some of these don't change the +semantics of the type, just the name. +But for historical reasons a lot of them replace "or" types by a single +component of the type. (Until fairly recently Go only saw or used only one of components.) +The `goplsType` map in tables.go controls this process. + +The second major adjustment is to the types of fields of structs, which is done using the +`renameProp` map in tables.go. + +The third major adjustment handles optionality, controlling `*` and `,omitempty` placement when +the default rules don't match what gopls is expecting. (The map is `goplsStar`, also in tables.go) +(If the intermediate components in expressions of the form `A.B.C.S` were optional, the code would need +a lot of useless checking for nils. Typescript has a language construct to avoid most checks.) + +Then there are some additional special cases. There are a few places with adjustments to avoid +recursive types. For instance `LSPArray` is `[]LSPAny`, but `LSPAny` is an "or" type including `LSPArray`. +The solution is to make `LSPAny` an `interface{}`. Another instance is `_InitializeParams.trace` +whose type is an "or" of 3 stringLiterals, which just becomes a `string`. + +### Checking +`TestAll(t *testing.T)` checks that there are no unexpected fields in the json specification. + +While the code is executing, it checks that all the entries in the maps in tables.go are used. +It also checks that the entries in `renameProp` and `goplsStar` are not redundant. + +As a one-time check on the first release of this code, diff-ing the existing and generated tsclient.go +and tsserver.go code results in only whitespace and comment diffs. The existing and generated +tsprotocol.go differ in whitespace and comments, and in a substantial number of new type definitions +that the older, more heuristic, code did not generate. (And the unused type `_InitializeParams` differs +slightly between the new and the old, and is not worth fixing.) + +### Some history +The original stub code was written by hand, but with the protocol under active development, that +couldn't last. The web page existed before the json specification, but it lagged the implementation +and was hard to process by machine. So the earlier version of the generating code was written in Typescript, and +used the Typescript compiler's API to parse the protocol code in the repository. +It then used a set of heuristics +to pick out the elements of the protocol, and another set of overlapping heuristics to create the Go code. +The output was functional, but idiosyncratic, and the code was fragile and barely maintainable. + +### The future +Most of the adjustments using the maps in tables.go could be removed by making changes, mostly to names, +in the gopls code. Using more "or" types in gopls requires more elaborate, but stereotyped, changes. +But even without all the adjustments, making this its own module would face problems; a number of +dependencies would have to be factored out. And, it is fragile. The custom unmarshaling code knows what +types it expects. A design that return an 'any' on unexpected types would match the json +'ignore unexpected values' philosophy better, but the the Go code would need extra checking. diff --git a/gopls/internal/lsp/protocol/generate/generate.go b/gopls/internal/lsp/protocol/generate/generate.go new file mode 100644 index 00000000000..3746228275d --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/generate.go @@ -0,0 +1,107 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "bytes" + "fmt" + "log" + "strings" +) + +// a newType is a type that needs a name and a definition +// These are the various types that the json specification doesn't name +type newType struct { + name string + properties Properties // for struct/literal types + items []*Type // for other types ("and", "tuple") + line int + kind string // Or, And, Tuple, Lit, Map + typ *Type +} + +func generateDoc(out *bytes.Buffer, doc string) { + if doc == "" { + return + } + + if !strings.Contains(doc, "\n") { + fmt.Fprintf(out, "// %s\n", doc) + return + } + out.WriteString("/*\n") + for _, line := range strings.Split(doc, "\n") { + fmt.Fprintf(out, " * %s\n", line) + } + out.WriteString(" */\n") +} + +// decide if a property is optional, and if it needs a * +// return ",omitempty" if it is optional, and "*" if it needs a pointer +func propStar(name string, t NameType, gotype string) (string, string) { + var opt, star string + if t.Optional { + star = "*" + opt = ",omitempty" + } + if strings.HasPrefix(gotype, "[]") || strings.HasPrefix(gotype, "map[") { + star = "" // passed by reference, so no need for * + } else { + switch gotype { + case "bool", "uint32", "int32", "string", "interface{}": + star = "" // gopls compatibility if t.Optional + } + } + ostar, oopt := star, opt + if newStar, ok := goplsStar[prop{name, t.Name}]; ok { + switch newStar { + case nothing: + star, opt = "", "" + case wantStar: + star, opt = "*", "" + case wantOpt: + star, opt = "", ",omitempty" + case wantOptStar: + star, opt = "*", ",omitempty" + } + if star == ostar && opt == oopt { // no change + log.Printf("goplsStar[ {%q, %q} ](%d) useless %s/%s %s/%s", name, t.Name, t.Line, ostar, star, oopt, opt) + } + usedGoplsStar[prop{name, t.Name}] = true + } + + return opt, star +} + +func goName(s string) string { + // Go naming conventions + if strings.HasSuffix(s, "Id") { + s = s[:len(s)-len("Id")] + "ID" + } else if strings.HasSuffix(s, "Uri") { + s = s[:len(s)-3] + "URI" + } else if s == "uri" { + s = "URI" + } else if s == "id" { + s = "ID" + } + + // renames for temporary GOPLS compatibility + if news := goplsType[s]; news != "" { + usedGoplsType[s] = true + s = news + } + // Names beginning _ are not exported + if strings.HasPrefix(s, "_") { + s = strings.Replace(s, "_", "X", 1) + } + if s != "string" { // base types are unchanged (textDocuemnt/diagnostic) + // Title is deprecated, but a) s is only one word, b) replacement is too heavy-weight + s = strings.Title(s) + } + return s +} diff --git a/gopls/internal/lsp/protocol/generate/main.go b/gopls/internal/lsp/protocol/generate/main.go new file mode 100644 index 00000000000..b00a00e000f --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/main.go @@ -0,0 +1,342 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "go/format" + "log" + "os" + "path/filepath" + "strings" + "time" +) + +var ( + // git clone https://github.com/microsoft/vscode-languageserver-node.git + repodir = flag.String("d", "", "directory of vscode-languageserver-node") + outputdir = flag.String("o", "gen", "output directory") + // PJW: not for real code + cmpdir = flag.String("c", "", "directory of earlier code") + doboth = flag.String("b", "", "generate and compare") +) + +func main() { + log.SetFlags(log.Lshortfile) // log file name and line number, not time + flag.Parse() + + processinline() +} + +func processinline() { + if *repodir == "" { + *repodir = filepath.Join(os.Getenv("HOME"), "vscode-languageserver-node") + } + + model := parse(filepath.Join(*repodir, "protocol/metaModel.json")) + + findTypeNames(model) + generateOutput(model) + + fileHdr = fileHeader(model) + + // write the files + writeclient() + writeserver() + writeprotocol() + writejsons() + + checkTables() +} + +// common file header for output files +var fileHdr string + +func writeclient() { + out := new(bytes.Buffer) + fmt.Fprintln(out, fileHdr) + out.WriteString( + `import ( + "context" + "encoding/json" + + "golang.org/x/tools/internal/jsonrpc2" +) +`) + out.WriteString("type Client interface {\n") + for _, k := range cdecls.keys() { + out.WriteString(cdecls[k]) + } + out.WriteString("}\n\n") + out.WriteString("func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {\n") + out.WriteString("\tswitch r.Method() {\n") + for _, k := range ccases.keys() { + out.WriteString(ccases[k]) + } + out.WriteString(("\tdefault:\n\t\treturn false, nil\n\t}\n}\n\n")) + for _, k := range cfuncs.keys() { + out.WriteString(cfuncs[k]) + } + + x, err := format.Source(out.Bytes()) + if err != nil { + os.WriteFile("/tmp/a.go", out.Bytes(), 0644) + log.Fatalf("tsclient.go: %v", err) + } + + if err := os.WriteFile(filepath.Join(*outputdir, "tsclient.go"), x, 0644); err != nil { + log.Fatalf("%v writing tsclient.go", err) + } +} + +func writeserver() { + out := new(bytes.Buffer) + fmt.Fprintln(out, fileHdr) + out.WriteString( + `import ( + "context" + "encoding/json" + + "golang.org/x/tools/internal/jsonrpc2" +) +`) + out.WriteString("type Server interface {\n") + for _, k := range sdecls.keys() { + out.WriteString(sdecls[k]) + } + out.WriteString(` NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) +} + +func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { + switch r.Method() { +`) + for _, k := range scases.keys() { + out.WriteString(scases[k]) + } + out.WriteString(("\tdefault:\n\t\treturn false, nil\n\t}\n}\n\n")) + for _, k := range sfuncs.keys() { + out.WriteString(sfuncs[k]) + } + out.WriteString(`func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) { + var result interface{} + if err := s.sender.Call(ctx, method, params, &result); err != nil { + return nil, err + } + return result, nil +} +`) + + x, err := format.Source(out.Bytes()) + if err != nil { + os.WriteFile("/tmp/a.go", out.Bytes(), 0644) + log.Fatalf("tsserver.go: %v", err) + } + + if err := os.WriteFile(filepath.Join(*outputdir, "tsserver.go"), x, 0644); err != nil { + log.Fatalf("%v writing tsserver.go", err) + } +} + +func writeprotocol() { + out := new(bytes.Buffer) + fmt.Fprintln(out, fileHdr) + out.WriteString("import \"encoding/json\"\n\n") + + // The followiing are unneeded, but make the new code a superset of the old + hack := func(newer, existing string) { + if _, ok := types[existing]; !ok { + log.Fatalf("types[%q] not found", existing) + } + types[newer] = strings.Replace(types[existing], existing, newer, 1) + } + hack("ConfigurationParams", "ParamConfiguration") + hack("InitializeParams", "ParamInitialize") + hack("PreviousResultId", "PreviousResultID") + hack("WorkspaceFoldersServerCapabilities", "WorkspaceFolders5Gn") + hack("_InitializeParams", "XInitializeParams") + // and some aliases to make the new code contain the old + types["PrepareRename2Gn"] = "type PrepareRename2Gn = Msg_PrepareRename2Gn // (alias) line 13927\n" + types["PrepareRenameResult"] = "type PrepareRenameResult = Msg_PrepareRename2Gn // (alias) line 13927\n" + for _, k := range types.keys() { + if k == "WatchKind" { + types[k] = "type WatchKind = uint32 // line 13505" // strict gopls compatibility needs the '=' + } + out.WriteString(types[k]) + } + + out.WriteString("\nconst (\n") + for _, k := range consts.keys() { + out.WriteString(consts[k]) + } + out.WriteString(")\n\n") + x, err := format.Source(out.Bytes()) + if err != nil { + os.WriteFile("/tmp/a.go", out.Bytes(), 0644) + log.Fatalf("tsprotocol.go: %v", err) + } + if err := os.WriteFile(filepath.Join(*outputdir, "tsprotocol.go"), x, 0644); err != nil { + log.Fatalf("%v writing tsprotocol.go", err) + } +} + +func writejsons() { + out := new(bytes.Buffer) + fmt.Fprintln(out, fileHdr) + out.WriteString("import \"encoding/json\"\n\n") + out.WriteString("import \"errors\"\n") + out.WriteString("import \"fmt\"\n") + + for _, k := range jsons.keys() { + out.WriteString(jsons[k]) + } + x, err := format.Source(out.Bytes()) + if err != nil { + os.WriteFile("/tmp/a.go", out.Bytes(), 0644) + log.Fatalf("tsjson.go: %v", err) + } + if err := os.WriteFile(filepath.Join(*outputdir, "tsjson.go"), x, 0644); err != nil { + log.Fatalf("%v writing tsjson.go", err) + } +} + +// create the common file header for the output files +func fileHeader(model Model) string { + fname := filepath.Join(*repodir, ".git", "HEAD") + buf, err := os.ReadFile(fname) + if err != nil { + log.Fatal(err) + } + buf = bytes.TrimSpace(buf) + var githash string + if len(buf) == 40 { + githash = string(buf[:40]) + } else if bytes.HasPrefix(buf, []byte("ref: ")) { + fname = filepath.Join(*repodir, ".git", string(buf[5:])) + buf, err = os.ReadFile(fname) + if err != nil { + log.Fatal(err) + } + githash = string(buf[:40]) + } else { + log.Fatalf("githash cannot be recovered from %s", fname) + } + + format := `// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated for LSP. DO NOT EDIT. + +package protocol + +// Code generated from version %s of protocol/metaModel.json. +// git hash %s (as of %s) + + +` + now := time.Now().Format("2006-01-02") + return fmt.Sprintf(format, model.Version.Version, githash, now) +} + +func parse(fname string) Model { + buf, err := os.ReadFile(fname) + if err != nil { + log.Fatal(err) + } + buf = addLineNumbers(buf) + var model Model + if err := json.Unmarshal(buf, &model); err != nil { + log.Fatal(err) + } + return model +} + +// Type.Value has to be treated specially for literals and maps +func (t *Type) UnmarshalJSON(data []byte) error { + // First unmarshal only the unambiguous fields. + var x struct { + Kind string `json:"kind"` + Items []*Type `json:"items"` + Element *Type `json:"element"` + Name string `json:"name"` + Key *Type `json:"key"` + Value any `json:"value"` + Line int `json:"line"` + } + if err := json.Unmarshal(data, &x); err != nil { + return err + } + *t = Type{ + Kind: x.Kind, + Items: x.Items, + Element: x.Element, + Name: x.Name, + Value: x.Value, + Line: x.Line, + } + + // Then unmarshal the 'value' field based on the kind. + // This depends on Unmarshal ignoring fields it doesn't know about. + switch x.Kind { + case "map": + var x struct { + Key *Type `json:"key"` + Value *Type `json:"value"` + } + if err := json.Unmarshal(data, &x); err != nil { + return fmt.Errorf("Type.kind=map: %v", err) + } + t.Key = x.Key + t.Value = x.Value + + case "literal": + var z struct { + Value ParseLiteral `json:"value"` + } + + if err := json.Unmarshal(data, &z); err != nil { + return fmt.Errorf("Type.kind=literal: %v", err) + } + t.Value = z.Value + + case "base", "reference", "array", "and", "or", "tuple", + "stringLiteral": + // no-op. never seen integerLiteral or booleanLiteral. + + default: + return fmt.Errorf("cannot decode Type.kind %q: %s", x.Kind, data) + } + return nil +} + +// which table entries were not used +func checkTables() { + for k := range disambiguate { + if !usedDisambiguate[k] { + log.Printf("disambiguate[%v] unused", k) + } + } + for k := range renameProp { + if !usedRenameProp[k] { + log.Printf("renameProp {%q, %q} unused", k[0], k[1]) + } + } + for k := range goplsStar { + if !usedGoplsStar[k] { + log.Printf("goplsStar {%q, %q} unused", k[0], k[1]) + } + } + for k := range goplsType { + if !usedGoplsType[k] { + log.Printf("unused goplsType[%q]->%s", k, goplsType[k]) + } + } +} diff --git a/gopls/internal/lsp/protocol/generate/main_test.go b/gopls/internal/lsp/protocol/generate/main_test.go new file mode 100644 index 00000000000..f887066ee2d --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/main_test.go @@ -0,0 +1,118 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "encoding/json" + "fmt" + "log" + "os" + "testing" +) + +// These tests require the result of +//"git clone https://github.com/microsoft/vscode-languageserver-node" in the HOME directory + +// this is not a test, but a way to get code coverage, +// (in vscode, just run the test with "go.coverOnSingleTest": true) +func TestAll(t *testing.T) { + t.Skip("needs vscode-languageserver-node repository") + log.SetFlags(log.Lshortfile) + main() +} + +// check that the parsed file includes all the information +// from the json file. This test will fail if the spec +// introduces new fields. (one can test this test by +// commenting out the version field in Model.) +func TestParseContents(t *testing.T) { + t.Skip("needs vscode-languageserver-node repository") + log.SetFlags(log.Lshortfile) + + // compute our parse of the specification + dir := os.Getenv("HOME") + "/vscode-languageserver-node" + fname := dir + "/protocol/metaModel.json" + v := parse(fname) + out, err := json.Marshal(v) + if err != nil { + t.Fatal(err) + } + var our interface{} + if err := json.Unmarshal(out, &our); err != nil { + t.Fatal(err) + } + + // process the json file + buf, err := os.ReadFile(fname) + if err != nil { + t.Fatalf("could not read metaModel.json: %v", err) + } + var raw interface{} + if err := json.Unmarshal(buf, &raw); err != nil { + t.Fatal(err) + } + + // convert to strings showing the fields + them := flatten(raw) + us := flatten(our) + + // everything in them should be in us + lesser := make(sortedMap[bool]) + for _, s := range them { + lesser[s] = true + } + greater := make(sortedMap[bool]) // set of fields we have + for _, s := range us { + greater[s] = true + } + for _, k := range lesser.keys() { // set if fields they have + if !greater[k] { + t.Errorf("missing %s", k) + } + } +} + +// flatten(nil) = "nil" +// flatten(v string) = fmt.Sprintf("%q", v) +// flatten(v float64)= fmt.Sprintf("%g", v) +// flatten(v bool) = fmt.Sprintf("%v", v) +// flatten(v []any) = []string{"[0]"flatten(v[0]), "[1]"flatten(v[1]), ...} +// flatten(v map[string]any) = {"key1": flatten(v["key1"]), "key2": flatten(v["key2"]), ...} +func flatten(x any) []string { + switch v := x.(type) { + case nil: + return []string{"nil"} + case string: + return []string{fmt.Sprintf("%q", v)} + case float64: + return []string{fmt.Sprintf("%g", v)} + case bool: + return []string{fmt.Sprintf("%v", v)} + case []any: + var ans []string + for i, x := range v { + idx := fmt.Sprintf("[%.3d]", i) + for _, s := range flatten(x) { + ans = append(ans, idx+s) + } + } + return ans + case map[string]any: + var ans []string + for k, x := range v { + idx := fmt.Sprintf("%q:", k) + for _, s := range flatten(x) { + ans = append(ans, idx+s) + } + } + return ans + default: + log.Fatalf("unexpected type %T", x) + return nil + } +} diff --git a/gopls/internal/lsp/protocol/generate/output.go b/gopls/internal/lsp/protocol/generate/output.go new file mode 100644 index 00000000000..04f1080c195 --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/output.go @@ -0,0 +1,420 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +var ( + // tsclient.go has 3 sections + cdecls = make(sortedMap[string]) + ccases = make(sortedMap[string]) + cfuncs = make(sortedMap[string]) + // tsserver.go has 3 sections + sdecls = make(sortedMap[string]) + scases = make(sortedMap[string]) + sfuncs = make(sortedMap[string]) + // tsprotocol.go has 2 sections + types = make(sortedMap[string]) + consts = make(sortedMap[string]) + // tsjson has 1 section + jsons = make(sortedMap[string]) +) + +func generateOutput(model Model) { + for _, r := range model.Requests { + genDecl(r.Method, r.Params, r.Result, r.Direction) + genCase(r.Method, r.Params, r.Result, r.Direction) + genFunc(r.Method, r.Params, r.Result, r.Direction, false) + } + for _, n := range model.Notifications { + if n.Method == "$/cancelRequest" { + continue // handled internally by jsonrpc2 + } + genDecl(n.Method, n.Params, nil, n.Direction) + genCase(n.Method, n.Params, nil, n.Direction) + genFunc(n.Method, n.Params, nil, n.Direction, true) + } + genStructs(model) + genAliases(model) + genGenTypes() // generate the unnamed types + genConsts(model) + genMarshal() +} + +func genDecl(method string, param, result *Type, dir string) { + fname := methodNames[method] + p := "" + if notNil(param) { + p = ", *" + goplsName(param) + } + ret := "error" + if notNil(result) { + tp := goplsName(result) + if !hasNilValue(tp) { + tp = "*" + tp + } + ret = fmt.Sprintf("(%s, error)", tp) + } + // special gopls compatibility case (PJW: still needed?) + switch method { + case "workspace/configuration": + // was And_Param_workspace_configuration, but the type substitution doesn't work, + // as ParamConfiguration is embedded in And_Param_workspace_configuration + p = ", *ParamConfiguration" + ret = "([]LSPAny, error)" + } + msg := fmt.Sprintf("\t%s(context.Context%s) %s // %s\n", fname, p, ret, method) + switch dir { + case "clientToServer": + sdecls[method] = msg + case "serverToClient": + cdecls[method] = msg + case "both": + sdecls[method] = msg + cdecls[method] = msg + default: + log.Fatalf("impossible direction %q", dir) + } +} + +func genCase(method string, param, result *Type, dir string) { + out := new(bytes.Buffer) + fmt.Fprintf(out, "\tcase %q:\n", method) + var p string + fname := methodNames[method] + if notNil(param) { + nm := goplsName(param) + if method == "workspace/configuration" { // gopls compatibility + // was And_Param_workspace_configuration, which contains ParamConfiguration + // so renaming the type leads to circular definitions + nm = "ParamConfiguration" // gopls compatibility + } + fmt.Fprintf(out, "\t\tvar params %s\n", nm) + fmt.Fprintf(out, "\t\tif err := json.Unmarshal(r.Params(), ¶ms); err != nil {\n") + fmt.Fprintf(out, "\t\t\treturn true, sendParseError(ctx, reply, err)\n\t\t}\n") + p = ", ¶ms" + } + if notNil(result) { + fmt.Fprintf(out, "\t\tresp, err := %%s.%s(ctx%s)\n", fname, p) + out.WriteString("\t\tif err != nil {\n") + out.WriteString("\t\t\treturn true, reply(ctx, nil, err)\n") + out.WriteString("\t\t}\n") + out.WriteString("\t\treturn true, reply(ctx, resp, nil)\n") + } else { + fmt.Fprintf(out, "\t\terr := %%s.%s(ctx%s)\n", fname, p) + out.WriteString("\t\treturn true, reply(ctx, nil, err)\n") + } + msg := out.String() + switch dir { + case "clientToServer": + scases[method] = fmt.Sprintf(msg, "server") + case "serverToClient": + ccases[method] = fmt.Sprintf(msg, "client") + case "both": + scases[method] = fmt.Sprintf(msg, "server") + ccases[method] = fmt.Sprintf(msg, "client") + default: + log.Fatalf("impossible direction %q", dir) + } +} + +func genFunc(method string, param, result *Type, dir string, isnotify bool) { + out := new(bytes.Buffer) + var p, r string + var goResult string + if notNil(param) { + p = ", params *" + goplsName(param) + } + if notNil(result) { + goResult = goplsName(result) + if !hasNilValue(goResult) { + goResult = "*" + goResult + } + r = fmt.Sprintf("(%s, error)", goResult) + } else { + r = "error" + } + // special gopls compatibility case + switch method { + case "workspace/configuration": + // was And_Param_workspace_configuration, but the type substitution doesn't work, + // as ParamConfiguration is embedded in And_Param_workspace_configuration + p = ", params *ParamConfiguration" + r = "([]LSPAny, error)" + goResult = "[]LSPAny" + } + fname := methodNames[method] + fmt.Fprintf(out, "func (s *%%sDispatcher) %s(ctx context.Context%s) %s {\n", + fname, p, r) + + if !notNil(result) { + if isnotify { + if notNil(param) { + fmt.Fprintf(out, "\treturn s.sender.Notify(ctx, %q, params)\n", method) + } else { + fmt.Fprintf(out, "\treturn s.sender.Notify(ctx, %q, nil)\n", method) + } + } else { + if notNil(param) { + fmt.Fprintf(out, "\treturn s.sender.Call(ctx, %q, params, nil)\n", method) + } else { + fmt.Fprintf(out, "\treturn s.sender.Call(ctx, %q, nil, nil)\n", method) + } + } + } else { + fmt.Fprintf(out, "\tvar result %s\n", goResult) + if isnotify { + if notNil(param) { + fmt.Fprintf(out, "\ts.sender.Notify(ctx, %q, params)\n", method) + } else { + fmt.Fprintf(out, "\t\tif err := s.sender.Notify(ctx, %q, nil); err != nil {\n", method) + } + } else { + if notNil(param) { + fmt.Fprintf(out, "\t\tif err := s.sender.Call(ctx, %q, params, &result); err != nil {\n", method) + } else { + fmt.Fprintf(out, "\t\tif err := s.sender.Call(ctx, %q, nil, &result); err != nil {\n", method) + } + } + fmt.Fprintf(out, "\t\treturn nil, err\n\t}\n\treturn result, nil\n") + } + out.WriteString("}\n") + msg := out.String() + switch dir { + case "clientToServer": + sfuncs[method] = fmt.Sprintf(msg, "server") + case "serverToClient": + cfuncs[method] = fmt.Sprintf(msg, "client") + case "both": + sfuncs[method] = fmt.Sprintf(msg, "server") + cfuncs[method] = fmt.Sprintf(msg, "client") + default: + log.Fatalf("impossible direction %q", dir) + } +} + +func genStructs(model Model) { + structures := make(map[string]*Structure) // for expanding Extends + for _, s := range model.Structures { + structures[s.Name] = s + } + for _, s := range model.Structures { + out := new(bytes.Buffer) + generateDoc(out, s.Documentation) + nm := goName(s.Name) + if nm == "string" { // an unacceptable strut name + // a weird case, and needed only so the generated code contains the old gopls code + nm = "DocumentDiagnosticParams" + } + fmt.Fprintf(out, "type %s struct { // line %d\n", nm, s.Line) + // for gpls compatibilitye, embed most extensions, but expand the rest some day + props := append([]NameType{}, s.Properties...) + if s.Name == "SymbolInformation" { // but expand this one + for _, ex := range s.Extends { + fmt.Fprintf(out, "\t// extends %s\n", ex.Name) + props = append(props, structures[ex.Name].Properties...) + } + genProps(out, props, nm) + } else { + genProps(out, props, nm) + for _, ex := range s.Extends { + fmt.Fprintf(out, "\t%s\n", goName(ex.Name)) + } + } + for _, ex := range s.Mixins { + fmt.Fprintf(out, "\t%s\n", goName(ex.Name)) + } + out.WriteString("}\n") + types[nm] = out.String() + } + // base types + types["DocumentURI"] = "type DocumentURI string\n" + types["URI"] = "type URI = string\n" + + types["LSPAny"] = "type LSPAny = interface{}\n" + // A special case, the only previously existing Or type + types["DocumentDiagnosticReport"] = "type DocumentDiagnosticReport = Or_DocumentDiagnosticReport // (alias) line 13909\n" + +} + +func genProps(out *bytes.Buffer, props []NameType, name string) { + for _, p := range props { + tp := goplsName(p.Type) + if newNm, ok := renameProp[prop{name, p.Name}]; ok { + usedRenameProp[prop{name, p.Name}] = true + if tp == newNm { + log.Printf("renameProp useless {%q, %q} for %s", name, p.Name, tp) + } + tp = newNm + } + // it's a pointer if it is optional, or for gopls compatibility + opt, star := propStar(name, p, tp) + json := fmt.Sprintf(" `json:\"%s%s\"`", p.Name, opt) + generateDoc(out, p.Documentation) + fmt.Fprintf(out, "\t%s %s%s %s\n", goName(p.Name), star, tp, json) + } +} + +func genAliases(model Model) { + for _, ta := range model.TypeAliases { + out := new(bytes.Buffer) + generateDoc(out, ta.Documentation) + nm := goName(ta.Name) + if nm != ta.Name { + continue // renamed the type, e.g., "DocumentDiagnosticReport", an or-type to "string" + } + tp := goplsName(ta.Type) + fmt.Fprintf(out, "type %s = %s // (alias) line %d\n", nm, tp, ta.Line) + types[nm] = out.String() + } +} + +func genGenTypes() { + for _, nt := range genTypes { + out := new(bytes.Buffer) + nm := goplsName(nt.typ) + switch nt.kind { + case "literal": + fmt.Fprintf(out, "// created for Literal (%s)\n", nt.name) + fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1) + genProps(out, nt.properties, nt.name) // systematic name, not gopls name; is this a good choice? + case "or": + if !strings.HasPrefix(nm, "Or") { + // It was replaced by a narrower type defined elsewhere + continue + } + names := []string{} + for _, t := range nt.items { + if notNil(t) { + names = append(names, goplsName(t)) + } + } + sort.Strings(names) + fmt.Fprintf(out, "// created for Or %v\n", names) + fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1) + fmt.Fprintf(out, "\tValue interface{} `json:\"value\"`\n") + case "and": + fmt.Fprintf(out, "// created for And\n") + fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1) + for _, x := range nt.items { + nm := goplsName(x) + fmt.Fprintf(out, "\t%s\n", nm) + } + case "tuple": // there's only this one + nt.name = "UIntCommaUInt" + fmt.Fprintf(out, "//created for Tuple\ntype %s struct { // line %d\n", nm, nt.line+1) + fmt.Fprintf(out, "\tFld0 uint32 `json:\"fld0\"`\n") + fmt.Fprintf(out, "\tFld1 uint32 `json:\"fld1\"`\n") + default: + log.Fatalf("%s not handled", nt.kind) + } + out.WriteString("}\n") + types[nm] = out.String() + } +} +func genConsts(model Model) { + for _, e := range model.Enumerations { + out := new(bytes.Buffer) + generateDoc(out, e.Documentation) + tp := goplsName(e.Type) + nm := goName(e.Name) + fmt.Fprintf(out, "type %s %s // line %d\n", nm, tp, e.Line) + types[nm] = out.String() + vals := new(bytes.Buffer) + generateDoc(vals, e.Documentation) + for _, v := range e.Values { + generateDoc(vals, v.Documentation) + nm := goName(v.Name) + more, ok := disambiguate[e.Name] + if ok { + usedDisambiguate[e.Name] = true + nm = more.prefix + nm + more.suffix + nm = goName(nm) // stringType + } + var val string + switch v := v.Value.(type) { + case string: + val = fmt.Sprintf("%q", v) + case float64: + val = fmt.Sprintf("%d", int(v)) + default: + log.Fatalf("impossible type %T", v) + } + fmt.Fprintf(vals, "\t%s %s = %s // line %d\n", nm, e.Name, val, v.Line) + } + consts[nm] = vals.String() + } +} +func genMarshal() { + for _, nt := range genTypes { + nm := goplsName(nt.typ) + if !strings.HasPrefix(nm, "Or") { + continue + } + names := []string{} + for _, t := range nt.items { + if notNil(t) { + names = append(names, goplsName(t)) + } + } + sort.Strings(names) + var buf bytes.Buffer + fmt.Fprintf(&buf, "// from line %d\n", nt.line) + fmt.Fprintf(&buf, "func (t %s) MarshalJSON() ([]byte, error) {\n", nm) + buf.WriteString("\tswitch x := t.Value.(type){\n") + for _, nmx := range names { + fmt.Fprintf(&buf, "\tcase %s:\n", nmx) + fmt.Fprintf(&buf, "\t\treturn json.Marshal(x)\n") + } + buf.WriteString("\tcase nil:\n\t\treturn []byte(\"null\"), nil\n\t}\n") + fmt.Fprintf(&buf, "\treturn nil, fmt.Errorf(\"type %%T not one of %v\", t)\n", names) + buf.WriteString("}\n\n") + + fmt.Fprintf(&buf, "func (t *%s) UnmarshalJSON(x []byte) error {\n", nm) + buf.WriteString("\tif string(x) == \"null\" {\n\t\tt.Value = nil\n\t\t\treturn nil\n\t}\n") + for i, nmx := range names { + fmt.Fprintf(&buf, "\tvar h%d %s\n", i, nmx) + fmt.Fprintf(&buf, "\tif err := json.Unmarshal(x, &h%d); err == nil {\n\t\tt.Value = h%d\n\t\t\treturn nil\n\t\t}\n", i, i) + } + fmt.Fprintf(&buf, "return errors.New(\"unmarshal failed to match one of %v\")\n", names) + buf.WriteString("}\n\n") + jsons[nm] = buf.String() + } +} + +func goplsName(t *Type) string { + nm := typeNames[t] + // translate systematic name to gopls name + if newNm, ok := goplsType[nm]; ok { + usedGoplsType[nm] = true + nm = newNm + } + return nm +} + +func notNil(t *Type) bool { // shutdwon is the special case that needs this + return t != nil && (t.Kind != "base" || t.Name != "null") +} + +func hasNilValue(t string) bool { + // this may be unreliable, and need a supplementary table + if strings.HasPrefix(t, "[]") || strings.HasPrefix(t, "*") { + return true + } + if t == "interface{}" || t == "any" { + return true + } + // that's all the cases that occur currently + return false +} diff --git a/gopls/internal/lsp/protocol/generate/tables.go b/gopls/internal/lsp/protocol/generate/tables.go new file mode 100644 index 00000000000..838990c4137 --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/tables.go @@ -0,0 +1,420 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +// prop combines the name of a property with the name of the structure it is in. +type prop [2]string + +const ( + nothing = iota + wantStar + wantOpt + wantOptStar +) + +// goplsStar records the optionality of each field in the protocol. +var goplsStar = map[prop]int{ + {"ClientCapabilities", "textDocument"}: wantOpt, + {"ClientCapabilities", "window"}: wantOpt, + {"ClientCapabilities", "workspace"}: wantOpt, + {"CodeAction", "edit"}: wantOpt, + {"CodeAction", "kind"}: wantOpt, + {"CodeActionClientCapabilities", "codeActionLiteralSupport"}: wantOpt, + {"CodeActionContext", "triggerKind"}: wantOpt, + {"CodeLens", "command"}: wantOpt, + {"CompletionClientCapabilities", "completionItem"}: wantOpt, + {"CompletionClientCapabilities", "insertTextMode"}: wantOpt, + {"CompletionItem", "insertTextFormat"}: wantOpt, + {"CompletionItem", "insertTextMode"}: wantOpt, + {"CompletionItem", "kind"}: wantOpt, + {"CompletionParams", "context"}: wantOpt, + {"Diagnostic", "severity"}: wantOpt, + {"DidSaveTextDocumentParams", "text"}: wantOptStar, + {"DocumentHighlight", "kind"}: wantOpt, + {"FileOperationPattern", "matches"}: wantOpt, + {"FileSystemWatcher", "kind"}: wantOpt, + {"Hover", "range"}: wantOpt, + {"InitializeResult", "serverInfo"}: wantOpt, + {"InlayHint", "kind"}: wantOpt, + {"InlayHint", "position"}: wantStar, + + {"Lit_CompletionClientCapabilities_completionItem", "commitCharactersSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "deprecatedSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "documentationFormat"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "insertReplaceSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "insertTextModeSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "labelDetailsSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "preselectSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "resolveSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "snippetSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItem", "tagSupport"}: nothing, + {"Lit_CompletionClientCapabilities_completionItemKind", "valueSet"}: nothing, + {"Lit_CompletionClientCapabilities_completionList", "itemDefaults"}: nothing, + {"Lit_CompletionList_itemDefaults", "commitCharacters"}: nothing, + {"Lit_CompletionList_itemDefaults", "data"}: nothing, + {"Lit_CompletionList_itemDefaults", "editRange"}: nothing, + {"Lit_CompletionList_itemDefaults", "insertTextFormat"}: nothing, + {"Lit_CompletionList_itemDefaults", "insertTextMode"}: nothing, + {"Lit_CompletionOptions_completionItem", "labelDetailsSupport"}: nothing, + {"Lit_DocumentSymbolClientCapabilities_symbolKind", "valueSet"}: nothing, + {"Lit_FoldingRangeClientCapabilities_foldingRange", "collapsedText"}: nothing, + {"Lit_FoldingRangeClientCapabilities_foldingRangeKind", "valueSet"}: nothing, + {"Lit_InitializeResult_serverInfo", "version"}: nothing, + {"Lit_NotebookDocumentChangeEvent_cells", "data"}: nothing, + {"Lit_NotebookDocumentChangeEvent_cells", "structure"}: nothing, + {"Lit_NotebookDocumentChangeEvent_cells", "textContent"}: nothing, + {"Lit_NotebookDocumentChangeEvent_cells_structure", "didClose"}: nothing, + {"Lit_NotebookDocumentChangeEvent_cells_structure", "didOpen"}: nothing, + {"Lit_NotebookDocumentFilter_Item0", "pattern"}: nothing, + {"Lit_NotebookDocumentFilter_Item0", "scheme"}: nothing, + {"Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0", "cells"}: nothing, + {"Lit_SemanticTokensClientCapabilities_requests", "full"}: nothing, + {"Lit_SemanticTokensClientCapabilities_requests", "range"}: nothing, + {"Lit_SemanticTokensClientCapabilities_requests_full_Item1", "delta"}: nothing, + {"Lit_SemanticTokensOptions_full_Item1", "delta"}: nothing, + {"Lit_ServerCapabilities_workspace", "fileOperations"}: nothing, + {"Lit_ServerCapabilities_workspace", "workspaceFolders"}: nothing, + + {"Lit_ShowMessageRequestClientCapabilities_messageActionItem", "additionalPropertiesSupport"}: nothing, + {"Lit_SignatureHelpClientCapabilities_signatureInformation", "activeParameterSupport"}: nothing, + {"Lit_SignatureHelpClientCapabilities_signatureInformation", "documentationFormat"}: nothing, + {"Lit_SignatureHelpClientCapabilities_signatureInformation", "parameterInformation"}: nothing, + {"Lit_SignatureHelpClientCapabilities_signatureInformation_parameterInformation", "labelOffsetSupport"}: nothing, + + {"Lit_TextDocumentContentChangeEvent_Item0", "range"}: wantStar, + {"Lit_TextDocumentContentChangeEvent_Item0", "rangeLength"}: nothing, + {"Lit_TextDocumentFilter_Item0", "pattern"}: nothing, + {"Lit_TextDocumentFilter_Item0", "scheme"}: nothing, + {"Lit_TextDocumentFilter_Item1", "language"}: nothing, + {"Lit_TextDocumentFilter_Item1", "pattern"}: nothing, + + {"Lit_WorkspaceEditClientCapabilities_changeAnnotationSupport", "groupsOnLabel"}: nothing, + {"Lit_WorkspaceSymbolClientCapabilities_symbolKind", "valueSet"}: nothing, + {"Lit__InitializeParams_clientInfo", "version"}: nothing, + + {"Moniker", "kind"}: wantOpt, + {"PartialResultParams", "partialResultToken"}: wantOpt, + {"ResourceOperation", "annotationId"}: wantOpt, + {"ServerCapabilities", "completionProvider"}: wantOpt, + {"ServerCapabilities", "documentLinkProvider"}: wantOpt, + {"ServerCapabilities", "executeCommandProvider"}: wantOpt, + {"ServerCapabilities", "positionEncoding"}: wantOpt, + {"ServerCapabilities", "signatureHelpProvider"}: wantOpt, + {"ServerCapabilities", "workspace"}: wantOpt, + {"TextDocumentClientCapabilities", "codeAction"}: wantOpt, + {"TextDocumentClientCapabilities", "completion"}: wantOpt, + {"TextDocumentClientCapabilities", "documentSymbol"}: wantOpt, + {"TextDocumentClientCapabilities", "foldingRange"}: wantOpt, + {"TextDocumentClientCapabilities", "hover"}: wantOpt, + {"TextDocumentClientCapabilities", "publishDiagnostics"}: wantOpt, + {"TextDocumentClientCapabilities", "rename"}: wantOpt, + {"TextDocumentClientCapabilities", "semanticTokens"}: wantOpt, + {"TextDocumentSyncOptions", "change"}: wantOpt, + {"TextDocumentSyncOptions", "save"}: wantOpt, + {"WorkDoneProgressParams", "workDoneToken"}: wantOpt, + {"WorkspaceClientCapabilities", "didChangeConfiguration"}: wantOpt, + {"WorkspaceClientCapabilities", "didChangeWatchedFiles"}: wantOpt, + {"WorkspaceEditClientCapabilities", "failureHandling"}: wantOpt, + {"XInitializeParams", "clientInfo"}: wantOpt, +} + +// keep track of which entries in goplsStar are used +var usedGoplsStar = make(map[prop]bool) + +// For gopls compatibility, use a different, typically more restrictive, type for some fields. +var renameProp = map[prop]string{ + {"CancelParams", "id"}: "interface{}", + {"Command", "arguments"}: "[]json.RawMessage", + {"CompletionItem", "textEdit"}: "TextEdit", + {"Diagnostic", "code"}: "interface{}", + + {"DocumentDiagnosticReportPartialResult", "relatedDocuments"}: "map[DocumentURI]interface{}", + + {"ExecuteCommandParams", "arguments"}: "[]json.RawMessage", + {"FoldingRange", "kind"}: "string", + {"Hover", "contents"}: "MarkupContent", + {"InlayHint", "label"}: "[]InlayHintLabelPart", + + {"Lit_NotebookDocumentChangeEvent_cells", "textContent"}: "[]FTextContentPCells", + {"Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0", "cells"}: "[]FCellsPNotebookSelector", + + {"Lit_SemanticTokensClientCapabilities_requests", "full"}: "interface{}", + {"Lit_SemanticTokensClientCapabilities_requests", "range"}: "bool", + {"NotebookCellTextDocumentFilter", "notebook"}: "NotebookDocumentFilter", + {"RelatedFullDocumentDiagnosticReport", "relatedDocuments"}: "map[DocumentURI]interface{}", + {"RelatedUnchangedDocumentDiagnosticReport", "relatedDocuments"}: "map[DocumentURI]interface{}", + + {"RenameClientCapabilities", "prepareSupportDefaultBehavior"}: "interface{}", + + {"SemanticTokensClientCapabilities", "formats"}: "[]string", + {"SemanticTokensOptions", "full"}: "bool", + {"SemanticTokensOptions", "range"}: "interface{}", + {"ServerCapabilities", "callHierarchyProvider"}: "interface{}", + {"ServerCapabilities", "codeActionProvider"}: "interface{}", + {"ServerCapabilities", "colorProvider"}: "interface{}", + {"ServerCapabilities", "declarationProvider"}: "bool", + {"ServerCapabilities", "definitionProvider"}: "bool", + {"ServerCapabilities", "diagnosticProvider"}: "interface{}", + {"ServerCapabilities", "documentFormattingProvider"}: "bool", + {"ServerCapabilities", "documentHighlightProvider"}: "bool", + {"ServerCapabilities", "documentRangeFormattingProvider"}: "bool", + {"ServerCapabilities", "documentSymbolProvider"}: "bool", + {"ServerCapabilities", "foldingRangeProvider"}: "interface{}", + {"ServerCapabilities", "hoverProvider"}: "bool", + {"ServerCapabilities", "implementationProvider"}: "interface{}", + {"ServerCapabilities", "inlayHintProvider"}: "interface{}", + {"ServerCapabilities", "inlineValueProvider"}: "interface{}", + {"ServerCapabilities", "linkedEditingRangeProvider"}: "interface{}", + {"ServerCapabilities", "monikerProvider"}: "interface{}", + {"ServerCapabilities", "notebookDocumentSync"}: "interface{}", + {"ServerCapabilities", "referencesProvider"}: "bool", + {"ServerCapabilities", "renameProvider"}: "interface{}", + {"ServerCapabilities", "selectionRangeProvider"}: "interface{}", + {"ServerCapabilities", "semanticTokensProvider"}: "interface{}", + {"ServerCapabilities", "textDocumentSync"}: "interface{}", + {"ServerCapabilities", "typeDefinitionProvider"}: "interface{}", + {"ServerCapabilities", "typeHierarchyProvider"}: "interface{}", + {"ServerCapabilities", "workspaceSymbolProvider"}: "bool", + {"TextDocumentEdit", "edits"}: "[]TextEdit", + {"TextDocumentSyncOptions", "save"}: "SaveOptions", + {"WorkspaceEdit", "documentChanges"}: "[]DocumentChanges", +} + +// which entries of renameProp were used +var usedRenameProp = make(map[prop]bool) + +type adjust struct { + prefix, suffix string +} + +// disambiguate specifies prefixes or suffixes to add to all values of +// some enum types to avoid name conflicts +var disambiguate = map[string]adjust{ + "CodeActionTriggerKind": {"CodeAction", ""}, + "CompletionItemKind": {"", "Completion"}, + "CompletionItemTag": {"Compl", ""}, + "DiagnosticSeverity": {"Severity", ""}, + "DocumentDiagnosticReportKind": {"Diagnostic", ""}, + "FileOperationPatternKind": {"", "Pattern"}, + "InsertTextFormat": {"", "TextFormat"}, + "SemanticTokenModifiers": {"Mod", ""}, + "SemanticTokenTypes": {"", "Type"}, + "SignatureHelpTriggerKind": {"Sig", ""}, + "SymbolTag": {"", "Symbol"}, + "WatchKind": {"Watch", ""}, +} + +// which entries of disambiguate got used +var usedDisambiguate = make(map[string]bool) + +// for gopls compatibility, replace generated type names with existing ones +var goplsType = map[string]string{ + "And_RegOpt_textDocument_colorPresentation": "WorkDoneProgressOptionsAndTextDocumentRegistrationOptions", + "ConfigurationParams": "ParamConfiguration", + "DocumentDiagnosticParams": "string", + "DocumentDiagnosticReport": "string", + "DocumentUri": "DocumentURI", + "InitializeParams": "ParamInitialize", + "LSPAny": "interface{}", + + "Lit_CodeActionClientCapabilities_codeActionLiteralSupport": "PCodeActionLiteralSupportPCodeAction", + "Lit_CodeActionClientCapabilities_codeActionLiteralSupport_codeActionKind": "FCodeActionKindPCodeActionLiteralSupport", + + "Lit_CodeActionClientCapabilities_resolveSupport": "PResolveSupportPCodeAction", + "Lit_CodeAction_disabled": "PDisabledMsg_textDocument_codeAction", + "Lit_CompletionClientCapabilities_completionItem": "PCompletionItemPCompletion", + "Lit_CompletionClientCapabilities_completionItemKind": "PCompletionItemKindPCompletion", + + "Lit_CompletionClientCapabilities_completionItem_insertTextModeSupport": "FInsertTextModeSupportPCompletionItem", + + "Lit_CompletionClientCapabilities_completionItem_resolveSupport": "FResolveSupportPCompletionItem", + "Lit_CompletionClientCapabilities_completionItem_tagSupport": "FTagSupportPCompletionItem", + + "Lit_CompletionClientCapabilities_completionList": "PCompletionListPCompletion", + "Lit_CompletionList_itemDefaults": "PItemDefaultsMsg_textDocument_completion", + "Lit_CompletionList_itemDefaults_editRange_Item1": "FEditRangePItemDefaults", + "Lit_CompletionOptions_completionItem": "PCompletionItemPCompletionProvider", + "Lit_DocumentSymbolClientCapabilities_symbolKind": "PSymbolKindPDocumentSymbol", + "Lit_DocumentSymbolClientCapabilities_tagSupport": "PTagSupportPDocumentSymbol", + "Lit_FoldingRangeClientCapabilities_foldingRange": "PFoldingRangePFoldingRange", + "Lit_FoldingRangeClientCapabilities_foldingRangeKind": "PFoldingRangeKindPFoldingRange", + "Lit_GeneralClientCapabilities_staleRequestSupport": "PStaleRequestSupportPGeneral", + "Lit_InitializeResult_serverInfo": "PServerInfoMsg_initialize", + "Lit_InlayHintClientCapabilities_resolveSupport": "PResolveSupportPInlayHint", + "Lit_MarkedString_Item1": "Msg_MarkedString", + "Lit_NotebookDocumentChangeEvent_cells": "PCellsPChange", + "Lit_NotebookDocumentChangeEvent_cells_structure": "FStructurePCells", + "Lit_NotebookDocumentChangeEvent_cells_textContent_Elem": "FTextContentPCells", + "Lit_NotebookDocumentFilter_Item0": "Msg_NotebookDocumentFilter", + + "Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0": "PNotebookSelectorPNotebookDocumentSync", + + "Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_cells_Elem": "FCellsPNotebookSelector", + "Lit_PrepareRenameResult_Item1": "Msg_PrepareRename2Gn", + + "Lit_PublishDiagnosticsClientCapabilities_tagSupport": "PTagSupportPPublishDiagnostics", + "Lit_SemanticTokensClientCapabilities_requests": "PRequestsPSemanticTokens", + "Lit_SemanticTokensClientCapabilities_requests_full_Item1": "FFullPRequests", + "Lit_SemanticTokensClientCapabilities_requests_range_Item1": "FRangePRequests", + + "Lit_SemanticTokensOptions_full_Item1": "PFullESemanticTokensOptions", + "Lit_SemanticTokensOptions_range_Item1": "PRangeESemanticTokensOptions", + "Lit_ServerCapabilities_workspace": "Workspace6Gn", + + "Lit_ShowMessageRequestClientCapabilities_messageActionItem": "PMessageActionItemPShowMessage", + "Lit_SignatureHelpClientCapabilities_signatureInformation": "PSignatureInformationPSignatureHelp", + + "Lit_SignatureHelpClientCapabilities_signatureInformation_parameterInformation": "FParameterInformationPSignatureInformation", + + "Lit_TextDocumentContentChangeEvent_Item0": "Msg_TextDocumentContentChangeEvent", + "Lit_TextDocumentFilter_Item0": "Msg_TextDocumentFilter", + "Lit_TextDocumentFilter_Item1": "Msg_TextDocumentFilter", + "Lit_WorkspaceEditClientCapabilities_changeAnnotationSupport": "PChangeAnnotationSupportPWorkspaceEdit", + "Lit_WorkspaceSymbolClientCapabilities_resolveSupport": "PResolveSupportPSymbol", + "Lit_WorkspaceSymbolClientCapabilities_symbolKind": "PSymbolKindPSymbol", + "Lit_WorkspaceSymbolClientCapabilities_tagSupport": "PTagSupportPSymbol", + "Lit_WorkspaceSymbol_location_Item1": "PLocationMsg_workspace_symbol", + "Lit__InitializeParams_clientInfo": "Msg_XInitializeParams_clientInfo", + "Or_CompletionList_itemDefaults_editRange": "OrFEditRangePItemDefaults", + "Or_Declaration": "[]Location", + "Or_DidChangeConfigurationRegistrationOptions_section": "OrPSection_workspace_didChangeConfiguration", + "Or_GlobPattern": "string", + "Or_InlayHintLabelPart_tooltip": "OrPTooltipPLabel", + "Or_InlayHint_tooltip": "OrPTooltip_textDocument_inlayHint", + "Or_LSPAny": "interface{}", + "Or_NotebookDocumentFilter": "Msg_NotebookDocumentFilter", + "Or_NotebookDocumentSyncOptions_notebookSelector_Elem": "PNotebookSelectorPNotebookDocumentSync", + + "Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_notebook": "OrFNotebookPNotebookSelector", + + "Or_ParameterInformation_documentation": "string", + "Or_ParameterInformation_label": "string", + "Or_PrepareRenameResult": "Msg_PrepareRename2Gn", + "Or_ProgressToken": "interface{}", + "Or_Result_textDocument_completion": "CompletionList", + "Or_Result_textDocument_declaration": "Or_textDocument_declaration", + "Or_Result_textDocument_definition": "[]Location", + "Or_Result_textDocument_documentSymbol": "[]interface{}", + "Or_Result_textDocument_implementation": "[]Location", + "Or_Result_textDocument_semanticTokens_full_delta": "interface{}", + "Or_Result_textDocument_typeDefinition": "[]Location", + "Or_Result_workspace_symbol": "[]SymbolInformation", + "Or_TextDocumentContentChangeEvent": "Msg_TextDocumentContentChangeEvent", + "Or_TextDocumentFilter": "Msg_TextDocumentFilter", + "Or_WorkspaceFoldersServerCapabilities_changeNotifications": "string", + "Or_WorkspaceSymbol_location": "OrPLocation_workspace_symbol", + "PrepareRenameResult": "PrepareRename2Gn", + "Tuple_ParameterInformation_label_Item1": "UIntCommaUInt", + "WorkspaceFoldersServerCapabilities": "WorkspaceFolders5Gn", + "[]LSPAny": "[]interface{}", + "[]Or_NotebookDocumentSyncOptions_notebookSelector_Elem": "[]PNotebookSelectorPNotebookDocumentSync", + "[]Or_Result_textDocument_codeAction_Item0_Elem": "[]CodeAction", + "[]PreviousResultId": "[]PreviousResultID", + "[]uinteger": "[]uint32", + "boolean": "bool", + "decimal": "float64", + "integer": "int32", + "map[DocumentUri][]TextEdit": "map[DocumentURI][]TextEdit", + "uinteger": "uint32", +} + +var usedGoplsType = make(map[string]bool) + +// methodNames is a map from the method to the name of the function that handles it +var methodNames = map[string]string{ + "$/cancelRequest": "CancelRequest", + "$/logTrace": "LogTrace", + "$/progress": "Progress", + "$/setTrace": "SetTrace", + "callHierarchy/incomingCalls": "IncomingCalls", + "callHierarchy/outgoingCalls": "OutgoingCalls", + "client/registerCapability": "RegisterCapability", + "client/unregisterCapability": "UnregisterCapability", + "codeAction/resolve": "ResolveCodeAction", + "codeLens/resolve": "ResolveCodeLens", + "completionItem/resolve": "ResolveCompletionItem", + "documentLink/resolve": "ResolveDocumentLink", + "exit": "Exit", + "initialize": "Initialize", + "initialized": "Initialized", + "inlayHint/resolve": "Resolve", + "notebookDocument/didChange": "DidChangeNotebookDocument", + "notebookDocument/didClose": "DidCloseNotebookDocument", + "notebookDocument/didOpen": "DidOpenNotebookDocument", + "notebookDocument/didSave": "DidSaveNotebookDocument", + "shutdown": "Shutdown", + "telemetry/event": "Event", + "textDocument/codeAction": "CodeAction", + "textDocument/codeLens": "CodeLens", + "textDocument/colorPresentation": "ColorPresentation", + "textDocument/completion": "Completion", + "textDocument/declaration": "Declaration", + "textDocument/definition": "Definition", + "textDocument/diagnostic": "Diagnostic", + "textDocument/didChange": "DidChange", + "textDocument/didClose": "DidClose", + "textDocument/didOpen": "DidOpen", + "textDocument/didSave": "DidSave", + "textDocument/documentColor": "DocumentColor", + "textDocument/documentHighlight": "DocumentHighlight", + "textDocument/documentLink": "DocumentLink", + "textDocument/documentSymbol": "DocumentSymbol", + "textDocument/foldingRange": "FoldingRange", + "textDocument/formatting": "Formatting", + "textDocument/hover": "Hover", + "textDocument/implementation": "Implementation", + "textDocument/inlayHint": "InlayHint", + "textDocument/inlineValue": "InlineValue", + "textDocument/linkedEditingRange": "LinkedEditingRange", + "textDocument/moniker": "Moniker", + "textDocument/onTypeFormatting": "OnTypeFormatting", + "textDocument/prepareCallHierarchy": "PrepareCallHierarchy", + "textDocument/prepareRename": "PrepareRename", + "textDocument/prepareTypeHierarchy": "PrepareTypeHierarchy", + "textDocument/publishDiagnostics": "PublishDiagnostics", + "textDocument/rangeFormatting": "RangeFormatting", + "textDocument/references": "References", + "textDocument/rename": "Rename", + "textDocument/selectionRange": "SelectionRange", + "textDocument/semanticTokens/full": "SemanticTokensFull", + "textDocument/semanticTokens/full/delta": "SemanticTokensFullDelta", + "textDocument/semanticTokens/range": "SemanticTokensRange", + "textDocument/signatureHelp": "SignatureHelp", + "textDocument/typeDefinition": "TypeDefinition", + "textDocument/willSave": "WillSave", + "textDocument/willSaveWaitUntil": "WillSaveWaitUntil", + "typeHierarchy/subtypes": "Subtypes", + "typeHierarchy/supertypes": "Supertypes", + "window/logMessage": "LogMessage", + "window/showDocument": "ShowDocument", + "window/showMessage": "ShowMessage", + "window/showMessageRequest": "ShowMessageRequest", + "window/workDoneProgress/cancel": "WorkDoneProgressCancel", + "window/workDoneProgress/create": "WorkDoneProgressCreate", + "workspace/applyEdit": "ApplyEdit", + "workspace/codeLens/refresh": "CodeLensRefresh", + "workspace/configuration": "Configuration", + "workspace/diagnostic": "DiagnosticWorkspace", + "workspace/diagnostic/refresh": "DiagnosticRefresh", + "workspace/didChangeConfiguration": "DidChangeConfiguration", + "workspace/didChangeWatchedFiles": "DidChangeWatchedFiles", + "workspace/didChangeWorkspaceFolders": "DidChangeWorkspaceFolders", + "workspace/didCreateFiles": "DidCreateFiles", + "workspace/didDeleteFiles": "DidDeleteFiles", + "workspace/didRenameFiles": "DidRenameFiles", + "workspace/executeCommand": "ExecuteCommand", + "workspace/inlayHint/refresh": "InlayHintRefresh", + "workspace/inlineValue/refresh": "InlineValueRefresh", + "workspace/semanticTokens/refresh": "SemanticTokensRefresh", + "workspace/symbol": "Symbol", + "workspace/willCreateFiles": "WillCreateFiles", + "workspace/willDeleteFiles": "WillDeleteFiles", + "workspace/willRenameFiles": "WillRenameFiles", + "workspace/workspaceFolders": "WorkspaceFolders", + "workspaceSymbol/resolve": "ResolveWorkspaceSymbol", +} diff --git a/gopls/internal/lsp/protocol/generate/typenames.go b/gopls/internal/lsp/protocol/generate/typenames.go new file mode 100644 index 00000000000..237d19e4f06 --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/typenames.go @@ -0,0 +1,184 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "fmt" + "log" + "strings" +) + +var typeNames = make(map[*Type]string) +var genTypes = make(map[*Type]*newType) + +func findTypeNames(model Model) { + for _, s := range model.Structures { + for _, e := range s.Extends { + nameType(e, nil) // all references + } + for _, m := range s.Mixins { + nameType(m, nil) // all references + } + for _, p := range s.Properties { + nameType(p.Type, []string{s.Name, p.Name}) + } + } + for _, t := range model.Enumerations { + nameType(t.Type, []string{t.Name}) + } + for _, t := range model.TypeAliases { + nameType(t.Type, []string{t.Name}) + } + for _, r := range model.Requests { + nameType(r.Params, []string{"Param", r.Method}) + nameType(r.Result, []string{"Result", r.Method}) + nameType(r.RegistrationOptions, []string{"RegOpt", r.Method}) + } + for _, n := range model.Notifications { + nameType(n.Params, []string{"Param", n.Method}) + nameType(n.RegistrationOptions, []string{"RegOpt", n.Method}) + } +} + +// nameType populates typeNames[t] with the computed name of the type. +// path is the list of enclosing constructs in the JSON model. +func nameType(t *Type, path []string) string { + if t == nil || typeNames[t] != "" { + return "" + } + switch t.Kind { + case "base": + typeNames[t] = t.Name + return t.Name + case "reference": + typeNames[t] = t.Name + return t.Name + case "array": + nm := "[]" + nameType(t.Element, append(path, "Elem")) + typeNames[t] = nm + return nm + case "map": + key := nameType(t.Key, nil) // never a generated type + value := nameType(t.Value.(*Type), append(path, "Value")) + nm := "map[" + key + "]" + value + typeNames[t] = nm + return nm + // generated types + case "and": + nm := nameFromPath("And", path) + typeNames[t] = nm + for _, it := range t.Items { + nameType(it, append(path, "Item")) + } + genTypes[t] = &newType{ + name: nm, + typ: t, + kind: "and", + items: t.Items, + line: t.Line, + } + return nm + case "literal": + nm := nameFromPath("Lit", path) + typeNames[t] = nm + for _, p := range t.Value.(ParseLiteral).Properties { + nameType(p.Type, append(path, p.Name)) + } + genTypes[t] = &newType{ + name: nm, + typ: t, + kind: "literal", + properties: t.Value.(ParseLiteral).Properties, + line: t.Line, + } + return nm + case "tuple": + nm := nameFromPath("Tuple", path) + typeNames[t] = nm + for _, it := range t.Items { + nameType(it, append(path, "Item")) + } + genTypes[t] = &newType{ + name: nm, + typ: t, + kind: "tuple", + items: t.Items, + line: t.Line, + } + return nm + case "or": + nm := nameFromPath("Or", path) + typeNames[t] = nm + for i, it := range t.Items { + // these names depend on the ordering within the "or" type + nameType(it, append(path, fmt.Sprintf("Item%d", i))) + } + // this code handles an "or" of stringLiterals (_InitializeParams.trace) + names := make(map[string]int) + msg := "" + for _, it := range t.Items { + if line, ok := names[typeNames[it]]; ok { + // duplicate component names are bad + msg += fmt.Sprintf("lines %d %d dup, %s for %s\n", line, it.Line, typeNames[it], nm) + } + names[typeNames[it]] = t.Line + } + // this code handles an "or" of stringLiterals (_InitializeParams.trace) + if len(names) == 1 { + var solekey string + for k := range names { + solekey = k // the sole name + } + if solekey == "string" { // _InitializeParams.trace + typeNames[t] = "string" + return "string" + } + // otherwise unexpected + log.Printf("unexpected: single-case 'or' type has non-string key %s: %s", nm, solekey) + log.Fatal(msg) + } else if len(names) == 2 { + // if one of the names is null, just use the other, rather than generating an "or". + // This removes about 40 types from the generated code. An entry in goplsStar + // could be added to handle the null case, if necessary. + newNm := "" + sawNull := false + for k := range names { + if k == "null" { + sawNull = true + } else { + newNm = k + } + } + if sawNull { + typeNames[t] = newNm + return newNm + } + } + genTypes[t] = &newType{ + name: nm, + typ: t, + kind: "or", + items: t.Items, + line: t.Line, + } + return nm + case "stringLiteral": // a single type, like 'kind' or 'rename' + typeNames[t] = "string" + return "string" + default: + log.Fatalf("nameType: %T unexpected, line:%d path:%v", t, t.Line, path) + panic("unreachable in nameType") + } +} + +func nameFromPath(prefix string, path []string) string { + nm := prefix + "_" + strings.Join(path, "_") + // methods have slashes + nm = strings.ReplaceAll(nm, "/", "_") + return nm +} diff --git a/gopls/internal/lsp/protocol/generate/types.go b/gopls/internal/lsp/protocol/generate/types.go new file mode 100644 index 00000000000..a8d5af1e649 --- /dev/null +++ b/gopls/internal/lsp/protocol/generate/types.go @@ -0,0 +1,171 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package main + +import ( + "fmt" + "sort" +) + +// Model contains the parsed version of the spec +type Model struct { + Version Metadata `json:"metaData"` + Requests []*Request `json:"requests"` + Notifications []*Notification `json:"notifications"` + Structures []*Structure `json:"structures"` + Enumerations []*Enumeration `json:"enumerations"` + TypeAliases []*TypeAlias `json:"typeAliases"` + Line int `json:"line"` +} + +// Metadata is information about the version of the spec +type Metadata struct { + Version string `json:"version"` + Line int `json:"line"` +} + +// A Request is the parsed version of an LSP request +type Request struct { + Documentation string `json:"documentation"` + ErrorData *Type `json:"errorData"` + Direction string `json:"messageDirection"` + Method string `json:"method"` + Params *Type `json:"params"` + PartialResult *Type `json:"partialResult"` + Proposed bool `json:"proposed"` + RegistrationMethod string `json:"registrationMethod"` + RegistrationOptions *Type `json:"registrationOptions"` + Result *Type `json:"result"` + Since string `json:"since"` + Line int `json:"line"` +} + +// A Notificatin is the parsed version of an LSP notification +type Notification struct { + Documentation string `json:"documentation"` + Direction string `json:"messageDirection"` + Method string `json:"method"` + Params *Type `json:"params"` + Proposed bool `json:"proposed"` + RegistrationMethod string `json:"registrationMethod"` + RegistrationOptions *Type `json:"registrationOptions"` + Since string `json:"since"` + Line int `json:"line"` +} + +// A Structure is the parsed version of an LSP structure from the spec +type Structure struct { + Documentation string `json:"documentation"` + Extends []*Type `json:"extends"` + Mixins []*Type `json:"mixins"` + Name string `json:"name"` + Properties []NameType `json:"properties"` + Proposed bool `json:"proposed"` + Since string `json:"since"` + Line int `json:"line"` +} + +// An enumeration is the parsed version of an LSP enumeration from the spec +type Enumeration struct { + Documentation string `json:"documentation"` + Name string `json:"name"` + Proposed bool `json:"proposed"` + Since string `json:"since"` + SupportsCustomValues bool `json:"supportsCustomValues"` + Type *Type `json:"type"` + Values []NameValue `json:"values"` + Line int `json:"line"` +} + +// A TypeAlias is the parsed version of an LSP type alias from the spec +type TypeAlias struct { + Documentation string `json:"documentation"` + Deprecated string `json:"deprecated"` + Name string `json:"name"` + Proposed bool `json:"proposed"` + Since string `json:"since"` + Type *Type `json:"type"` + Line int `json:"line"` +} + +// A NameValue describes an enumeration constant +type NameValue struct { + Documentation string `json:"documentation"` + Name string `json:"name"` + Proposed bool `json:"proposed"` + Since string `json:"since"` + Value any `json:"value"` // number or string + Line int `json:"line"` +} + +// A Type is the parsed version of an LSP type from the spec, +// or a Type the code constructs +type Type struct { + Kind string `json:"kind"` // -- which kind goes with which field -- + Items []*Type `json:"items"` // "and", "or", "tuple" + Element *Type `json:"element"` // "array" + Name string `json:"name"` // "base", "reference" + Key *Type `json:"key"` // "map" + Value any `json:"value"` // "map", "stringLiteral", "literal" + Line int `json:"line"` // JSON source line +} + +// ParsedLiteral is Type.Value when Type.Kind is "literal" +type ParseLiteral struct { + Properties `json:"properties"` +} + +// A NameType represents the name and type of a structure element +type NameType struct { + Name string `json:"name"` + Type *Type `json:"type"` + Optional bool `json:"optional"` + Documentation string `json:"documentation"` + Deprecated string `json:"deprecated"` + Since string `json:"since"` + Proposed bool `json:"proposed"` + Line int `json:"line"` +} + +// Properties are the collection of structure fields +type Properties []NameType + +// addLineNumbers adds a "line" field to each object in the JSON. +func addLineNumbers(buf []byte) []byte { + var ans []byte + // In the specification .json file, the delimiter '{' is + // always followed by a newline. There are other {s embedded in strings. + // json.Token does not return \n, or :, or , so using it would + // require parsing the json to reconstruct the missing information. + // TODO(pjw): should linecnt start at 1 (editor) or 0 (compatibility)? + for linecnt, i := 0, 0; i < len(buf); i++ { + ans = append(ans, buf[i]) + switch buf[i] { + case '{': + if buf[i+1] == '\n' { + ans = append(ans, fmt.Sprintf(`"line": %d, `, linecnt)...) + // warning: this would fail if the spec file had + // `"value": {\n}`, but it does not, as comma is a separator. + } + case '\n': + linecnt++ + } + } + return ans +} + +type sortedMap[T any] map[string]T + +func (s sortedMap[T]) keys() []string { + var keys []string + for k := range s { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/internal/lsp/protocol/log.go b/gopls/internal/lsp/protocol/log.go similarity index 100% rename from internal/lsp/protocol/log.go rename to gopls/internal/lsp/protocol/log.go diff --git a/gopls/internal/lsp/protocol/mapper.go b/gopls/internal/lsp/protocol/mapper.go new file mode 100644 index 00000000000..d61524d836e --- /dev/null +++ b/gopls/internal/lsp/protocol/mapper.go @@ -0,0 +1,529 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protocol + +// This file defines Mapper, which wraps a file content buffer +// ([]byte) and provides efficient conversion between every kind of +// position representation. +// +// gopls uses four main representations of position: +// +// 1. byte offsets, e.g. (start, end int), starting from zero. +// +// 2. go/token notation. Use these types when interacting directly +// with the go/* syntax packages: +// +// token.Pos +// token.FileSet +// token.File +// +// Because File.Offset and File.Pos panic on invalid inputs, +// we do not call them directly and instead use the safetoken package +// for these conversions. This is enforced by a static check. +// +// Beware also that the methods of token.File have two bugs for which +// safetoken contains workarounds: +// - #57490, whereby the parser may create ast.Nodes during error +// recovery whose computed positions are out of bounds (EOF+1). +// - #41029, whereby the wrong line number is returned for the EOF position. +// +// 3. the span package. +// +// span.Point = (line, col8, offset). +// span.Span = (uri URI, start, end span.Point) +// +// Line and column are 1-based. +// Columns are measured in bytes (UTF-8 codes). +// All fields are optional. +// +// These types are useful as intermediate conversions of validated +// ranges (though MappedRange is superior as it is self contained +// and universally convertible). Since their fields are optional +// they are also useful for parsing user-provided positions (e.g. in +// the CLI) before we have access to file contents. +// +// 4. protocol, the LSP RPC message format. +// +// protocol.Position = (Line, Character uint32) +// protocol.Range = (start, end Position) +// protocol.Location = (URI, protocol.Range) +// +// Line and Character are 0-based. +// Characters (columns) are measured in UTF-16 codes. +// +// protocol.Mapper holds the (URI, Content) of a file, enabling +// efficient mapping between byte offsets, span ranges, and +// protocol ranges. +// +// protocol.MappedRange holds a protocol.Mapper and valid (start, +// end int) byte offsets, enabling infallible, efficient conversion +// to any other format. + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "path/filepath" + "sort" + "strings" + "sync" + "unicode/utf8" + + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" +) + +// A Mapper wraps the content of a file and provides mapping +// between byte offsets and notations of position such as: +// +// - (line, col8) pairs, where col8 is a 1-based UTF-8 column number +// (bytes), as used by the go/token and span packages. +// +// - (line, col16) pairs, where col16 is a 1-based UTF-16 column +// number, as used by the LSP protocol. +// +// All conversion methods are named "FromTo", where From and To are the two types. +// For example, the PointPosition method converts from a Point to a Position. +// +// Mapper does not intrinsically depend on go/token-based +// representations. Use safetoken to map between token.Pos <=> byte +// offsets, or the convenience methods such as PosPosition, +// NodePosition, or NodeRange. +// +// See overview comments at top of this file. +type Mapper struct { + URI span.URI + Content []byte + + // Line-number information is requested only for a tiny + // fraction of Mappers, so we compute it lazily. + // Call initLines() before accessing fields below. + linesOnce sync.Once + lineStart []int // byte offset of start of ith line (0-based); last=EOF iff \n-terminated + nonASCII bool + + // TODO(adonovan): adding an extra lineStart entry for EOF + // might simplify every method that accesses it. Try it out. +} + +// NewMapper creates a new mapper for the given URI and content. +func NewMapper(uri span.URI, content []byte) *Mapper { + return &Mapper{URI: uri, Content: content} +} + +// initLines populates the lineStart table. +func (m *Mapper) initLines() { + m.linesOnce.Do(func() { + nlines := bytes.Count(m.Content, []byte("\n")) + m.lineStart = make([]int, 1, nlines+1) // initially []int{0} + for offset, b := range m.Content { + if b == '\n' { + m.lineStart = append(m.lineStart, offset+1) + } + if b >= utf8.RuneSelf { + m.nonASCII = true + } + } + }) +} + +// -- conversions from span (UTF-8) domain -- + +// SpanLocation converts a (UTF-8) span to a protocol (UTF-16) range. +// Precondition: the URIs of SpanLocation and Mapper match. +func (m *Mapper) SpanLocation(s span.Span) (Location, error) { + rng, err := m.SpanRange(s) + if err != nil { + return Location{}, err + } + return m.RangeLocation(rng), nil +} + +// SpanRange converts a (UTF-8) span to a protocol (UTF-16) range. +// Precondition: the URIs of Span and Mapper match. +func (m *Mapper) SpanRange(s span.Span) (Range, error) { + // Assert that we aren't using the wrong mapper. + // We check only the base name, and case insensitively, + // because we can't assume clean paths, no symbolic links, + // case-sensitive directories. The authoritative answer + // requires querying the file system, and we don't want + // to do that. + if !strings.EqualFold(filepath.Base(string(m.URI)), filepath.Base(string(s.URI()))) { + return Range{}, bug.Errorf("mapper is for file %q instead of %q", m.URI, s.URI()) + } + start, err := m.PointPosition(s.Start()) + if err != nil { + return Range{}, fmt.Errorf("start: %w", err) + } + end, err := m.PointPosition(s.End()) + if err != nil { + return Range{}, fmt.Errorf("end: %w", err) + } + return Range{Start: start, End: end}, nil +} + +// PointPosition converts a valid span (UTF-8) point to a protocol (UTF-16) position. +func (m *Mapper) PointPosition(p span.Point) (Position, error) { + if p.HasPosition() { + line, col8 := p.Line()-1, p.Column()-1 // both 0-based + m.initLines() + if line >= len(m.lineStart) { + return Position{}, fmt.Errorf("line number %d out of range (max %d)", line, len(m.lineStart)) + } + offset := m.lineStart[line] + end := offset + col8 + + // Validate column. + if end > len(m.Content) { + return Position{}, fmt.Errorf("column is beyond end of file") + } else if line+1 < len(m.lineStart) && end >= m.lineStart[line+1] { + return Position{}, fmt.Errorf("column is beyond end of line") + } + + char := UTF16Len(m.Content[offset:end]) + return Position{Line: uint32(line), Character: uint32(char)}, nil + } + if p.HasOffset() { + return m.OffsetPosition(p.Offset()) + } + return Position{}, fmt.Errorf("point has neither offset nor line/column") +} + +// -- conversions from byte offsets -- + +// OffsetLocation converts a byte-offset interval to a protocol (UTF-16) location. +func (m *Mapper) OffsetLocation(start, end int) (Location, error) { + rng, err := m.OffsetRange(start, end) + if err != nil { + return Location{}, err + } + return m.RangeLocation(rng), nil +} + +// OffsetRange converts a byte-offset interval to a protocol (UTF-16) range. +func (m *Mapper) OffsetRange(start, end int) (Range, error) { + if start > end { + return Range{}, fmt.Errorf("start offset (%d) > end (%d)", start, end) + } + startPosition, err := m.OffsetPosition(start) + if err != nil { + return Range{}, fmt.Errorf("start: %v", err) + } + endPosition, err := m.OffsetPosition(end) + if err != nil { + return Range{}, fmt.Errorf("end: %v", err) + } + return Range{Start: startPosition, End: endPosition}, nil +} + +// OffsetSpan converts a byte-offset interval to a (UTF-8) span. +// The resulting span contains line, column, and offset information. +func (m *Mapper) OffsetSpan(start, end int) (span.Span, error) { + if start > end { + return span.Span{}, fmt.Errorf("start offset (%d) > end (%d)", start, end) + } + startPoint, err := m.OffsetPoint(start) + if err != nil { + return span.Span{}, fmt.Errorf("start: %v", err) + } + endPoint, err := m.OffsetPoint(end) + if err != nil { + return span.Span{}, fmt.Errorf("end: %v", err) + } + return span.New(m.URI, startPoint, endPoint), nil +} + +// OffsetPosition converts a byte offset to a protocol (UTF-16) position. +func (m *Mapper) OffsetPosition(offset int) (Position, error) { + if !(0 <= offset && offset <= len(m.Content)) { + return Position{}, fmt.Errorf("invalid offset %d (want 0-%d)", offset, len(m.Content)) + } + // No error may be returned after this point, + // even if the offset does not fall at a rune boundary. + // (See panic in MappedRange.Range reachable.) + + line, col16 := m.lineCol16(offset) + return Position{Line: uint32(line), Character: uint32(col16)}, nil +} + +// lineCol16 converts a valid byte offset to line and UTF-16 column numbers, both 0-based. +func (m *Mapper) lineCol16(offset int) (int, int) { + line, start, cr := m.line(offset) + var col16 int + if m.nonASCII { + col16 = UTF16Len(m.Content[start:offset]) + } else { + col16 = offset - start + } + if cr { + col16-- // retreat from \r at line end + } + return line, col16 +} + +// lineCol8 converts a valid byte offset to line and UTF-8 column numbers, both 0-based. +func (m *Mapper) lineCol8(offset int) (int, int) { + line, start, cr := m.line(offset) + col8 := offset - start + if cr { + col8-- // retreat from \r at line end + } + return line, col8 +} + +// line returns: +// - the 0-based index of the line that encloses the (valid) byte offset; +// - the start offset of that line; and +// - whether the offset denotes a carriage return (\r) at line end. +func (m *Mapper) line(offset int) (int, int, bool) { + m.initLines() + // In effect, binary search returns a 1-based result. + line := sort.Search(len(m.lineStart), func(i int) bool { + return offset < m.lineStart[i] + }) + + // Adjustment for line-endings: \r|\n is the same as |\r\n. + var eol int + if line == len(m.lineStart) { + eol = len(m.Content) // EOF + } else { + eol = m.lineStart[line] - 1 + } + cr := offset == eol && offset > 0 && m.Content[offset-1] == '\r' + + line-- // 0-based + + return line, m.lineStart[line], cr +} + +// OffsetPoint converts a byte offset to a span (UTF-8) point. +// The resulting point contains line, column, and offset information. +func (m *Mapper) OffsetPoint(offset int) (span.Point, error) { + if !(0 <= offset && offset <= len(m.Content)) { + return span.Point{}, fmt.Errorf("invalid offset %d (want 0-%d)", offset, len(m.Content)) + } + line, col8 := m.lineCol8(offset) + return span.NewPoint(line+1, col8+1, offset), nil +} + +// OffsetMappedRange returns a MappedRange for the given byte offsets. +// A MappedRange can be converted to any other form. +func (m *Mapper) OffsetMappedRange(start, end int) (MappedRange, error) { + if !(0 <= start && start <= end && end <= len(m.Content)) { + return MappedRange{}, fmt.Errorf("invalid offsets (%d, %d) (file %s has size %d)", start, end, m.URI, len(m.Content)) + } + return MappedRange{m, start, end}, nil +} + +// -- conversions from protocol (UTF-16) domain -- + +// LocationSpan converts a protocol (UTF-16) Location to a (UTF-8) span. +// Precondition: the URIs of Location and Mapper match. +func (m *Mapper) LocationSpan(l Location) (span.Span, error) { + // TODO(adonovan): check that l.URI matches m.URI. + return m.RangeSpan(l.Range) +} + +// RangeSpan converts a protocol (UTF-16) range to a (UTF-8) span. +// The resulting span has valid Positions and Offsets. +func (m *Mapper) RangeSpan(r Range) (span.Span, error) { + start, end, err := m.RangeOffsets(r) + if err != nil { + return span.Span{}, err + } + return m.OffsetSpan(start, end) +} + +// RangeOffsets converts a protocol (UTF-16) range to start/end byte offsets. +func (m *Mapper) RangeOffsets(r Range) (int, int, error) { + start, err := m.PositionOffset(r.Start) + if err != nil { + return 0, 0, err + } + end, err := m.PositionOffset(r.End) + if err != nil { + return 0, 0, err + } + return start, end, nil +} + +// PositionOffset converts a protocol (UTF-16) position to a byte offset. +func (m *Mapper) PositionOffset(p Position) (int, error) { + m.initLines() + + // Validate line number. + if p.Line > uint32(len(m.lineStart)) { + return 0, fmt.Errorf("line number %d out of range 0-%d", p.Line, len(m.lineStart)) + } else if p.Line == uint32(len(m.lineStart)) { + if p.Character == 0 { + return len(m.Content), nil // EOF + } + return 0, fmt.Errorf("column is beyond end of file") + } + + offset := m.lineStart[p.Line] + content := m.Content[offset:] // rest of file from start of enclosing line + + // Advance bytes up to the required number of UTF-16 codes. + col8 := 0 + for col16 := 0; col16 < int(p.Character); col16++ { + r, sz := utf8.DecodeRune(content) + if sz == 0 { + return 0, fmt.Errorf("column is beyond end of file") + } + if r == '\n' { + return 0, fmt.Errorf("column is beyond end of line") + } + if sz == 1 && r == utf8.RuneError { + return 0, fmt.Errorf("buffer contains invalid UTF-8 text") + } + content = content[sz:] + + if r >= 0x10000 { + col16++ // rune was encoded by a pair of surrogate UTF-16 codes + + if col16 == int(p.Character) { + break // requested position is in the middle of a rune + } + } + col8 += sz + } + return offset + col8, nil +} + +// PositionPoint converts a protocol (UTF-16) position to a span (UTF-8) point. +// The resulting point has a valid Position and Offset. +func (m *Mapper) PositionPoint(p Position) (span.Point, error) { + offset, err := m.PositionOffset(p) + if err != nil { + return span.Point{}, err + } + line, col8 := m.lineCol8(offset) + + return span.NewPoint(line+1, col8+1, offset), nil +} + +// -- go/token domain convenience methods -- + +// PosPosition converts a token pos to a protocol (UTF-16) position. +func (m *Mapper) PosPosition(tf *token.File, pos token.Pos) (Position, error) { + offset, err := safetoken.Offset(tf, pos) + if err != nil { + return Position{}, err + } + return m.OffsetPosition(offset) +} + +// PosLocation converts a token range to a protocol (UTF-16) location. +func (m *Mapper) PosLocation(tf *token.File, start, end token.Pos) (Location, error) { + startOffset, endOffset, err := safetoken.Offsets(tf, start, end) + if err != nil { + return Location{}, err + } + rng, err := m.OffsetRange(startOffset, endOffset) + if err != nil { + return Location{}, err + } + return m.RangeLocation(rng), nil +} + +// PosRange converts a token range to a protocol (UTF-16) range. +func (m *Mapper) PosRange(tf *token.File, start, end token.Pos) (Range, error) { + startOffset, endOffset, err := safetoken.Offsets(tf, start, end) + if err != nil { + return Range{}, err + } + return m.OffsetRange(startOffset, endOffset) +} + +// NodeRange converts a syntax node range to a protocol (UTF-16) range. +func (m *Mapper) NodeRange(tf *token.File, node ast.Node) (Range, error) { + return m.PosRange(tf, node.Pos(), node.End()) +} + +// RangeLocation pairs a protocol Range with its URI, in a Location. +func (m *Mapper) RangeLocation(rng Range) Location { + return Location{URI: URIFromSpanURI(m.URI), Range: rng} +} + +// PosMappedRange returns a MappedRange for the given token.Pos range. +func (m *Mapper) PosMappedRange(tf *token.File, start, end token.Pos) (MappedRange, error) { + startOffset, endOffset, err := safetoken.Offsets(tf, start, end) + if err != nil { + return MappedRange{}, nil + } + return m.OffsetMappedRange(startOffset, endOffset) +} + +// NodeMappedRange returns a MappedRange for the given node range. +func (m *Mapper) NodeMappedRange(tf *token.File, node ast.Node) (MappedRange, error) { + return m.PosMappedRange(tf, node.Pos(), node.End()) +} + +// -- MappedRange -- + +// A MappedRange represents a valid byte-offset range of a file. +// Through its Mapper it can be converted into other forms such +// as protocol.Range or span.Span. +// +// Construct one by calling Mapper.OffsetMappedRange with start/end offsets. +// From the go/token domain, call safetoken.Offsets first, +// or use a helper such as ParsedGoFile.MappedPosRange. +// +// Two MappedRanges produced the same Mapper are equal if and only if they +// denote the same range. Two MappedRanges produced by different Mappers +// are unequal even when they represent the same range of the same file. +type MappedRange struct { + Mapper *Mapper + start, end int // valid byte offsets: 0 <= start <= end <= len(Mapper.Content) +} + +// Offsets returns the (start, end) byte offsets of this range. +func (mr MappedRange) Offsets() (start, end int) { return mr.start, mr.end } + +// -- convenience functions -- + +// URI returns the URI of the range's file. +func (mr MappedRange) URI() span.URI { + return mr.Mapper.URI +} + +// Range returns the range in protocol (UTF-16) form. +func (mr MappedRange) Range() Range { + rng, err := mr.Mapper.OffsetRange(mr.start, mr.end) + if err != nil { + panic(err) // can't happen + } + return rng +} + +// Location returns the range in protocol location (UTF-16) form. +func (mr MappedRange) Location() Location { + return mr.Mapper.RangeLocation(mr.Range()) +} + +// Span returns the range in span (UTF-8) form. +func (mr MappedRange) Span() span.Span { + spn, err := mr.Mapper.OffsetSpan(mr.start, mr.end) + if err != nil { + panic(err) // can't happen + } + return spn +} + +// String formats the range in span (UTF-8) notation. +func (mr MappedRange) String() string { + return fmt.Sprint(mr.Span()) +} + +// LocationTextDocumentPositionParams converts its argument to its result. +func LocationTextDocumentPositionParams(loc Location) TextDocumentPositionParams { + return TextDocumentPositionParams{ + TextDocument: TextDocumentIdentifier{URI: loc.URI}, + Position: loc.Range.Start, + } +} diff --git a/gopls/internal/lsp/protocol/mapper_test.go b/gopls/internal/lsp/protocol/mapper_test.go new file mode 100644 index 00000000000..0780491de69 --- /dev/null +++ b/gopls/internal/lsp/protocol/mapper_test.go @@ -0,0 +1,441 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protocol_test + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" +) + +// This file tests Mapper's logic for converting between +// span.Point and UTF-16 columns. (The strange form attests to an +// earlier abstraction.) + +// 𐐀 is U+10400 = [F0 90 90 80] in UTF-8, [D801 DC00] in UTF-16. +var funnyString = []byte("𐐀23\n𐐀45") + +var toUTF16Tests = []struct { + scenario string + input []byte + line int // 1-indexed count + col int // 1-indexed byte position in line + offset int // 0-indexed byte offset into input + resUTF16col int // 1-indexed UTF-16 col number + pre string // everything before the cursor on the line + post string // everything from the cursor onwards + err string // expected error string in call to ToUTF16Column + issue *bool +}{ + { + scenario: "cursor missing content", + input: nil, + offset: -1, + err: "point has neither offset nor line/column", + }, + { + scenario: "cursor missing position", + input: funnyString, + line: -1, + col: -1, + offset: -1, + err: "point has neither offset nor line/column", + }, + { + scenario: "zero length input; cursor at first col, first line", + input: []byte(""), + line: 1, + col: 1, + offset: 0, + resUTF16col: 1, + }, + { + scenario: "cursor before funny character; first line", + input: funnyString, + line: 1, + col: 1, + offset: 0, + resUTF16col: 1, + pre: "", + post: "𐐀23", + }, + { + scenario: "cursor after funny character; first line", + input: funnyString, + line: 1, + col: 5, // 4 + 1 (1-indexed) + offset: 4, // (unused since we have line+col) + resUTF16col: 3, // 2 + 1 (1-indexed) + pre: "𐐀", + post: "23", + }, + { + scenario: "cursor after last character on first line", + input: funnyString, + line: 1, + col: 7, // 4 + 1 + 1 + 1 (1-indexed) + offset: 6, // 4 + 1 + 1 (unused since we have line+col) + resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed) + pre: "𐐀23", + post: "", + }, + { + scenario: "cursor before funny character; second line", + input: funnyString, + line: 2, + col: 1, + offset: 7, // length of first line (unused since we have line+col) + resUTF16col: 1, + pre: "", + post: "𐐀45", + }, + { + scenario: "cursor after funny character; second line", + input: funnyString, + line: 1, + col: 5, // 4 + 1 (1-indexed) + offset: 11, // 7 (length of first line) + 4 (unused since we have line+col) + resUTF16col: 3, // 2 + 1 (1-indexed) + pre: "𐐀", + post: "45", + }, + { + scenario: "cursor after last character on second line", + input: funnyString, + line: 2, + col: 7, // 4 + 1 + 1 + 1 (1-indexed) + offset: 13, // 7 (length of first line) + 4 + 1 + 1 (unused since we have line+col) + resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed) + pre: "𐐀45", + post: "", + }, + { + scenario: "cursor beyond end of file", + input: funnyString, + line: 2, + col: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed) + offset: 14, // 4 + 1 + 1 + 1 (unused since we have line+col) + err: "column is beyond end of file", + }, +} + +var fromUTF16Tests = []struct { + scenario string + input []byte + line int // 1-indexed line number (isn't actually used) + utf16col int // 1-indexed UTF-16 col number + resCol int // 1-indexed byte position in line + resOffset int // 0-indexed byte offset into input + pre string // everything before the cursor on the line + post string // everything from the cursor onwards + err string // expected error string in call to ToUTF16Column +}{ + { + scenario: "zero length input; cursor at first col, first line", + input: []byte(""), + line: 1, + utf16col: 1, + resCol: 1, + resOffset: 0, + pre: "", + post: "", + }, + { + scenario: "cursor before funny character", + input: funnyString, + line: 1, + utf16col: 1, + resCol: 1, + resOffset: 0, + pre: "", + post: "𐐀23", + }, + { + scenario: "cursor after funny character", + input: funnyString, + line: 1, + utf16col: 3, + resCol: 5, + resOffset: 4, + pre: "𐐀", + post: "23", + }, + { + scenario: "cursor after last character on line", + input: funnyString, + line: 1, + utf16col: 5, + resCol: 7, + resOffset: 6, + pre: "𐐀23", + post: "", + }, + { + scenario: "cursor beyond last character on line", + input: funnyString, + line: 1, + utf16col: 6, + resCol: 7, + resOffset: 6, + pre: "𐐀23", + post: "", + err: "column is beyond end of line", + }, + { + scenario: "cursor before funny character; second line", + input: funnyString, + line: 2, + utf16col: 1, + resCol: 1, + resOffset: 7, + pre: "", + post: "𐐀45", + }, + { + scenario: "cursor after funny character; second line", + input: funnyString, + line: 2, + utf16col: 3, // 2 + 1 (1-indexed) + resCol: 5, // 4 + 1 (1-indexed) + resOffset: 11, // 7 (length of first line) + 4 + pre: "𐐀", + post: "45", + }, + { + scenario: "cursor after last character on second line", + input: funnyString, + line: 2, + utf16col: 5, // 2 + 1 + 1 + 1 (1-indexed) + resCol: 7, // 4 + 1 + 1 + 1 (1-indexed) + resOffset: 13, // 7 (length of first line) + 4 + 1 + 1 + pre: "𐐀45", + post: "", + }, + { + scenario: "cursor beyond end of file", + input: funnyString, + line: 2, + utf16col: 6, // 2 + 1 + 1 + 1 + 1(1-indexed) + resCol: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed) + resOffset: 14, // 7 (length of first line) + 4 + 1 + 1 + 1 + err: "column is beyond end of file", + }, +} + +func TestToUTF16(t *testing.T) { + for _, e := range toUTF16Tests { + t.Run(e.scenario, func(t *testing.T) { + if e.issue != nil && !*e.issue { + t.Skip("expected to fail") + } + p := span.NewPoint(e.line, e.col, e.offset) + m := protocol.NewMapper("", e.input) + pos, err := m.PointPosition(p) + if err != nil { + if err.Error() != e.err { + t.Fatalf("expected error %v; got %v", e.err, err) + } + return + } + if e.err != "" { + t.Fatalf("unexpected success; wanted %v", e.err) + } + got := int(pos.Character) + 1 + if got != e.resUTF16col { + t.Fatalf("expected result %v; got %v", e.resUTF16col, got) + } + pre, post := getPrePost(e.input, p.Offset()) + if string(pre) != e.pre { + t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre) + } + if string(post) != e.post { + t.Fatalf("expected #%d, post %q; got %q", p.Offset(), e.post, post) + } + }) + } +} + +func TestFromUTF16(t *testing.T) { + for _, e := range fromUTF16Tests { + t.Run(e.scenario, func(t *testing.T) { + m := protocol.NewMapper("", []byte(e.input)) + p, err := m.PositionPoint(protocol.Position{ + Line: uint32(e.line - 1), + Character: uint32(e.utf16col - 1), + }) + if err != nil { + if err.Error() != e.err { + t.Fatalf("expected error %v; got %v", e.err, err) + } + return + } + if e.err != "" { + t.Fatalf("unexpected success; wanted %v", e.err) + } + if p.Column() != e.resCol { + t.Fatalf("expected resulting col %v; got %v", e.resCol, p.Column()) + } + if p.Offset() != e.resOffset { + t.Fatalf("expected resulting offset %v; got %v", e.resOffset, p.Offset()) + } + pre, post := getPrePost(e.input, p.Offset()) + if string(pre) != e.pre { + t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre) + } + if string(post) != e.post { + t.Fatalf("expected #%d post %q; got %q", p.Offset(), e.post, post) + } + }) + } +} + +func getPrePost(content []byte, offset int) (string, string) { + pre, post := string(content)[:offset], string(content)[offset:] + if i := strings.LastIndex(pre, "\n"); i >= 0 { + pre = pre[i+1:] + } + if i := strings.IndexRune(post, '\n'); i >= 0 { + post = post[:i] + } + return pre, post +} + +// -- these are the historical lsppos tests -- + +type testCase struct { + content string // input text + substrOrOffset interface{} // explicit integer offset, or a substring + wantLine, wantChar int // expected LSP position information +} + +// offset returns the test case byte offset +func (c testCase) offset() int { + switch x := c.substrOrOffset.(type) { + case int: + return x + case string: + i := strings.Index(c.content, x) + if i < 0 { + panic(fmt.Sprintf("%q does not contain substring %q", c.content, x)) + } + return i + } + panic("substrOrIndex must be an integer or string") +} + +var tests = []testCase{ + {"a𐐀b", "a", 0, 0}, + {"a𐐀b", "𐐀", 0, 1}, + {"a𐐀b", "b", 0, 3}, + {"a𐐀b\n", "\n", 0, 4}, + {"a𐐀b\r\n", "\n", 0, 4}, // \r|\n is not a valid position, so we move back to the end of the first line. + {"a𐐀b\r\nx", "x", 1, 0}, + {"a𐐀b\r\nx\ny", "y", 2, 0}, + + // Testing EOL and EOF positions + {"", 0, 0, 0}, // 0th position of an empty buffer is (0, 0) + {"abc", "c", 0, 2}, + {"abc", 3, 0, 3}, + {"abc\n", "\n", 0, 3}, + {"abc\n", 4, 1, 0}, // position after a newline is on the next line +} + +func TestLineChar(t *testing.T) { + for _, test := range tests { + m := protocol.NewMapper("", []byte(test.content)) + offset := test.offset() + posn, _ := m.OffsetPosition(offset) + gotLine, gotChar := int(posn.Line), int(posn.Character) + if gotLine != test.wantLine || gotChar != test.wantChar { + t.Errorf("LineChar(%d) = (%d,%d), want (%d,%d)", offset, gotLine, gotChar, test.wantLine, test.wantChar) + } + } +} + +func TestInvalidOffset(t *testing.T) { + content := []byte("a𐐀b\r\nx\ny") + m := protocol.NewMapper("", content) + for _, offset := range []int{-1, 100} { + posn, err := m.OffsetPosition(offset) + if err == nil { + t.Errorf("OffsetPosition(%d) = %s, want error", offset, posn) + } + } +} + +func TestPosition(t *testing.T) { + for _, test := range tests { + m := protocol.NewMapper("", []byte(test.content)) + offset := test.offset() + got, err := m.OffsetPosition(offset) + if err != nil { + t.Errorf("OffsetPosition(%d) failed: %v", offset, err) + continue + } + want := protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)} + if got != want { + t.Errorf("Position(%d) = %v, want %v", offset, got, want) + } + } +} + +func TestRange(t *testing.T) { + for _, test := range tests { + m := protocol.NewMapper("", []byte(test.content)) + offset := test.offset() + got, err := m.OffsetRange(0, offset) + if err != nil { + t.Fatal(err) + } + want := protocol.Range{ + End: protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)}, + } + if got != want { + t.Errorf("Range(%d) = %v, want %v", offset, got, want) + } + } +} + +func TestBytesOffset(t *testing.T) { + tests := []struct { + text string + pos protocol.Position + want int + }{ + // U+10400 encodes as [F0 90 90 80] in UTF-8 and [D801 DC00] in UTF-16. + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 0}, want: 0}, + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 1}, want: 1}, + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 2}, want: 1}, + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 3}, want: 5}, + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 4}, want: 6}, + {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 5}, want: -1}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 3}, want: 3}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 4}, want: -1}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 0}, want: 4}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 3}, want: 7}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 4}, want: -1}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8}, + {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 1}, want: -1}, + {text: "aaa\nbbb\n\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8}, + } + + for i, test := range tests { + fname := fmt.Sprintf("test %d", i) + uri := span.URIFromPath(fname) + mapper := protocol.NewMapper(uri, []byte(test.text)) + got, err := mapper.PositionPoint(test.pos) + if err != nil && test.want != -1 { + t.Errorf("%d: unexpected error: %v", i, err) + } + if err == nil && got.Offset() != test.want { + t.Errorf("want %d for %q(Line:%d,Character:%d), but got %d", test.want, test.text, int(test.pos.Line), int(test.pos.Character), got.Offset()) + } + } +} + +// -- end -- diff --git a/internal/lsp/protocol/protocol.go b/gopls/internal/lsp/protocol/protocol.go similarity index 100% rename from internal/lsp/protocol/protocol.go rename to gopls/internal/lsp/protocol/protocol.go diff --git a/gopls/internal/lsp/protocol/span.go b/gopls/internal/lsp/protocol/span.go new file mode 100644 index 00000000000..d484f8f7413 --- /dev/null +++ b/gopls/internal/lsp/protocol/span.go @@ -0,0 +1,118 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protocol + +import ( + "fmt" + "unicode/utf8" + + "golang.org/x/tools/gopls/internal/span" +) + +func URIFromSpanURI(uri span.URI) DocumentURI { + return DocumentURI(uri) // simple conversion +} + +func URIFromPath(path string) DocumentURI { + return URIFromSpanURI(span.URIFromPath(path)) // normalizing conversion +} + +func (u DocumentURI) SpanURI() span.URI { + return span.URIFromURI(string(u)) // normalizing conversion +} + +func IsPoint(r Range) bool { + return r.Start.Line == r.End.Line && r.Start.Character == r.End.Character +} + +// CompareLocation defines a three-valued comparison over locations, +// lexicographically ordered by (URI, Range). +func CompareLocation(x, y Location) int { + if x.URI != y.URI { + if x.URI < y.URI { + return -1 + } else { + return +1 + } + } + return CompareRange(x.Range, y.Range) +} + +// CompareRange returns -1 if a is before b, 0 if a == b, and 1 if a is after b. +// +// A range a is defined to be 'before' b if a.Start is before b.Start, or +// a.Start == b.Start and a.End is before b.End. +func CompareRange(a, b Range) int { + if r := ComparePosition(a.Start, b.Start); r != 0 { + return r + } + return ComparePosition(a.End, b.End) +} + +// ComparePosition returns -1 if a is before b, 0 if a == b, and 1 if a is after b. +func ComparePosition(a, b Position) int { + if a.Line != b.Line { + if a.Line < b.Line { + return -1 + } else { + return +1 + } + } + if a.Character != b.Character { + if a.Character < b.Character { + return -1 + } else { + return +1 + } + } + return 0 +} + +func Intersect(a, b Range) bool { + if a.Start.Line > b.End.Line || a.End.Line < b.Start.Line { + return false + } + return !((a.Start.Line == b.End.Line) && a.Start.Character > b.End.Character || + (a.End.Line == b.Start.Line) && a.End.Character < b.Start.Character) +} + +// Format implements fmt.Formatter. +// +// Note: Formatter is implemented instead of Stringer (presumably) for +// performance reasons, though it is not clear that it matters in practice. +func (r Range) Format(f fmt.State, _ rune) { + fmt.Fprintf(f, "%v-%v", r.Start, r.End) +} + +// Format implements fmt.Formatter. +// +// See Range.Format for discussion of why the Formatter interface is +// implemented rather than Stringer. +func (p Position) Format(f fmt.State, _ rune) { + fmt.Fprintf(f, "%v:%v", p.Line, p.Character) +} + +// -- implementation helpers -- + +// UTF16Len returns the number of codes in the UTF-16 transcoding of s. +func UTF16Len(s []byte) int { + var n int + for len(s) > 0 { + n++ + + // Fast path for ASCII. + if s[0] < 0x80 { + s = s[1:] + continue + } + + r, size := utf8.DecodeRune(s) + if r >= 0x10000 { + n++ // surrogate pair + } + s = s[size:] + } + return n +} diff --git a/gopls/internal/lsp/protocol/tsclient.go b/gopls/internal/lsp/protocol/tsclient.go new file mode 100644 index 00000000000..96ec28474dd --- /dev/null +++ b/gopls/internal/lsp/protocol/tsclient.go @@ -0,0 +1,248 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated for LSP. DO NOT EDIT. + +package protocol + +// Code generated from version 3.17.0 of protocol/metaModel.json. +// git hash 9b742021fb04ad081aa3676a9eecf4fa612084b4 (as of 2023-01-30) + +import ( + "context" + "encoding/json" + + "golang.org/x/tools/internal/jsonrpc2" +) + +type Client interface { + LogTrace(context.Context, *LogTraceParams) error // $/logTrace + Progress(context.Context, *ProgressParams) error // $/progress + RegisterCapability(context.Context, *RegistrationParams) error // client/registerCapability + UnregisterCapability(context.Context, *UnregistrationParams) error // client/unregisterCapability + Event(context.Context, *interface{}) error // telemetry/event + PublishDiagnostics(context.Context, *PublishDiagnosticsParams) error // textDocument/publishDiagnostics + LogMessage(context.Context, *LogMessageParams) error // window/logMessage + ShowDocument(context.Context, *ShowDocumentParams) (*ShowDocumentResult, error) // window/showDocument + ShowMessage(context.Context, *ShowMessageParams) error // window/showMessage + ShowMessageRequest(context.Context, *ShowMessageRequestParams) (*MessageActionItem, error) // window/showMessageRequest + WorkDoneProgressCreate(context.Context, *WorkDoneProgressCreateParams) error // window/workDoneProgress/create + ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) // workspace/applyEdit + CodeLensRefresh(context.Context) error // workspace/codeLens/refresh + Configuration(context.Context, *ParamConfiguration) ([]LSPAny, error) // workspace/configuration + DiagnosticRefresh(context.Context) error // workspace/diagnostic/refresh + InlayHintRefresh(context.Context) error // workspace/inlayHint/refresh + InlineValueRefresh(context.Context) error // workspace/inlineValue/refresh + SemanticTokensRefresh(context.Context) error // workspace/semanticTokens/refresh + WorkspaceFolders(context.Context) ([]WorkspaceFolder, error) // workspace/workspaceFolders +} + +func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { + switch r.Method() { + case "$/logTrace": + var params LogTraceParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.LogTrace(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "$/progress": + var params ProgressParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.Progress(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "client/registerCapability": + var params RegistrationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.RegisterCapability(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "client/unregisterCapability": + var params UnregistrationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.UnregisterCapability(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "telemetry/event": + var params interface{} + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.Event(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/publishDiagnostics": + var params PublishDiagnosticsParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.PublishDiagnostics(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "window/logMessage": + var params LogMessageParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.LogMessage(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "window/showDocument": + var params ShowDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := client.ShowDocument(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "window/showMessage": + var params ShowMessageParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.ShowMessage(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "window/showMessageRequest": + var params ShowMessageRequestParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := client.ShowMessageRequest(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "window/workDoneProgress/create": + var params WorkDoneProgressCreateParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := client.WorkDoneProgressCreate(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/applyEdit": + var params ApplyWorkspaceEditParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := client.ApplyEdit(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/codeLens/refresh": + err := client.CodeLensRefresh(ctx) + return true, reply(ctx, nil, err) + case "workspace/configuration": + var params ParamConfiguration + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := client.Configuration(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/diagnostic/refresh": + err := client.DiagnosticRefresh(ctx) + return true, reply(ctx, nil, err) + case "workspace/inlayHint/refresh": + err := client.InlayHintRefresh(ctx) + return true, reply(ctx, nil, err) + case "workspace/inlineValue/refresh": + err := client.InlineValueRefresh(ctx) + return true, reply(ctx, nil, err) + case "workspace/semanticTokens/refresh": + err := client.SemanticTokensRefresh(ctx) + return true, reply(ctx, nil, err) + case "workspace/workspaceFolders": + resp, err := client.WorkspaceFolders(ctx) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + default: + return false, nil + } +} + +func (s *clientDispatcher) LogTrace(ctx context.Context, params *LogTraceParams) error { + return s.sender.Notify(ctx, "$/logTrace", params) +} +func (s *clientDispatcher) Progress(ctx context.Context, params *ProgressParams) error { + return s.sender.Notify(ctx, "$/progress", params) +} +func (s *clientDispatcher) RegisterCapability(ctx context.Context, params *RegistrationParams) error { + return s.sender.Call(ctx, "client/registerCapability", params, nil) +} +func (s *clientDispatcher) UnregisterCapability(ctx context.Context, params *UnregistrationParams) error { + return s.sender.Call(ctx, "client/unregisterCapability", params, nil) +} +func (s *clientDispatcher) Event(ctx context.Context, params *interface{}) error { + return s.sender.Notify(ctx, "telemetry/event", params) +} +func (s *clientDispatcher) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) error { + return s.sender.Notify(ctx, "textDocument/publishDiagnostics", params) +} +func (s *clientDispatcher) LogMessage(ctx context.Context, params *LogMessageParams) error { + return s.sender.Notify(ctx, "window/logMessage", params) +} +func (s *clientDispatcher) ShowDocument(ctx context.Context, params *ShowDocumentParams) (*ShowDocumentResult, error) { + var result *ShowDocumentResult + if err := s.sender.Call(ctx, "window/showDocument", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *clientDispatcher) ShowMessage(ctx context.Context, params *ShowMessageParams) error { + return s.sender.Notify(ctx, "window/showMessage", params) +} +func (s *clientDispatcher) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (*MessageActionItem, error) { + var result *MessageActionItem + if err := s.sender.Call(ctx, "window/showMessageRequest", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *clientDispatcher) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) error { + return s.sender.Call(ctx, "window/workDoneProgress/create", params, nil) +} +func (s *clientDispatcher) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) { + var result *ApplyWorkspaceEditResult + if err := s.sender.Call(ctx, "workspace/applyEdit", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *clientDispatcher) CodeLensRefresh(ctx context.Context) error { + return s.sender.Call(ctx, "workspace/codeLens/refresh", nil, nil) +} +func (s *clientDispatcher) Configuration(ctx context.Context, params *ParamConfiguration) ([]LSPAny, error) { + var result []LSPAny + if err := s.sender.Call(ctx, "workspace/configuration", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *clientDispatcher) DiagnosticRefresh(ctx context.Context) error { + return s.sender.Call(ctx, "workspace/diagnostic/refresh", nil, nil) +} +func (s *clientDispatcher) InlayHintRefresh(ctx context.Context) error { + return s.sender.Call(ctx, "workspace/inlayHint/refresh", nil, nil) +} +func (s *clientDispatcher) InlineValueRefresh(ctx context.Context) error { + return s.sender.Call(ctx, "workspace/inlineValue/refresh", nil, nil) +} +func (s *clientDispatcher) SemanticTokensRefresh(ctx context.Context) error { + return s.sender.Call(ctx, "workspace/semanticTokens/refresh", nil, nil) +} +func (s *clientDispatcher) WorkspaceFolders(ctx context.Context) ([]WorkspaceFolder, error) { + var result []WorkspaceFolder + if err := s.sender.Call(ctx, "workspace/workspaceFolders", nil, &result); err != nil { + return nil, err + } + return result, nil +} diff --git a/gopls/internal/lsp/protocol/tsdocument_changes.go b/gopls/internal/lsp/protocol/tsdocument_changes.go new file mode 100644 index 00000000000..2c7a524e178 --- /dev/null +++ b/gopls/internal/lsp/protocol/tsdocument_changes.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protocol + +import ( + "encoding/json" + "fmt" +) + +// DocumentChanges is a union of a file edit and directory rename operations +// for package renaming feature. At most one field of this struct is non-nil. +type DocumentChanges struct { + TextDocumentEdit *TextDocumentEdit + RenameFile *RenameFile +} + +func (d *DocumentChanges) UnmarshalJSON(data []byte) error { + var m map[string]interface{} + + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + if _, ok := m["textDocument"]; ok { + d.TextDocumentEdit = new(TextDocumentEdit) + return json.Unmarshal(data, d.TextDocumentEdit) + } + + d.RenameFile = new(RenameFile) + return json.Unmarshal(data, d.RenameFile) +} + +func (d *DocumentChanges) MarshalJSON() ([]byte, error) { + if d.TextDocumentEdit != nil { + return json.Marshal(d.TextDocumentEdit) + } else if d.RenameFile != nil { + return json.Marshal(d.RenameFile) + } + return nil, fmt.Errorf("Empty DocumentChanges union value") +} diff --git a/gopls/internal/lsp/protocol/tsjson.go b/gopls/internal/lsp/protocol/tsjson.go new file mode 100644 index 00000000000..a2904aa2dda --- /dev/null +++ b/gopls/internal/lsp/protocol/tsjson.go @@ -0,0 +1,1987 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated for LSP. DO NOT EDIT. + +package protocol + +// Code generated from version 3.17.0 of protocol/metaModel.json. +// git hash 9b742021fb04ad081aa3676a9eecf4fa612084b4 (as of 2023-01-30) + +import "encoding/json" + +import "errors" +import "fmt" + +// from line 4768 +func (t OrFEditRangePItemDefaults) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FEditRangePItemDefaults: + return json.Marshal(x) + case Range: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FEditRangePItemDefaults Range]", t) +} + +func (t *OrFEditRangePItemDefaults) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FEditRangePItemDefaults + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 Range + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FEditRangePItemDefaults Range]") +} + +// from line 9810 +func (t OrFNotebookPNotebookSelector) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case NotebookDocumentFilter: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t) +} + +func (t *OrFNotebookPNotebookSelector) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 NotebookDocumentFilter + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [NotebookDocumentFilter string]") +} + +// from line 5519 +func (t OrPLocation_workspace_symbol) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case Location: + return json.Marshal(x) + case PLocationMsg_workspace_symbol: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [Location PLocationMsg_workspace_symbol]", t) +} + +func (t *OrPLocation_workspace_symbol) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 Location + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 PLocationMsg_workspace_symbol + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [Location PLocationMsg_workspace_symbol]") +} + +// from line 4162 +func (t OrPSection_workspace_didChangeConfiguration) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case []string: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [[]string string]", t) +} + +func (t *OrPSection_workspace_didChangeConfiguration) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 []string + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [[]string string]") +} + +// from line 7074 +func (t OrPTooltipPLabel) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MarkupContent: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t) +} + +func (t *OrPTooltipPLabel) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MarkupContent + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [MarkupContent string]") +} + +// from line 3698 +func (t OrPTooltip_textDocument_inlayHint) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MarkupContent: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t) +} + +func (t *OrPTooltip_textDocument_inlayHint) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MarkupContent + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [MarkupContent string]") +} + +// from line 6183 +func (t Or_CancelParams_id) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case int32: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [int32 string]", t) +} + +func (t *Or_CancelParams_id) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 int32 + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [int32 string]") +} + +// from line 4581 +func (t Or_CompletionItem_documentation) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MarkupContent: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t) +} + +func (t *Or_CompletionItem_documentation) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MarkupContent + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [MarkupContent string]") +} + +// from line 4664 +func (t Or_CompletionItem_textEdit) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case InsertReplaceEdit: + return json.Marshal(x) + case TextEdit: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [InsertReplaceEdit TextEdit]", t) +} + +func (t *Or_CompletionItem_textEdit) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 InsertReplaceEdit + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TextEdit + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [InsertReplaceEdit TextEdit]") +} + +// from line 13752 +func (t Or_Definition) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case Location: + return json.Marshal(x) + case []Location: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [Location []Location]", t) +} + +func (t *Or_Definition) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 Location + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 []Location + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [Location []Location]") +} + +// from line 8546 +func (t Or_Diagnostic_code) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case int32: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [int32 string]", t) +} + +func (t *Or_Diagnostic_code) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 int32 + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [int32 string]") +} + +// from line 13884 +func (t Or_DocumentDiagnosticReport) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case RelatedFullDocumentDiagnosticReport: + return json.Marshal(x) + case RelatedUnchangedDocumentDiagnosticReport: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport]", t) +} + +func (t *Or_DocumentDiagnosticReport) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 RelatedFullDocumentDiagnosticReport + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 RelatedUnchangedDocumentDiagnosticReport + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport]") +} + +// from line 3821 +func (t Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FullDocumentDiagnosticReport: + return json.Marshal(x) + case UnchangedDocumentDiagnosticReport: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t) +} + +func (t *Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FullDocumentDiagnosticReport + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 UnchangedDocumentDiagnosticReport + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]") +} + +// from line 14094 +func (t Or_DocumentFilter) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case NotebookCellTextDocumentFilter: + return json.Marshal(x) + case TextDocumentFilter: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [NotebookCellTextDocumentFilter TextDocumentFilter]", t) +} + +func (t *Or_DocumentFilter) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 NotebookCellTextDocumentFilter + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TextDocumentFilter + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [NotebookCellTextDocumentFilter TextDocumentFilter]") +} + +// from line 4890 +func (t Or_Hover_contents) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MarkedString: + return json.Marshal(x) + case MarkupContent: + return json.Marshal(x) + case []MarkedString: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MarkedString MarkupContent []MarkedString]", t) +} + +func (t *Or_Hover_contents) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MarkedString + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 MarkupContent + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 []MarkedString + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [MarkedString MarkupContent []MarkedString]") +} + +// from line 3657 +func (t Or_InlayHint_label) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case []InlayHintLabelPart: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [[]InlayHintLabelPart string]", t) +} + +func (t *Or_InlayHint_label) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 []InlayHintLabelPart + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [[]InlayHintLabelPart string]") +} + +// from line 13862 +func (t Or_InlineValue) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case InlineValueEvaluatableExpression: + return json.Marshal(x) + case InlineValueText: + return json.Marshal(x) + case InlineValueVariableLookup: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup]", t) +} + +func (t *Or_InlineValue) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 InlineValueEvaluatableExpression + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 InlineValueText + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 InlineValueVariableLookup + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup]") +} + +// from line 14059 +func (t Or_MarkedString) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case Msg_MarkedString: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [Msg_MarkedString string]", t) +} + +func (t *Or_MarkedString) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 Msg_MarkedString + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [Msg_MarkedString string]") +} + +// from line 10117 +func (t Or_NotebookCellTextDocumentFilter_notebook) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case NotebookDocumentFilter: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t) +} + +func (t *Or_NotebookCellTextDocumentFilter_notebook) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 NotebookDocumentFilter + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [NotebookDocumentFilter string]") +} + +// from line 9856 +func (t Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case NotebookDocumentFilter: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t) +} + +func (t *Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 NotebookDocumentFilter + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [NotebookDocumentFilter string]") +} + +// from line 7167 +func (t Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FullDocumentDiagnosticReport: + return json.Marshal(x) + case UnchangedDocumentDiagnosticReport: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t) +} + +func (t *Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FullDocumentDiagnosticReport + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 UnchangedDocumentDiagnosticReport + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]") +} + +// from line 7206 +func (t Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FullDocumentDiagnosticReport: + return json.Marshal(x) + case UnchangedDocumentDiagnosticReport: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t) +} + +func (t *Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FullDocumentDiagnosticReport + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 UnchangedDocumentDiagnosticReport + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]") +} + +// from line 10740 +func (t Or_RelativePattern_baseUri) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case URI: + return json.Marshal(x) + case WorkspaceFolder: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [URI WorkspaceFolder]", t) +} + +func (t *Or_RelativePattern_baseUri) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 URI + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 WorkspaceFolder + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [URI WorkspaceFolder]") +} + +// from line 1370 +func (t Or_Result_textDocument_codeAction_Item0_Elem) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case CodeAction: + return json.Marshal(x) + case Command: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [CodeAction Command]", t) +} + +func (t *Or_Result_textDocument_codeAction_Item0_Elem) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 CodeAction + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 Command + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [CodeAction Command]") +} + +// from line 12196 +func (t Or_SemanticTokensClientCapabilities_requests_full) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FFullPRequests: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FFullPRequests bool]", t) +} + +func (t *Or_SemanticTokensClientCapabilities_requests_full) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FFullPRequests + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FFullPRequests bool]") +} + +// from line 12176 +func (t Or_SemanticTokensClientCapabilities_requests_range) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FRangePRequests: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FRangePRequests bool]", t) +} + +func (t *Or_SemanticTokensClientCapabilities_requests_range) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FRangePRequests + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [FRangePRequests bool]") +} + +// from line 6578 +func (t Or_SemanticTokensOptions_full) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case PFullESemanticTokensOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [PFullESemanticTokensOptions bool]", t) +} + +func (t *Or_SemanticTokensOptions_full) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 PFullESemanticTokensOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [PFullESemanticTokensOptions bool]") +} + +// from line 6558 +func (t Or_SemanticTokensOptions_range) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case PRangeESemanticTokensOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [PRangeESemanticTokensOptions bool]", t) +} + +func (t *Or_SemanticTokensOptions_range) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 PRangeESemanticTokensOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [PRangeESemanticTokensOptions bool]") +} + +// from line 8226 +func (t Or_ServerCapabilities_callHierarchyProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case CallHierarchyOptions: + return json.Marshal(x) + case CallHierarchyRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [CallHierarchyOptions CallHierarchyRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_callHierarchyProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 CallHierarchyOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 CallHierarchyRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [CallHierarchyOptions CallHierarchyRegistrationOptions bool]") +} + +// from line 8034 +func (t Or_ServerCapabilities_codeActionProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case CodeActionOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [CodeActionOptions bool]", t) +} + +func (t *Or_ServerCapabilities_codeActionProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 CodeActionOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [CodeActionOptions bool]") +} + +// from line 8070 +func (t Or_ServerCapabilities_colorProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DocumentColorOptions: + return json.Marshal(x) + case DocumentColorRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DocumentColorOptions DocumentColorRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_colorProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DocumentColorOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 DocumentColorRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [DocumentColorOptions DocumentColorRegistrationOptions bool]") +} + +// from line 7896 +func (t Or_ServerCapabilities_declarationProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DeclarationOptions: + return json.Marshal(x) + case DeclarationRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DeclarationOptions DeclarationRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_declarationProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DeclarationOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 DeclarationRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [DeclarationOptions DeclarationRegistrationOptions bool]") +} + +// from line 7918 +func (t Or_ServerCapabilities_definitionProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DefinitionOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DefinitionOptions bool]", t) +} + +func (t *Or_ServerCapabilities_definitionProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DefinitionOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DefinitionOptions bool]") +} + +// from line 8383 +func (t Or_ServerCapabilities_diagnosticProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DiagnosticOptions: + return json.Marshal(x) + case DiagnosticRegistrationOptions: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DiagnosticOptions DiagnosticRegistrationOptions]", t) +} + +func (t *Or_ServerCapabilities_diagnosticProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DiagnosticOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 DiagnosticRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DiagnosticOptions DiagnosticRegistrationOptions]") +} + +// from line 8110 +func (t Or_ServerCapabilities_documentFormattingProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DocumentFormattingOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DocumentFormattingOptions bool]", t) +} + +func (t *Or_ServerCapabilities_documentFormattingProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DocumentFormattingOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DocumentFormattingOptions bool]") +} + +// from line 7998 +func (t Or_ServerCapabilities_documentHighlightProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DocumentHighlightOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DocumentHighlightOptions bool]", t) +} + +func (t *Or_ServerCapabilities_documentHighlightProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DocumentHighlightOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DocumentHighlightOptions bool]") +} + +// from line 8128 +func (t Or_ServerCapabilities_documentRangeFormattingProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DocumentRangeFormattingOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DocumentRangeFormattingOptions bool]", t) +} + +func (t *Or_ServerCapabilities_documentRangeFormattingProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DocumentRangeFormattingOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DocumentRangeFormattingOptions bool]") +} + +// from line 8016 +func (t Or_ServerCapabilities_documentSymbolProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case DocumentSymbolOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [DocumentSymbolOptions bool]", t) +} + +func (t *Or_ServerCapabilities_documentSymbolProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 DocumentSymbolOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [DocumentSymbolOptions bool]") +} + +// from line 8173 +func (t Or_ServerCapabilities_foldingRangeProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case FoldingRangeOptions: + return json.Marshal(x) + case FoldingRangeRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [FoldingRangeOptions FoldingRangeRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_foldingRangeProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 FoldingRangeOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 FoldingRangeRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [FoldingRangeOptions FoldingRangeRegistrationOptions bool]") +} + +// from line 7869 +func (t Or_ServerCapabilities_hoverProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case HoverOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [HoverOptions bool]", t) +} + +func (t *Or_ServerCapabilities_hoverProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 HoverOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [HoverOptions bool]") +} + +// from line 7958 +func (t Or_ServerCapabilities_implementationProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case ImplementationOptions: + return json.Marshal(x) + case ImplementationRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [ImplementationOptions ImplementationRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_implementationProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 ImplementationOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 ImplementationRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [ImplementationOptions ImplementationRegistrationOptions bool]") +} + +// from line 8360 +func (t Or_ServerCapabilities_inlayHintProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case InlayHintOptions: + return json.Marshal(x) + case InlayHintRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [InlayHintOptions InlayHintRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_inlayHintProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 InlayHintOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 InlayHintRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [InlayHintOptions InlayHintRegistrationOptions bool]") +} + +// from line 8337 +func (t Or_ServerCapabilities_inlineValueProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case InlineValueOptions: + return json.Marshal(x) + case InlineValueRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [InlineValueOptions InlineValueRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_inlineValueProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 InlineValueOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 InlineValueRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [InlineValueOptions InlineValueRegistrationOptions bool]") +} + +// from line 8249 +func (t Or_ServerCapabilities_linkedEditingRangeProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case LinkedEditingRangeOptions: + return json.Marshal(x) + case LinkedEditingRangeRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_linkedEditingRangeProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 LinkedEditingRangeOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 LinkedEditingRangeRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool]") +} + +// from line 8291 +func (t Or_ServerCapabilities_monikerProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MonikerOptions: + return json.Marshal(x) + case MonikerRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MonikerOptions MonikerRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_monikerProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MonikerOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 MonikerRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [MonikerOptions MonikerRegistrationOptions bool]") +} + +// from line 7841 +func (t Or_ServerCapabilities_notebookDocumentSync) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case NotebookDocumentSyncOptions: + return json.Marshal(x) + case NotebookDocumentSyncRegistrationOptions: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions]", t) +} + +func (t *Or_ServerCapabilities_notebookDocumentSync) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 NotebookDocumentSyncOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 NotebookDocumentSyncRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions]") +} + +// from line 7980 +func (t Or_ServerCapabilities_referencesProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case ReferenceOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [ReferenceOptions bool]", t) +} + +func (t *Or_ServerCapabilities_referencesProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 ReferenceOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [ReferenceOptions bool]") +} + +// from line 8155 +func (t Or_ServerCapabilities_renameProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case RenameOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [RenameOptions bool]", t) +} + +func (t *Or_ServerCapabilities_renameProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 RenameOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [RenameOptions bool]") +} + +// from line 8195 +func (t Or_ServerCapabilities_selectionRangeProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case SelectionRangeOptions: + return json.Marshal(x) + case SelectionRangeRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [SelectionRangeOptions SelectionRangeRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_selectionRangeProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 SelectionRangeOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 SelectionRangeRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [SelectionRangeOptions SelectionRangeRegistrationOptions bool]") +} + +// from line 8272 +func (t Or_ServerCapabilities_semanticTokensProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case SemanticTokensOptions: + return json.Marshal(x) + case SemanticTokensRegistrationOptions: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [SemanticTokensOptions SemanticTokensRegistrationOptions]", t) +} + +func (t *Or_ServerCapabilities_semanticTokensProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 SemanticTokensOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 SemanticTokensRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [SemanticTokensOptions SemanticTokensRegistrationOptions]") +} + +// from line 7823 +func (t Or_ServerCapabilities_textDocumentSync) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case TextDocumentSyncKind: + return json.Marshal(x) + case TextDocumentSyncOptions: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [TextDocumentSyncKind TextDocumentSyncOptions]", t) +} + +func (t *Or_ServerCapabilities_textDocumentSync) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 TextDocumentSyncKind + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TextDocumentSyncOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [TextDocumentSyncKind TextDocumentSyncOptions]") +} + +// from line 7936 +func (t Or_ServerCapabilities_typeDefinitionProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case TypeDefinitionOptions: + return json.Marshal(x) + case TypeDefinitionRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_typeDefinitionProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 TypeDefinitionOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TypeDefinitionRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool]") +} + +// from line 8314 +func (t Or_ServerCapabilities_typeHierarchyProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case TypeHierarchyOptions: + return json.Marshal(x) + case TypeHierarchyRegistrationOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool]", t) +} + +func (t *Or_ServerCapabilities_typeHierarchyProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 TypeHierarchyOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TypeHierarchyRegistrationOptions + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 bool + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + return errors.New("unmarshal failed to match one of [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool]") +} + +// from line 8092 +func (t Or_ServerCapabilities_workspaceSymbolProvider) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case WorkspaceSymbolOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [WorkspaceSymbolOptions bool]", t) +} + +func (t *Or_ServerCapabilities_workspaceSymbolProvider) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 WorkspaceSymbolOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [WorkspaceSymbolOptions bool]") +} + +// from line 8840 +func (t Or_SignatureInformation_documentation) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case MarkupContent: + return json.Marshal(x) + case string: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t) +} + +func (t *Or_SignatureInformation_documentation) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 MarkupContent + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 string + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [MarkupContent string]") +} + +// from line 6691 +func (t Or_TextDocumentEdit_edits_Elem) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case AnnotatedTextEdit: + return json.Marshal(x) + case TextEdit: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [AnnotatedTextEdit TextEdit]", t) +} + +func (t *Or_TextDocumentEdit_edits_Elem) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 AnnotatedTextEdit + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 TextEdit + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [AnnotatedTextEdit TextEdit]") +} + +// from line 9776 +func (t Or_TextDocumentSyncOptions_save) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case SaveOptions: + return json.Marshal(x) + case bool: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [SaveOptions bool]", t) +} + +func (t *Or_TextDocumentSyncOptions_save) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 SaveOptions + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 bool + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [SaveOptions bool]") +} + +// from line 13985 +func (t Or_WorkspaceDocumentDiagnosticReport) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case WorkspaceFullDocumentDiagnosticReport: + return json.Marshal(x) + case WorkspaceUnchangedDocumentDiagnosticReport: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport]", t) +} + +func (t *Or_WorkspaceDocumentDiagnosticReport) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 WorkspaceFullDocumentDiagnosticReport + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 WorkspaceUnchangedDocumentDiagnosticReport + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport]") +} + +// from line 3218 +func (t Or_WorkspaceEdit_documentChanges_Elem) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case CreateFile: + return json.Marshal(x) + case DeleteFile: + return json.Marshal(x) + case RenameFile: + return json.Marshal(x) + case TextDocumentEdit: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [CreateFile DeleteFile RenameFile TextDocumentEdit]", t) +} + +func (t *Or_WorkspaceEdit_documentChanges_Elem) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 CreateFile + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 DeleteFile + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + var h2 RenameFile + if err := json.Unmarshal(x, &h2); err == nil { + t.Value = h2 + return nil + } + var h3 TextDocumentEdit + if err := json.Unmarshal(x, &h3); err == nil { + t.Value = h3 + return nil + } + return errors.New("unmarshal failed to match one of [CreateFile DeleteFile RenameFile TextDocumentEdit]") +} + +// from line 247 +func (t Or_textDocument_declaration) MarshalJSON() ([]byte, error) { + switch x := t.Value.(type) { + case Declaration: + return json.Marshal(x) + case []DeclarationLink: + return json.Marshal(x) + case nil: + return []byte("null"), nil + } + return nil, fmt.Errorf("type %T not one of [Declaration []DeclarationLink]", t) +} + +func (t *Or_textDocument_declaration) UnmarshalJSON(x []byte) error { + if string(x) == "null" { + t.Value = nil + return nil + } + var h0 Declaration + if err := json.Unmarshal(x, &h0); err == nil { + t.Value = h0 + return nil + } + var h1 []DeclarationLink + if err := json.Unmarshal(x, &h1); err == nil { + t.Value = h1 + return nil + } + return errors.New("unmarshal failed to match one of [Declaration []DeclarationLink]") +} diff --git a/gopls/internal/lsp/protocol/tsprotocol.go b/gopls/internal/lsp/protocol/tsprotocol.go new file mode 100644 index 00000000000..e1170fe940c --- /dev/null +++ b/gopls/internal/lsp/protocol/tsprotocol.go @@ -0,0 +1,6565 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated for LSP. DO NOT EDIT. + +package protocol + +// Code generated from version 3.17.0 of protocol/metaModel.json. +// git hash 9b742021fb04ad081aa3676a9eecf4fa612084b4 (as of 2023-01-30) + +import "encoding/json" + +/* + * A special text edit with an additional change annotation. + * + * @since 3.16.0. + */ +type AnnotatedTextEdit struct { // line 9371 + // The actual identifier of the change annotation + AnnotationID ChangeAnnotationIdentifier `json:"annotationId"` + TextEdit +} + +// The parameters passed via a apply workspace edit request. +type ApplyWorkspaceEditParams struct { // line 5983 + /* + * An optional label of the workspace edit. This label is + * presented in the user interface for example on an undo + * stack to undo the workspace edit. + */ + Label string `json:"label,omitempty"` + // The edits to apply. + Edit WorkspaceEdit `json:"edit"` +} + +/* + * The result returned from the apply workspace edit request. + * + * @since 3.17 renamed from ApplyWorkspaceEditResponse + */ +type ApplyWorkspaceEditResult struct { // line 6006 + // Indicates whether the edit was applied or not. + Applied bool `json:"applied"` + /* + * An optional textual description for why the edit was not applied. + * This may be used by the server for diagnostic logging or to provide + * a suitable error for a request that triggered the edit. + */ + FailureReason string `json:"failureReason,omitempty"` + /* + * Depending on the client's failure handling strategy `failedChange` might + * contain the index of the change that failed. This property is only available + * if the client signals a `failureHandlingStrategy` in its client capabilities. + */ + FailedChange uint32 `json:"failedChange,omitempty"` +} + +// A base for all symbol information. +type BaseSymbolInformation struct { // line 8965 + // The name of this symbol. + Name string `json:"name"` + // The kind of this symbol. + Kind SymbolKind `json:"kind"` + /* + * Tags for this symbol. + * + * @since 3.16.0 + */ + Tags []SymbolTag `json:"tags,omitempty"` + /* + * The name of the symbol containing this symbol. This information is for + * user interface purposes (e.g. to render a qualifier in the user interface + * if necessary). It can't be used to re-infer a hierarchy for the document + * symbols. + */ + ContainerName string `json:"containerName,omitempty"` +} + +// @since 3.16.0 +type CallHierarchyClientCapabilities struct { // line 12140 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +/* + * Represents an incoming call, e.g. a caller of a method or constructor. + * + * @since 3.16.0 + */ +type CallHierarchyIncomingCall struct { // line 2778 + // The item that makes the call. + From CallHierarchyItem `json:"from"` + /* + * The ranges at which the calls appear. This is relative to the caller + * denoted by {@link CallHierarchyIncomingCall.from `this.from`}. + */ + FromRanges []Range `json:"fromRanges"` +} + +/* + * The parameter of a `callHierarchy/incomingCalls` request. + * + * @since 3.16.0 + */ +type CallHierarchyIncomingCallsParams struct { // line 2754 + Item CallHierarchyItem `json:"item"` + WorkDoneProgressParams + PartialResultParams +} + +/* + * Represents programming constructs like functions or constructors in the context + * of call hierarchy. + * + * @since 3.16.0 + */ +type CallHierarchyItem struct { // line 2655 + // The name of this item. + Name string `json:"name"` + // The kind of this item. + Kind SymbolKind `json:"kind"` + // Tags for this item. + Tags []SymbolTag `json:"tags,omitempty"` + // More detail for this item, e.g. the signature of a function. + Detail string `json:"detail,omitempty"` + // The resource identifier of this item. + URI DocumentURI `json:"uri"` + // The range enclosing this symbol not including leading/trailing whitespace but everything else, e.g. comments and code. + Range Range `json:"range"` + /* + * The range that should be selected and revealed when this symbol is being picked, e.g. the name of a function. + * Must be contained by the {@link CallHierarchyItem.range `range`}. + */ + SelectionRange Range `json:"selectionRange"` + /* + * A data entry field that is preserved between a call hierarchy prepare and + * incoming calls or outgoing calls requests. + */ + Data interface{} `json:"data,omitempty"` +} + +/* + * Call hierarchy options used during static registration. + * + * @since 3.16.0 + */ +type CallHierarchyOptions struct { // line 6533 + WorkDoneProgressOptions +} + +/* + * Represents an outgoing call, e.g. calling a getter from a method or a method from a constructor etc. + * + * @since 3.16.0 + */ +type CallHierarchyOutgoingCall struct { // line 2828 + // The item that is called. + To CallHierarchyItem `json:"to"` + /* + * The range at which this item is called. This is the range relative to the caller, e.g the item + * passed to {@link CallHierarchyItemProvider.provideCallHierarchyOutgoingCalls `provideCallHierarchyOutgoingCalls`} + * and not {@link CallHierarchyOutgoingCall.to `this.to`}. + */ + FromRanges []Range `json:"fromRanges"` +} + +/* + * The parameter of a `callHierarchy/outgoingCalls` request. + * + * @since 3.16.0 + */ +type CallHierarchyOutgoingCallsParams struct { // line 2804 + Item CallHierarchyItem `json:"item"` + WorkDoneProgressParams + PartialResultParams +} + +/* + * The parameter of a `textDocument/prepareCallHierarchy` request. + * + * @since 3.16.0 + */ +type CallHierarchyPrepareParams struct { // line 2637 + TextDocumentPositionParams + WorkDoneProgressParams +} + +/* + * Call hierarchy options used during static or dynamic registration. + * + * @since 3.16.0 + */ +type CallHierarchyRegistrationOptions struct { // line 2732 + TextDocumentRegistrationOptions + CallHierarchyOptions + StaticRegistrationOptions +} +type CancelParams struct { // line 6178 + // The request id to cancel. + ID interface{} `json:"id"` +} + +/* + * Additional information that describes document changes. + * + * @since 3.16.0 + */ +type ChangeAnnotation struct { // line 6830 + /* + * A human-readable string describing the actual change. The string + * is rendered prominent in the user interface. + */ + Label string `json:"label"` + /* + * A flag which indicates that user confirmation is needed + * before applying the change. + */ + NeedsConfirmation bool `json:"needsConfirmation,omitempty"` + /* + * A human-readable string which is rendered less prominent in + * the user interface. + */ + Description string `json:"description,omitempty"` +} + +// An identifier to refer to a change annotation stored with a workspace edit. +type ChangeAnnotationIdentifier = string // (alias) line 13975 +// Defines the capabilities provided by the client. +type ClientCapabilities struct { // line 9673 + // Workspace specific client capabilities. + Workspace WorkspaceClientCapabilities `json:"workspace,omitempty"` + // Text document specific client capabilities. + TextDocument TextDocumentClientCapabilities `json:"textDocument,omitempty"` + /* + * Capabilities specific to the notebook document support. + * + * @since 3.17.0 + */ + NotebookDocument *NotebookDocumentClientCapabilities `json:"notebookDocument,omitempty"` + // Window specific client capabilities. + Window WindowClientCapabilities `json:"window,omitempty"` + /* + * General client capabilities. + * + * @since 3.16.0 + */ + General *GeneralClientCapabilities `json:"general,omitempty"` + // Experimental client capabilities. + Experimental interface{} `json:"experimental,omitempty"` +} + +/* + * A code action represents a change that can be performed in code, e.g. to fix a problem or + * to refactor code. + * + * A CodeAction must set either `edit` and/or a `command`. If both are supplied, the `edit` is applied first, then the `command` is executed. + */ +type CodeAction struct { // line 5381 + // A short, human-readable, title for this code action. + Title string `json:"title"` + /* + * The kind of the code action. + * + * Used to filter code actions. + */ + Kind CodeActionKind `json:"kind,omitempty"` + // The diagnostics that this code action resolves. + Diagnostics []Diagnostic `json:"diagnostics,omitempty"` + /* + * Marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted + * by keybindings. + * + * A quick fix should be marked preferred if it properly addresses the underlying error. + * A refactoring should be marked preferred if it is the most reasonable choice of actions to take. + * + * @since 3.15.0 + */ + IsPreferred bool `json:"isPreferred,omitempty"` + /* + * Marks that the code action cannot currently be applied. + * + * Clients should follow the following guidelines regarding disabled code actions: + * + * - Disabled code actions are not shown in automatic [lightbulbs](https://code.visualstudio.com/docs/editor/editingevolved#_code-action) + * code action menus. + * + * - Disabled actions are shown as faded out in the code action menu when the user requests a more specific type + * of code action, such as refactorings. + * + * - If the user has a [keybinding](https://code.visualstudio.com/docs/editor/refactoring#_keybindings-for-code-actions) + * that auto applies a code action and only disabled code actions are returned, the client should show the user an + * error message with `reason` in the editor. + * + * @since 3.16.0 + */ + Disabled *PDisabledMsg_textDocument_codeAction `json:"disabled,omitempty"` + // The workspace edit this code action performs. + Edit WorkspaceEdit `json:"edit,omitempty"` + /* + * A command this code action executes. If a code action + * provides an edit and a command, first the edit is + * executed and then the command. + */ + Command *Command `json:"command,omitempty"` + /* + * A data entry field that is preserved on a code action between + * a `textDocument/codeAction` and a `codeAction/resolve` request. + * + * @since 3.16.0 + */ + Data interface{} `json:"data,omitempty"` +} + +// The Client Capabilities of a {@link CodeActionRequest}. +type CodeActionClientCapabilities struct { // line 11720 + // Whether code action supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client support code action literals of type `CodeAction` as a valid + * response of the `textDocument/codeAction` request. If the property is not + * set the request can only return `Command` literals. + * + * @since 3.8.0 + */ + CodeActionLiteralSupport PCodeActionLiteralSupportPCodeAction `json:"codeActionLiteralSupport,omitempty"` + /* + * Whether code action supports the `isPreferred` property. + * + * @since 3.15.0 + */ + IsPreferredSupport bool `json:"isPreferredSupport,omitempty"` + /* + * Whether code action supports the `disabled` property. + * + * @since 3.16.0 + */ + DisabledSupport bool `json:"disabledSupport,omitempty"` + /* + * Whether code action supports the `data` property which is + * preserved between a `textDocument/codeAction` and a + * `codeAction/resolve` request. + * + * @since 3.16.0 + */ + DataSupport bool `json:"dataSupport,omitempty"` + /* + * Whether the client supports resolving additional code action + * properties via a separate `codeAction/resolve` request. + * + * @since 3.16.0 + */ + ResolveSupport *PResolveSupportPCodeAction `json:"resolveSupport,omitempty"` + /* + * Whether the client honors the change annotations in + * text edits and resource operations returned via the + * `CodeAction#edit` property by for example presenting + * the workspace edit in the user interface and asking + * for confirmation. + * + * @since 3.16.0 + */ + HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"` +} + +/* + * Contains additional diagnostic information about the context in which + * a {@link CodeActionProvider.provideCodeActions code action} is run. + */ +type CodeActionContext struct { // line 9031 + /* + * An array of diagnostics known on the client side overlapping the range provided to the + * `textDocument/codeAction` request. They are provided so that the server knows which + * errors are currently presented to the user for the given range. There is no guarantee + * that these accurately reflect the error state of the resource. The primary parameter + * to compute code actions is the provided range. + */ + Diagnostics []Diagnostic `json:"diagnostics"` + /* + * Requested kind of actions to return. + * + * Actions not of this kind are filtered out by the client before being shown. So servers + * can omit computing them. + */ + Only []CodeActionKind `json:"only,omitempty"` + /* + * The reason why code actions were requested. + * + * @since 3.17.0 + */ + TriggerKind CodeActionTriggerKind `json:"triggerKind,omitempty"` +} + +// A set of predefined code action kinds +type CodeActionKind string // line 13325 +// Provider options for a {@link CodeActionRequest}. +type CodeActionOptions struct { // line 9070 + /* + * CodeActionKinds that this server may return. + * + * The list of kinds may be generic, such as `CodeActionKind.Refactor`, or the server + * may list out every specific kind they provide. + */ + CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"` + /* + * The server provides support to resolve additional + * information for a code action. + * + * @since 3.16.0 + */ + ResolveProvider bool `json:"resolveProvider,omitempty"` + WorkDoneProgressOptions +} + +// The parameters of a {@link CodeActionRequest}. +type CodeActionParams struct { // line 5307 + // The document in which the command was invoked. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The range for which the command was invoked. + Range Range `json:"range"` + // Context carrying additional information. + Context CodeActionContext `json:"context"` + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link CodeActionRequest}. +type CodeActionRegistrationOptions struct { // line 5475 + TextDocumentRegistrationOptions + CodeActionOptions +} + +/* + * The reason why code actions were requested. + * + * @since 3.17.0 + */ +type CodeActionTriggerKind uint32 // line 13605 +/* + * Structure to capture a description for an error code. + * + * @since 3.16.0 + */ +type CodeDescription struct { // line 10025 + // An URI to open with more information about the diagnostic error. + Href URI `json:"href"` +} + +/* + * A code lens represents a {@link Command command} that should be shown along with + * source text, like the number of references, a way to run tests, etc. + * + * A code lens is _unresolved_ when no command is associated to it. For performance + * reasons the creation of a code lens and resolving should be done in two stages. + */ +type CodeLens struct { // line 5598 + // The range in which this code lens is valid. Should only span a single line. + Range Range `json:"range"` + // The command this code lens represents. + Command Command `json:"command,omitempty"` + /* + * A data entry field that is preserved on a code lens item between + * a {@link CodeLensRequest} and a [CodeLensResolveRequest] + * (#CodeLensResolveRequest) + */ + Data interface{} `json:"data,omitempty"` +} + +// The client capabilities of a {@link CodeLensRequest}. +type CodeLensClientCapabilities struct { // line 11834 + // Whether code lens supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// Code Lens provider options of a {@link CodeLensRequest}. +type CodeLensOptions struct { // line 9126 + // Code lens has a resolve provider as well. + ResolveProvider bool `json:"resolveProvider,omitempty"` + WorkDoneProgressOptions +} + +// The parameters of a {@link CodeLensRequest}. +type CodeLensParams struct { // line 5574 + // The document to request code lens for. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link CodeLensRequest}. +type CodeLensRegistrationOptions struct { // line 5630 + TextDocumentRegistrationOptions + CodeLensOptions +} + +// @since 3.16.0 +type CodeLensWorkspaceClientCapabilities struct { // line 10992 + /* + * Whether the client implementation supports a refresh request sent from the + * server to the client. + * + * Note that this event is global and will force the client to refresh all + * code lenses currently shown. It should be used with absolute care and is + * useful for situation where a server for example detect a project wide + * change that requires such a calculation. + */ + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +// Represents a color in RGBA space. +type Color struct { // line 6432 + // The red component of this color in the range [0-1]. + Red float64 `json:"red"` + // The green component of this color in the range [0-1]. + Green float64 `json:"green"` + // The blue component of this color in the range [0-1]. + Blue float64 `json:"blue"` + // The alpha component of this color in the range [0-1]. + Alpha float64 `json:"alpha"` +} + +// Represents a color range from a document. +type ColorInformation struct { // line 2238 + // The range in the document where this color appears. + Range Range `json:"range"` + // The actual color value for this color range. + Color Color `json:"color"` +} +type ColorPresentation struct { // line 2320 + /* + * The label of this color presentation. It will be shown on the color + * picker header. By default this is also the text that is inserted when selecting + * this color presentation. + */ + Label string `json:"label"` + /* + * An {@link TextEdit edit} which is applied to a document when selecting + * this presentation for the color. When `falsy` the {@link ColorPresentation.label label} + * is used. + */ + TextEdit *TextEdit `json:"textEdit,omitempty"` + /* + * An optional array of additional {@link TextEdit text edits} that are applied when + * selecting this color presentation. Edits must not overlap with the main {@link ColorPresentation.textEdit edit} nor with themselves. + */ + AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"` +} + +// Parameters for a {@link ColorPresentationRequest}. +type ColorPresentationParams struct { // line 2280 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The color to request presentations for. + Color Color `json:"color"` + // The range where the color would be inserted. Serves as a context. + Range Range `json:"range"` + WorkDoneProgressParams + PartialResultParams +} + +/* + * Represents a reference to a command. Provides a title which + * will be used to represent a command in the UI and, optionally, + * an array of arguments which will be passed to the command handler + * function when invoked. + */ +type Command struct { // line 5347 + // Title of the command, like `save`. + Title string `json:"title"` + // The identifier of the actual command handler. + Command string `json:"command"` + /* + * Arguments that the command handler should be + * invoked with. + */ + Arguments []json.RawMessage `json:"arguments,omitempty"` +} + +// Completion client capabilities +type CompletionClientCapabilities struct { // line 11167 + // Whether completion supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client supports the following `CompletionItem` specific + * capabilities. + */ + CompletionItem PCompletionItemPCompletion `json:"completionItem,omitempty"` + CompletionItemKind *PCompletionItemKindPCompletion `json:"completionItemKind,omitempty"` + /* + * Defines how the client handles whitespace and indentation + * when accepting a completion item that uses multi line + * text in either `insertText` or `textEdit`. + * + * @since 3.17.0 + */ + InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` + /* + * The client supports to send additional context information for a + * `textDocument/completion` request. + */ + ContextSupport bool `json:"contextSupport,omitempty"` + /* + * The client supports the following `CompletionList` specific + * capabilities. + * + * @since 3.17.0 + */ + CompletionList *PCompletionListPCompletion `json:"completionList,omitempty"` +} + +// Contains additional information about the context in which a completion request is triggered. +type CompletionContext struct { // line 8627 + // How the completion was triggered. + TriggerKind CompletionTriggerKind `json:"triggerKind"` + /* + * The trigger character (a single character) that has trigger code complete. + * Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter` + */ + TriggerCharacter string `json:"triggerCharacter,omitempty"` +} + +/* + * A completion item represents a text snippet that is + * proposed to complete text that is being typed. + */ +type CompletionItem struct { // line 4527 + /* + * The label of this completion item. + * + * The label property is also by default the text that + * is inserted when selecting this completion. + * + * If label details are provided the label itself should + * be an unqualified name of the completion item. + */ + Label string `json:"label"` + /* + * Additional details for the label + * + * @since 3.17.0 + */ + LabelDetails *CompletionItemLabelDetails `json:"labelDetails,omitempty"` + /* + * The kind of this completion item. Based of the kind + * an icon is chosen by the editor. + */ + Kind CompletionItemKind `json:"kind,omitempty"` + /* + * Tags for this completion item. + * + * @since 3.15.0 + */ + Tags []CompletionItemTag `json:"tags,omitempty"` + /* + * A human-readable string with additional information + * about this item, like type or symbol information. + */ + Detail string `json:"detail,omitempty"` + // A human-readable string that represents a doc-comment. + Documentation *Or_CompletionItem_documentation `json:"documentation,omitempty"` + /* + * Indicates if this item is deprecated. + * @deprecated Use `tags` instead. + */ + Deprecated bool `json:"deprecated,omitempty"` + /* + * Select this item when showing. + * + * *Note* that only one completion item can be selected and that the + * tool / client decides which item that is. The rule is that the *first* + * item of those that match best is selected. + */ + Preselect bool `json:"preselect,omitempty"` + /* + * A string that should be used when comparing this item + * with other items. When `falsy` the {@link CompletionItem.label label} + * is used. + */ + SortText string `json:"sortText,omitempty"` + /* + * A string that should be used when filtering a set of + * completion items. When `falsy` the {@link CompletionItem.label label} + * is used. + */ + FilterText string `json:"filterText,omitempty"` + /* + * A string that should be inserted into a document when selecting + * this completion. When `falsy` the {@link CompletionItem.label label} + * is used. + * + * The `insertText` is subject to interpretation by the client side. + * Some tools might not take the string literally. For example + * VS Code when code complete is requested in this example + * `con` and a completion item with an `insertText` of + * `console` is provided it will only insert `sole`. Therefore it is + * recommended to use `textEdit` instead since it avoids additional client + * side interpretation. + */ + InsertText string `json:"insertText,omitempty"` + /* + * The format of the insert text. The format applies to both the + * `insertText` property and the `newText` property of a provided + * `textEdit`. If omitted defaults to `InsertTextFormat.PlainText`. + * + * Please note that the insertTextFormat doesn't apply to + * `additionalTextEdits`. + */ + InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"` + /* + * How whitespace and indentation is handled during completion + * item insertion. If not provided the clients default value depends on + * the `textDocument.completion.insertTextMode` client capability. + * + * @since 3.16.0 + */ + InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` + /* + * An {@link TextEdit edit} which is applied to a document when selecting + * this completion. When an edit is provided the value of + * {@link CompletionItem.insertText insertText} is ignored. + * + * Most editors support two different operations when accepting a completion + * item. One is to insert a completion text and the other is to replace an + * existing text with a completion text. Since this can usually not be + * predetermined by a server it can report both ranges. Clients need to + * signal support for `InsertReplaceEdits` via the + * `textDocument.completion.insertReplaceSupport` client capability + * property. + * + * *Note 1:* The text edit's range as well as both ranges from an insert + * replace edit must be a [single line] and they must contain the position + * at which completion has been requested. + * *Note 2:* If an `InsertReplaceEdit` is returned the edit's insert range + * must be a prefix of the edit's replace range, that means it must be + * contained and starting at the same position. + * + * @since 3.16.0 additional type `InsertReplaceEdit` + */ + TextEdit *TextEdit `json:"textEdit,omitempty"` + /* + * The edit text used if the completion item is part of a CompletionList and + * CompletionList defines an item default for the text edit range. + * + * Clients will only honor this property if they opt into completion list + * item defaults using the capability `completionList.itemDefaults`. + * + * If not provided and a list's default range is provided the label + * property is used as a text. + * + * @since 3.17.0 + */ + TextEditText string `json:"textEditText,omitempty"` + /* + * An optional array of additional {@link TextEdit text edits} that are applied when + * selecting this completion. Edits must not overlap (including the same insert position) + * with the main {@link CompletionItem.textEdit edit} nor with themselves. + * + * Additional text edits should be used to change text unrelated to the current cursor position + * (for example adding an import statement at the top of the file if the completion item will + * insert an unqualified type). + */ + AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"` + /* + * An optional set of characters that when pressed while this completion is active will accept it first and + * then type that character. *Note* that all commit characters should have `length=1` and that superfluous + * characters will be ignored. + */ + CommitCharacters []string `json:"commitCharacters,omitempty"` + /* + * An optional {@link Command command} that is executed *after* inserting this completion. *Note* that + * additional modifications to the current document should be described with the + * {@link CompletionItem.additionalTextEdits additionalTextEdits}-property. + */ + Command *Command `json:"command,omitempty"` + /* + * A data entry field that is preserved on a completion item between a + * {@link CompletionRequest} and a {@link CompletionResolveRequest}. + */ + Data interface{} `json:"data,omitempty"` +} + +// The kind of a completion entry. +type CompletionItemKind uint32 // line 13133 +/* + * Additional details for a completion item label. + * + * @since 3.17.0 + */ +type CompletionItemLabelDetails struct { // line 8650 + /* + * An optional string which is rendered less prominently directly after {@link CompletionItem.label label}, + * without any spacing. Should be used for function signatures and type annotations. + */ + Detail string `json:"detail,omitempty"` + /* + * An optional string which is rendered less prominently after {@link CompletionItem.detail}. Should be used + * for fully qualified names and file paths. + */ + Description string `json:"description,omitempty"` +} + +/* + * Completion item tags are extra annotations that tweak the rendering of a completion + * item. + * + * @since 3.15.0 + */ +type CompletionItemTag uint32 // line 13243 +/* + * Represents a collection of {@link CompletionItem completion items} to be presented + * in the editor. + */ +type CompletionList struct { // line 4736 + /* + * This list it not complete. Further typing results in recomputing this list. + * + * Recomputed lists have all their items replaced (not appended) in the + * incomplete completion sessions. + */ + IsIncomplete bool `json:"isIncomplete"` + /* + * In many cases the items of an actual completion result share the same + * value for properties like `commitCharacters` or the range of a text + * edit. A completion list can therefore define item defaults which will + * be used if a completion item itself doesn't specify the value. + * + * If a completion list specifies a default value and a completion item + * also specifies a corresponding value the one from the item is used. + * + * Servers are only allowed to return default values if the client + * signals support for this via the `completionList.itemDefaults` + * capability. + * + * @since 3.17.0 + */ + ItemDefaults *PItemDefaultsMsg_textDocument_completion `json:"itemDefaults,omitempty"` + // The completion items. + Items []CompletionItem `json:"items"` +} + +// Completion options. +type CompletionOptions struct { // line 8706 + /* + * Most tools trigger completion request automatically without explicitly requesting + * it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user + * starts to type an identifier. For example if the user types `c` in a JavaScript file + * code complete will automatically pop up present `console` besides others as a + * completion item. Characters that make up identifiers don't need to be listed here. + * + * If code complete should automatically be trigger on characters not being valid inside + * an identifier (for example `.` in JavaScript) list them in `triggerCharacters`. + */ + TriggerCharacters []string `json:"triggerCharacters,omitempty"` + /* + * The list of all possible characters that commit a completion. This field can be used + * if clients don't support individual commit characters per completion item. See + * `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport` + * + * If a server provides both `allCommitCharacters` and commit characters on an individual + * completion item the ones on the completion item win. + * + * @since 3.2.0 + */ + AllCommitCharacters []string `json:"allCommitCharacters,omitempty"` + /* + * The server provides support to resolve additional + * information for a completion item. + */ + ResolveProvider bool `json:"resolveProvider,omitempty"` + /* + * The server supports the following `CompletionItem` specific + * capabilities. + * + * @since 3.17.0 + */ + CompletionItem *PCompletionItemPCompletionProvider `json:"completionItem,omitempty"` + WorkDoneProgressOptions +} + +// Completion parameters +type CompletionParams struct { // line 4496 + /* + * The completion context. This is only available it the client specifies + * to send this using the client capability `textDocument.completion.contextSupport === true` + */ + Context CompletionContext `json:"context,omitempty"` + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link CompletionRequest}. +type CompletionRegistrationOptions struct { // line 4853 + TextDocumentRegistrationOptions + CompletionOptions +} + +// How a completion was triggered +type CompletionTriggerKind uint32 // line 13554 +type ConfigurationItem struct { // line 6395 + // The scope to get the configuration section for. + ScopeURI string `json:"scopeUri,omitempty"` + // The configuration section asked for. + Section string `json:"section,omitempty"` +} + +// The parameters of a configuration request. +type ConfigurationParams struct { // line 2198 + Items []ConfigurationItem `json:"items"` +} + +// Create file operation. +type CreateFile struct { // line 6711 + // A create + Kind string `json:"kind"` + // The resource to create. + URI DocumentURI `json:"uri"` + // Additional options + Options *CreateFileOptions `json:"options,omitempty"` + ResourceOperation +} + +// Options to create a file. +type CreateFileOptions struct { // line 9416 + // Overwrite existing file. Overwrite wins over `ignoreIfExists` + Overwrite bool `json:"overwrite,omitempty"` + // Ignore if exists. + IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` +} + +/* + * The parameters sent in notifications/requests for user-initiated creation of + * files. + * + * @since 3.16.0 + */ +type CreateFilesParams struct { // line 3174 + // An array of all files/folders created in this operation. + Files []FileCreate `json:"files"` +} + +// The declaration of a symbol representation as one or many {@link Location locations}. +type Declaration = []Location // (alias) line 13832 +// @since 3.14.0 +type DeclarationClientCapabilities struct { // line 11508 + /* + * Whether declaration supports dynamic registration. If this is set to `true` + * the client supports the new `DeclarationRegistrationOptions` return value + * for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // The client supports additional metadata in the form of declaration links. + LinkSupport bool `json:"linkSupport,omitempty"` +} + +/* + * Information about where a symbol is declared. + * + * Provides additional metadata over normal {@link Location location} declarations, including the range of + * the declaring symbol. + * + * Servers should prefer returning `DeclarationLink` over `Declaration` if supported + * by the client. + */ +type DeclarationLink = LocationLink // (alias) line 13852 +type DeclarationOptions struct { // line 6490 + WorkDoneProgressOptions +} +type DeclarationParams struct { // line 2493 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} +type DeclarationRegistrationOptions struct { // line 2513 + DeclarationOptions + TextDocumentRegistrationOptions + StaticRegistrationOptions +} + +/* + * The definition of a symbol represented as one or many {@link Location locations}. + * For most programming languages there is only one location at which a symbol is + * defined. + * + * Servers should prefer returning `DefinitionLink` over `Definition` if supported + * by the client. + */ +type Definition = Or_Definition // (alias) line 13750 +// Client Capabilities for a {@link DefinitionRequest}. +type DefinitionClientCapabilities struct { // line 11533 + // Whether definition supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client supports additional metadata in the form of definition links. + * + * @since 3.14.0 + */ + LinkSupport bool `json:"linkSupport,omitempty"` +} + +/* + * Information about where a symbol is defined. + * + * Provides additional metadata over normal {@link Location location} definitions, including the range of + * the defining symbol + */ +type DefinitionLink = LocationLink // (alias) line 13770 +// Server Capabilities for a {@link DefinitionRequest}. +type DefinitionOptions struct { // line 8918 + WorkDoneProgressOptions +} + +// Parameters for a {@link DefinitionRequest}. +type DefinitionParams struct { // line 5017 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link DefinitionRequest}. +type DefinitionRegistrationOptions struct { // line 5038 + TextDocumentRegistrationOptions + DefinitionOptions +} + +// Delete file operation +type DeleteFile struct { // line 6793 + // A delete + Kind string `json:"kind"` + // The file to delete. + URI DocumentURI `json:"uri"` + // Delete options. + Options *DeleteFileOptions `json:"options,omitempty"` + ResourceOperation +} + +// Delete file options +type DeleteFileOptions struct { // line 9464 + // Delete the content recursively if a folder is denoted. + Recursive bool `json:"recursive,omitempty"` + // Ignore the operation if the file doesn't exist. + IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"` +} + +/* + * The parameters sent in notifications/requests for user-initiated deletes of + * files. + * + * @since 3.16.0 + */ +type DeleteFilesParams struct { // line 3299 + // An array of all files/folders deleted in this operation. + Files []FileDelete `json:"files"` +} + +/* + * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects + * are only valid in the scope of a resource. + */ +type Diagnostic struct { // line 8524 + // The range at which the message applies + Range Range `json:"range"` + /* + * The diagnostic's severity. Can be omitted. If omitted it is up to the + * client to interpret diagnostics as error, warning, info or hint. + */ + Severity DiagnosticSeverity `json:"severity,omitempty"` + // The diagnostic's code, which usually appear in the user interface. + Code interface{} `json:"code,omitempty"` + /* + * An optional property to describe the error code. + * Requires the code field (above) to be present/not null. + * + * @since 3.16.0 + */ + CodeDescription *CodeDescription `json:"codeDescription,omitempty"` + /* + * A human-readable string describing the source of this + * diagnostic, e.g. 'typescript' or 'super lint'. It usually + * appears in the user interface. + */ + Source string `json:"source,omitempty"` + // The diagnostic's message. It usually appears in the user interface + Message string `json:"message"` + /* + * Additional metadata about the diagnostic. + * + * @since 3.15.0 + */ + Tags []DiagnosticTag `json:"tags,omitempty"` + /* + * An array of related diagnostic information, e.g. when symbol-names within + * a scope collide all definitions can be marked via this property. + */ + RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"` + /* + * A data entry field that is preserved between a `textDocument/publishDiagnostics` + * notification and `textDocument/codeAction` request. + * + * @since 3.16.0 + */ + Data interface{} `json:"data,omitempty"` +} + +/* + * Client capabilities specific to diagnostic pull requests. + * + * @since 3.17.0 + */ +type DiagnosticClientCapabilities struct { // line 12407 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // Whether the clients supports related documents for document diagnostic pulls. + RelatedDocumentSupport bool `json:"relatedDocumentSupport,omitempty"` +} + +/* + * Diagnostic options. + * + * @since 3.17.0 + */ +type DiagnosticOptions struct { // line 7292 + /* + * An optional identifier under which the diagnostics are + * managed by the client. + */ + Identifier string `json:"identifier,omitempty"` + /* + * Whether the language has inter file dependencies meaning that + * editing code in one file can result in a different diagnostic + * set in another file. Inter file dependencies are common for + * most programming languages and typically uncommon for linters. + */ + InterFileDependencies bool `json:"interFileDependencies"` + // The server provides support for workspace diagnostics as well. + WorkspaceDiagnostics bool `json:"workspaceDiagnostics"` + WorkDoneProgressOptions +} + +/* + * Diagnostic registration options. + * + * @since 3.17.0 + */ +type DiagnosticRegistrationOptions struct { // line 3854 + TextDocumentRegistrationOptions + DiagnosticOptions + StaticRegistrationOptions +} + +/* + * Represents a related message and source code location for a diagnostic. This should be + * used to point to code locations that cause or related to a diagnostics, e.g when duplicating + * a symbol in a scope. + */ +type DiagnosticRelatedInformation struct { // line 10040 + // The location of this related diagnostic information. + Location Location `json:"location"` + // The message of this related diagnostic information. + Message string `json:"message"` +} + +/* + * Cancellation data returned from a diagnostic request. + * + * @since 3.17.0 + */ +type DiagnosticServerCancellationData struct { // line 3840 + RetriggerRequest bool `json:"retriggerRequest"` +} + +// The diagnostic's severity. +type DiagnosticSeverity uint32 // line 13503 +/* + * The diagnostic tags. + * + * @since 3.15.0 + */ +type DiagnosticTag uint32 // line 13533 +/* + * Workspace client capabilities specific to diagnostic pull requests. + * + * @since 3.17.0 + */ +type DiagnosticWorkspaceClientCapabilities struct { // line 11110 + /* + * Whether the client implementation supports a refresh request sent from + * the server to the client. + * + * Note that this event is global and will force the client to refresh all + * pulled diagnostics currently shown. It should be used with absolute care and + * is useful for situation where a server for example detects a project wide + * change that requires such a calculation. + */ + RefreshSupport bool `json:"refreshSupport,omitempty"` +} +type DidChangeConfigurationClientCapabilities struct { // line 10836 + // Did change configuration notification supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// The parameters of a change configuration notification. +type DidChangeConfigurationParams struct { // line 4143 + // The actual changed settings + Settings interface{} `json:"settings"` +} +type DidChangeConfigurationRegistrationOptions struct { // line 4157 + Section *OrPSection_workspace_didChangeConfiguration `json:"section,omitempty"` +} + +/* + * The params sent in a change notebook document notification. + * + * @since 3.17.0 + */ +type DidChangeNotebookDocumentParams struct { // line 3973 + /* + * The notebook document that did change. The version number points + * to the version after all provided changes have been applied. If + * only the text document content of a cell changes the notebook version + * doesn't necessarily have to change. + */ + NotebookDocument VersionedNotebookDocumentIdentifier `json:"notebookDocument"` + /* + * The actual changes to the notebook document. + * + * The changes describe single state changes to the notebook document. + * So if there are two changes c1 (at array index 0) and c2 (at array + * index 1) for a notebook in state S then c1 moves the notebook from + * S to S' and c2 from S' to S''. So c1 is computed on the state S and + * c2 is computed on the state S'. + * + * To mirror the content of a notebook using change events use the following approach: + * - start with the same initial content + * - apply the 'notebookDocument/didChange' notifications in the order you receive them. + * - apply the `NotebookChangeEvent`s in a single notification in the order + * you receive them. + */ + Change NotebookDocumentChangeEvent `json:"change"` +} + +// The change text document notification's parameters. +type DidChangeTextDocumentParams struct { // line 4286 + /* + * The document that did change. The version number points + * to the version after all provided content changes have + * been applied. + */ + TextDocument VersionedTextDocumentIdentifier `json:"textDocument"` + /* + * The actual content changes. The content changes describe single state changes + * to the document. So if there are two content changes c1 (at array index 0) and + * c2 (at array index 1) for a document in state S then c1 moves the document from + * S to S' and c2 from S' to S''. So c1 is computed on the state S and c2 is computed + * on the state S'. + * + * To mirror the content of a document using change events use the following approach: + * - start with the same initial content + * - apply the 'textDocument/didChange' notifications in the order you receive them. + * - apply the `TextDocumentContentChangeEvent`s in a single notification in the order + * you receive them. + */ + ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"` +} +type DidChangeWatchedFilesClientCapabilities struct { // line 10850 + /* + * Did change watched files notification supports dynamic registration. Please note + * that the current protocol doesn't support static configuration for file changes + * from the server side. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Whether the client has support for {@link RelativePattern relative pattern} + * or not. + * + * @since 3.17.0 + */ + RelativePatternSupport bool `json:"relativePatternSupport,omitempty"` +} + +// The watched files change notification's parameters. +type DidChangeWatchedFilesParams struct { // line 4427 + // The actual file events. + Changes []FileEvent `json:"changes"` +} + +// Describe options to be used when registered for text document change events. +type DidChangeWatchedFilesRegistrationOptions struct { // line 4444 + // The watchers to register. + Watchers []FileSystemWatcher `json:"watchers"` +} + +// The parameters of a `workspace/didChangeWorkspaceFolders` notification. +type DidChangeWorkspaceFoldersParams struct { // line 2184 + // The actual workspace folder change event. + Event WorkspaceFoldersChangeEvent `json:"event"` +} + +/* + * The params sent in a close notebook document notification. + * + * @since 3.17.0 + */ +type DidCloseNotebookDocumentParams struct { // line 4011 + // The notebook document that got closed. + NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"` + /* + * The text documents that represent the content + * of a notebook cell that got closed. + */ + CellTextDocuments []TextDocumentIdentifier `json:"cellTextDocuments"` +} + +// The parameters sent in a close text document notification +type DidCloseTextDocumentParams struct { // line 4331 + // The document that was closed. + TextDocument TextDocumentIdentifier `json:"textDocument"` +} + +/* + * The params sent in an open notebook document notification. + * + * @since 3.17.0 + */ +type DidOpenNotebookDocumentParams struct { // line 3947 + // The notebook document that got opened. + NotebookDocument NotebookDocument `json:"notebookDocument"` + /* + * The text documents that represent the content + * of a notebook cell. + */ + CellTextDocuments []TextDocumentItem `json:"cellTextDocuments"` +} + +// The parameters sent in an open text document notification +type DidOpenTextDocumentParams struct { // line 4272 + // The document that was opened. + TextDocument TextDocumentItem `json:"textDocument"` +} + +/* + * The params sent in a save notebook document notification. + * + * @since 3.17.0 + */ +type DidSaveNotebookDocumentParams struct { // line 3996 + // The notebook document that got saved. + NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"` +} + +// The parameters sent in a save text document notification +type DidSaveTextDocumentParams struct { // line 4345 + // The document that was saved. + TextDocument TextDocumentIdentifier `json:"textDocument"` + /* + * Optional the content when saved. Depends on the includeText value + * when the save notification was requested. + */ + Text *string `json:"text,omitempty"` +} +type DocumentColorClientCapabilities struct { // line 11874 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `DocumentColorRegistrationOptions` return value + * for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} +type DocumentColorOptions struct { // line 6470 + WorkDoneProgressOptions +} + +// Parameters for a {@link DocumentColorRequest}. +type DocumentColorParams struct { // line 2214 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} +type DocumentColorRegistrationOptions struct { // line 2260 + TextDocumentRegistrationOptions + DocumentColorOptions + StaticRegistrationOptions +} + +/* + * Parameters of the document diagnostic request. + * + * @since 3.17.0 + */ +type DocumentDiagnosticParams struct { // line 3767 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The additional identifier provided during registration. + Identifier string `json:"identifier,omitempty"` + // The result id of a previous response if provided. + PreviousResultID string `json:"previousResultId,omitempty"` + WorkDoneProgressParams + PartialResultParams +} +type DocumentDiagnosticReport = Or_DocumentDiagnosticReport // (alias) line 13909 +/* + * The document diagnostic report kinds. + * + * @since 3.17.0 + */ +type DocumentDiagnosticReportKind string // line 12721 +/* + * A partial result for a document diagnostic report. + * + * @since 3.17.0 + */ +type DocumentDiagnosticReportPartialResult struct { // line 3810 + RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments"` +} + +/* + * A document filter describes a top level text document or + * a notebook cell document. + * + * @since 3.17.0 - proposed support for NotebookCellTextDocumentFilter. + */ +type DocumentFilter = Or_DocumentFilter // (alias) line 14092 +// Client capabilities of a {@link DocumentFormattingRequest}. +type DocumentFormattingClientCapabilities struct { // line 11888 + // Whether formatting supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// Provider options for a {@link DocumentFormattingRequest}. +type DocumentFormattingOptions struct { // line 9220 + WorkDoneProgressOptions +} + +// The parameters of a {@link DocumentFormattingRequest}. +type DocumentFormattingParams struct { // line 5726 + // The document to format. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The format options. + Options FormattingOptions `json:"options"` + WorkDoneProgressParams +} + +// Registration options for a {@link DocumentFormattingRequest}. +type DocumentFormattingRegistrationOptions struct { // line 5754 + TextDocumentRegistrationOptions + DocumentFormattingOptions +} + +/* + * A document highlight is a range inside a text document which deserves + * special attention. Usually a document highlight is visualized by changing + * the background color of its range. + */ +type DocumentHighlight struct { // line 5118 + // The range this highlight applies to. + Range Range `json:"range"` + // The highlight kind, default is {@link DocumentHighlightKind.Text text}. + Kind DocumentHighlightKind `json:"kind,omitempty"` +} + +// Client Capabilities for a {@link DocumentHighlightRequest}. +type DocumentHighlightClientCapabilities struct { // line 11623 + // Whether document highlight supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// A document highlight kind. +type DocumentHighlightKind uint32 // line 13300 +// Provider options for a {@link DocumentHighlightRequest}. +type DocumentHighlightOptions struct { // line 8954 + WorkDoneProgressOptions +} + +// Parameters for a {@link DocumentHighlightRequest}. +type DocumentHighlightParams struct { // line 5097 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link DocumentHighlightRequest}. +type DocumentHighlightRegistrationOptions struct { // line 5141 + TextDocumentRegistrationOptions + DocumentHighlightOptions +} + +/* + * A document link is a range in a text document that links to an internal or external resource, like another + * text document or a web site. + */ +type DocumentLink struct { // line 5669 + // The range this link applies to. + Range Range `json:"range"` + // The uri this link points to. If missing a resolve request is sent later. + Target string `json:"target,omitempty"` + /* + * The tooltip text when you hover over this link. + * + * If a tooltip is provided, is will be displayed in a string that includes instructions on how to + * trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS, + * user settings, and localization. + * + * @since 3.15.0 + */ + Tooltip string `json:"tooltip,omitempty"` + /* + * A data entry field that is preserved on a document link between a + * DocumentLinkRequest and a DocumentLinkResolveRequest. + */ + Data interface{} `json:"data,omitempty"` +} + +// The client capabilities of a {@link DocumentLinkRequest}. +type DocumentLinkClientCapabilities struct { // line 11849 + // Whether document link supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Whether the client supports the `tooltip` property on `DocumentLink`. + * + * @since 3.15.0 + */ + TooltipSupport bool `json:"tooltipSupport,omitempty"` +} + +// Provider options for a {@link DocumentLinkRequest}. +type DocumentLinkOptions struct { // line 9147 + // Document links have a resolve provider as well. + ResolveProvider bool `json:"resolveProvider,omitempty"` + WorkDoneProgressOptions +} + +// The parameters of a {@link DocumentLinkRequest}. +type DocumentLinkParams struct { // line 5645 + // The document to provide document links for. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link DocumentLinkRequest}. +type DocumentLinkRegistrationOptions struct { // line 5711 + TextDocumentRegistrationOptions + DocumentLinkOptions +} + +// Client capabilities of a {@link DocumentOnTypeFormattingRequest}. +type DocumentOnTypeFormattingClientCapabilities struct { // line 11918 + // Whether on type formatting supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// Provider options for a {@link DocumentOnTypeFormattingRequest}. +type DocumentOnTypeFormattingOptions struct { // line 9242 + // A character on which formatting should be triggered, like `{`. + FirstTriggerCharacter string `json:"firstTriggerCharacter"` + // More trigger characters. + MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"` +} + +// The parameters of a {@link DocumentOnTypeFormattingRequest}. +type DocumentOnTypeFormattingParams struct { // line 5820 + // The document to format. + TextDocument TextDocumentIdentifier `json:"textDocument"` + /* + * The position around which the on type formatting should happen. + * This is not necessarily the exact position where the character denoted + * by the property `ch` got typed. + */ + Position Position `json:"position"` + /* + * The character that has been typed that triggered the formatting + * on type request. That is not necessarily the last character that + * got inserted into the document since the client could auto insert + * characters as well (e.g. like automatic brace completion). + */ + Ch string `json:"ch"` + // The formatting options. + Options FormattingOptions `json:"options"` +} + +// Registration options for a {@link DocumentOnTypeFormattingRequest}. +type DocumentOnTypeFormattingRegistrationOptions struct { // line 5858 + TextDocumentRegistrationOptions + DocumentOnTypeFormattingOptions +} + +// Client capabilities of a {@link DocumentRangeFormattingRequest}. +type DocumentRangeFormattingClientCapabilities struct { // line 11903 + // Whether range formatting supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// Provider options for a {@link DocumentRangeFormattingRequest}. +type DocumentRangeFormattingOptions struct { // line 9231 + WorkDoneProgressOptions +} + +// The parameters of a {@link DocumentRangeFormattingRequest}. +type DocumentRangeFormattingParams struct { // line 5769 + // The document to format. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The range to format + Range Range `json:"range"` + // The format options + Options FormattingOptions `json:"options"` + WorkDoneProgressParams +} + +// Registration options for a {@link DocumentRangeFormattingRequest}. +type DocumentRangeFormattingRegistrationOptions struct { // line 5805 + TextDocumentRegistrationOptions + DocumentRangeFormattingOptions +} + +/* + * A document selector is the combination of one or many document filters. + * + * @sample `let sel:DocumentSelector = [{ language: 'typescript' }, { language: 'json', pattern: '**āˆ•tsconfig.json' }]`; + * + * The use of a string as a document filter is deprecated @since 3.16.0. + */ +type DocumentSelector = []DocumentFilter // (alias) line 13947 +/* + * Represents programming constructs like variables, classes, interfaces etc. + * that appear in a document. Document symbols can be hierarchical and they + * have two ranges: one that encloses its definition and one that points to + * its most interesting range, e.g. the range of an identifier. + */ +type DocumentSymbol struct { // line 5210 + /* + * The name of this symbol. Will be displayed in the user interface and therefore must not be + * an empty string or a string only consisting of white spaces. + */ + Name string `json:"name"` + // More detail for this symbol, e.g the signature of a function. + Detail string `json:"detail,omitempty"` + // The kind of this symbol. + Kind SymbolKind `json:"kind"` + /* + * Tags for this document symbol. + * + * @since 3.16.0 + */ + Tags []SymbolTag `json:"tags,omitempty"` + /* + * Indicates if this symbol is deprecated. + * + * @deprecated Use tags instead + */ + Deprecated bool `json:"deprecated,omitempty"` + /* + * The range enclosing this symbol not including leading/trailing whitespace but everything else + * like comments. This information is typically used to determine if the clients cursor is + * inside the symbol to reveal in the symbol in the UI. + */ + Range Range `json:"range"` + /* + * The range that should be selected and revealed when this symbol is being picked, e.g the name of a function. + * Must be contained by the `range`. + */ + SelectionRange Range `json:"selectionRange"` + // Children of this symbol, e.g. properties of a class. + Children []DocumentSymbol `json:"children,omitempty"` +} + +// Client Capabilities for a {@link DocumentSymbolRequest}. +type DocumentSymbolClientCapabilities struct { // line 11638 + // Whether document symbol supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Specific capabilities for the `SymbolKind` in the + * `textDocument/documentSymbol` request. + */ + SymbolKind *PSymbolKindPDocumentSymbol `json:"symbolKind,omitempty"` + // The client supports hierarchical document symbols. + HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"` + /* + * The client supports tags on `SymbolInformation`. Tags are supported on + * `DocumentSymbol` if `hierarchicalDocumentSymbolSupport` is set to true. + * Clients supporting tags have to handle unknown tags gracefully. + * + * @since 3.16.0 + */ + TagSupport *PTagSupportPDocumentSymbol `json:"tagSupport,omitempty"` + /* + * The client supports an additional label presented in the UI when + * registering a document symbol provider. + * + * @since 3.16.0 + */ + LabelSupport bool `json:"labelSupport,omitempty"` +} + +// Provider options for a {@link DocumentSymbolRequest}. +type DocumentSymbolOptions struct { // line 9009 + /* + * A human-readable string that is shown when multiple outlines trees + * are shown for the same document. + * + * @since 3.16.0 + */ + Label string `json:"label,omitempty"` + WorkDoneProgressOptions +} + +// Parameters for a {@link DocumentSymbolRequest}. +type DocumentSymbolParams struct { // line 5156 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link DocumentSymbolRequest}. +type DocumentSymbolRegistrationOptions struct { // line 5292 + TextDocumentRegistrationOptions + DocumentSymbolOptions +} +type DocumentURI string + +// Predefined error codes. +type ErrorCodes int32 // line 12742 +// The client capabilities of a {@link ExecuteCommandRequest}. +type ExecuteCommandClientCapabilities struct { // line 10961 + // Execute command supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// The server capabilities of a {@link ExecuteCommandRequest}. +type ExecuteCommandOptions struct { // line 9290 + // The commands to be executed on the server + Commands []string `json:"commands"` + WorkDoneProgressOptions +} + +// The parameters of a {@link ExecuteCommandRequest}. +type ExecuteCommandParams struct { // line 5940 + // The identifier of the actual command handler. + Command string `json:"command"` + // Arguments that the command should be invoked with. + Arguments []json.RawMessage `json:"arguments,omitempty"` + WorkDoneProgressParams +} + +// Registration options for a {@link ExecuteCommandRequest}. +type ExecuteCommandRegistrationOptions struct { // line 5972 + ExecuteCommandOptions +} +type ExecutionSummary struct { // line 10161 + /* + * A strict monotonically increasing value + * indicating the execution order of a cell + * inside a notebook. + */ + ExecutionOrder uint32 `json:"executionOrder"` + /* + * Whether the execution was successful or + * not if known by the client. + */ + Success bool `json:"success,omitempty"` +} + +// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_cells_Elem) +type FCellsPNotebookSelector struct { // line 9830 + Language string `json:"language"` +} + +// created for Literal (Lit_CodeActionClientCapabilities_codeActionLiteralSupport_codeActionKind) +type FCodeActionKindPCodeActionLiteralSupport struct { // line 11741 + /* + * The code action kind values the client supports. When this + * property exists the client also guarantees that it will + * handle values outside its set gracefully and falls back + * to a default value when unknown. + */ + ValueSet []CodeActionKind `json:"valueSet"` +} + +// created for Literal (Lit_CompletionList_itemDefaults_editRange_Item1) +type FEditRangePItemDefaults struct { // line 4776 + Insert Range `json:"insert"` + Replace Range `json:"replace"` +} + +// created for Literal (Lit_SemanticTokensClientCapabilities_requests_full_Item1) +type FFullPRequests struct { // line 12204 + /* + * The client will send the `textDocument/semanticTokens/full/delta` request if + * the server provides a corresponding handler. + */ + Delta bool `json:"delta"` +} + +// created for Literal (Lit_CompletionClientCapabilities_completionItem_insertTextModeSupport) +type FInsertTextModeSupportPCompletionItem struct { // line 11294 + ValueSet []InsertTextMode `json:"valueSet"` +} + +// created for Literal (Lit_SignatureHelpClientCapabilities_signatureInformation_parameterInformation) +type FParameterInformationPSignatureInformation struct { // line 11460 + /* + * The client supports processing label offsets instead of a + * simple label string. + * + * @since 3.14.0 + */ + LabelOffsetSupport bool `json:"labelOffsetSupport"` +} + +// created for Literal (Lit_SemanticTokensClientCapabilities_requests_range_Item1) +type FRangePRequests struct { // line 12184 +} + +// created for Literal (Lit_CompletionClientCapabilities_completionItem_resolveSupport) +type FResolveSupportPCompletionItem struct { // line 11270 + // The properties that a client can resolve lazily. + Properties []string `json:"properties"` +} + +// created for Literal (Lit_NotebookDocumentChangeEvent_cells_structure) +type FStructurePCells struct { // line 7486 + // The change to the cell array. + Array NotebookCellArrayChange `json:"array"` + // Additional opened cell text documents. + DidOpen []TextDocumentItem `json:"didOpen"` + // Additional closed cell text documents. + DidClose []TextDocumentIdentifier `json:"didClose"` +} + +// created for Literal (Lit_CompletionClientCapabilities_completionItem_tagSupport) +type FTagSupportPCompletionItem struct { // line 11236 + // The tags supported by the client. + ValueSet []CompletionItemTag `json:"valueSet"` +} + +// created for Literal (Lit_NotebookDocumentChangeEvent_cells_textContent_Elem) +type FTextContentPCells struct { // line 7544 + Document VersionedTextDocumentIdentifier `json:"document"` + Changes []TextDocumentContentChangeEvent `json:"changes"` +} +type FailureHandlingKind string // line 13692 +// The file event type +type FileChangeType uint32 // line 13453 +/* + * Represents information on a file/folder create. + * + * @since 3.16.0 + */ +type FileCreate struct { // line 6661 + // A file:// URI for the location of the file/folder being created. + URI string `json:"uri"` +} + +/* + * Represents information on a file/folder delete. + * + * @since 3.16.0 + */ +type FileDelete struct { // line 6910 + // A file:// URI for the location of the file/folder being deleted. + URI string `json:"uri"` +} + +// An event describing a file change. +type FileEvent struct { // line 8479 + // The file's uri. + URI DocumentURI `json:"uri"` + // The change type. + Type FileChangeType `json:"type"` +} + +/* + * Capabilities relating to events from file operations by the user in the client. + * + * These events do not come from the file system, they come from user operations + * like renaming a file in the UI. + * + * @since 3.16.0 + */ +type FileOperationClientCapabilities struct { // line 11008 + // Whether the client supports dynamic registration for file requests/notifications. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // The client has support for sending didCreateFiles notifications. + DidCreate bool `json:"didCreate,omitempty"` + // The client has support for sending willCreateFiles requests. + WillCreate bool `json:"willCreate,omitempty"` + // The client has support for sending didRenameFiles notifications. + DidRename bool `json:"didRename,omitempty"` + // The client has support for sending willRenameFiles requests. + WillRename bool `json:"willRename,omitempty"` + // The client has support for sending didDeleteFiles notifications. + DidDelete bool `json:"didDelete,omitempty"` + // The client has support for sending willDeleteFiles requests. + WillDelete bool `json:"willDelete,omitempty"` +} + +/* + * A filter to describe in which file operation requests or notifications + * the server is interested in receiving. + * + * @since 3.16.0 + */ +type FileOperationFilter struct { // line 6863 + // A Uri scheme like `file` or `untitled`. + Scheme string `json:"scheme,omitempty"` + // The actual file operation pattern. + Pattern FileOperationPattern `json:"pattern"` +} + +/* + * Options for notifications/requests for user operations on files. + * + * @since 3.16.0 + */ +type FileOperationOptions struct { // line 9964 + // The server is interested in receiving didCreateFiles notifications. + DidCreate *FileOperationRegistrationOptions `json:"didCreate,omitempty"` + // The server is interested in receiving willCreateFiles requests. + WillCreate *FileOperationRegistrationOptions `json:"willCreate,omitempty"` + // The server is interested in receiving didRenameFiles notifications. + DidRename *FileOperationRegistrationOptions `json:"didRename,omitempty"` + // The server is interested in receiving willRenameFiles requests. + WillRename *FileOperationRegistrationOptions `json:"willRename,omitempty"` + // The server is interested in receiving didDeleteFiles file notifications. + DidDelete *FileOperationRegistrationOptions `json:"didDelete,omitempty"` + // The server is interested in receiving willDeleteFiles file requests. + WillDelete *FileOperationRegistrationOptions `json:"willDelete,omitempty"` +} + +/* + * A pattern to describe in which file operation requests or notifications + * the server is interested in receiving. + * + * @since 3.16.0 + */ +type FileOperationPattern struct { // line 9488 + /* + * The glob pattern to match. Glob patterns can have the following syntax: + * - `*` to match one or more characters in a path segment + * - `?` to match on one character in a path segment + * - `**` to match any number of path segments, including none + * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) + * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) + * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) + */ + Glob string `json:"glob"` + /* + * Whether to match files or folders with this pattern. + * + * Matches both if undefined. + */ + Matches FileOperationPatternKind `json:"matches,omitempty"` + // Additional options used during matching. + Options *FileOperationPatternOptions `json:"options,omitempty"` +} + +/* + * A pattern kind describing if a glob pattern matches a file a folder or + * both. + * + * @since 3.16.0 + */ +type FileOperationPatternKind string // line 13626 +/* + * Matching options for the file operation pattern. + * + * @since 3.16.0 + */ +type FileOperationPatternOptions struct { // line 10145 + // The pattern should be matched ignoring casing. + IgnoreCase bool `json:"ignoreCase,omitempty"` +} + +/* + * The options to register for file operations. + * + * @since 3.16.0 + */ +type FileOperationRegistrationOptions struct { // line 3263 + // The actual filters. + Filters []FileOperationFilter `json:"filters"` +} + +/* + * Represents information on a file/folder rename. + * + * @since 3.16.0 + */ +type FileRename struct { // line 6887 + // A file:// URI for the original location of the file/folder being renamed. + OldURI string `json:"oldUri"` + // A file:// URI for the new location of the file/folder being renamed. + NewURI string `json:"newUri"` +} +type FileSystemWatcher struct { // line 8501 + /* + * The glob pattern to watch. See {@link GlobPattern glob pattern} for more detail. + * + * @since 3.17.0 support for relative patterns. + */ + GlobPattern GlobPattern `json:"globPattern"` + /* + * The kind of events of interest. If omitted it defaults + * to WatchKind.Create | WatchKind.Change | WatchKind.Delete + * which is 7. + */ + Kind WatchKind `json:"kind,omitempty"` +} + +/* + * Represents a folding range. To be valid, start and end line must be bigger than zero and smaller + * than the number of lines in the document. Clients are free to ignore invalid ranges. + */ +type FoldingRange struct { // line 2414 + /* + * The zero-based start line of the range to fold. The folded area starts after the line's last character. + * To be valid, the end must be zero or larger and smaller than the number of lines in the document. + */ + StartLine uint32 `json:"startLine"` + // The zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line. + StartCharacter uint32 `json:"startCharacter,omitempty"` + /* + * The zero-based end line of the range to fold. The folded area ends with the line's last character. + * To be valid, the end must be zero or larger and smaller than the number of lines in the document. + */ + EndLine uint32 `json:"endLine"` + // The zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line. + EndCharacter uint32 `json:"endCharacter,omitempty"` + /* + * Describes the kind of the folding range such as `comment' or 'region'. The kind + * is used to categorize folding ranges and used by commands like 'Fold all comments'. + * See {@link FoldingRangeKind} for an enumeration of standardized kinds. + */ + Kind string `json:"kind,omitempty"` + /* + * The text that the client should show when the specified range is + * collapsed. If not defined or not supported by the client, a default + * will be chosen by the client. + * + * @since 3.17.0 + */ + CollapsedText string `json:"collapsedText,omitempty"` +} +type FoldingRangeClientCapabilities struct { // line 11977 + /* + * Whether implementation supports dynamic registration for folding range + * providers. If this is set to `true` the client supports the new + * `FoldingRangeRegistrationOptions` return value for the corresponding + * server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The maximum number of folding ranges that the client prefers to receive + * per document. The value serves as a hint, servers are free to follow the + * limit. + */ + RangeLimit uint32 `json:"rangeLimit,omitempty"` + /* + * If set, the client signals that it only supports folding complete lines. + * If set, client will ignore specified `startCharacter` and `endCharacter` + * properties in a FoldingRange. + */ + LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"` + /* + * Specific options for the folding range kind. + * + * @since 3.17.0 + */ + FoldingRangeKind *PFoldingRangeKindPFoldingRange `json:"foldingRangeKind,omitempty"` + /* + * Specific options for the folding range. + * + * @since 3.17.0 + */ + FoldingRange *PFoldingRangePFoldingRange `json:"foldingRange,omitempty"` +} + +// A set of predefined range kinds. +type FoldingRangeKind string // line 12814 +type FoldingRangeOptions struct { // line 6480 + WorkDoneProgressOptions +} + +// Parameters for a {@link FoldingRangeRequest}. +type FoldingRangeParams struct { // line 2390 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} +type FoldingRangeRegistrationOptions struct { // line 2473 + TextDocumentRegistrationOptions + FoldingRangeOptions + StaticRegistrationOptions +} + +// Value-object describing what options formatting should use. +type FormattingOptions struct { // line 9168 + // Size of a tab in spaces. + TabSize uint32 `json:"tabSize"` + // Prefer spaces over tabs. + InsertSpaces bool `json:"insertSpaces"` + /* + * Trim trailing whitespace on a line. + * + * @since 3.15.0 + */ + TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"` + /* + * Insert a newline character at the end of the file if one does not exist. + * + * @since 3.15.0 + */ + InsertFinalNewline bool `json:"insertFinalNewline,omitempty"` + /* + * Trim all newlines after the final newline at the end of the file. + * + * @since 3.15.0 + */ + TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"` +} + +/* + * A diagnostic report with a full set of problems. + * + * @since 3.17.0 + */ +type FullDocumentDiagnosticReport struct { // line 7234 + // A full document diagnostic report. + Kind string `json:"kind"` + /* + * An optional result id. If provided it will + * be sent on the next diagnostic request for the + * same document. + */ + ResultID string `json:"resultId,omitempty"` + // The actual items. + Items []Diagnostic `json:"items"` +} + +/* + * General client capabilities. + * + * @since 3.16.0 + */ +type GeneralClientCapabilities struct { // line 10663 + /* + * Client capability that signals how the client + * handles stale requests (e.g. a request + * for which the client will not process the response + * anymore since the information is outdated). + * + * @since 3.17.0 + */ + StaleRequestSupport *PStaleRequestSupportPGeneral `json:"staleRequestSupport,omitempty"` + /* + * Client capabilities specific to regular expressions. + * + * @since 3.16.0 + */ + RegularExpressions *RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"` + /* + * Client capabilities specific to the client's markdown parser. + * + * @since 3.16.0 + */ + Markdown *MarkdownClientCapabilities `json:"markdown,omitempty"` + /* + * The position encodings supported by the client. Client and server + * have to agree on the same position encoding to ensure that offsets + * (e.g. character position in a line) are interpreted the same on both + * sides. + * + * To keep the protocol backwards compatible the following applies: if + * the value 'utf-16' is missing from the array of position encodings + * servers can assume that the client supports UTF-16. UTF-16 is + * therefore a mandatory encoding. + * + * If omitted it defaults to ['utf-16']. + * + * Implementation considerations: since the conversion from one encoding + * into another requires the content of the file / line the conversion + * is best done where the file is read which is usually on the server + * side. + * + * @since 3.17.0 + */ + PositionEncodings []PositionEncodingKind `json:"positionEncodings,omitempty"` +} + +/* + * The glob pattern. Either a string pattern or a relative pattern. + * + * @since 3.17.0 + */ +type GlobPattern = string // (alias) line 14126 +// The result of a hover request. +type Hover struct { // line 4885 + // The hover's content + Contents MarkupContent `json:"contents"` + /* + * An optional range inside the text document that is used to + * visualize the hover, e.g. by changing the background color. + */ + Range Range `json:"range,omitempty"` +} +type HoverClientCapabilities struct { // line 11401 + // Whether hover supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Client supports the following content formats for the content + * property. The order describes the preferred format of the client. + */ + ContentFormat []MarkupKind `json:"contentFormat,omitempty"` +} + +// Hover options. +type HoverOptions struct { // line 8775 + WorkDoneProgressOptions +} + +// Parameters for a {@link HoverRequest}. +type HoverParams struct { // line 4868 + TextDocumentPositionParams + WorkDoneProgressParams +} + +// Registration options for a {@link HoverRequest}. +type HoverRegistrationOptions struct { // line 4924 + TextDocumentRegistrationOptions + HoverOptions +} + +// @since 3.6.0 +type ImplementationClientCapabilities struct { // line 11582 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `ImplementationRegistrationOptions` return value + * for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client supports additional metadata in the form of definition links. + * + * @since 3.14.0 + */ + LinkSupport bool `json:"linkSupport,omitempty"` +} +type ImplementationOptions struct { // line 6332 + WorkDoneProgressOptions +} +type ImplementationParams struct { // line 2062 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} +type ImplementationRegistrationOptions struct { // line 2102 + TextDocumentRegistrationOptions + ImplementationOptions + StaticRegistrationOptions +} + +/* + * The data type of the ResponseError if the + * initialize request fails. + */ +type InitializeError struct { // line 4125 + /* + * Indicates whether the client execute the following retry logic: + * (1) show the message provided by the ResponseError to the user + * (2) user selects retry or cancel + * (3) if user selected retry the initialize method is sent again. + */ + Retry bool `json:"retry"` +} +type InitializeParams struct { // line 4067 + XInitializeParams + WorkspaceFoldersInitializeParams +} + +// The result returned from an initialize request. +type InitializeResult struct { // line 4081 + // The capabilities the language server provides. + Capabilities ServerCapabilities `json:"capabilities"` + /* + * Information about the server. + * + * @since 3.15.0 + */ + ServerInfo PServerInfoMsg_initialize `json:"serverInfo,omitempty"` +} +type InitializedParams struct { // line 4139 +} + +/* + * Inlay hint information. + * + * @since 3.17.0 + */ +type InlayHint struct { // line 3644 + // The position of this hint. + Position *Position `json:"position"` + /* + * The label of this hint. A human readable string or an array of + * InlayHintLabelPart label parts. + * + * *Note* that neither the string nor the label part can be empty. + */ + Label []InlayHintLabelPart `json:"label"` + /* + * The kind of this hint. Can be omitted in which case the client + * should fall back to a reasonable default. + */ + Kind InlayHintKind `json:"kind,omitempty"` + /* + * Optional text edits that are performed when accepting this inlay hint. + * + * *Note* that edits are expected to change the document so that the inlay + * hint (or its nearest variant) is now part of the document and the inlay + * hint itself is now obsolete. + */ + TextEdits []TextEdit `json:"textEdits,omitempty"` + // The tooltip text when you hover over this item. + Tooltip *OrPTooltip_textDocument_inlayHint `json:"tooltip,omitempty"` + /* + * Render padding before the hint. + * + * Note: Padding should use the editor's background color, not the + * background color of the hint itself. That means padding can be used + * to visually align/separate an inlay hint. + */ + PaddingLeft bool `json:"paddingLeft,omitempty"` + /* + * Render padding after the hint. + * + * Note: Padding should use the editor's background color, not the + * background color of the hint itself. That means padding can be used + * to visually align/separate an inlay hint. + */ + PaddingRight bool `json:"paddingRight,omitempty"` + /* + * A data entry field that is preserved on an inlay hint between + * a `textDocument/inlayHint` and a `inlayHint/resolve` request. + */ + Data interface{} `json:"data,omitempty"` +} + +/* + * Inlay hint client capabilities. + * + * @since 3.17.0 + */ +type InlayHintClientCapabilities struct { // line 12368 + // Whether inlay hints support dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Indicates which properties a client can resolve lazily on an inlay + * hint. + */ + ResolveSupport *PResolveSupportPInlayHint `json:"resolveSupport,omitempty"` +} + +/* + * Inlay hint kinds. + * + * @since 3.17.0 + */ +type InlayHintKind uint32 // line 13032 +/* + * An inlay hint label part allows for interactive and composite labels + * of inlay hints. + * + * @since 3.17.0 + */ +type InlayHintLabelPart struct { // line 7061 + // The value of this label part. + Value string `json:"value"` + /* + * The tooltip text when you hover over this label part. Depending on + * the client capability `inlayHint.resolveSupport` clients might resolve + * this property late using the resolve request. + */ + Tooltip *OrPTooltipPLabel `json:"tooltip,omitempty"` + /* + * An optional source code location that represents this + * label part. + * + * The editor will use this location for the hover and for code navigation + * features: This part will become a clickable link that resolves to the + * definition of the symbol at the given location (not necessarily the + * location itself), it shows the hover that shows at the given location, + * and it shows a context menu with further code navigation commands. + * + * Depending on the client capability `inlayHint.resolveSupport` clients + * might resolve this property late using the resolve request. + */ + Location *Location `json:"location,omitempty"` + /* + * An optional command for this label part. + * + * Depending on the client capability `inlayHint.resolveSupport` clients + * might resolve this property late using the resolve request. + */ + Command *Command `json:"command,omitempty"` +} + +/* + * Inlay hint options used during static registration. + * + * @since 3.17.0 + */ +type InlayHintOptions struct { // line 7134 + /* + * The server provides support to resolve additional + * information for an inlay hint item. + */ + ResolveProvider bool `json:"resolveProvider,omitempty"` + WorkDoneProgressOptions +} + +/* + * A parameter literal used in inlay hint requests. + * + * @since 3.17.0 + */ +type InlayHintParams struct { // line 3615 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The document range for which inlay hints should be computed. + Range Range `json:"range"` + WorkDoneProgressParams +} + +/* + * Inlay hint options used during static or dynamic registration. + * + * @since 3.17.0 + */ +type InlayHintRegistrationOptions struct { // line 3745 + InlayHintOptions + TextDocumentRegistrationOptions + StaticRegistrationOptions +} + +/* + * Client workspace capabilities specific to inlay hints. + * + * @since 3.17.0 + */ +type InlayHintWorkspaceClientCapabilities struct { // line 11094 + /* + * Whether the client implementation supports a refresh request sent from + * the server to the client. + * + * Note that this event is global and will force the client to refresh all + * inlay hints currently shown. It should be used with absolute care and + * is useful for situation where a server for example detects a project wide + * change that requires such a calculation. + */ + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +/* + * Inline value information can be provided by different means: + * - directly as a text value (class InlineValueText). + * - as a name to use for a variable lookup (class InlineValueVariableLookup) + * - as an evaluatable expression (class InlineValueEvaluatableExpression) + * The InlineValue types combines all inline value types into one type. + * + * @since 3.17.0 + */ +type InlineValue = Or_InlineValue // (alias) line 13860 +/* + * Client capabilities specific to inline values. + * + * @since 3.17.0 + */ +type InlineValueClientCapabilities struct { // line 12352 + // Whether implementation supports dynamic registration for inline value providers. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// @since 3.17.0 +type InlineValueContext struct { // line 6947 + // The stack frame (as a DAP Id) where the execution has stopped. + FrameID int32 `json:"frameId"` + /* + * The document range where execution has stopped. + * Typically the end position of the range denotes the line where the inline values are shown. + */ + StoppedLocation Range `json:"stoppedLocation"` +} + +/* + * Provide an inline value through an expression evaluation. + * If only a range is specified, the expression will be extracted from the underlying document. + * An optional expression can be used to override the extracted expression. + * + * @since 3.17.0 + */ +type InlineValueEvaluatableExpression struct { // line 7025 + /* + * The document range for which the inline value applies. + * The range is used to extract the evaluatable expression from the underlying document. + */ + Range Range `json:"range"` + // If specified the expression overrides the extracted expression. + Expression string `json:"expression,omitempty"` +} + +/* + * Inline value options used during static registration. + * + * @since 3.17.0 + */ +type InlineValueOptions struct { // line 7049 + WorkDoneProgressOptions +} + +/* + * A parameter literal used in inline value requests. + * + * @since 3.17.0 + */ +type InlineValueParams struct { // line 3556 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The document range for which inline values should be computed. + Range Range `json:"range"` + /* + * Additional information about the context in which inline values were + * requested. + */ + Context InlineValueContext `json:"context"` + WorkDoneProgressParams +} + +/* + * Inline value options used during static or dynamic registration. + * + * @since 3.17.0 + */ +type InlineValueRegistrationOptions struct { // line 3593 + InlineValueOptions + TextDocumentRegistrationOptions + StaticRegistrationOptions +} + +/* + * Provide inline value as text. + * + * @since 3.17.0 + */ +type InlineValueText struct { // line 6970 + // The document range for which the inline value applies. + Range Range `json:"range"` + // The text of the inline value. + Text string `json:"text"` +} + +/* + * Provide inline value through a variable lookup. + * If only a range is specified, the variable name will be extracted from the underlying document. + * An optional variable name can be used to override the extracted name. + * + * @since 3.17.0 + */ +type InlineValueVariableLookup struct { // line 6993 + /* + * The document range for which the inline value applies. + * The range is used to extract the variable name from the underlying document. + */ + Range Range `json:"range"` + // If specified the name of the variable to look up. + VariableName string `json:"variableName,omitempty"` + // How to perform the lookup. + CaseSensitiveLookup bool `json:"caseSensitiveLookup"` +} + +/* + * Client workspace capabilities specific to inline values. + * + * @since 3.17.0 + */ +type InlineValueWorkspaceClientCapabilities struct { // line 11078 + /* + * Whether the client implementation supports a refresh request sent from the + * server to the client. + * + * Note that this event is global and will force the client to refresh all + * inline values currently shown. It should be used with absolute care and is + * useful for situation where a server for example detects a project wide + * change that requires such a calculation. + */ + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +/* + * A special text edit to provide an insert and a replace operation. + * + * @since 3.16.0 + */ +type InsertReplaceEdit struct { // line 8675 + // The string to be inserted. + NewText string `json:"newText"` + // The range if the insert is requested + Insert Range `json:"insert"` + // The range if the replace is requested. + Replace Range `json:"replace"` +} + +/* + * Defines whether the insert text in a completion item should be interpreted as + * plain text or a snippet. + */ +type InsertTextFormat uint32 // line 13259 +/* + * How whitespace and indentation is handled during completion + * item insertion. + * + * @since 3.16.0 + */ +type InsertTextMode uint32 // line 13279 +type LSPAny = interface{} + +/* + * LSP arrays. + * @since 3.17.0 + */ +type LSPArray = []interface{} // (alias) line 13778 +type LSPErrorCodes int32 // line 12782 +/* + * LSP object definition. + * @since 3.17.0 + */ +type LSPObject = map[string]LSPAny // (alias) line 14110 +/* + * Client capabilities for the linked editing range request. + * + * @since 3.16.0 + */ +type LinkedEditingRangeClientCapabilities struct { // line 12304 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} +type LinkedEditingRangeOptions struct { // line 6651 + WorkDoneProgressOptions +} +type LinkedEditingRangeParams struct { // line 3111 + TextDocumentPositionParams + WorkDoneProgressParams +} +type LinkedEditingRangeRegistrationOptions struct { // line 3154 + TextDocumentRegistrationOptions + LinkedEditingRangeOptions + StaticRegistrationOptions +} + +/* + * The result of a linked editing range request. + * + * @since 3.16.0 + */ +type LinkedEditingRanges struct { // line 3127 + /* + * A list of ranges that can be edited together. The ranges must have + * identical length and contain identical text content. The ranges cannot overlap. + */ + Ranges []Range `json:"ranges"` + /* + * An optional word pattern (regular expression) that describes valid contents for + * the given ranges. If no pattern is provided, the client configuration's word + * pattern will be used. + */ + WordPattern string `json:"wordPattern,omitempty"` +} + +// created for Literal (Lit_NotebookDocumentFilter_Item1) +type Lit_NotebookDocumentFilter_Item1 struct { // line 14292 + // The type of the enclosing notebook. + NotebookType string `json:"notebookType,omitempty"` + // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`. + Scheme string `json:"scheme"` + // A glob pattern. + Pattern string `json:"pattern,omitempty"` +} + +// created for Literal (Lit_NotebookDocumentFilter_Item2) +type Lit_NotebookDocumentFilter_Item2 struct { // line 14325 + // The type of the enclosing notebook. + NotebookType string `json:"notebookType,omitempty"` + // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`. + Scheme string `json:"scheme,omitempty"` + // A glob pattern. + Pattern string `json:"pattern"` +} + +// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1) +type Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1 struct { // line 9851 + /* + * The notebook to be synced If a string + * value is provided it matches against the + * notebook type. '*' matches every notebook. + */ + Notebook *Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook `json:"notebook,omitempty"` + // The cells of the matching notebook to be synced. + Cells []Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem `json:"cells"` +} + +// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem) +type Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem struct { // line 9877 + Language string `json:"language"` +} + +// created for Literal (Lit_PrepareRenameResult_Item2) +type Lit_PrepareRenameResult_Item2 struct { // line 13931 + DefaultBehavior bool `json:"defaultBehavior"` +} + +// created for Literal (Lit_TextDocumentContentChangeEvent_Item1) +type Lit_TextDocumentContentChangeEvent_Item1 struct { // line 14039 + // The new text of the whole document. + Text string `json:"text"` +} + +// created for Literal (Lit_TextDocumentFilter_Item2) +type Lit_TextDocumentFilter_Item2 struct { // line 14216 + // A language id, like `typescript`. + Language string `json:"language,omitempty"` + // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`. + Scheme string `json:"scheme,omitempty"` + // A glob pattern, like `*.{ts,js}`. + Pattern string `json:"pattern"` +} + +/* + * Represents a location inside a resource, such as a line + * inside a text file. + */ +type Location struct { // line 2082 + URI DocumentURI `json:"uri"` + Range Range `json:"range"` +} + +/* + * Represents the connection of two locations. Provides additional metadata over normal {@link Location locations}, + * including an origin range. + */ +type LocationLink struct { // line 6271 + /* + * Span of the origin of this link. + * + * Used as the underlined span for mouse interaction. Defaults to the word range at + * the definition position. + */ + OriginSelectionRange *Range `json:"originSelectionRange,omitempty"` + // The target resource identifier of this link. + TargetURI DocumentURI `json:"targetUri"` + /* + * The full target range of this link. If the target for example is a symbol then target range is the + * range enclosing this symbol not including leading/trailing whitespace but everything else + * like comments. This information is typically used to highlight the range in the editor. + */ + TargetRange Range `json:"targetRange"` + /* + * The range that should be selected and revealed when this link is being followed, e.g the name of a function. + * Must be contained by the `targetRange`. See also `DocumentSymbol#range` + */ + TargetSelectionRange Range `json:"targetSelectionRange"` +} + +// The log message parameters. +type LogMessageParams struct { // line 4250 + // The message type. See {@link MessageType} + Type MessageType `json:"type"` + // The actual message. + Message string `json:"message"` +} +type LogTraceParams struct { // line 6158 + Message string `json:"message"` + Verbose string `json:"verbose,omitempty"` +} + +/* + * Client capabilities specific to the used markdown parser. + * + * @since 3.16.0 + */ +type MarkdownClientCapabilities struct { // line 12523 + // The name of the parser. + Parser string `json:"parser"` + // The version of the parser. + Version string `json:"version,omitempty"` + /* + * A list of HTML tags that the client allows / supports in + * Markdown. + * + * @since 3.17.0 + */ + AllowedTags []string `json:"allowedTags,omitempty"` +} + +/* + * MarkedString can be used to render human readable text. It is either a markdown string + * or a code-block that provides a language and a code snippet. The language identifier + * is semantically equal to the optional language identifier in fenced code blocks in GitHub + * issues. See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting + * + * The pair of a language and a value is an equivalent to markdown: + * ```${language} + * ${value} + * ``` + * + * Note that markdown strings will be sanitized - that means html will be escaped. + * @deprecated use MarkupContent instead. + */ +type MarkedString = Or_MarkedString // (alias) line 14057 +/* + * A `MarkupContent` literal represents a string value which content is interpreted base on its + * kind flag. Currently the protocol supports `plaintext` and `markdown` as markup kinds. + * + * If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues. + * See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting + * + * Here is an example how such a string can be constructed using JavaScript / TypeScript: + * ```ts + * let markdown: MarkdownContent = { + * kind: MarkupKind.Markdown, + * value: [ + * '# Header', + * 'Some text', + * '```typescript', + * 'someCode();', + * '```' + * ].join('\n') + * }; + * ``` + * + * *Please Note* that clients might sanitize the return markdown. A client could decide to + * remove HTML from the markdown to avoid script execution. + */ +type MarkupContent struct { // line 7112 + // The type of the Markup + Kind MarkupKind `json:"kind"` + // The content itself + Value string `json:"value"` +} + +/* + * Describes the content type that a client supports in various + * result literals like `Hover`, `ParameterInfo` or `CompletionItem`. + * + * Please note that `MarkupKinds` must not start with a `$`. This kinds + * are reserved for internal usage. + */ +type MarkupKind string // line 13406 +type MessageActionItem struct { // line 4237 + // A short title like 'Retry', 'Open Log' etc. + Title string `json:"title"` +} + +// The message type +type MessageType uint32 // line 13053 +/* + * Moniker definition to match LSIF 0.5 moniker definition. + * + * @since 3.16.0 + */ +type Moniker struct { // line 3337 + // The scheme of the moniker. For example tsc or .Net + Scheme string `json:"scheme"` + /* + * The identifier of the moniker. The value is opaque in LSIF however + * schema owners are allowed to define the structure if they want. + */ + Identifier string `json:"identifier"` + // The scope in which the moniker is unique + Unique UniquenessLevel `json:"unique"` + // The moniker kind if known. + Kind MonikerKind `json:"kind,omitempty"` +} + +/* + * Client capabilities specific to the moniker request. + * + * @since 3.16.0 + */ +type MonikerClientCapabilities struct { // line 12320 + /* + * Whether moniker supports dynamic registration. If this is set to `true` + * the client supports the new `MonikerRegistrationOptions` return value + * for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +/* + * The moniker kind. + * + * @since 3.16.0 + */ +type MonikerKind string // line 13006 +type MonikerOptions struct { // line 6925 + WorkDoneProgressOptions +} +type MonikerParams struct { // line 3317 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} +type MonikerRegistrationOptions struct { // line 3377 + TextDocumentRegistrationOptions + MonikerOptions +} + +// created for Literal (Lit_MarkedString_Item1) +type Msg_MarkedString struct { // line 14067 + Language string `json:"language"` + Value string `json:"value"` +} + +// created for Literal (Lit_NotebookDocumentFilter_Item0) +type Msg_NotebookDocumentFilter struct { // line 14259 + // The type of the enclosing notebook. + NotebookType string `json:"notebookType"` + // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`. + Scheme string `json:"scheme"` + // A glob pattern. + Pattern string `json:"pattern"` +} + +// created for Literal (Lit_PrepareRenameResult_Item1) +type Msg_PrepareRename2Gn struct { // line 13910 + Range Range `json:"range"` + Placeholder string `json:"placeholder"` +} + +// created for Literal (Lit_TextDocumentContentChangeEvent_Item0) +type Msg_TextDocumentContentChangeEvent struct { // line 14007 + // The range of the document that changed. + Range *Range `json:"range"` + /* + * The optional length of the range that got replaced. + * + * @deprecated use range instead. + */ + RangeLength uint32 `json:"rangeLength"` + // The new text for the provided range. + Text string `json:"text"` +} + +// created for Literal (Lit_TextDocumentFilter_Item0) +type Msg_TextDocumentFilter struct { // line 14150 + // A language id, like `typescript`. + Language string `json:"language"` + // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`. + Scheme string `json:"scheme"` + // A glob pattern, like `*.{ts,js}`. + Pattern string `json:"pattern"` +} + +// created for Literal (Lit__InitializeParams_clientInfo) +type Msg_XInitializeParams_clientInfo struct { // line 7672 + // The name of the client as defined by the client. + Name string `json:"name"` + // The client's version as defined by the client. + Version string `json:"version"` +} + +/* + * A notebook cell. + * + * A cell's document URI must be unique across ALL notebook + * cells and can therefore be used to uniquely identify a + * notebook cell or the cell's text document. + * + * @since 3.17.0 + */ +type NotebookCell struct { // line 9597 + // The cell's kind + Kind NotebookCellKind `json:"kind"` + /* + * The URI of the cell's text document + * content. + */ + Document DocumentURI `json:"document"` + /* + * Additional metadata stored with the cell. + * + * Note: should always be an object literal (e.g. LSPObject) + */ + Metadata *LSPObject `json:"metadata,omitempty"` + /* + * Additional execution summary information + * if supported by the client. + */ + ExecutionSummary *ExecutionSummary `json:"executionSummary,omitempty"` +} + +/* + * A change describing how to move a `NotebookCell` + * array from state S to S'. + * + * @since 3.17.0 + */ +type NotebookCellArrayChange struct { // line 9638 + // The start oftest of the cell that changed. + Start uint32 `json:"start"` + // The deleted cells + DeleteCount uint32 `json:"deleteCount"` + // The new cells, if any + Cells []NotebookCell `json:"cells,omitempty"` +} + +/* + * A notebook cell kind. + * + * @since 3.17.0 + */ +type NotebookCellKind uint32 // line 13647 +/* + * A notebook cell text document filter denotes a cell text + * document by different properties. + * + * @since 3.17.0 + */ +type NotebookCellTextDocumentFilter struct { // line 10112 + /* + * A filter that matches against the notebook + * containing the notebook cell. If a string + * value is provided it matches against the + * notebook type. '*' matches every notebook. + */ + Notebook NotebookDocumentFilter `json:"notebook"` + /* + * A language id like `python`. + * + * Will be matched against the language id of the + * notebook cell document. '*' matches every language. + */ + Language string `json:"language,omitempty"` +} + +/* + * A notebook document. + * + * @since 3.17.0 + */ +type NotebookDocument struct { // line 7353 + // The notebook document's uri. + URI URI `json:"uri"` + // The type of the notebook. + NotebookType string `json:"notebookType"` + /* + * The version number of this document (it will increase after each + * change, including undo/redo). + */ + Version int32 `json:"version"` + /* + * Additional metadata stored with the notebook + * document. + * + * Note: should always be an object literal (e.g. LSPObject) + */ + Metadata *LSPObject `json:"metadata,omitempty"` + // The cells of a notebook. + Cells []NotebookCell `json:"cells"` +} + +/* + * A change event for a notebook document. + * + * @since 3.17.0 + */ +type NotebookDocumentChangeEvent struct { // line 7465 + /* + * The changed meta data if any. + * + * Note: should always be an object literal (e.g. LSPObject) + */ + Metadata *LSPObject `json:"metadata,omitempty"` + // Changes to cells + Cells *PCellsPChange `json:"cells,omitempty"` +} + +/* + * Capabilities specific to the notebook document support. + * + * @since 3.17.0 + */ +type NotebookDocumentClientCapabilities struct { // line 10612 + /* + * Capabilities specific to notebook document synchronization + * + * @since 3.17.0 + */ + Synchronization NotebookDocumentSyncClientCapabilities `json:"synchronization"` +} + +/* + * A notebook document filter denotes a notebook document by + * different properties. The properties will be match + * against the notebook's URI (same as with documents) + * + * @since 3.17.0 + */ +type NotebookDocumentFilter = Msg_NotebookDocumentFilter // (alias) line 14253 +/* + * A literal to identify a notebook document in the client. + * + * @since 3.17.0 + */ +type NotebookDocumentIdentifier struct { // line 7581 + // The notebook document's uri. + URI URI `json:"uri"` +} + +/* + * Notebook specific client capabilities. + * + * @since 3.17.0 + */ +type NotebookDocumentSyncClientCapabilities struct { // line 12432 + /* + * Whether implementation supports dynamic registration. If this is + * set to `true` the client supports the new + * `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // The client supports sending execution summary data per cell. + ExecutionSummarySupport bool `json:"executionSummarySupport,omitempty"` +} + +/* + * Options specific to a notebook plus its cells + * to be synced to the server. + * + * If a selector provides a notebook document + * filter but no cell selector all cells of a + * matching notebook document will be synced. + * + * If a selector provides no notebook document + * filter but only a cell selector all notebook + * document that contain at least one matching + * cell will be synced. + * + * @since 3.17.0 + */ +type NotebookDocumentSyncOptions struct { // line 9794 + // The notebooks to be synced + NotebookSelector []PNotebookSelectorPNotebookDocumentSync `json:"notebookSelector"` + /* + * Whether save notification should be forwarded to + * the server. Will only be honored if mode === `notebook`. + */ + Save bool `json:"save,omitempty"` +} + +/* + * Registration options specific to a notebook. + * + * @since 3.17.0 + */ +type NotebookDocumentSyncRegistrationOptions struct { // line 9914 + NotebookDocumentSyncOptions + StaticRegistrationOptions +} + +// A text document identifier to optionally denote a specific version of a text document. +type OptionalVersionedTextDocumentIdentifier struct { // line 9342 + /* + * The version number of this document. If a versioned text document identifier + * is sent from the server to the client and the file is not open in the editor + * (the server has not received an open notification before) the server can send + * `null` to indicate that the version is unknown and the content on disk is the + * truth (as specified with document content ownership). + */ + Version int32 `json:"version"` + TextDocumentIdentifier +} + +// created for Or [FEditRangePItemDefaults Range] +type OrFEditRangePItemDefaults struct { // line 4769 + Value interface{} `json:"value"` +} + +// created for Or [NotebookDocumentFilter string] +type OrFNotebookPNotebookSelector struct { // line 9811 + Value interface{} `json:"value"` +} + +// created for Or [Location PLocationMsg_workspace_symbol] +type OrPLocation_workspace_symbol struct { // line 5520 + Value interface{} `json:"value"` +} + +// created for Or [[]string string] +type OrPSection_workspace_didChangeConfiguration struct { // line 4163 + Value interface{} `json:"value"` +} + +// created for Or [MarkupContent string] +type OrPTooltipPLabel struct { // line 7075 + Value interface{} `json:"value"` +} + +// created for Or [MarkupContent string] +type OrPTooltip_textDocument_inlayHint struct { // line 3699 + Value interface{} `json:"value"` +} + +// created for Or [int32 string] +type Or_CancelParams_id struct { // line 6184 + Value interface{} `json:"value"` +} + +// created for Or [MarkupContent string] +type Or_CompletionItem_documentation struct { // line 4582 + Value interface{} `json:"value"` +} + +// created for Or [InsertReplaceEdit TextEdit] +type Or_CompletionItem_textEdit struct { // line 4665 + Value interface{} `json:"value"` +} + +// created for Or [Location []Location] +type Or_Definition struct { // line 13753 + Value interface{} `json:"value"` +} + +// created for Or [int32 string] +type Or_Diagnostic_code struct { // line 8547 + Value interface{} `json:"value"` +} + +// created for Or [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport] +type Or_DocumentDiagnosticReport struct { // line 13885 + Value interface{} `json:"value"` +} + +// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport] +type Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value struct { // line 3822 + Value interface{} `json:"value"` +} + +// created for Or [NotebookCellTextDocumentFilter TextDocumentFilter] +type Or_DocumentFilter struct { // line 14095 + Value interface{} `json:"value"` +} + +// created for Or [MarkedString MarkupContent []MarkedString] +type Or_Hover_contents struct { // line 4891 + Value interface{} `json:"value"` +} + +// created for Or [[]InlayHintLabelPart string] +type Or_InlayHint_label struct { // line 3658 + Value interface{} `json:"value"` +} + +// created for Or [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup] +type Or_InlineValue struct { // line 13863 + Value interface{} `json:"value"` +} + +// created for Or [Msg_MarkedString string] +type Or_MarkedString struct { // line 14060 + Value interface{} `json:"value"` +} + +// created for Or [NotebookDocumentFilter string] +type Or_NotebookCellTextDocumentFilter_notebook struct { // line 10118 + Value interface{} `json:"value"` +} + +// created for Or [NotebookDocumentFilter string] +type Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook struct { // line 9857 + Value interface{} `json:"value"` +} + +// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport] +type Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value struct { // line 7168 + Value interface{} `json:"value"` +} + +// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport] +type Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value struct { // line 7207 + Value interface{} `json:"value"` +} + +// created for Or [URI WorkspaceFolder] +type Or_RelativePattern_baseUri struct { // line 10741 + Value interface{} `json:"value"` +} + +// created for Or [CodeAction Command] +type Or_Result_textDocument_codeAction_Item0_Elem struct { // line 1371 + Value interface{} `json:"value"` +} + +// created for Or [FFullPRequests bool] +type Or_SemanticTokensClientCapabilities_requests_full struct { // line 12197 + Value interface{} `json:"value"` +} + +// created for Or [FRangePRequests bool] +type Or_SemanticTokensClientCapabilities_requests_range struct { // line 12177 + Value interface{} `json:"value"` +} + +// created for Or [PFullESemanticTokensOptions bool] +type Or_SemanticTokensOptions_full struct { // line 6579 + Value interface{} `json:"value"` +} + +// created for Or [PRangeESemanticTokensOptions bool] +type Or_SemanticTokensOptions_range struct { // line 6559 + Value interface{} `json:"value"` +} + +// created for Or [CallHierarchyOptions CallHierarchyRegistrationOptions bool] +type Or_ServerCapabilities_callHierarchyProvider struct { // line 8227 + Value interface{} `json:"value"` +} + +// created for Or [CodeActionOptions bool] +type Or_ServerCapabilities_codeActionProvider struct { // line 8035 + Value interface{} `json:"value"` +} + +// created for Or [DocumentColorOptions DocumentColorRegistrationOptions bool] +type Or_ServerCapabilities_colorProvider struct { // line 8071 + Value interface{} `json:"value"` +} + +// created for Or [DeclarationOptions DeclarationRegistrationOptions bool] +type Or_ServerCapabilities_declarationProvider struct { // line 7897 + Value interface{} `json:"value"` +} + +// created for Or [DefinitionOptions bool] +type Or_ServerCapabilities_definitionProvider struct { // line 7919 + Value interface{} `json:"value"` +} + +// created for Or [DiagnosticOptions DiagnosticRegistrationOptions] +type Or_ServerCapabilities_diagnosticProvider struct { // line 8384 + Value interface{} `json:"value"` +} + +// created for Or [DocumentFormattingOptions bool] +type Or_ServerCapabilities_documentFormattingProvider struct { // line 8111 + Value interface{} `json:"value"` +} + +// created for Or [DocumentHighlightOptions bool] +type Or_ServerCapabilities_documentHighlightProvider struct { // line 7999 + Value interface{} `json:"value"` +} + +// created for Or [DocumentRangeFormattingOptions bool] +type Or_ServerCapabilities_documentRangeFormattingProvider struct { // line 8129 + Value interface{} `json:"value"` +} + +// created for Or [DocumentSymbolOptions bool] +type Or_ServerCapabilities_documentSymbolProvider struct { // line 8017 + Value interface{} `json:"value"` +} + +// created for Or [FoldingRangeOptions FoldingRangeRegistrationOptions bool] +type Or_ServerCapabilities_foldingRangeProvider struct { // line 8174 + Value interface{} `json:"value"` +} + +// created for Or [HoverOptions bool] +type Or_ServerCapabilities_hoverProvider struct { // line 7870 + Value interface{} `json:"value"` +} + +// created for Or [ImplementationOptions ImplementationRegistrationOptions bool] +type Or_ServerCapabilities_implementationProvider struct { // line 7959 + Value interface{} `json:"value"` +} + +// created for Or [InlayHintOptions InlayHintRegistrationOptions bool] +type Or_ServerCapabilities_inlayHintProvider struct { // line 8361 + Value interface{} `json:"value"` +} + +// created for Or [InlineValueOptions InlineValueRegistrationOptions bool] +type Or_ServerCapabilities_inlineValueProvider struct { // line 8338 + Value interface{} `json:"value"` +} + +// created for Or [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool] +type Or_ServerCapabilities_linkedEditingRangeProvider struct { // line 8250 + Value interface{} `json:"value"` +} + +// created for Or [MonikerOptions MonikerRegistrationOptions bool] +type Or_ServerCapabilities_monikerProvider struct { // line 8292 + Value interface{} `json:"value"` +} + +// created for Or [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions] +type Or_ServerCapabilities_notebookDocumentSync struct { // line 7842 + Value interface{} `json:"value"` +} + +// created for Or [ReferenceOptions bool] +type Or_ServerCapabilities_referencesProvider struct { // line 7981 + Value interface{} `json:"value"` +} + +// created for Or [RenameOptions bool] +type Or_ServerCapabilities_renameProvider struct { // line 8156 + Value interface{} `json:"value"` +} + +// created for Or [SelectionRangeOptions SelectionRangeRegistrationOptions bool] +type Or_ServerCapabilities_selectionRangeProvider struct { // line 8196 + Value interface{} `json:"value"` +} + +// created for Or [SemanticTokensOptions SemanticTokensRegistrationOptions] +type Or_ServerCapabilities_semanticTokensProvider struct { // line 8273 + Value interface{} `json:"value"` +} + +// created for Or [TextDocumentSyncKind TextDocumentSyncOptions] +type Or_ServerCapabilities_textDocumentSync struct { // line 7824 + Value interface{} `json:"value"` +} + +// created for Or [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool] +type Or_ServerCapabilities_typeDefinitionProvider struct { // line 7937 + Value interface{} `json:"value"` +} + +// created for Or [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool] +type Or_ServerCapabilities_typeHierarchyProvider struct { // line 8315 + Value interface{} `json:"value"` +} + +// created for Or [WorkspaceSymbolOptions bool] +type Or_ServerCapabilities_workspaceSymbolProvider struct { // line 8093 + Value interface{} `json:"value"` +} + +// created for Or [MarkupContent string] +type Or_SignatureInformation_documentation struct { // line 8841 + Value interface{} `json:"value"` +} + +// created for Or [AnnotatedTextEdit TextEdit] +type Or_TextDocumentEdit_edits_Elem struct { // line 6692 + Value interface{} `json:"value"` +} + +// created for Or [SaveOptions bool] +type Or_TextDocumentSyncOptions_save struct { // line 9777 + Value interface{} `json:"value"` +} + +// created for Or [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport] +type Or_WorkspaceDocumentDiagnosticReport struct { // line 13986 + Value interface{} `json:"value"` +} + +// created for Or [CreateFile DeleteFile RenameFile TextDocumentEdit] +type Or_WorkspaceEdit_documentChanges_Elem struct { // line 3219 + Value interface{} `json:"value"` +} + +// created for Or [Declaration []DeclarationLink] +type Or_textDocument_declaration struct { // line 248 + Value interface{} `json:"value"` +} + +// created for Literal (Lit_NotebookDocumentChangeEvent_cells) +type PCellsPChange struct { // line 7480 + /* + * Changes to the cell structure to add or + * remove cells. + */ + Structure FStructurePCells `json:"structure"` + /* + * Changes to notebook cells properties like its + * kind, execution summary or metadata. + */ + Data []NotebookCell `json:"data"` + // Changes to the text content of notebook cells. + TextContent []FTextContentPCells `json:"textContent"` +} + +// created for Literal (Lit_WorkspaceEditClientCapabilities_changeAnnotationSupport) +type PChangeAnnotationSupportPWorkspaceEdit struct { // line 10815 + /* + * Whether the client groups edits with equal labels into tree nodes, + * for instance all edits labelled with "Changes in Strings" would + * be a tree node. + */ + GroupsOnLabel bool `json:"groupsOnLabel"` +} + +// created for Literal (Lit_CodeActionClientCapabilities_codeActionLiteralSupport) +type PCodeActionLiteralSupportPCodeAction struct { // line 11735 + /* + * The code action kind is support with the following value + * set. + */ + CodeActionKind FCodeActionKindPCodeActionLiteralSupport `json:"codeActionKind"` +} + +// created for Literal (Lit_CompletionClientCapabilities_completionItemKind) +type PCompletionItemKindPCompletion struct { // line 11333 + /* + * The completion item kind values the client supports. When this + * property exists the client also guarantees that it will + * handle values outside its set gracefully and falls back + * to a default value when unknown. + * + * If this property is not present the client only supports + * the completion items kinds from `Text` to `Reference` as defined in + * the initial version of the protocol. + */ + ValueSet []CompletionItemKind `json:"valueSet"` +} + +// created for Literal (Lit_CompletionClientCapabilities_completionItem) +type PCompletionItemPCompletion struct { // line 11182 + /* + * Client supports snippets as insert text. + * + * A snippet can define tab stops and placeholders with `$1`, `$2` + * and `${3:foo}`. `$0` defines the final tab stop, it defaults to + * the end of the snippet. Placeholders with equal identifiers are linked, + * that is typing in one will update others too. + */ + SnippetSupport bool `json:"snippetSupport"` + // Client supports commit characters on a completion item. + CommitCharactersSupport bool `json:"commitCharactersSupport"` + /* + * Client supports the following content formats for the documentation + * property. The order describes the preferred format of the client. + */ + DocumentationFormat []MarkupKind `json:"documentationFormat"` + // Client supports the deprecated property on a completion item. + DeprecatedSupport bool `json:"deprecatedSupport"` + // Client supports the preselect property on a completion item. + PreselectSupport bool `json:"preselectSupport"` + /* + * Client supports the tag property on a completion item. Clients supporting + * tags have to handle unknown tags gracefully. Clients especially need to + * preserve unknown tags when sending a completion item back to the server in + * a resolve call. + * + * @since 3.15.0 + */ + TagSupport FTagSupportPCompletionItem `json:"tagSupport"` + /* + * Client support insert replace edit to control different behavior if a + * completion item is inserted in the text or should replace text. + * + * @since 3.16.0 + */ + InsertReplaceSupport bool `json:"insertReplaceSupport"` + /* + * Indicates which properties a client can resolve lazily on a completion + * item. Before version 3.16.0 only the predefined properties `documentation` + * and `details` could be resolved lazily. + * + * @since 3.16.0 + */ + ResolveSupport FResolveSupportPCompletionItem `json:"resolveSupport"` + /* + * The client supports the `insertTextMode` property on + * a completion item to override the whitespace handling mode + * as defined by the client (see `insertTextMode`). + * + * @since 3.16.0 + */ + InsertTextModeSupport FInsertTextModeSupportPCompletionItem `json:"insertTextModeSupport"` + /* + * The client has support for completion item label + * details (see also `CompletionItemLabelDetails`). + * + * @since 3.17.0 + */ + LabelDetailsSupport bool `json:"labelDetailsSupport"` +} + +// created for Literal (Lit_CompletionOptions_completionItem) +type PCompletionItemPCompletionProvider struct { // line 8746 + /* + * The server has support for completion item label + * details (see also `CompletionItemLabelDetails`) when + * receiving a completion item in a resolve call. + * + * @since 3.17.0 + */ + LabelDetailsSupport bool `json:"labelDetailsSupport"` +} + +// created for Literal (Lit_CompletionClientCapabilities_completionList) +type PCompletionListPCompletion struct { // line 11375 + /* + * The client supports the following itemDefaults on + * a completion list. + * + * The value lists the supported property names of the + * `CompletionList.itemDefaults` object. If omitted + * no properties are supported. + * + * @since 3.17.0 + */ + ItemDefaults []string `json:"itemDefaults"` +} + +// created for Literal (Lit_CodeAction_disabled) +type PDisabledMsg_textDocument_codeAction struct { // line 5426 + /* + * Human readable description of why the code action is currently disabled. + * + * This is displayed in the code actions UI. + */ + Reason string `json:"reason"` +} + +// created for Literal (Lit_FoldingRangeClientCapabilities_foldingRangeKind) +type PFoldingRangeKindPFoldingRange struct { // line 12010 + /* + * The folding range kind values the client supports. When this + * property exists the client also guarantees that it will + * handle values outside its set gracefully and falls back + * to a default value when unknown. + */ + ValueSet []FoldingRangeKind `json:"valueSet"` +} + +// created for Literal (Lit_FoldingRangeClientCapabilities_foldingRange) +type PFoldingRangePFoldingRange struct { // line 12035 + /* + * If set, the client signals that it supports setting collapsedText on + * folding ranges to display custom labels instead of the default text. + * + * @since 3.17.0 + */ + CollapsedText bool `json:"collapsedText"` +} + +// created for Literal (Lit_SemanticTokensOptions_full_Item1) +type PFullESemanticTokensOptions struct { // line 6586 + // The server supports deltas for full documents. + Delta bool `json:"delta"` +} + +// created for Literal (Lit_CompletionList_itemDefaults) +type PItemDefaultsMsg_textDocument_completion struct { // line 4750 + /* + * A default commit character set. + * + * @since 3.17.0 + */ + CommitCharacters []string `json:"commitCharacters"` + /* + * A default edit range. + * + * @since 3.17.0 + */ + EditRange OrFEditRangePItemDefaults `json:"editRange"` + /* + * A default insert text format. + * + * @since 3.17.0 + */ + InsertTextFormat InsertTextFormat `json:"insertTextFormat"` + /* + * A default insert text mode. + * + * @since 3.17.0 + */ + InsertTextMode InsertTextMode `json:"insertTextMode"` + /* + * A default data value. + * + * @since 3.17.0 + */ + Data interface{} `json:"data"` +} + +// created for Literal (Lit_WorkspaceSymbol_location_Item1) +type PLocationMsg_workspace_symbol struct { // line 5527 + URI DocumentURI `json:"uri"` +} + +// created for Literal (Lit_ShowMessageRequestClientCapabilities_messageActionItem) +type PMessageActionItemPShowMessage struct { // line 12463 + /* + * Whether the client supports additional attributes which + * are preserved and send back to the server in the + * request's response. + */ + AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport"` +} + +// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0) +type PNotebookSelectorPNotebookDocumentSync struct { // line 9805 + /* + * The notebook to be synced If a string + * value is provided it matches against the + * notebook type. '*' matches every notebook. + */ + Notebook OrFNotebookPNotebookSelector `json:"notebook"` + // The cells of the matching notebook to be synced. + Cells []FCellsPNotebookSelector `json:"cells"` +} + +// created for Literal (Lit_SemanticTokensOptions_range_Item1) +type PRangeESemanticTokensOptions struct { // line 6566 +} + +// created for Literal (Lit_SemanticTokensClientCapabilities_requests) +type PRequestsPSemanticTokens struct { // line 12171 + /* + * The client will send the `textDocument/semanticTokens/range` request if + * the server provides a corresponding handler. + */ + Range bool `json:"range"` + /* + * The client will send the `textDocument/semanticTokens/full` request if + * the server provides a corresponding handler. + */ + Full interface{} `json:"full"` +} + +// created for Literal (Lit_CodeActionClientCapabilities_resolveSupport) +type PResolveSupportPCodeAction struct { // line 11800 + // The properties that a client can resolve lazily. + Properties []string `json:"properties"` +} + +// created for Literal (Lit_InlayHintClientCapabilities_resolveSupport) +type PResolveSupportPInlayHint struct { // line 12383 + // The properties that a client can resolve lazily. + Properties []string `json:"properties"` +} + +// created for Literal (Lit_WorkspaceSymbolClientCapabilities_resolveSupport) +type PResolveSupportPSymbol struct { // line 10937 + /* + * The properties that a client can resolve lazily. Usually + * `location.range` + */ + Properties []string `json:"properties"` +} + +// created for Literal (Lit_InitializeResult_serverInfo) +type PServerInfoMsg_initialize struct { // line 4095 + // The name of the server as defined by the server. + Name string `json:"name"` + // The server's version as defined by the server. + Version string `json:"version"` +} + +// created for Literal (Lit_SignatureHelpClientCapabilities_signatureInformation) +type PSignatureInformationPSignatureHelp struct { // line 11442 + /* + * Client supports the following content formats for the documentation + * property. The order describes the preferred format of the client. + */ + DocumentationFormat []MarkupKind `json:"documentationFormat"` + // Client capabilities specific to parameter information. + ParameterInformation FParameterInformationPSignatureInformation `json:"parameterInformation"` + /* + * The client supports the `activeParameter` property on `SignatureInformation` + * literal. + * + * @since 3.16.0 + */ + ActiveParameterSupport bool `json:"activeParameterSupport"` +} + +// created for Literal (Lit_GeneralClientCapabilities_staleRequestSupport) +type PStaleRequestSupportPGeneral struct { // line 10669 + // The client will actively cancel the request. + Cancel bool `json:"cancel"` + /* + * The list of requests for which the client + * will retry the request if it receives a + * response with error code `ContentModified` + */ + RetryOnContentModified []string `json:"retryOnContentModified"` +} + +// created for Literal (Lit_DocumentSymbolClientCapabilities_symbolKind) +type PSymbolKindPDocumentSymbol struct { // line 11653 + /* + * The symbol kind values the client supports. When this + * property exists the client also guarantees that it will + * handle values outside its set gracefully and falls back + * to a default value when unknown. + * + * If this property is not present the client only supports + * the symbol kinds from `File` to `Array` as defined in + * the initial version of the protocol. + */ + ValueSet []SymbolKind `json:"valueSet"` +} + +// created for Literal (Lit_WorkspaceSymbolClientCapabilities_symbolKind) +type PSymbolKindPSymbol struct { // line 10889 + /* + * The symbol kind values the client supports. When this + * property exists the client also guarantees that it will + * handle values outside its set gracefully and falls back + * to a default value when unknown. + * + * If this property is not present the client only supports + * the symbol kinds from `File` to `Array` as defined in + * the initial version of the protocol. + */ + ValueSet []SymbolKind `json:"valueSet"` +} + +// created for Literal (Lit_DocumentSymbolClientCapabilities_tagSupport) +type PTagSupportPDocumentSymbol struct { // line 11686 + // The tags supported by the client. + ValueSet []SymbolTag `json:"valueSet"` +} + +// created for Literal (Lit_PublishDiagnosticsClientCapabilities_tagSupport) +type PTagSupportPPublishDiagnostics struct { // line 12086 + // The tags supported by the client. + ValueSet []DiagnosticTag `json:"valueSet"` +} + +// created for Literal (Lit_WorkspaceSymbolClientCapabilities_tagSupport) +type PTagSupportPSymbol struct { // line 10913 + // The tags supported by the client. + ValueSet []SymbolTag `json:"valueSet"` +} + +// The parameters of a configuration request. +type ParamConfiguration struct { // line 2198 + Items []ConfigurationItem `json:"items"` +} +type ParamInitialize struct { // line 4067 + XInitializeParams + WorkspaceFoldersInitializeParams +} + +/* + * Represents a parameter of a callable-signature. A parameter can + * have a label and a doc-comment. + */ +type ParameterInformation struct { // line 10062 + /* + * The label of this parameter information. + * + * Either a string or an inclusive start and exclusive end offsets within its containing + * signature label. (see SignatureInformation.label). The offsets are based on a UTF-16 + * string representation as `Position` and `Range` does. + * + * *Note*: a label of type string should be a substring of its containing signature label. + * Its intended use case is to highlight the parameter label part in the `SignatureInformation.label`. + */ + Label string `json:"label"` + /* + * The human-readable doc-comment of this parameter. Will be shown + * in the UI but can be omitted. + */ + Documentation string `json:"documentation,omitempty"` +} +type PartialResultParams struct { // line 6257 + /* + * An optional token that a server can use to report partial results (e.g. streaming) to + * the client. + */ + PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` +} + +/* + * The glob pattern to watch relative to the base path. Glob patterns can have the following syntax: + * - `*` to match one or more characters in a path segment + * - `?` to match on one character in a path segment + * - `**` to match any number of path segments, including none + * - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) + * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) + * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) + * + * @since 3.17.0 + */ +type Pattern = string // (alias) line 14362 +/* + * Position in a text document expressed as zero-based line and character + * offset. Prior to 3.17 the offsets were always based on a UTF-16 string + * representation. So a string of the form `a𐐀b` the character offset of the + * character `a` is 0, the character offset of `𐐀` is 1 and the character + * offset of b is 3 since `𐐀` is represented using two code units in UTF-16. + * Since 3.17 clients and servers can agree on a different string encoding + * representation (e.g. UTF-8). The client announces it's supported encoding + * via the client capability [`general.positionEncodings`](#clientCapabilities). + * The value is an array of position encodings the client supports, with + * decreasing preference (e.g. the encoding at index `0` is the most preferred + * one). To stay backwards compatible the only mandatory encoding is UTF-16 + * represented via the string `utf-16`. The server can pick one of the + * encodings offered by the client and signals that encoding back to the + * client via the initialize result's property + * [`capabilities.positionEncoding`](#serverCapabilities). If the string value + * `utf-16` is missing from the client's capability `general.positionEncodings` + * servers can safely assume that the client supports UTF-16. If the server + * omits the position encoding in its initialize result the encoding defaults + * to the string value `utf-16`. Implementation considerations: since the + * conversion from one encoding into another requires the content of the + * file / line the conversion is best done where the file is read which is + * usually on the server side. + * + * Positions are line end character agnostic. So you can not specify a position + * that denotes `\r|\n` or `\n|` where `|` represents the character offset. + * + * @since 3.17.0 - support for negotiated position encoding. + */ +type Position struct { // line 6500 + /* + * Line position in a document (zero-based). + * + * If a line number is greater than the number of lines in a document, it defaults back to the number of lines in the document. + * If a line number is negative, it defaults to 0. + */ + Line uint32 `json:"line"` + /* + * Character offset on a line in a document (zero-based). + * + * The meaning of this offset is determined by the negotiated + * `PositionEncodingKind`. + * + * If the character value is greater than the line length it defaults back to the + * line length. + */ + Character uint32 `json:"character"` +} + +/* + * A set of predefined position encoding kinds. + * + * @since 3.17.0 + */ +type PositionEncodingKind string // line 13426 +type PrepareRename2Gn = Msg_PrepareRename2Gn // (alias) line 13927 +type PrepareRenameParams struct { // line 5924 + TextDocumentPositionParams + WorkDoneProgressParams +} +type PrepareRenameResult = Msg_PrepareRename2Gn // (alias) line 13927 +type PrepareSupportDefaultBehavior uint32 // line 13721 +/* + * A previous result id in a workspace pull request. + * + * @since 3.17.0 + */ +type PreviousResultID struct { // line 7330 + /* + * The URI for which the client knowns a + * result id. + */ + URI DocumentURI `json:"uri"` + // The value of the previous result id. + Value string `json:"value"` +} + +/* + * A previous result id in a workspace pull request. + * + * @since 3.17.0 + */ +type PreviousResultId struct { // line 7330 + /* + * The URI for which the client knowns a + * result id. + */ + URI DocumentURI `json:"uri"` + // The value of the previous result id. + Value string `json:"value"` +} +type ProgressParams struct { // line 6200 + // The progress token provided by the client or server. + Token ProgressToken `json:"token"` + // The progress data. + Value interface{} `json:"value"` +} +type ProgressToken = interface{} // (alias) line 13959 +// The publish diagnostic client capabilities. +type PublishDiagnosticsClientCapabilities struct { // line 12071 + // Whether the clients accepts diagnostics with related information. + RelatedInformation bool `json:"relatedInformation,omitempty"` + /* + * Client supports the tag property to provide meta data about a diagnostic. + * Clients supporting tags have to handle unknown tags gracefully. + * + * @since 3.15.0 + */ + TagSupport *PTagSupportPPublishDiagnostics `json:"tagSupport,omitempty"` + /* + * Whether the client interprets the version property of the + * `textDocument/publishDiagnostics` notification's parameter. + * + * @since 3.15.0 + */ + VersionSupport bool `json:"versionSupport,omitempty"` + /* + * Client supports a codeDescription property + * + * @since 3.16.0 + */ + CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"` + /* + * Whether code action supports the `data` property which is + * preserved between a `textDocument/publishDiagnostics` and + * `textDocument/codeAction` request. + * + * @since 3.16.0 + */ + DataSupport bool `json:"dataSupport,omitempty"` +} + +// The publish diagnostic notification's parameters. +type PublishDiagnosticsParams struct { // line 4461 + // The URI for which diagnostic information is reported. + URI DocumentURI `json:"uri"` + /* + * Optional the version number of the document the diagnostics are published for. + * + * @since 3.15.0 + */ + Version int32 `json:"version,omitempty"` + // An array of diagnostic information items. + Diagnostics []Diagnostic `json:"diagnostics"` +} + +/* + * A range in a text document expressed as (zero-based) start and end positions. + * + * If you want to specify a range that contains a line including the line ending + * character(s) then use an end position denoting the start of the next line. + * For example: + * ```ts + * { + * start: { line: 5, character: 23 } + * end : { line 6, character : 0 } + * } + * ``` + */ +type Range struct { // line 6310 + // The range's start position. + Start Position `json:"start"` + // The range's end position. + End Position `json:"end"` +} + +// Client Capabilities for a {@link ReferencesRequest}. +type ReferenceClientCapabilities struct { // line 11608 + // Whether references supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +/* + * Value-object that contains additional information when + * requesting references. + */ +type ReferenceContext struct { // line 8929 + // Include the declaration of the current symbol. + IncludeDeclaration bool `json:"includeDeclaration"` +} + +// Reference options. +type ReferenceOptions struct { // line 8943 + WorkDoneProgressOptions +} + +// Parameters for a {@link ReferencesRequest}. +type ReferenceParams struct { // line 5053 + Context ReferenceContext `json:"context"` + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link ReferencesRequest}. +type ReferenceRegistrationOptions struct { // line 5082 + TextDocumentRegistrationOptions + ReferenceOptions +} + +// General parameters to to register for an notification or to register a provider. +type Registration struct { // line 7596 + /* + * The id used to register the request. The id can be used to deregister + * the request again. + */ + ID string `json:"id"` + // The method / capability to register for. + Method string `json:"method"` + // Options necessary for the registration. + RegisterOptions interface{} `json:"registerOptions,omitempty"` +} +type RegistrationParams struct { // line 4037 + Registrations []Registration `json:"registrations"` +} + +/* + * Client capabilities specific to regular expressions. + * + * @since 3.16.0 + */ +type RegularExpressionsClientCapabilities struct { // line 12499 + // The engine's name. + Engine string `json:"engine"` + // The engine's version. + Version string `json:"version,omitempty"` +} + +/* + * A full diagnostic report with a set of related documents. + * + * @since 3.17.0 + */ +type RelatedFullDocumentDiagnosticReport struct { // line 7156 + /* + * Diagnostics of related documents. This information is useful + * in programming languages where code in a file A can generate + * diagnostics in a file B which A depends on. An example of + * such a language is C/C++ where marco definitions in a file + * a.cpp and result in errors in a header file b.hpp. + * + * @since 3.17.0 + */ + RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments,omitempty"` + FullDocumentDiagnosticReport +} + +/* + * An unchanged diagnostic report with a set of related documents. + * + * @since 3.17.0 + */ +type RelatedUnchangedDocumentDiagnosticReport struct { // line 7195 + /* + * Diagnostics of related documents. This information is useful + * in programming languages where code in a file A can generate + * diagnostics in a file B which A depends on. An example of + * such a language is C/C++ where marco definitions in a file + * a.cpp and result in errors in a header file b.hpp. + * + * @since 3.17.0 + */ + RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments,omitempty"` + UnchangedDocumentDiagnosticReport +} + +/* + * A relative pattern is a helper to construct glob patterns that are matched + * relatively to a base URI. The common value for a `baseUri` is a workspace + * folder root, but it can be another absolute URI as well. + * + * @since 3.17.0 + */ +type RelativePattern struct { // line 10735 + /* + * A workspace folder or a base URI to which this pattern will be matched + * against relatively. + */ + BaseURI Or_RelativePattern_baseUri `json:"baseUri"` + // The actual glob pattern; + Pattern Pattern `json:"pattern"` +} +type RenameClientCapabilities struct { // line 11933 + // Whether rename supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Client supports testing for validity of rename operations + * before execution. + * + * @since 3.12.0 + */ + PrepareSupport bool `json:"prepareSupport,omitempty"` + /* + * Client supports the default behavior result. + * + * The value indicates the default behavior used by the + * client. + * + * @since 3.16.0 + */ + PrepareSupportDefaultBehavior interface{} `json:"prepareSupportDefaultBehavior,omitempty"` + /* + * Whether the client honors the change annotations in + * text edits and resource operations returned via the + * rename request's workspace edit by for example presenting + * the workspace edit in the user interface and asking + * for confirmation. + * + * @since 3.16.0 + */ + HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"` +} + +// Rename file operation +type RenameFile struct { // line 6748 + // A rename + Kind string `json:"kind"` + // The old (existing) location. + OldURI DocumentURI `json:"oldUri"` + // The new location. + NewURI DocumentURI `json:"newUri"` + // Rename options. + Options *RenameFileOptions `json:"options,omitempty"` + ResourceOperation +} + +// Rename file options +type RenameFileOptions struct { // line 9440 + // Overwrite target if existing. Overwrite wins over `ignoreIfExists` + Overwrite bool `json:"overwrite,omitempty"` + // Ignores if target exists. + IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` +} + +/* + * The parameters sent in notifications/requests for user-initiated renames of + * files. + * + * @since 3.16.0 + */ +type RenameFilesParams struct { // line 3281 + /* + * An array of all files/folders renamed in this operation. When a folder is renamed, only + * the folder will be included, and not its children. + */ + Files []FileRename `json:"files"` +} + +// Provider options for a {@link RenameRequest}. +type RenameOptions struct { // line 9268 + /* + * Renames should be checked and tested before being executed. + * + * @since version 3.12.0 + */ + PrepareProvider bool `json:"prepareProvider,omitempty"` + WorkDoneProgressOptions +} + +// The parameters of a {@link RenameRequest}. +type RenameParams struct { // line 5873 + // The document to rename. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The position at which this request was sent. + Position Position `json:"position"` + /* + * The new name of the symbol. If the given name is not valid the + * request must return a {@link ResponseError} with an + * appropriate message set. + */ + NewName string `json:"newName"` + WorkDoneProgressParams +} + +// Registration options for a {@link RenameRequest}. +type RenameRegistrationOptions struct { // line 5909 + TextDocumentRegistrationOptions + RenameOptions +} + +// A generic resource operation. +type ResourceOperation struct { // line 9392 + // The resource operation kind. + Kind string `json:"kind"` + /* + * An optional annotation identifier describing the operation. + * + * @since 3.16.0 + */ + AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"` +} +type ResourceOperationKind string // line 13668 +// Save options. +type SaveOptions struct { // line 8464 + // The client is supposed to include the content on save. + IncludeText bool `json:"includeText,omitempty"` +} + +/* + * A selection range represents a part of a selection hierarchy. A selection range + * may have a parent selection range that contains it. + */ +type SelectionRange struct { // line 2568 + // The {@link Range range} of this selection range. + Range Range `json:"range"` + // The parent selection range containing this range. Therefore `parent.range` must contain `this.range`. + Parent *SelectionRange `json:"parent,omitempty"` +} +type SelectionRangeClientCapabilities struct { // line 12057 + /* + * Whether implementation supports dynamic registration for selection range providers. If this is set to `true` + * the client supports the new `SelectionRangeRegistrationOptions` return value for the corresponding server + * capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} +type SelectionRangeOptions struct { // line 6523 + WorkDoneProgressOptions +} + +// A parameter literal used in selection range requests. +type SelectionRangeParams struct { // line 2533 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The positions inside the text document. + Positions []Position `json:"positions"` + WorkDoneProgressParams + PartialResultParams +} +type SelectionRangeRegistrationOptions struct { // line 2591 + SelectionRangeOptions + TextDocumentRegistrationOptions + StaticRegistrationOptions +} + +/* + * A set of predefined token modifiers. This set is not fixed + * an clients can specify additional token types via the + * corresponding client capabilities. + * + * @since 3.16.0 + */ +type SemanticTokenModifiers string // line 12669 +/* + * A set of predefined token types. This set is not fixed + * an clients can specify additional token types via the + * corresponding client capabilities. + * + * @since 3.16.0 + */ +type SemanticTokenTypes string // line 12562 +// @since 3.16.0 +type SemanticTokens struct { // line 2879 + /* + * An optional result id. If provided and clients support delta updating + * the client will include the result id in the next semantic token request. + * A server can then instead of computing all semantic tokens again simply + * send a delta. + */ + ResultID string `json:"resultId,omitempty"` + // The actual tokens. + Data []uint32 `json:"data"` +} + +// @since 3.16.0 +type SemanticTokensClientCapabilities struct { // line 12156 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * Which requests the client supports and might send to the server + * depending on the server's capability. Please note that clients might not + * show semantic tokens or degrade some of the user experience if a range + * or full request is advertised by the client but not provided by the + * server. If for example the client capability `requests.full` and + * `request.range` are both set to true but the server only provides a + * range provider the client might not render a minimap correctly or might + * even decide to not show any semantic tokens at all. + */ + Requests PRequestsPSemanticTokens `json:"requests"` + // The token types that the client supports. + TokenTypes []string `json:"tokenTypes"` + // The token modifiers that the client supports. + TokenModifiers []string `json:"tokenModifiers"` + // The token formats the clients supports. + Formats []string `json:"formats"` + // Whether the client supports tokens that can overlap each other. + OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"` + // Whether the client supports tokens that can span multiple lines. + MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"` + /* + * Whether the client allows the server to actively cancel a + * semantic token request, e.g. supports returning + * LSPErrorCodes.ServerCancelled. If a server does the client + * needs to retrigger the request. + * + * @since 3.17.0 + */ + ServerCancelSupport bool `json:"serverCancelSupport,omitempty"` + /* + * Whether the client uses semantic tokens to augment existing + * syntax tokens. If set to `true` client side created syntax + * tokens and semantic tokens are both used for colorization. If + * set to `false` the client only uses the returned semantic tokens + * for colorization. + * + * If the value is `undefined` then the client behavior is not + * specified. + * + * @since 3.17.0 + */ + AugmentsSyntaxTokens bool `json:"augmentsSyntaxTokens,omitempty"` +} + +// @since 3.16.0 +type SemanticTokensDelta struct { // line 2978 + ResultID string `json:"resultId,omitempty"` + // The semantic token edits to transform a previous result into a new result. + Edits []SemanticTokensEdit `json:"edits"` +} + +// @since 3.16.0 +type SemanticTokensDeltaParams struct { // line 2945 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + /* + * The result id of a previous response. The result Id can either point to a full response + * or a delta response depending on what was received last. + */ + PreviousResultID string `json:"previousResultId"` + WorkDoneProgressParams + PartialResultParams +} + +// @since 3.16.0 +type SemanticTokensDeltaPartialResult struct { // line 3004 + Edits []SemanticTokensEdit `json:"edits"` +} + +// @since 3.16.0 +type SemanticTokensEdit struct { // line 6616 + // The start offset of the edit. + Start uint32 `json:"start"` + // The count of elements to remove. + DeleteCount uint32 `json:"deleteCount"` + // The elements to insert. + Data []uint32 `json:"data,omitempty"` +} + +// @since 3.16.0 +type SemanticTokensLegend struct { // line 9313 + // The token types a server uses. + TokenTypes []string `json:"tokenTypes"` + // The token modifiers a server uses. + TokenModifiers []string `json:"tokenModifiers"` +} + +// @since 3.16.0 +type SemanticTokensOptions struct { // line 6545 + // The legend used by the server + Legend SemanticTokensLegend `json:"legend"` + /* + * Server supports providing semantic tokens for a specific range + * of a document. + */ + Range interface{} `json:"range,omitempty"` + // Server supports providing semantic tokens for a full document. + Full bool `json:"full,omitempty"` + WorkDoneProgressOptions +} + +// @since 3.16.0 +type SemanticTokensParams struct { // line 2854 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + WorkDoneProgressParams + PartialResultParams +} + +// @since 3.16.0 +type SemanticTokensPartialResult struct { // line 2906 + Data []uint32 `json:"data"` +} + +// @since 3.16.0 +type SemanticTokensRangeParams struct { // line 3021 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The range the semantic tokens are requested for. + Range Range `json:"range"` + WorkDoneProgressParams + PartialResultParams +} + +// @since 3.16.0 +type SemanticTokensRegistrationOptions struct { // line 2923 + TextDocumentRegistrationOptions + SemanticTokensOptions + StaticRegistrationOptions +} + +// @since 3.16.0 +type SemanticTokensWorkspaceClientCapabilities struct { // line 10976 + /* + * Whether the client implementation supports a refresh request sent from + * the server to the client. + * + * Note that this event is global and will force the client to refresh all + * semantic tokens currently shown. It should be used with absolute care + * and is useful for situation where a server for example detects a project + * wide change that requires such a calculation. + */ + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +/* + * Defines the capabilities provided by a language + * server. + */ +type ServerCapabilities struct { // line 7808 + /* + * The position encoding the server picked from the encodings offered + * by the client via the client capability `general.positionEncodings`. + * + * If the client didn't provide any position encodings the only valid + * value that a server can return is 'utf-16'. + * + * If omitted it defaults to 'utf-16'. + * + * @since 3.17.0 + */ + PositionEncoding PositionEncodingKind `json:"positionEncoding,omitempty"` + /* + * Defines how text documents are synced. Is either a detailed structure + * defining each notification or for backwards compatibility the + * TextDocumentSyncKind number. + */ + TextDocumentSync interface{} `json:"textDocumentSync,omitempty"` + /* + * Defines how notebook documents are synced. + * + * @since 3.17.0 + */ + NotebookDocumentSync interface{} `json:"notebookDocumentSync,omitempty"` + // The server provides completion support. + CompletionProvider CompletionOptions `json:"completionProvider,omitempty"` + // The server provides hover support. + HoverProvider bool `json:"hoverProvider,omitempty"` + // The server provides signature help support. + SignatureHelpProvider SignatureHelpOptions `json:"signatureHelpProvider,omitempty"` + // The server provides Goto Declaration support. + DeclarationProvider bool `json:"declarationProvider,omitempty"` + // The server provides goto definition support. + DefinitionProvider bool `json:"definitionProvider,omitempty"` + // The server provides Goto Type Definition support. + TypeDefinitionProvider interface{} `json:"typeDefinitionProvider,omitempty"` + // The server provides Goto Implementation support. + ImplementationProvider interface{} `json:"implementationProvider,omitempty"` + // The server provides find references support. + ReferencesProvider bool `json:"referencesProvider,omitempty"` + // The server provides document highlight support. + DocumentHighlightProvider bool `json:"documentHighlightProvider,omitempty"` + // The server provides document symbol support. + DocumentSymbolProvider bool `json:"documentSymbolProvider,omitempty"` + /* + * The server provides code actions. CodeActionOptions may only be + * specified if the client states that it supports + * `codeActionLiteralSupport` in its initial `initialize` request. + */ + CodeActionProvider interface{} `json:"codeActionProvider,omitempty"` + // The server provides code lens. + CodeLensProvider *CodeLensOptions `json:"codeLensProvider,omitempty"` + // The server provides document link support. + DocumentLinkProvider DocumentLinkOptions `json:"documentLinkProvider,omitempty"` + // The server provides color provider support. + ColorProvider interface{} `json:"colorProvider,omitempty"` + // The server provides workspace symbol support. + WorkspaceSymbolProvider bool `json:"workspaceSymbolProvider,omitempty"` + // The server provides document formatting. + DocumentFormattingProvider bool `json:"documentFormattingProvider,omitempty"` + // The server provides document range formatting. + DocumentRangeFormattingProvider bool `json:"documentRangeFormattingProvider,omitempty"` + // The server provides document formatting on typing. + DocumentOnTypeFormattingProvider *DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"` + /* + * The server provides rename support. RenameOptions may only be + * specified if the client states that it supports + * `prepareSupport` in its initial `initialize` request. + */ + RenameProvider interface{} `json:"renameProvider,omitempty"` + // The server provides folding provider support. + FoldingRangeProvider interface{} `json:"foldingRangeProvider,omitempty"` + // The server provides selection range support. + SelectionRangeProvider interface{} `json:"selectionRangeProvider,omitempty"` + // The server provides execute command support. + ExecuteCommandProvider ExecuteCommandOptions `json:"executeCommandProvider,omitempty"` + /* + * The server provides call hierarchy support. + * + * @since 3.16.0 + */ + CallHierarchyProvider interface{} `json:"callHierarchyProvider,omitempty"` + /* + * The server provides linked editing range support. + * + * @since 3.16.0 + */ + LinkedEditingRangeProvider interface{} `json:"linkedEditingRangeProvider,omitempty"` + /* + * The server provides semantic tokens support. + * + * @since 3.16.0 + */ + SemanticTokensProvider interface{} `json:"semanticTokensProvider,omitempty"` + /* + * The server provides moniker support. + * + * @since 3.16.0 + */ + MonikerProvider interface{} `json:"monikerProvider,omitempty"` + /* + * The server provides type hierarchy support. + * + * @since 3.17.0 + */ + TypeHierarchyProvider interface{} `json:"typeHierarchyProvider,omitempty"` + /* + * The server provides inline values. + * + * @since 3.17.0 + */ + InlineValueProvider interface{} `json:"inlineValueProvider,omitempty"` + /* + * The server provides inlay hints. + * + * @since 3.17.0 + */ + InlayHintProvider interface{} `json:"inlayHintProvider,omitempty"` + /* + * The server has support for pull model diagnostics. + * + * @since 3.17.0 + */ + DiagnosticProvider interface{} `json:"diagnosticProvider,omitempty"` + // Workspace specific server capabilities. + Workspace Workspace6Gn `json:"workspace,omitempty"` + // Experimental server capabilities. + Experimental interface{} `json:"experimental,omitempty"` +} +type SetTraceParams struct { // line 6146 + Value TraceValues `json:"value"` +} + +/* + * Client capabilities for the showDocument request. + * + * @since 3.16.0 + */ +type ShowDocumentClientCapabilities struct { // line 12484 + /* + * The client has support for the showDocument + * request. + */ + Support bool `json:"support"` +} + +/* + * Params to show a document. + * + * @since 3.16.0 + */ +type ShowDocumentParams struct { // line 3054 + // The document uri to show. + URI URI `json:"uri"` + /* + * Indicates to show the resource in an external program. + * To show for example `https://code.visualstudio.com/` + * in the default WEB browser set `external` to `true`. + */ + External bool `json:"external,omitempty"` + /* + * An optional property to indicate whether the editor + * showing the document should take focus or not. + * Clients might ignore this property if an external + * program is started. + */ + TakeFocus bool `json:"takeFocus,omitempty"` + /* + * An optional selection range if the document is a text + * document. Clients might ignore the property if an + * external program is started or the file is not a text + * file. + */ + Selection *Range `json:"selection,omitempty"` +} + +/* + * The result of a showDocument request. + * + * @since 3.16.0 + */ +type ShowDocumentResult struct { // line 3096 + // A boolean indicating if the show was successful. + Success bool `json:"success"` +} + +// The parameters of a notification message. +type ShowMessageParams struct { // line 4182 + // The message type. See {@link MessageType} + Type MessageType `json:"type"` + // The actual message. + Message string `json:"message"` +} + +// Show message request client capabilities +type ShowMessageRequestClientCapabilities struct { // line 12457 + // Capabilities specific to the `MessageActionItem` type. + MessageActionItem *PMessageActionItemPShowMessage `json:"messageActionItem,omitempty"` +} +type ShowMessageRequestParams struct { // line 4204 + // The message type. See {@link MessageType} + Type MessageType `json:"type"` + // The actual message. + Message string `json:"message"` + // The message action items to present. + Actions []MessageActionItem `json:"actions,omitempty"` +} + +/* + * Signature help represents the signature of something + * callable. There can be multiple signature but only one + * active and only one active parameter. + */ +type SignatureHelp struct { // line 4967 + // One or more signatures. + Signatures []SignatureInformation `json:"signatures"` + /* + * The active signature. If omitted or the value lies outside the + * range of `signatures` the value defaults to zero or is ignored if + * the `SignatureHelp` has no signatures. + * + * Whenever possible implementors should make an active decision about + * the active signature and shouldn't rely on a default value. + * + * In future version of the protocol this property might become + * mandatory to better express this. + */ + ActiveSignature uint32 `json:"activeSignature,omitempty"` + /* + * The active parameter of the active signature. If omitted or the value + * lies outside the range of `signatures[activeSignature].parameters` + * defaults to 0 if the active signature has parameters. If + * the active signature has no parameters it is ignored. + * In future version of the protocol this property might become + * mandatory to better express the active parameter if the + * active signature does have any. + */ + ActiveParameter uint32 `json:"activeParameter,omitempty"` +} + +// Client Capabilities for a {@link SignatureHelpRequest}. +type SignatureHelpClientCapabilities struct { // line 11427 + // Whether signature help supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client supports the following `SignatureInformation` + * specific properties. + */ + SignatureInformation *PSignatureInformationPSignatureHelp `json:"signatureInformation,omitempty"` + /* + * The client supports to send additional context information for a + * `textDocument/signatureHelp` request. A client that opts into + * contextSupport will also support the `retriggerCharacters` on + * `SignatureHelpOptions`. + * + * @since 3.15.0 + */ + ContextSupport bool `json:"contextSupport,omitempty"` +} + +/* + * Additional information about the context in which a signature help request was triggered. + * + * @since 3.15.0 + */ +type SignatureHelpContext struct { // line 8786 + // Action that caused signature help to be triggered. + TriggerKind SignatureHelpTriggerKind `json:"triggerKind"` + /* + * Character that caused signature help to be triggered. + * + * This is undefined when `triggerKind !== SignatureHelpTriggerKind.TriggerCharacter` + */ + TriggerCharacter string `json:"triggerCharacter,omitempty"` + /* + * `true` if signature help was already showing when it was triggered. + * + * Retriggers occurs when the signature help is already active and can be caused by actions such as + * typing a trigger character, a cursor move, or document content changes. + */ + IsRetrigger bool `json:"isRetrigger"` + /* + * The currently active `SignatureHelp`. + * + * The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field updated based on + * the user navigating through available signatures. + */ + ActiveSignatureHelp *SignatureHelp `json:"activeSignatureHelp,omitempty"` +} + +// Server Capabilities for a {@link SignatureHelpRequest}. +type SignatureHelpOptions struct { // line 8881 + // List of characters that trigger signature help automatically. + TriggerCharacters []string `json:"triggerCharacters,omitempty"` + /* + * List of characters that re-trigger signature help. + * + * These trigger characters are only active when signature help is already showing. All trigger characters + * are also counted as re-trigger characters. + * + * @since 3.15.0 + */ + RetriggerCharacters []string `json:"retriggerCharacters,omitempty"` + WorkDoneProgressOptions +} + +// Parameters for a {@link SignatureHelpRequest}. +type SignatureHelpParams struct { // line 4939 + /* + * The signature help context. This is only available if the client specifies + * to send this using the client capability `textDocument.signatureHelp.contextSupport === true` + * + * @since 3.15.0 + */ + Context *SignatureHelpContext `json:"context,omitempty"` + TextDocumentPositionParams + WorkDoneProgressParams +} + +// Registration options for a {@link SignatureHelpRequest}. +type SignatureHelpRegistrationOptions struct { // line 5002 + TextDocumentRegistrationOptions + SignatureHelpOptions +} + +/* + * How a signature help was triggered. + * + * @since 3.15.0 + */ +type SignatureHelpTriggerKind uint32 // line 13579 +/* + * Represents the signature of something callable. A signature + * can have a label, like a function-name, a doc-comment, and + * a set of parameters. + */ +type SignatureInformation struct { // line 8827 + /* + * The label of this signature. Will be shown in + * the UI. + */ + Label string `json:"label"` + /* + * The human-readable doc-comment of this signature. Will be shown + * in the UI but can be omitted. + */ + Documentation *Or_SignatureInformation_documentation `json:"documentation,omitempty"` + // The parameters of this signature. + Parameters []ParameterInformation `json:"parameters,omitempty"` + /* + * The index of the active parameter. + * + * If provided, this is used in place of `SignatureHelp.activeParameter`. + * + * @since 3.16.0 + */ + ActiveParameter uint32 `json:"activeParameter,omitempty"` +} + +/* + * Static registration options to be returned in the initialize + * request. + */ +type StaticRegistrationOptions struct { // line 6342 + /* + * The id used to register the request. The id can be used to deregister + * the request again. See also Registration#id. + */ + ID string `json:"id,omitempty"` +} + +/* + * Represents information about programming constructs like variables, classes, + * interfaces etc. + */ +type SymbolInformation struct { // line 5180 + // extends BaseSymbolInformation + /* + * Indicates if this symbol is deprecated. + * + * @deprecated Use tags instead + */ + Deprecated bool `json:"deprecated,omitempty"` + /* + * The location of this symbol. The location's range is used by a tool + * to reveal the location in the editor. If the symbol is selected in the + * tool the range's start information is used to position the cursor. So + * the range usually spans more than the actual symbol's name and does + * normally include things like visibility modifiers. + * + * The range doesn't have to denote a node range in the sense of an abstract + * syntax tree. It can therefore not be used to re-construct a hierarchy of + * the symbols. + */ + Location Location `json:"location"` + // The name of this symbol. + Name string `json:"name"` + // The kind of this symbol. + Kind SymbolKind `json:"kind"` + /* + * Tags for this symbol. + * + * @since 3.16.0 + */ + Tags []SymbolTag `json:"tags,omitempty"` + /* + * The name of the symbol containing this symbol. This information is for + * user interface purposes (e.g. to render a qualifier in the user interface + * if necessary). It can't be used to re-infer a hierarchy for the document + * symbols. + */ + ContainerName string `json:"containerName,omitempty"` +} + +// A symbol kind. +type SymbolKind uint32 // line 12840 +/* + * Symbol tags are extra annotations that tweak the rendering of a symbol. + * + * @since 3.16 + */ +type SymbolTag uint32 // line 12954 +// Describe options to be used when registered for text document change events. +type TextDocumentChangeRegistrationOptions struct { // line 4311 + // How documents are synced to the server. + SyncKind TextDocumentSyncKind `json:"syncKind"` + TextDocumentRegistrationOptions +} + +// Text document specific client capabilities. +type TextDocumentClientCapabilities struct { // line 10322 + // Defines which synchronization capabilities the client supports. + Synchronization *TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"` + // Capabilities specific to the `textDocument/completion` request. + Completion CompletionClientCapabilities `json:"completion,omitempty"` + // Capabilities specific to the `textDocument/hover` request. + Hover HoverClientCapabilities `json:"hover,omitempty"` + // Capabilities specific to the `textDocument/signatureHelp` request. + SignatureHelp *SignatureHelpClientCapabilities `json:"signatureHelp,omitempty"` + /* + * Capabilities specific to the `textDocument/declaration` request. + * + * @since 3.14.0 + */ + Declaration *DeclarationClientCapabilities `json:"declaration,omitempty"` + // Capabilities specific to the `textDocument/definition` request. + Definition *DefinitionClientCapabilities `json:"definition,omitempty"` + /* + * Capabilities specific to the `textDocument/typeDefinition` request. + * + * @since 3.6.0 + */ + TypeDefinition *TypeDefinitionClientCapabilities `json:"typeDefinition,omitempty"` + /* + * Capabilities specific to the `textDocument/implementation` request. + * + * @since 3.6.0 + */ + Implementation *ImplementationClientCapabilities `json:"implementation,omitempty"` + // Capabilities specific to the `textDocument/references` request. + References *ReferenceClientCapabilities `json:"references,omitempty"` + // Capabilities specific to the `textDocument/documentHighlight` request. + DocumentHighlight *DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"` + // Capabilities specific to the `textDocument/documentSymbol` request. + DocumentSymbol DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"` + // Capabilities specific to the `textDocument/codeAction` request. + CodeAction CodeActionClientCapabilities `json:"codeAction,omitempty"` + // Capabilities specific to the `textDocument/codeLens` request. + CodeLens *CodeLensClientCapabilities `json:"codeLens,omitempty"` + // Capabilities specific to the `textDocument/documentLink` request. + DocumentLink *DocumentLinkClientCapabilities `json:"documentLink,omitempty"` + /* + * Capabilities specific to the `textDocument/documentColor` and the + * `textDocument/colorPresentation` request. + * + * @since 3.6.0 + */ + ColorProvider *DocumentColorClientCapabilities `json:"colorProvider,omitempty"` + // Capabilities specific to the `textDocument/formatting` request. + Formatting *DocumentFormattingClientCapabilities `json:"formatting,omitempty"` + // Capabilities specific to the `textDocument/rangeFormatting` request. + RangeFormatting *DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"` + // Capabilities specific to the `textDocument/onTypeFormatting` request. + OnTypeFormatting *DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"` + // Capabilities specific to the `textDocument/rename` request. + Rename RenameClientCapabilities `json:"rename,omitempty"` + /* + * Capabilities specific to the `textDocument/foldingRange` request. + * + * @since 3.10.0 + */ + FoldingRange FoldingRangeClientCapabilities `json:"foldingRange,omitempty"` + /* + * Capabilities specific to the `textDocument/selectionRange` request. + * + * @since 3.15.0 + */ + SelectionRange *SelectionRangeClientCapabilities `json:"selectionRange,omitempty"` + // Capabilities specific to the `textDocument/publishDiagnostics` notification. + PublishDiagnostics PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"` + /* + * Capabilities specific to the various call hierarchy requests. + * + * @since 3.16.0 + */ + CallHierarchy *CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"` + /* + * Capabilities specific to the various semantic token request. + * + * @since 3.16.0 + */ + SemanticTokens SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"` + /* + * Capabilities specific to the `textDocument/linkedEditingRange` request. + * + * @since 3.16.0 + */ + LinkedEditingRange *LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"` + /* + * Client capabilities specific to the `textDocument/moniker` request. + * + * @since 3.16.0 + */ + Moniker *MonikerClientCapabilities `json:"moniker,omitempty"` + /* + * Capabilities specific to the various type hierarchy requests. + * + * @since 3.17.0 + */ + TypeHierarchy *TypeHierarchyClientCapabilities `json:"typeHierarchy,omitempty"` + /* + * Capabilities specific to the `textDocument/inlineValue` request. + * + * @since 3.17.0 + */ + InlineValue *InlineValueClientCapabilities `json:"inlineValue,omitempty"` + /* + * Capabilities specific to the `textDocument/inlayHint` request. + * + * @since 3.17.0 + */ + InlayHint *InlayHintClientCapabilities `json:"inlayHint,omitempty"` + /* + * Capabilities specific to the diagnostic pull model. + * + * @since 3.17.0 + */ + Diagnostic *DiagnosticClientCapabilities `json:"diagnostic,omitempty"` +} + +/* + * An event describing a change to a text document. If only a text is provided + * it is considered to be the full content of the document. + */ +type TextDocumentContentChangeEvent = Msg_TextDocumentContentChangeEvent // (alias) line 14001 +/* + * Describes textual changes on a text document. A TextDocumentEdit describes all changes + * on a document version Si and after they are applied move the document to version Si+1. + * So the creator of a TextDocumentEdit doesn't need to sort the array of edits or do any + * kind of ordering. However the edits must be non overlapping. + */ +type TextDocumentEdit struct { // line 6676 + // The text document to change. + TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"` + /* + * The edits to be applied. + * + * @since 3.16.0 - support for AnnotatedTextEdit. This is guarded using a + * client capability. + */ + Edits []TextEdit `json:"edits"` +} + +/* + * A document filter denotes a document by different properties like + * the {@link TextDocument.languageId language}, the {@link Uri.scheme scheme} of + * its resource, or a glob-pattern that is applied to the {@link TextDocument.fileName path}. + * + * Glob patterns can have the following syntax: + * - `*` to match one or more characters in a path segment + * - `?` to match on one character in a path segment + * - `**` to match any number of path segments, including none + * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) + * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) + * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) + * + * @sample A language filter that applies to typescript files on disk: `{ language: 'typescript', scheme: 'file' }` + * @sample A language filter that applies to all package.json paths: `{ language: 'json', pattern: '**package.json' }` + * + * @since 3.17.0 + */ +type TextDocumentFilter = Msg_TextDocumentFilter // (alias) line 14144 +// A literal to identify a text document in the client. +type TextDocumentIdentifier struct { // line 6418 + // The text document's uri. + URI DocumentURI `json:"uri"` +} + +/* + * An item to transfer a text document from the client to the + * server. + */ +type TextDocumentItem struct { // line 7404 + // The text document's uri. + URI DocumentURI `json:"uri"` + // The text document's language identifier. + LanguageID string `json:"languageId"` + /* + * The version number of this document (it will increase after each + * change, including undo/redo). + */ + Version int32 `json:"version"` + // The content of the opened text document. + Text string `json:"text"` +} + +/* + * A parameter literal used in requests to pass a text document and a position inside that + * document. + */ +type TextDocumentPositionParams struct { // line 6221 + // The text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The position inside the text document. + Position Position `json:"position"` +} + +// General text document registration options. +type TextDocumentRegistrationOptions struct { // line 2367 + /* + * A document selector to identify the scope of the registration. If set to null + * the document selector provided on the client side will be used. + */ + DocumentSelector DocumentSelector `json:"documentSelector"` +} + +// Represents reasons why a text document is saved. +type TextDocumentSaveReason uint32 // line 13108 +// Save registration options. +type TextDocumentSaveRegistrationOptions struct { // line 4368 + TextDocumentRegistrationOptions + SaveOptions +} +type TextDocumentSyncClientCapabilities struct { // line 11126 + // Whether text document synchronization supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // The client supports sending will save notifications. + WillSave bool `json:"willSave,omitempty"` + /* + * The client supports sending a will save request and + * waits for a response providing text edits which will + * be applied to the document before it is saved. + */ + WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` + // The client supports did save notifications. + DidSave bool `json:"didSave,omitempty"` +} + +/* + * Defines how the host (editor) should sync + * document changes to the language server. + */ +type TextDocumentSyncKind uint32 // line 13083 +type TextDocumentSyncOptions struct { // line 9735 + /* + * Open and close notifications are sent to the server. If omitted open close notification should not + * be sent. + */ + OpenClose bool `json:"openClose,omitempty"` + /* + * Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full + * and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None. + */ + Change TextDocumentSyncKind `json:"change,omitempty"` + /* + * If present will save notifications are sent to the server. If omitted the notification should not be + * sent. + */ + WillSave bool `json:"willSave,omitempty"` + /* + * If present will save wait until requests are sent to the server. If omitted the request should not be + * sent. + */ + WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` + /* + * If present save notifications are sent to the server. If omitted the notification should not be + * sent. + */ + Save SaveOptions `json:"save,omitempty"` +} + +// A text edit applicable to a text document. +type TextEdit struct { // line 4405 + /* + * The range of the text document to be manipulated. To insert + * text into a document create a range where start === end. + */ + Range Range `json:"range"` + /* + * The string to be inserted. For delete operations use an + * empty string. + */ + NewText string `json:"newText"` +} +type TokenFormat string // line 13735 +type TraceValues string // line 13382 +// Since 3.6.0 +type TypeDefinitionClientCapabilities struct { // line 11558 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `TypeDefinitionRegistrationOptions` return value + * for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + /* + * The client supports additional metadata in the form of definition links. + * + * Since 3.14.0 + */ + LinkSupport bool `json:"linkSupport,omitempty"` +} +type TypeDefinitionOptions struct { // line 6357 + WorkDoneProgressOptions +} +type TypeDefinitionParams struct { // line 2122 + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} +type TypeDefinitionRegistrationOptions struct { // line 2142 + TextDocumentRegistrationOptions + TypeDefinitionOptions + StaticRegistrationOptions +} + +// @since 3.17.0 +type TypeHierarchyClientCapabilities struct { // line 12336 + /* + * Whether implementation supports dynamic registration. If this is set to `true` + * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + * return value for the corresponding server capability as well. + */ + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// @since 3.17.0 +type TypeHierarchyItem struct { // line 3409 + // The name of this item. + Name string `json:"name"` + // The kind of this item. + Kind SymbolKind `json:"kind"` + // Tags for this item. + Tags []SymbolTag `json:"tags,omitempty"` + // More detail for this item, e.g. the signature of a function. + Detail string `json:"detail,omitempty"` + // The resource identifier of this item. + URI DocumentURI `json:"uri"` + /* + * The range enclosing this symbol not including leading/trailing whitespace + * but everything else, e.g. comments and code. + */ + Range Range `json:"range"` + /* + * The range that should be selected and revealed when this symbol is being + * picked, e.g. the name of a function. Must be contained by the + * {@link TypeHierarchyItem.range `range`}. + */ + SelectionRange Range `json:"selectionRange"` + /* + * A data entry field that is preserved between a type hierarchy prepare and + * supertypes or subtypes requests. It could also be used to identify the + * type hierarchy in the server, helping improve the performance on + * resolving supertypes and subtypes. + */ + Data interface{} `json:"data,omitempty"` +} + +/* + * Type hierarchy options used during static registration. + * + * @since 3.17.0 + */ +type TypeHierarchyOptions struct { // line 6935 + WorkDoneProgressOptions +} + +/* + * The parameter of a `textDocument/prepareTypeHierarchy` request. + * + * @since 3.17.0 + */ +type TypeHierarchyPrepareParams struct { // line 3391 + TextDocumentPositionParams + WorkDoneProgressParams +} + +/* + * Type hierarchy options used during static or dynamic registration. + * + * @since 3.17.0 + */ +type TypeHierarchyRegistrationOptions struct { // line 3486 + TextDocumentRegistrationOptions + TypeHierarchyOptions + StaticRegistrationOptions +} + +/* + * The parameter of a `typeHierarchy/subtypes` request. + * + * @since 3.17.0 + */ +type TypeHierarchySubtypesParams struct { // line 3532 + Item TypeHierarchyItem `json:"item"` + WorkDoneProgressParams + PartialResultParams +} + +/* + * The parameter of a `typeHierarchy/supertypes` request. + * + * @since 3.17.0 + */ +type TypeHierarchySupertypesParams struct { // line 3508 + Item TypeHierarchyItem `json:"item"` + WorkDoneProgressParams + PartialResultParams +} + +// created for Tuple +type UIntCommaUInt struct { // line 10075 + Fld0 uint32 `json:"fld0"` + Fld1 uint32 `json:"fld1"` +} +type URI = string + +/* + * A diagnostic report indicating that the last returned + * report is still accurate. + * + * @since 3.17.0 + */ +type UnchangedDocumentDiagnosticReport struct { // line 7269 + /* + * A document diagnostic report indicating + * no changes to the last result. A server can + * only return `unchanged` if result ids are + * provided. + */ + Kind string `json:"kind"` + /* + * A result id which will be sent on the next + * diagnostic request for the same document. + */ + ResultID string `json:"resultId"` +} + +/* + * Moniker uniqueness level to define scope of the moniker. + * + * @since 3.16.0 + */ +type UniquenessLevel string // line 12970 +// General parameters to unregister a request or notification. +type Unregistration struct { // line 7627 + /* + * The id used to unregister the request or notification. Usually an id + * provided during the register request. + */ + ID string `json:"id"` + // The method to unregister for. + Method string `json:"method"` +} +type UnregistrationParams struct { // line 4052 + Unregisterations []Unregistration `json:"unregisterations"` +} + +/* + * A versioned notebook document identifier. + * + * @since 3.17.0 + */ +type VersionedNotebookDocumentIdentifier struct { // line 7442 + // The version number of this notebook document. + Version int32 `json:"version"` + // The notebook document's uri. + URI URI `json:"uri"` +} + +// A text document identifier to denote a specific version of a text document. +type VersionedTextDocumentIdentifier struct { // line 8444 + // The version number of this document. + Version int32 `json:"version"` + TextDocumentIdentifier +} +type WatchKind = uint32 // line 13505// The parameters sent in a will save text document notification. +type WillSaveTextDocumentParams struct { // line 4383 + // The document that will be saved. + TextDocument TextDocumentIdentifier `json:"textDocument"` + // The 'TextDocumentSaveReason'. + Reason TextDocumentSaveReason `json:"reason"` +} +type WindowClientCapabilities struct { // line 10628 + /* + * It indicates whether the client supports server initiated + * progress using the `window/workDoneProgress/create` request. + * + * The capability also controls Whether client supports handling + * of progress notifications. If set servers are allowed to report a + * `workDoneProgress` property in the request specific server + * capabilities. + * + * @since 3.15.0 + */ + WorkDoneProgress bool `json:"workDoneProgress,omitempty"` + /* + * Capabilities specific to the showMessage request. + * + * @since 3.16.0 + */ + ShowMessage *ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"` + /* + * Capabilities specific to the showDocument request. + * + * @since 3.16.0 + */ + ShowDocument *ShowDocumentClientCapabilities `json:"showDocument,omitempty"` +} +type WorkDoneProgressBegin struct { // line 6039 + Kind string `json:"kind"` + /* + * Mandatory title of the progress operation. Used to briefly inform about + * the kind of operation being performed. + * + * Examples: "Indexing" or "Linking dependencies". + */ + Title string `json:"title"` + /* + * Controls if a cancel button should show to allow the user to cancel the + * long running operation. Clients that don't support cancellation are allowed + * to ignore the setting. + */ + Cancellable bool `json:"cancellable,omitempty"` + /* + * Optional, more detailed associated progress message. Contains + * complementary information to the `title`. + * + * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep". + * If unset, the previous progress message (if any) is still valid. + */ + Message string `json:"message,omitempty"` + /* + * Optional progress percentage to display (value 100 is considered 100%). + * If not provided infinite progress is assumed and clients are allowed + * to ignore the `percentage` value in subsequent in report notifications. + * + * The value should be steadily rising. Clients are free to ignore values + * that are not following this rule. The value range is [0, 100]. + */ + Percentage uint32 `json:"percentage,omitempty"` +} +type WorkDoneProgressCancelParams struct { // line 2624 + // The token to be used to report progress. + Token ProgressToken `json:"token"` +} +type WorkDoneProgressCreateParams struct { // line 2611 + // The token to be used to report progress. + Token ProgressToken `json:"token"` +} +type WorkDoneProgressEnd struct { // line 6125 + Kind string `json:"kind"` + /* + * Optional, a final message indicating to for example indicate the outcome + * of the operation. + */ + Message string `json:"message,omitempty"` +} +type WorkDoneProgressOptions struct { // line 2354 + WorkDoneProgress bool `json:"workDoneProgress,omitempty"` +} + +// created for And +type WorkDoneProgressOptionsAndTextDocumentRegistrationOptions struct { // line 195 + WorkDoneProgressOptions + TextDocumentRegistrationOptions +} +type WorkDoneProgressParams struct { // line 6243 + // An optional token that a server can use to report work done progress. + WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` +} +type WorkDoneProgressReport struct { // line 6086 + Kind string `json:"kind"` + /* + * Controls enablement state of a cancel button. + * + * Clients that don't support cancellation or don't support controlling the button's + * enablement state are allowed to ignore the property. + */ + Cancellable bool `json:"cancellable,omitempty"` + /* + * Optional, more detailed associated progress message. Contains + * complementary information to the `title`. + * + * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep". + * If unset, the previous progress message (if any) is still valid. + */ + Message string `json:"message,omitempty"` + /* + * Optional progress percentage to display (value 100 is considered 100%). + * If not provided infinite progress is assumed and clients are allowed + * to ignore the `percentage` value in subsequent in report notifications. + * + * The value should be steadily rising. Clients are free to ignore values + * that are not following this rule. The value range is [0, 100] + */ + Percentage uint32 `json:"percentage,omitempty"` +} + +// created for Literal (Lit_ServerCapabilities_workspace) +type Workspace6Gn struct { // line 8403 + /* + * The server supports workspace folder. + * + * @since 3.6.0 + */ + WorkspaceFolders WorkspaceFolders5Gn `json:"workspaceFolders"` + /* + * The server is interested in notifications/requests for operations on files. + * + * @since 3.16.0 + */ + FileOperations FileOperationOptions `json:"fileOperations"` +} + +// Workspace specific client capabilities. +type WorkspaceClientCapabilities struct { // line 10183 + /* + * The client supports applying batch edits + * to the workspace by supporting the request + * 'workspace/applyEdit' + */ + ApplyEdit bool `json:"applyEdit,omitempty"` + // Capabilities specific to `WorkspaceEdit`s. + WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"` + // Capabilities specific to the `workspace/didChangeConfiguration` notification. + DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"` + // Capabilities specific to the `workspace/didChangeWatchedFiles` notification. + DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"` + // Capabilities specific to the `workspace/symbol` request. + Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` + // Capabilities specific to the `workspace/executeCommand` request. + ExecuteCommand *ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` + /* + * The client has support for workspace folders. + * + * @since 3.6.0 + */ + WorkspaceFolders bool `json:"workspaceFolders,omitempty"` + /* + * The client supports `workspace/configuration` requests. + * + * @since 3.6.0 + */ + Configuration bool `json:"configuration,omitempty"` + /* + * Capabilities specific to the semantic token requests scoped to the + * workspace. + * + * @since 3.16.0. + */ + SemanticTokens *SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` + /* + * Capabilities specific to the code lens requests scoped to the + * workspace. + * + * @since 3.16.0. + */ + CodeLens *CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` + /* + * The client has support for file notifications/requests for user operations on files. + * + * Since 3.16.0 + */ + FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"` + /* + * Capabilities specific to the inline values requests scoped to the + * workspace. + * + * @since 3.17.0. + */ + InlineValue *InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"` + /* + * Capabilities specific to the inlay hint requests scoped to the + * workspace. + * + * @since 3.17.0. + */ + InlayHint *InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"` + /* + * Capabilities specific to the diagnostic requests scoped to the + * workspace. + * + * @since 3.17.0. + */ + Diagnostics *DiagnosticWorkspaceClientCapabilities `json:"diagnostics,omitempty"` +} + +/* + * Parameters of the workspace diagnostic request. + * + * @since 3.17.0 + */ +type WorkspaceDiagnosticParams struct { // line 3876 + // The additional identifier provided during registration. + Identifier string `json:"identifier,omitempty"` + /* + * The currently known diagnostic reports with their + * previous result ids. + */ + PreviousResultIds []PreviousResultID `json:"previousResultIds"` + WorkDoneProgressParams + PartialResultParams +} + +/* + * A workspace diagnostic report. + * + * @since 3.17.0 + */ +type WorkspaceDiagnosticReport struct { // line 3913 + Items []WorkspaceDocumentDiagnosticReport `json:"items"` +} + +/* + * A partial result for a workspace diagnostic report. + * + * @since 3.17.0 + */ +type WorkspaceDiagnosticReportPartialResult struct { // line 3930 + Items []WorkspaceDocumentDiagnosticReport `json:"items"` +} + +/* + * A workspace diagnostic document report. + * + * @since 3.17.0 + */ +type WorkspaceDocumentDiagnosticReport = Or_WorkspaceDocumentDiagnosticReport // (alias) line 13983 +/* + * A workspace edit represents changes to many resources managed in the workspace. The edit + * should either provide `changes` or `documentChanges`. If documentChanges are present + * they are preferred over `changes` if the client can handle versioned document edits. + * + * Since version 3.13.0 a workspace edit can contain resource operations as well. If resource + * operations are present clients need to execute the operations in the order in which they + * are provided. So a workspace edit for example can consist of the following two changes: + * (1) a create file a.txt and (2) a text document edit which insert text into file a.txt. + * + * An invalid sequence (e.g. (1) delete file a.txt and (2) insert text into file a.txt) will + * cause failure of the operation. How the client recovers from the failure is described by + * the client capability: `workspace.workspaceEdit.failureHandling` + */ +type WorkspaceEdit struct { // line 3192 + // Holds changes to existing resources. + Changes map[DocumentURI][]TextEdit `json:"changes,omitempty"` + /* + * Depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes + * are either an array of `TextDocumentEdit`s to express changes to n different text documents + * where each text document edit addresses a specific version of a text document. Or it can contain + * above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations. + * + * Whether a client supports versioned document edits is expressed via + * `workspace.workspaceEdit.documentChanges` client capability. + * + * If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then + * only plain `TextEdit`s using the `changes` property are supported. + */ + DocumentChanges []DocumentChanges `json:"documentChanges,omitempty"` + /* + * A map of change annotations that can be referenced in `AnnotatedTextEdit`s or create, rename and + * delete file / folder operations. + * + * Whether clients honor this property depends on the client capability `workspace.changeAnnotationSupport`. + * + * @since 3.16.0 + */ + ChangeAnnotations map[ChangeAnnotationIdentifier]ChangeAnnotation `json:"changeAnnotations,omitempty"` +} +type WorkspaceEditClientCapabilities struct { // line 10767 + // The client supports versioned document changes in `WorkspaceEdit`s + DocumentChanges bool `json:"documentChanges,omitempty"` + /* + * The resource operations the client supports. Clients should at least + * support 'create', 'rename' and 'delete' files and folders. + * + * @since 3.13.0 + */ + ResourceOperations []ResourceOperationKind `json:"resourceOperations,omitempty"` + /* + * The failure handling strategy of a client if applying the workspace edit + * fails. + * + * @since 3.13.0 + */ + FailureHandling FailureHandlingKind `json:"failureHandling,omitempty"` + /* + * Whether the client normalizes line endings to the client specific + * setting. + * If set to `true` the client will normalize line ending characters + * in a workspace edit to the client-specified new line + * character. + * + * @since 3.16.0 + */ + NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"` + /* + * Whether the client in general supports change annotations on text edits, + * create file, rename file and delete file changes. + * + * @since 3.16.0 + */ + ChangeAnnotationSupport *PChangeAnnotationSupportPWorkspaceEdit `json:"changeAnnotationSupport,omitempty"` +} + +// A workspace folder inside a client. +type WorkspaceFolder struct { // line 2162 + // The associated URI for this workspace folder. + URI URI `json:"uri"` + /* + * The name of the workspace folder. Used to refer to this + * workspace folder in the user interface. + */ + Name string `json:"name"` +} +type WorkspaceFolders5Gn struct { // line 9932 + // The server has support for workspace folders + Supported bool `json:"supported,omitempty"` + /* + * Whether the server wants to receive workspace folder + * change notifications. + * + * If a string is provided the string is treated as an ID + * under which the notification is registered on the client + * side. The ID can be used to unregister for these events + * using the `client/unregisterCapability` request. + */ + ChangeNotifications string `json:"changeNotifications,omitempty"` +} + +// The workspace folder change event. +type WorkspaceFoldersChangeEvent struct { // line 6367 + // The array of added workspace folders + Added []WorkspaceFolder `json:"added"` + // The array of the removed workspace folders + Removed []WorkspaceFolder `json:"removed"` +} +type WorkspaceFoldersInitializeParams struct { // line 7781 + /* + * The workspace folders configured in the client when the server starts. + * + * This property is only available if the client supports workspace folders. + * It can be `null` if the client supports workspace folders but none are + * configured. + * + * @since 3.6.0 + */ + WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"` +} +type WorkspaceFoldersServerCapabilities struct { // line 9932 + // The server has support for workspace folders + Supported bool `json:"supported,omitempty"` + /* + * Whether the server wants to receive workspace folder + * change notifications. + * + * If a string is provided the string is treated as an ID + * under which the notification is registered on the client + * side. The ID can be used to unregister for these events + * using the `client/unregisterCapability` request. + */ + ChangeNotifications string `json:"changeNotifications,omitempty"` +} + +/* + * A full document diagnostic report for a workspace diagnostic result. + * + * @since 3.17.0 + */ +type WorkspaceFullDocumentDiagnosticReport struct { // line 9521 + // The URI for which diagnostic information is reported. + URI DocumentURI `json:"uri"` + /* + * The version number for which the diagnostics are reported. + * If the document is not marked as open `null` can be provided. + */ + Version int32 `json:"version"` + FullDocumentDiagnosticReport +} + +/* + * A special workspace symbol that supports locations without a range. + * + * See also SymbolInformation. + * + * @since 3.17.0 + */ +type WorkspaceSymbol struct { // line 5514 + /* + * The location of the symbol. Whether a server is allowed to + * return a location without a range depends on the client + * capability `workspace.symbol.resolveSupport`. + * + * See SymbolInformation#location for more details. + */ + Location OrPLocation_workspace_symbol `json:"location"` + /* + * A data entry field that is preserved on a workspace symbol between a + * workspace symbol request and a workspace symbol resolve request. + */ + Data interface{} `json:"data,omitempty"` + BaseSymbolInformation +} + +// Client capabilities for a {@link WorkspaceSymbolRequest}. +type WorkspaceSymbolClientCapabilities struct { // line 10874 + // Symbol request supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + // Specific capabilities for the `SymbolKind` in the `workspace/symbol` request. + SymbolKind *PSymbolKindPSymbol `json:"symbolKind,omitempty"` + /* + * The client supports tags on `SymbolInformation`. + * Clients supporting tags have to handle unknown tags gracefully. + * + * @since 3.16.0 + */ + TagSupport *PTagSupportPSymbol `json:"tagSupport,omitempty"` + /* + * The client support partial workspace symbols. The client will send the + * request `workspaceSymbol/resolve` to the server to resolve additional + * properties. + * + * @since 3.17.0 + */ + ResolveSupport *PResolveSupportPSymbol `json:"resolveSupport,omitempty"` +} + +// Server capabilities for a {@link WorkspaceSymbolRequest}. +type WorkspaceSymbolOptions struct { // line 9104 + /* + * The server provides support to resolve additional + * information for a workspace symbol. + * + * @since 3.17.0 + */ + ResolveProvider bool `json:"resolveProvider,omitempty"` + WorkDoneProgressOptions +} + +// The parameters of a {@link WorkspaceSymbolRequest}. +type WorkspaceSymbolParams struct { // line 5490 + /* + * A query string to filter symbols by. Clients may send an empty + * string here to request all symbols. + */ + Query string `json:"query"` + WorkDoneProgressParams + PartialResultParams +} + +// Registration options for a {@link WorkspaceSymbolRequest}. +type WorkspaceSymbolRegistrationOptions struct { // line 5563 + WorkspaceSymbolOptions +} + +/* + * An unchanged document diagnostic report for a workspace diagnostic result. + * + * @since 3.17.0 + */ +type WorkspaceUnchangedDocumentDiagnosticReport struct { // line 9559 + // The URI for which diagnostic information is reported. + URI DocumentURI `json:"uri"` + /* + * The version number for which the diagnostics are reported. + * If the document is not marked as open `null` can be provided. + */ + Version int32 `json:"version"` + UnchangedDocumentDiagnosticReport +} + +// The initialize parameters +type XInitializeParams struct { // line 7649 + /* + * The process Id of the parent process that started + * the server. + * + * Is `null` if the process has not been started by another process. + * If the parent process is not alive then the server should exit. + */ + ProcessID int32 `json:"processId"` + /* + * Information about the client + * + * @since 3.15.0 + */ + ClientInfo Msg_XInitializeParams_clientInfo `json:"clientInfo,omitempty"` + /* + * The locale the client is currently showing the user interface + * in. This must not necessarily be the locale of the operating + * system. + * + * Uses IETF language tags as the value's syntax + * (See https://en.wikipedia.org/wiki/IETF_language_tag) + * + * @since 3.16.0 + */ + Locale string `json:"locale,omitempty"` + /* + * The rootPath of the workspace. Is null + * if no folder is open. + * + * @deprecated in favour of rootUri. + */ + RootPath string `json:"rootPath,omitempty"` + /* + * The rootUri of the workspace. Is null if no + * folder is open. If both `rootPath` and `rootUri` are set + * `rootUri` wins. + * + * @deprecated in favour of workspaceFolders. + */ + RootURI DocumentURI `json:"rootUri"` + // The capabilities provided by the client (editor or tool) + Capabilities ClientCapabilities `json:"capabilities"` + // User provided initialization options. + InitializationOptions interface{} `json:"initializationOptions,omitempty"` + // The initial trace setting. If omitted trace is disabled ('off'). + Trace *TraceValues `json:"trace,omitempty"` + WorkDoneProgressParams +} + +// The initialize parameters +type _InitializeParams struct { // line 7649 + /* + * The process Id of the parent process that started + * the server. + * + * Is `null` if the process has not been started by another process. + * If the parent process is not alive then the server should exit. + */ + ProcessID int32 `json:"processId"` + /* + * Information about the client + * + * @since 3.15.0 + */ + ClientInfo Msg_XInitializeParams_clientInfo `json:"clientInfo,omitempty"` + /* + * The locale the client is currently showing the user interface + * in. This must not necessarily be the locale of the operating + * system. + * + * Uses IETF language tags as the value's syntax + * (See https://en.wikipedia.org/wiki/IETF_language_tag) + * + * @since 3.16.0 + */ + Locale string `json:"locale,omitempty"` + /* + * The rootPath of the workspace. Is null + * if no folder is open. + * + * @deprecated in favour of rootUri. + */ + RootPath string `json:"rootPath,omitempty"` + /* + * The rootUri of the workspace. Is null if no + * folder is open. If both `rootPath` and `rootUri` are set + * `rootUri` wins. + * + * @deprecated in favour of workspaceFolders. + */ + RootURI DocumentURI `json:"rootUri"` + // The capabilities provided by the client (editor or tool) + Capabilities ClientCapabilities `json:"capabilities"` + // User provided initialization options. + InitializationOptions interface{} `json:"initializationOptions,omitempty"` + // The initial trace setting. If omitted trace is disabled ('off'). + Trace *TraceValues `json:"trace,omitempty"` + WorkDoneProgressParams +} + +const ( + // A set of predefined code action kinds + // Empty kind. + Empty CodeActionKind = "" // line 13332 + // Base kind for quickfix actions: 'quickfix' + QuickFix CodeActionKind = "quickfix" // line 13337 + // Base kind for refactoring actions: 'refactor' + Refactor CodeActionKind = "refactor" // line 13342 + /* + * Base kind for refactoring extraction actions: 'refactor.extract' + * + * Example extract actions: + * + * - Extract method + * - Extract function + * - Extract variable + * - Extract interface from class + * - ... + */ + RefactorExtract CodeActionKind = "refactor.extract" // line 13347 + /* + * Base kind for refactoring inline actions: 'refactor.inline' + * + * Example inline actions: + * + * - Inline function + * - Inline variable + * - Inline constant + * - ... + */ + RefactorInline CodeActionKind = "refactor.inline" // line 13352 + /* + * Base kind for refactoring rewrite actions: 'refactor.rewrite' + * + * Example rewrite actions: + * + * - Convert JavaScript function to class + * - Add or remove parameter + * - Encapsulate field + * - Make method static + * - Move method to base class + * - ... + */ + RefactorRewrite CodeActionKind = "refactor.rewrite" // line 13357 + /* + * Base kind for source actions: `source` + * + * Source code actions apply to the entire file. + */ + Source CodeActionKind = "source" // line 13362 + // Base kind for an organize imports source action: `source.organizeImports` + SourceOrganizeImports CodeActionKind = "source.organizeImports" // line 13367 + /* + * Base kind for auto-fix source actions: `source.fixAll`. + * + * Fix all actions automatically fix errors that have a clear fix that do not require user input. + * They should not suppress errors or perform unsafe fixes such as generating new types or classes. + * + * @since 3.15.0 + */ + SourceFixAll CodeActionKind = "source.fixAll" // line 13372 + /* + * The reason why code actions were requested. + * + * @since 3.17.0 + */ + // Code actions were explicitly requested by the user or by an extension. + CodeActionInvoked CodeActionTriggerKind = 1 // line 13612 + /* + * Code actions were requested automatically. + * + * This typically happens when current selection in a file changes, but can + * also be triggered when file content changes. + */ + CodeActionAutomatic CodeActionTriggerKind = 2 // line 13617 + // The kind of a completion entry. + TextCompletion CompletionItemKind = 1 // line 13140 + MethodCompletion CompletionItemKind = 2 // line 13144 + FunctionCompletion CompletionItemKind = 3 // line 13148 + ConstructorCompletion CompletionItemKind = 4 // line 13152 + FieldCompletion CompletionItemKind = 5 // line 13156 + VariableCompletion CompletionItemKind = 6 // line 13160 + ClassCompletion CompletionItemKind = 7 // line 13164 + InterfaceCompletion CompletionItemKind = 8 // line 13168 + ModuleCompletion CompletionItemKind = 9 // line 13172 + PropertyCompletion CompletionItemKind = 10 // line 13176 + UnitCompletion CompletionItemKind = 11 // line 13180 + ValueCompletion CompletionItemKind = 12 // line 13184 + EnumCompletion CompletionItemKind = 13 // line 13188 + KeywordCompletion CompletionItemKind = 14 // line 13192 + SnippetCompletion CompletionItemKind = 15 // line 13196 + ColorCompletion CompletionItemKind = 16 // line 13200 + FileCompletion CompletionItemKind = 17 // line 13204 + ReferenceCompletion CompletionItemKind = 18 // line 13208 + FolderCompletion CompletionItemKind = 19 // line 13212 + EnumMemberCompletion CompletionItemKind = 20 // line 13216 + ConstantCompletion CompletionItemKind = 21 // line 13220 + StructCompletion CompletionItemKind = 22 // line 13224 + EventCompletion CompletionItemKind = 23 // line 13228 + OperatorCompletion CompletionItemKind = 24 // line 13232 + TypeParameterCompletion CompletionItemKind = 25 // line 13236 + /* + * Completion item tags are extra annotations that tweak the rendering of a completion + * item. + * + * @since 3.15.0 + */ + // Render a completion as obsolete, usually using a strike-out. + ComplDeprecated CompletionItemTag = 1 // line 13250 + // How a completion was triggered + /* + * Completion was triggered by typing an identifier (24x7 code + * complete), manual invocation (e.g Ctrl+Space) or via API. + */ + Invoked CompletionTriggerKind = 1 // line 13561 + /* + * Completion was triggered by a trigger character specified by + * the `triggerCharacters` properties of the `CompletionRegistrationOptions`. + */ + TriggerCharacter CompletionTriggerKind = 2 // line 13566 + // Completion was re-triggered as current completion list is incomplete + TriggerForIncompleteCompletions CompletionTriggerKind = 3 // line 13571 + // The diagnostic's severity. + // Reports an error. + SeverityError DiagnosticSeverity = 1 // line 13510 + // Reports a warning. + SeverityWarning DiagnosticSeverity = 2 // line 13515 + // Reports an information. + SeverityInformation DiagnosticSeverity = 3 // line 13520 + // Reports a hint. + SeverityHint DiagnosticSeverity = 4 // line 13525 + /* + * The diagnostic tags. + * + * @since 3.15.0 + */ + /* + * Unused or unnecessary code. + * + * Clients are allowed to render diagnostics with this tag faded out instead of having + * an error squiggle. + */ + Unnecessary DiagnosticTag = 1 // line 13540 + /* + * Deprecated or obsolete code. + * + * Clients are allowed to rendered diagnostics with this tag strike through. + */ + Deprecated DiagnosticTag = 2 // line 13545 + /* + * The document diagnostic report kinds. + * + * @since 3.17.0 + */ + /* + * A diagnostic report with a full + * set of problems. + */ + DiagnosticFull DocumentDiagnosticReportKind = "full" // line 12728 + /* + * A report indicating that the last + * returned report is still accurate. + */ + DiagnosticUnchanged DocumentDiagnosticReportKind = "unchanged" // line 12733 + // A document highlight kind. + // A textual occurrence. + Text DocumentHighlightKind = 1 // line 13307 + // Read-access of a symbol, like reading a variable. + Read DocumentHighlightKind = 2 // line 13312 + // Write-access of a symbol, like writing to a variable. + Write DocumentHighlightKind = 3 // line 13317 + // Predefined error codes. + ParseError ErrorCodes = -32700 // line 12749 + InvalidRequest ErrorCodes = -32600 // line 12753 + MethodNotFound ErrorCodes = -32601 // line 12757 + InvalidParams ErrorCodes = -32602 // line 12761 + InternalError ErrorCodes = -32603 // line 12765 + /* + * Error code indicating that a server received a notification or + * request before the server has received the `initialize` request. + */ + ServerNotInitialized ErrorCodes = -32002 // line 12769 + UnknownErrorCode ErrorCodes = -32001 // line 12774 + /* + * Applying the workspace change is simply aborted if one of the changes provided + * fails. All operations executed before the failing operation stay executed. + */ + Abort FailureHandlingKind = "abort" // line 13699 + /* + * All operations are executed transactional. That means they either all + * succeed or no changes at all are applied to the workspace. + */ + Transactional FailureHandlingKind = "transactional" // line 13704 + /* + * If the workspace edit contains only textual file changes they are executed transactional. + * If resource changes (create, rename or delete file) are part of the change the failure + * handling strategy is abort. + */ + TextOnlyTransactional FailureHandlingKind = "textOnlyTransactional" // line 13709 + /* + * The client tries to undo the operations already executed. But there is no + * guarantee that this is succeeding. + */ + Undo FailureHandlingKind = "undo" // line 13714 + // The file event type + // The file got created. + Created FileChangeType = 1 // line 13460 + // The file got changed. + Changed FileChangeType = 2 // line 13465 + // The file got deleted. + Deleted FileChangeType = 3 // line 13470 + /* + * A pattern kind describing if a glob pattern matches a file a folder or + * both. + * + * @since 3.16.0 + */ + // The pattern matches a file only. + FilePattern FileOperationPatternKind = "file" // line 13633 + // The pattern matches a folder only. + FolderPattern FileOperationPatternKind = "folder" // line 13638 + // A set of predefined range kinds. + // Folding range for a comment + Comment FoldingRangeKind = "comment" // line 12821 + // Folding range for an import or include + Imports FoldingRangeKind = "imports" // line 12826 + // Folding range for a region (e.g. `#region`) + Region FoldingRangeKind = "region" // line 12831 + /* + * Inlay hint kinds. + * + * @since 3.17.0 + */ + // An inlay hint that for a type annotation. + Type InlayHintKind = 1 // line 13039 + // An inlay hint that is for a parameter. + Parameter InlayHintKind = 2 // line 13044 + /* + * Defines whether the insert text in a completion item should be interpreted as + * plain text or a snippet. + */ + // The primary text to be inserted is treated as a plain string. + PlainTextTextFormat InsertTextFormat = 1 // line 13266 + /* + * The primary text to be inserted is treated as a snippet. + * + * A snippet can define tab stops and placeholders with `$1`, `$2` + * and `${3:foo}`. `$0` defines the final tab stop, it defaults to + * the end of the snippet. Placeholders with equal identifiers are linked, + * that is typing in one will update others too. + * + * See also: https://microsoft.github.io/language-server-protocol/specifications/specification-current/#snippet_syntax + */ + SnippetTextFormat InsertTextFormat = 2 // line 13271 + /* + * How whitespace and indentation is handled during completion + * item insertion. + * + * @since 3.16.0 + */ + /* + * The insertion or replace strings is taken as it is. If the + * value is multi line the lines below the cursor will be + * inserted using the indentation defined in the string value. + * The client will not apply any kind of adjustments to the + * string. + */ + AsIs InsertTextMode = 1 // line 13286 + /* + * The editor adjusts leading whitespace of new lines so that + * they match the indentation up to the cursor of the line for + * which the item is accepted. + * + * Consider a line like this: <2tabs><3tabs>foo. Accepting a + * multi line completion item is indented using 2 tabs and all + * following lines inserted will be indented using 2 tabs as well. + */ + AdjustIndentation InsertTextMode = 2 // line 13291 + /* + * A request failed but it was syntactically correct, e.g the + * method name was known and the parameters were valid. The error + * message should contain human readable information about why + * the request failed. + * + * @since 3.17.0 + */ + RequestFailed LSPErrorCodes = -32803 // line 12789 + /* + * The server cancelled the request. This error code should + * only be used for requests that explicitly support being + * server cancellable. + * + * @since 3.17.0 + */ + ServerCancelled LSPErrorCodes = -32802 // line 12795 + /* + * The server detected that the content of a document got + * modified outside normal conditions. A server should + * NOT send this error code if it detects a content change + * in it unprocessed messages. The result even computed + * on an older state might still be useful for the client. + * + * If a client decides that a result is not of any use anymore + * the client should cancel the request. + */ + ContentModified LSPErrorCodes = -32801 // line 12801 + /* + * The client has canceled a request and a server as detected + * the cancel. + */ + RequestCancelled LSPErrorCodes = -32800 // line 12806 + /* + * Describes the content type that a client supports in various + * result literals like `Hover`, `ParameterInfo` or `CompletionItem`. + * + * Please note that `MarkupKinds` must not start with a `$`. This kinds + * are reserved for internal usage. + */ + // Plain text is supported as a content format + PlainText MarkupKind = "plaintext" // line 13413 + // Markdown is supported as a content format + Markdown MarkupKind = "markdown" // line 13418 + // The message type + // An error message. + Error MessageType = 1 // line 13060 + // A warning message. + Warning MessageType = 2 // line 13065 + // An information message. + Info MessageType = 3 // line 13070 + // A log message. + Log MessageType = 4 // line 13075 + /* + * The moniker kind. + * + * @since 3.16.0 + */ + // The moniker represent a symbol that is imported into a project + Import MonikerKind = "import" // line 13013 + // The moniker represents a symbol that is exported from a project + Export MonikerKind = "export" // line 13018 + /* + * The moniker represents a symbol that is local to a project (e.g. a local + * variable of a function, a class not visible outside the project, ...) + */ + Local MonikerKind = "local" // line 13023 + /* + * A notebook cell kind. + * + * @since 3.17.0 + */ + // A markup-cell is formatted source that is used for display. + Markup NotebookCellKind = 1 // line 13654 + // A code-cell is source code. + Code NotebookCellKind = 2 // line 13659 + /* + * A set of predefined position encoding kinds. + * + * @since 3.17.0 + */ + // Character offsets count UTF-8 code units. + UTF8 PositionEncodingKind = "utf-8" // line 13433 + /* + * Character offsets count UTF-16 code units. + * + * This is the default and must always be supported + * by servers + */ + UTF16 PositionEncodingKind = "utf-16" // line 13438 + /* + * Character offsets count UTF-32 code units. + * + * Implementation note: these are the same as Unicode code points, + * so this `PositionEncodingKind` may also be used for an + * encoding-agnostic representation of character offsets. + */ + UTF32 PositionEncodingKind = "utf-32" // line 13443 + /* + * The client's default behavior is to select the identifier + * according the to language's syntax rule. + */ + Identifier PrepareSupportDefaultBehavior = 1 // line 13728 + // Supports creating new files and folders. + Create ResourceOperationKind = "create" // line 13675 + // Supports renaming existing files and folders. + Rename ResourceOperationKind = "rename" // line 13680 + // Supports deleting existing files and folders. + Delete ResourceOperationKind = "delete" // line 13685 + /* + * A set of predefined token modifiers. This set is not fixed + * an clients can specify additional token types via the + * corresponding client capabilities. + * + * @since 3.16.0 + */ + ModDeclaration SemanticTokenModifiers = "declaration" // line 12676 + ModDefinition SemanticTokenModifiers = "definition" // line 12680 + ModReadonly SemanticTokenModifiers = "readonly" // line 12684 + ModStatic SemanticTokenModifiers = "static" // line 12688 + ModDeprecated SemanticTokenModifiers = "deprecated" // line 12692 + ModAbstract SemanticTokenModifiers = "abstract" // line 12696 + ModAsync SemanticTokenModifiers = "async" // line 12700 + ModModification SemanticTokenModifiers = "modification" // line 12704 + ModDocumentation SemanticTokenModifiers = "documentation" // line 12708 + ModDefaultLibrary SemanticTokenModifiers = "defaultLibrary" // line 12712 + /* + * A set of predefined token types. This set is not fixed + * an clients can specify additional token types via the + * corresponding client capabilities. + * + * @since 3.16.0 + */ + NamespaceType SemanticTokenTypes = "namespace" // line 12569 + /* + * Represents a generic type. Acts as a fallback for types which can't be mapped to + * a specific type like class or enum. + */ + TypeType SemanticTokenTypes = "type" // line 12573 + ClassType SemanticTokenTypes = "class" // line 12578 + EnumType SemanticTokenTypes = "enum" // line 12582 + InterfaceType SemanticTokenTypes = "interface" // line 12586 + StructType SemanticTokenTypes = "struct" // line 12590 + TypeParameterType SemanticTokenTypes = "typeParameter" // line 12594 + ParameterType SemanticTokenTypes = "parameter" // line 12598 + VariableType SemanticTokenTypes = "variable" // line 12602 + PropertyType SemanticTokenTypes = "property" // line 12606 + EnumMemberType SemanticTokenTypes = "enumMember" // line 12610 + EventType SemanticTokenTypes = "event" // line 12614 + FunctionType SemanticTokenTypes = "function" // line 12618 + MethodType SemanticTokenTypes = "method" // line 12622 + MacroType SemanticTokenTypes = "macro" // line 12626 + KeywordType SemanticTokenTypes = "keyword" // line 12630 + ModifierType SemanticTokenTypes = "modifier" // line 12634 + CommentType SemanticTokenTypes = "comment" // line 12638 + StringType SemanticTokenTypes = "string" // line 12642 + NumberType SemanticTokenTypes = "number" // line 12646 + RegexpType SemanticTokenTypes = "regexp" // line 12650 + OperatorType SemanticTokenTypes = "operator" // line 12654 + // @since 3.17.0 + DecoratorType SemanticTokenTypes = "decorator" // line 12658 + /* + * How a signature help was triggered. + * + * @since 3.15.0 + */ + // Signature help was invoked manually by the user or by a command. + SigInvoked SignatureHelpTriggerKind = 1 // line 13586 + // Signature help was triggered by a trigger character. + SigTriggerCharacter SignatureHelpTriggerKind = 2 // line 13591 + // Signature help was triggered by the cursor moving or by the document content changing. + SigContentChange SignatureHelpTriggerKind = 3 // line 13596 + // A symbol kind. + File SymbolKind = 1 // line 12847 + Module SymbolKind = 2 // line 12851 + Namespace SymbolKind = 3 // line 12855 + Package SymbolKind = 4 // line 12859 + Class SymbolKind = 5 // line 12863 + Method SymbolKind = 6 // line 12867 + Property SymbolKind = 7 // line 12871 + Field SymbolKind = 8 // line 12875 + Constructor SymbolKind = 9 // line 12879 + Enum SymbolKind = 10 // line 12883 + Interface SymbolKind = 11 // line 12887 + Function SymbolKind = 12 // line 12891 + Variable SymbolKind = 13 // line 12895 + Constant SymbolKind = 14 // line 12899 + String SymbolKind = 15 // line 12903 + Number SymbolKind = 16 // line 12907 + Boolean SymbolKind = 17 // line 12911 + Array SymbolKind = 18 // line 12915 + Object SymbolKind = 19 // line 12919 + Key SymbolKind = 20 // line 12923 + Null SymbolKind = 21 // line 12927 + EnumMember SymbolKind = 22 // line 12931 + Struct SymbolKind = 23 // line 12935 + Event SymbolKind = 24 // line 12939 + Operator SymbolKind = 25 // line 12943 + TypeParameter SymbolKind = 26 // line 12947 + /* + * Symbol tags are extra annotations that tweak the rendering of a symbol. + * + * @since 3.16 + */ + // Render a symbol as obsolete, usually using a strike-out. + DeprecatedSymbol SymbolTag = 1 // line 12961 + // Represents reasons why a text document is saved. + /* + * Manually triggered, e.g. by the user pressing save, by starting debugging, + * or by an API call. + */ + Manual TextDocumentSaveReason = 1 // line 13115 + // Automatic after a delay. + AfterDelay TextDocumentSaveReason = 2 // line 13120 + // When the editor lost focus. + FocusOut TextDocumentSaveReason = 3 // line 13125 + /* + * Defines how the host (editor) should sync + * document changes to the language server. + */ + // Documents should not be synced at all. + None TextDocumentSyncKind = 0 // line 13090 + /* + * Documents are synced by always sending the full content + * of the document. + */ + Full TextDocumentSyncKind = 1 // line 13095 + /* + * Documents are synced by sending the full content on open. + * After that only incremental updates to the document are + * send. + */ + Incremental TextDocumentSyncKind = 2 // line 13100 + Relative TokenFormat = "relative" // line 13742 + // Turn tracing off. + Off TraceValues = "off" // line 13389 + // Trace messages only. + Messages TraceValues = "messages" // line 13394 + // Verbose message tracing. + Verbose TraceValues = "verbose" // line 13399 + /* + * Moniker uniqueness level to define scope of the moniker. + * + * @since 3.16.0 + */ + // The moniker is only unique inside a document + Document UniquenessLevel = "document" // line 12977 + // The moniker is unique inside a project for which a dump got created + Project UniquenessLevel = "project" // line 12982 + // The moniker is unique inside the group to which a project belongs + Group UniquenessLevel = "group" // line 12987 + // The moniker is unique inside the moniker scheme. + Scheme UniquenessLevel = "scheme" // line 12992 + // The moniker is globally unique + Global UniquenessLevel = "global" // line 12997 + // Interested in create events. + WatchCreate WatchKind = 1 // line 13485 + // Interested in change events + WatchChange WatchKind = 2 // line 13490 + // Interested in delete events + WatchDelete WatchKind = 4 // line 13495 +) diff --git a/gopls/internal/lsp/protocol/tsserver.go b/gopls/internal/lsp/protocol/tsserver.go new file mode 100644 index 00000000000..8669a4e798a --- /dev/null +++ b/gopls/internal/lsp/protocol/tsserver.go @@ -0,0 +1,1159 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated for LSP. DO NOT EDIT. + +package protocol + +// Code generated from version 3.17.0 of protocol/metaModel.json. +// git hash 9b742021fb04ad081aa3676a9eecf4fa612084b4 (as of 2023-01-30) + +import ( + "context" + "encoding/json" + + "golang.org/x/tools/internal/jsonrpc2" +) + +type Server interface { + Progress(context.Context, *ProgressParams) error // $/progress + SetTrace(context.Context, *SetTraceParams) error // $/setTrace + IncomingCalls(context.Context, *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall, error) // callHierarchy/incomingCalls + OutgoingCalls(context.Context, *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall, error) // callHierarchy/outgoingCalls + ResolveCodeAction(context.Context, *CodeAction) (*CodeAction, error) // codeAction/resolve + ResolveCodeLens(context.Context, *CodeLens) (*CodeLens, error) // codeLens/resolve + ResolveCompletionItem(context.Context, *CompletionItem) (*CompletionItem, error) // completionItem/resolve + ResolveDocumentLink(context.Context, *DocumentLink) (*DocumentLink, error) // documentLink/resolve + Exit(context.Context) error // exit + Initialize(context.Context, *ParamInitialize) (*InitializeResult, error) // initialize + Initialized(context.Context, *InitializedParams) error // initialized + Resolve(context.Context, *InlayHint) (*InlayHint, error) // inlayHint/resolve + DidChangeNotebookDocument(context.Context, *DidChangeNotebookDocumentParams) error // notebookDocument/didChange + DidCloseNotebookDocument(context.Context, *DidCloseNotebookDocumentParams) error // notebookDocument/didClose + DidOpenNotebookDocument(context.Context, *DidOpenNotebookDocumentParams) error // notebookDocument/didOpen + DidSaveNotebookDocument(context.Context, *DidSaveNotebookDocumentParams) error // notebookDocument/didSave + Shutdown(context.Context) error // shutdown + CodeAction(context.Context, *CodeActionParams) ([]CodeAction, error) // textDocument/codeAction + CodeLens(context.Context, *CodeLensParams) ([]CodeLens, error) // textDocument/codeLens + ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error) // textDocument/colorPresentation + Completion(context.Context, *CompletionParams) (*CompletionList, error) // textDocument/completion + Declaration(context.Context, *DeclarationParams) (*Or_textDocument_declaration, error) // textDocument/declaration + Definition(context.Context, *DefinitionParams) ([]Location, error) // textDocument/definition + Diagnostic(context.Context, *string) (*string, error) // textDocument/diagnostic + DidChange(context.Context, *DidChangeTextDocumentParams) error // textDocument/didChange + DidClose(context.Context, *DidCloseTextDocumentParams) error // textDocument/didClose + DidOpen(context.Context, *DidOpenTextDocumentParams) error // textDocument/didOpen + DidSave(context.Context, *DidSaveTextDocumentParams) error // textDocument/didSave + DocumentColor(context.Context, *DocumentColorParams) ([]ColorInformation, error) // textDocument/documentColor + DocumentHighlight(context.Context, *DocumentHighlightParams) ([]DocumentHighlight, error) // textDocument/documentHighlight + DocumentLink(context.Context, *DocumentLinkParams) ([]DocumentLink, error) // textDocument/documentLink + DocumentSymbol(context.Context, *DocumentSymbolParams) ([]interface{}, error) // textDocument/documentSymbol + FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange, error) // textDocument/foldingRange + Formatting(context.Context, *DocumentFormattingParams) ([]TextEdit, error) // textDocument/formatting + Hover(context.Context, *HoverParams) (*Hover, error) // textDocument/hover + Implementation(context.Context, *ImplementationParams) ([]Location, error) // textDocument/implementation + InlayHint(context.Context, *InlayHintParams) ([]InlayHint, error) // textDocument/inlayHint + InlineValue(context.Context, *InlineValueParams) ([]InlineValue, error) // textDocument/inlineValue + LinkedEditingRange(context.Context, *LinkedEditingRangeParams) (*LinkedEditingRanges, error) // textDocument/linkedEditingRange + Moniker(context.Context, *MonikerParams) ([]Moniker, error) // textDocument/moniker + OnTypeFormatting(context.Context, *DocumentOnTypeFormattingParams) ([]TextEdit, error) // textDocument/onTypeFormatting + PrepareCallHierarchy(context.Context, *CallHierarchyPrepareParams) ([]CallHierarchyItem, error) // textDocument/prepareCallHierarchy + PrepareRename(context.Context, *PrepareRenameParams) (*PrepareRename2Gn, error) // textDocument/prepareRename + PrepareTypeHierarchy(context.Context, *TypeHierarchyPrepareParams) ([]TypeHierarchyItem, error) // textDocument/prepareTypeHierarchy + RangeFormatting(context.Context, *DocumentRangeFormattingParams) ([]TextEdit, error) // textDocument/rangeFormatting + References(context.Context, *ReferenceParams) ([]Location, error) // textDocument/references + Rename(context.Context, *RenameParams) (*WorkspaceEdit, error) // textDocument/rename + SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange, error) // textDocument/selectionRange + SemanticTokensFull(context.Context, *SemanticTokensParams) (*SemanticTokens, error) // textDocument/semanticTokens/full + SemanticTokensFullDelta(context.Context, *SemanticTokensDeltaParams) (interface{}, error) // textDocument/semanticTokens/full/delta + SemanticTokensRange(context.Context, *SemanticTokensRangeParams) (*SemanticTokens, error) // textDocument/semanticTokens/range + SignatureHelp(context.Context, *SignatureHelpParams) (*SignatureHelp, error) // textDocument/signatureHelp + TypeDefinition(context.Context, *TypeDefinitionParams) ([]Location, error) // textDocument/typeDefinition + WillSave(context.Context, *WillSaveTextDocumentParams) error // textDocument/willSave + WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit, error) // textDocument/willSaveWaitUntil + Subtypes(context.Context, *TypeHierarchySubtypesParams) ([]TypeHierarchyItem, error) // typeHierarchy/subtypes + Supertypes(context.Context, *TypeHierarchySupertypesParams) ([]TypeHierarchyItem, error) // typeHierarchy/supertypes + WorkDoneProgressCancel(context.Context, *WorkDoneProgressCancelParams) error // window/workDoneProgress/cancel + DiagnosticWorkspace(context.Context, *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) // workspace/diagnostic + DidChangeConfiguration(context.Context, *DidChangeConfigurationParams) error // workspace/didChangeConfiguration + DidChangeWatchedFiles(context.Context, *DidChangeWatchedFilesParams) error // workspace/didChangeWatchedFiles + DidChangeWorkspaceFolders(context.Context, *DidChangeWorkspaceFoldersParams) error // workspace/didChangeWorkspaceFolders + DidCreateFiles(context.Context, *CreateFilesParams) error // workspace/didCreateFiles + DidDeleteFiles(context.Context, *DeleteFilesParams) error // workspace/didDeleteFiles + DidRenameFiles(context.Context, *RenameFilesParams) error // workspace/didRenameFiles + ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{}, error) // workspace/executeCommand + Symbol(context.Context, *WorkspaceSymbolParams) ([]SymbolInformation, error) // workspace/symbol + WillCreateFiles(context.Context, *CreateFilesParams) (*WorkspaceEdit, error) // workspace/willCreateFiles + WillDeleteFiles(context.Context, *DeleteFilesParams) (*WorkspaceEdit, error) // workspace/willDeleteFiles + WillRenameFiles(context.Context, *RenameFilesParams) (*WorkspaceEdit, error) // workspace/willRenameFiles + ResolveWorkspaceSymbol(context.Context, *WorkspaceSymbol) (*WorkspaceSymbol, error) // workspaceSymbol/resolve + NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) +} + +func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { + switch r.Method() { + case "$/progress": + var params ProgressParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.Progress(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "$/setTrace": + var params SetTraceParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.SetTrace(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "callHierarchy/incomingCalls": + var params CallHierarchyIncomingCallsParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.IncomingCalls(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "callHierarchy/outgoingCalls": + var params CallHierarchyOutgoingCallsParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.OutgoingCalls(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "codeAction/resolve": + var params CodeAction + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ResolveCodeAction(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "codeLens/resolve": + var params CodeLens + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ResolveCodeLens(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "completionItem/resolve": + var params CompletionItem + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ResolveCompletionItem(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "documentLink/resolve": + var params DocumentLink + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ResolveDocumentLink(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "exit": + err := server.Exit(ctx) + return true, reply(ctx, nil, err) + case "initialize": + var params ParamInitialize + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Initialize(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "initialized": + var params InitializedParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.Initialized(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "inlayHint/resolve": + var params InlayHint + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Resolve(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "notebookDocument/didChange": + var params DidChangeNotebookDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidChangeNotebookDocument(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "notebookDocument/didClose": + var params DidCloseNotebookDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidCloseNotebookDocument(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "notebookDocument/didOpen": + var params DidOpenNotebookDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidOpenNotebookDocument(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "notebookDocument/didSave": + var params DidSaveNotebookDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidSaveNotebookDocument(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "shutdown": + err := server.Shutdown(ctx) + return true, reply(ctx, nil, err) + case "textDocument/codeAction": + var params CodeActionParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.CodeAction(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/codeLens": + var params CodeLensParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.CodeLens(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/colorPresentation": + var params ColorPresentationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ColorPresentation(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/completion": + var params CompletionParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Completion(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/declaration": + var params DeclarationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Declaration(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/definition": + var params DefinitionParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Definition(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/diagnostic": + var params string + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Diagnostic(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/didChange": + var params DidChangeTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidChange(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/didClose": + var params DidCloseTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidClose(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/didOpen": + var params DidOpenTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidOpen(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/didSave": + var params DidSaveTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidSave(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/documentColor": + var params DocumentColorParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.DocumentColor(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/documentHighlight": + var params DocumentHighlightParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.DocumentHighlight(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/documentLink": + var params DocumentLinkParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.DocumentLink(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/documentSymbol": + var params DocumentSymbolParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.DocumentSymbol(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/foldingRange": + var params FoldingRangeParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.FoldingRange(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/formatting": + var params DocumentFormattingParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Formatting(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/hover": + var params HoverParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Hover(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/implementation": + var params ImplementationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Implementation(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/inlayHint": + var params InlayHintParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.InlayHint(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/inlineValue": + var params InlineValueParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.InlineValue(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/linkedEditingRange": + var params LinkedEditingRangeParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.LinkedEditingRange(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/moniker": + var params MonikerParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Moniker(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/onTypeFormatting": + var params DocumentOnTypeFormattingParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.OnTypeFormatting(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/prepareCallHierarchy": + var params CallHierarchyPrepareParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.PrepareCallHierarchy(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/prepareRename": + var params PrepareRenameParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.PrepareRename(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/prepareTypeHierarchy": + var params TypeHierarchyPrepareParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.PrepareTypeHierarchy(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/rangeFormatting": + var params DocumentRangeFormattingParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.RangeFormatting(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/references": + var params ReferenceParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.References(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/rename": + var params RenameParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Rename(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/selectionRange": + var params SelectionRangeParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.SelectionRange(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/semanticTokens/full": + var params SemanticTokensParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.SemanticTokensFull(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/semanticTokens/full/delta": + var params SemanticTokensDeltaParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.SemanticTokensFullDelta(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/semanticTokens/range": + var params SemanticTokensRangeParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.SemanticTokensRange(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/signatureHelp": + var params SignatureHelpParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.SignatureHelp(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/typeDefinition": + var params TypeDefinitionParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.TypeDefinition(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "textDocument/willSave": + var params WillSaveTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.WillSave(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "textDocument/willSaveWaitUntil": + var params WillSaveTextDocumentParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.WillSaveWaitUntil(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "typeHierarchy/subtypes": + var params TypeHierarchySubtypesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Subtypes(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "typeHierarchy/supertypes": + var params TypeHierarchySupertypesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Supertypes(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "window/workDoneProgress/cancel": + var params WorkDoneProgressCancelParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.WorkDoneProgressCancel(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/diagnostic": + var params WorkspaceDiagnosticParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.DiagnosticWorkspace(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/didChangeConfiguration": + var params DidChangeConfigurationParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidChangeConfiguration(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/didChangeWatchedFiles": + var params DidChangeWatchedFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidChangeWatchedFiles(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/didChangeWorkspaceFolders": + var params DidChangeWorkspaceFoldersParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidChangeWorkspaceFolders(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/didCreateFiles": + var params CreateFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidCreateFiles(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/didDeleteFiles": + var params DeleteFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidDeleteFiles(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/didRenameFiles": + var params RenameFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + err := server.DidRenameFiles(ctx, ¶ms) + return true, reply(ctx, nil, err) + case "workspace/executeCommand": + var params ExecuteCommandParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ExecuteCommand(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/symbol": + var params WorkspaceSymbolParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.Symbol(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/willCreateFiles": + var params CreateFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.WillCreateFiles(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/willDeleteFiles": + var params DeleteFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.WillDeleteFiles(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspace/willRenameFiles": + var params RenameFilesParams + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.WillRenameFiles(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + case "workspaceSymbol/resolve": + var params WorkspaceSymbol + if err := json.Unmarshal(r.Params(), ¶ms); err != nil { + return true, sendParseError(ctx, reply, err) + } + resp, err := server.ResolveWorkspaceSymbol(ctx, ¶ms) + if err != nil { + return true, reply(ctx, nil, err) + } + return true, reply(ctx, resp, nil) + default: + return false, nil + } +} + +func (s *serverDispatcher) Progress(ctx context.Context, params *ProgressParams) error { + return s.sender.Notify(ctx, "$/progress", params) +} +func (s *serverDispatcher) SetTrace(ctx context.Context, params *SetTraceParams) error { + return s.sender.Notify(ctx, "$/setTrace", params) +} +func (s *serverDispatcher) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall, error) { + var result []CallHierarchyIncomingCall + if err := s.sender.Call(ctx, "callHierarchy/incomingCalls", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall, error) { + var result []CallHierarchyOutgoingCall + if err := s.sender.Call(ctx, "callHierarchy/outgoingCalls", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ResolveCodeAction(ctx context.Context, params *CodeAction) (*CodeAction, error) { + var result *CodeAction + if err := s.sender.Call(ctx, "codeAction/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ResolveCodeLens(ctx context.Context, params *CodeLens) (*CodeLens, error) { + var result *CodeLens + if err := s.sender.Call(ctx, "codeLens/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ResolveCompletionItem(ctx context.Context, params *CompletionItem) (*CompletionItem, error) { + var result *CompletionItem + if err := s.sender.Call(ctx, "completionItem/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ResolveDocumentLink(ctx context.Context, params *DocumentLink) (*DocumentLink, error) { + var result *DocumentLink + if err := s.sender.Call(ctx, "documentLink/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Exit(ctx context.Context) error { + return s.sender.Notify(ctx, "exit", nil) +} +func (s *serverDispatcher) Initialize(ctx context.Context, params *ParamInitialize) (*InitializeResult, error) { + var result *InitializeResult + if err := s.sender.Call(ctx, "initialize", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Initialized(ctx context.Context, params *InitializedParams) error { + return s.sender.Notify(ctx, "initialized", params) +} +func (s *serverDispatcher) Resolve(ctx context.Context, params *InlayHint) (*InlayHint, error) { + var result *InlayHint + if err := s.sender.Call(ctx, "inlayHint/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DidChangeNotebookDocument(ctx context.Context, params *DidChangeNotebookDocumentParams) error { + return s.sender.Notify(ctx, "notebookDocument/didChange", params) +} +func (s *serverDispatcher) DidCloseNotebookDocument(ctx context.Context, params *DidCloseNotebookDocumentParams) error { + return s.sender.Notify(ctx, "notebookDocument/didClose", params) +} +func (s *serverDispatcher) DidOpenNotebookDocument(ctx context.Context, params *DidOpenNotebookDocumentParams) error { + return s.sender.Notify(ctx, "notebookDocument/didOpen", params) +} +func (s *serverDispatcher) DidSaveNotebookDocument(ctx context.Context, params *DidSaveNotebookDocumentParams) error { + return s.sender.Notify(ctx, "notebookDocument/didSave", params) +} +func (s *serverDispatcher) Shutdown(ctx context.Context) error { + return s.sender.Call(ctx, "shutdown", nil, nil) +} +func (s *serverDispatcher) CodeAction(ctx context.Context, params *CodeActionParams) ([]CodeAction, error) { + var result []CodeAction + if err := s.sender.Call(ctx, "textDocument/codeAction", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) CodeLens(ctx context.Context, params *CodeLensParams) ([]CodeLens, error) { + var result []CodeLens + if err := s.sender.Call(ctx, "textDocument/codeLens", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ColorPresentation(ctx context.Context, params *ColorPresentationParams) ([]ColorPresentation, error) { + var result []ColorPresentation + if err := s.sender.Call(ctx, "textDocument/colorPresentation", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Completion(ctx context.Context, params *CompletionParams) (*CompletionList, error) { + var result *CompletionList + if err := s.sender.Call(ctx, "textDocument/completion", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Declaration(ctx context.Context, params *DeclarationParams) (*Or_textDocument_declaration, error) { + var result *Or_textDocument_declaration + if err := s.sender.Call(ctx, "textDocument/declaration", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Definition(ctx context.Context, params *DefinitionParams) ([]Location, error) { + var result []Location + if err := s.sender.Call(ctx, "textDocument/definition", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Diagnostic(ctx context.Context, params *string) (*string, error) { + var result *string + if err := s.sender.Call(ctx, "textDocument/diagnostic", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) error { + return s.sender.Notify(ctx, "textDocument/didChange", params) +} +func (s *serverDispatcher) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) error { + return s.sender.Notify(ctx, "textDocument/didClose", params) +} +func (s *serverDispatcher) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) error { + return s.sender.Notify(ctx, "textDocument/didOpen", params) +} +func (s *serverDispatcher) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) error { + return s.sender.Notify(ctx, "textDocument/didSave", params) +} +func (s *serverDispatcher) DocumentColor(ctx context.Context, params *DocumentColorParams) ([]ColorInformation, error) { + var result []ColorInformation + if err := s.sender.Call(ctx, "textDocument/documentColor", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) ([]DocumentHighlight, error) { + var result []DocumentHighlight + if err := s.sender.Call(ctx, "textDocument/documentHighlight", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DocumentLink(ctx context.Context, params *DocumentLinkParams) ([]DocumentLink, error) { + var result []DocumentLink + if err := s.sender.Call(ctx, "textDocument/documentLink", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) ([]interface{}, error) { + var result []interface{} + if err := s.sender.Call(ctx, "textDocument/documentSymbol", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) FoldingRange(ctx context.Context, params *FoldingRangeParams) ([]FoldingRange, error) { + var result []FoldingRange + if err := s.sender.Call(ctx, "textDocument/foldingRange", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Formatting(ctx context.Context, params *DocumentFormattingParams) ([]TextEdit, error) { + var result []TextEdit + if err := s.sender.Call(ctx, "textDocument/formatting", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Hover(ctx context.Context, params *HoverParams) (*Hover, error) { + var result *Hover + if err := s.sender.Call(ctx, "textDocument/hover", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Implementation(ctx context.Context, params *ImplementationParams) ([]Location, error) { + var result []Location + if err := s.sender.Call(ctx, "textDocument/implementation", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) InlayHint(ctx context.Context, params *InlayHintParams) ([]InlayHint, error) { + var result []InlayHint + if err := s.sender.Call(ctx, "textDocument/inlayHint", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) InlineValue(ctx context.Context, params *InlineValueParams) ([]InlineValue, error) { + var result []InlineValue + if err := s.sender.Call(ctx, "textDocument/inlineValue", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (*LinkedEditingRanges, error) { + var result *LinkedEditingRanges + if err := s.sender.Call(ctx, "textDocument/linkedEditingRange", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Moniker(ctx context.Context, params *MonikerParams) ([]Moniker, error) { + var result []Moniker + if err := s.sender.Call(ctx, "textDocument/moniker", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) ([]TextEdit, error) { + var result []TextEdit + if err := s.sender.Call(ctx, "textDocument/onTypeFormatting", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) ([]CallHierarchyItem, error) { + var result []CallHierarchyItem + if err := s.sender.Call(ctx, "textDocument/prepareCallHierarchy", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) PrepareRename(ctx context.Context, params *PrepareRenameParams) (*PrepareRename2Gn, error) { + var result *PrepareRename2Gn + if err := s.sender.Call(ctx, "textDocument/prepareRename", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) PrepareTypeHierarchy(ctx context.Context, params *TypeHierarchyPrepareParams) ([]TypeHierarchyItem, error) { + var result []TypeHierarchyItem + if err := s.sender.Call(ctx, "textDocument/prepareTypeHierarchy", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) ([]TextEdit, error) { + var result []TextEdit + if err := s.sender.Call(ctx, "textDocument/rangeFormatting", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) References(ctx context.Context, params *ReferenceParams) ([]Location, error) { + var result []Location + if err := s.sender.Call(ctx, "textDocument/references", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Rename(ctx context.Context, params *RenameParams) (*WorkspaceEdit, error) { + var result *WorkspaceEdit + if err := s.sender.Call(ctx, "textDocument/rename", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange, error) { + var result []SelectionRange + if err := s.sender.Call(ctx, "textDocument/selectionRange", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (*SemanticTokens, error) { + var result *SemanticTokens + if err := s.sender.Call(ctx, "textDocument/semanticTokens/full", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (interface{}, error) { + var result interface{} + if err := s.sender.Call(ctx, "textDocument/semanticTokens/full/delta", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (*SemanticTokens, error) { + var result *SemanticTokens + if err := s.sender.Call(ctx, "textDocument/semanticTokens/range", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (*SignatureHelp, error) { + var result *SignatureHelp + if err := s.sender.Call(ctx, "textDocument/signatureHelp", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) ([]Location, error) { + var result []Location + if err := s.sender.Call(ctx, "textDocument/typeDefinition", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) error { + return s.sender.Notify(ctx, "textDocument/willSave", params) +} +func (s *serverDispatcher) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) ([]TextEdit, error) { + var result []TextEdit + if err := s.sender.Call(ctx, "textDocument/willSaveWaitUntil", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Subtypes(ctx context.Context, params *TypeHierarchySubtypesParams) ([]TypeHierarchyItem, error) { + var result []TypeHierarchyItem + if err := s.sender.Call(ctx, "typeHierarchy/subtypes", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Supertypes(ctx context.Context, params *TypeHierarchySupertypesParams) ([]TypeHierarchyItem, error) { + var result []TypeHierarchyItem + if err := s.sender.Call(ctx, "typeHierarchy/supertypes", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) error { + return s.sender.Notify(ctx, "window/workDoneProgress/cancel", params) +} +func (s *serverDispatcher) DiagnosticWorkspace(ctx context.Context, params *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) { + var result *WorkspaceDiagnosticReport + if err := s.sender.Call(ctx, "workspace/diagnostic", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) error { + return s.sender.Notify(ctx, "workspace/didChangeConfiguration", params) +} +func (s *serverDispatcher) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) error { + return s.sender.Notify(ctx, "workspace/didChangeWatchedFiles", params) +} +func (s *serverDispatcher) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) error { + return s.sender.Notify(ctx, "workspace/didChangeWorkspaceFolders", params) +} +func (s *serverDispatcher) DidCreateFiles(ctx context.Context, params *CreateFilesParams) error { + return s.sender.Notify(ctx, "workspace/didCreateFiles", params) +} +func (s *serverDispatcher) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) error { + return s.sender.Notify(ctx, "workspace/didDeleteFiles", params) +} +func (s *serverDispatcher) DidRenameFiles(ctx context.Context, params *RenameFilesParams) error { + return s.sender.Notify(ctx, "workspace/didRenameFiles", params) +} +func (s *serverDispatcher) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (interface{}, error) { + var result interface{} + if err := s.sender.Call(ctx, "workspace/executeCommand", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) Symbol(ctx context.Context, params *WorkspaceSymbolParams) ([]SymbolInformation, error) { + var result []SymbolInformation + if err := s.sender.Call(ctx, "workspace/symbol", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (*WorkspaceEdit, error) { + var result *WorkspaceEdit + if err := s.sender.Call(ctx, "workspace/willCreateFiles", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (*WorkspaceEdit, error) { + var result *WorkspaceEdit + if err := s.sender.Call(ctx, "workspace/willDeleteFiles", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (*WorkspaceEdit, error) { + var result *WorkspaceEdit + if err := s.sender.Call(ctx, "workspace/willRenameFiles", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) ResolveWorkspaceSymbol(ctx context.Context, params *WorkspaceSymbol) (*WorkspaceSymbol, error) { + var result *WorkspaceSymbol + if err := s.sender.Call(ctx, "workspaceSymbol/resolve", params, &result); err != nil { + return nil, err + } + return result, nil +} +func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) { + var result interface{} + if err := s.sender.Call(ctx, method, params, &result); err != nil { + return nil, err + } + return result, nil +} diff --git a/gopls/internal/lsp/references.go b/gopls/internal/lsp/references.go new file mode 100644 index 00000000000..190c1574150 --- /dev/null +++ b/gopls/internal/lsp/references.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" +) + +func (s *Server) references(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + if snapshot.View().FileKind(fh) == source.Tmpl { + return template.References(ctx, snapshot, fh, params) + } + return source.References(ctx, snapshot, fh, params.Position, params.Context.IncludeDeclaration) +} diff --git a/gopls/internal/lsp/regtest/doc.go b/gopls/internal/lsp/regtest/doc.go new file mode 100644 index 00000000000..4f4c7c020ba --- /dev/null +++ b/gopls/internal/lsp/regtest/doc.go @@ -0,0 +1,157 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package regtest provides a framework for writing gopls regression tests. +// +// User reported regressions are often expressed in terms of editor +// interactions. For example: "When I open my editor in this directory, +// navigate to this file, and change this line, I get a diagnostic that doesn't +// make sense". In these cases reproducing, diagnosing, and writing a test to +// protect against this regression can be difficult. +// +// The regtest package provides an API for developers to express these types of +// user interactions in ordinary Go tests, validate them, and run them in a +// variety of execution modes. +// +// # Test package setup +// +// The regression test package uses a couple of uncommon patterns to reduce +// boilerplate in test bodies. First, it is intended to be imported as "." so +// that helpers do not need to be qualified. Second, it requires some setup +// that is currently implemented in the regtest.Main function, which must be +// invoked by TestMain. Therefore, a minimal regtest testing package looks +// like this: +// +// package lsptests +// +// import ( +// "fmt" +// "testing" +// +// "golang.org/x/tools/gopls/internal/hooks" +// . "golang.org/x/tools/gopls/internal/lsp/regtest" +// ) +// +// func TestMain(m *testing.M) { +// Main(m, hooks.Options) +// } +// +// # Writing a simple regression test +// +// To run a regression test use the regtest.Run function, which accepts a +// txtar-encoded archive defining the initial workspace state. This function +// sets up the workspace in a temporary directory, creates a fake text editor, +// starts gopls, and initializes an LSP session. It then invokes the provided +// test function with an *Env handle encapsulating the newly created +// environment. Because gopls may be run in various modes (as a sidecar or +// daemon process, with different settings), the test runner may perform this +// process multiple times, re-running the test function each time with a new +// environment. +// +// func TestOpenFile(t *testing.T) { +// const files = ` +// -- go.mod -- +// module mod.com +// +// go 1.12 +// -- foo.go -- +// package foo +// ` +// Run(t, files, func(t *testing.T, env *Env) { +// env.OpenFile("foo.go") +// }) +// } +// +// # Configuring Regtest Execution +// +// The regtest package exposes several options that affect the setup process +// described above. To use these options, use the WithOptions function: +// +// WithOptions(opts...).Run(...) +// +// See options.go for a full list of available options. +// +// # Operating on editor state +// +// To operate on editor state within the test body, the Env type provides +// access to the workspace directory (Env.SandBox), text editor (Env.Editor), +// LSP server (Env.Server), and 'awaiter' (Env.Awaiter). +// +// In most cases, operations on these primitive building blocks of the +// regression test environment expect a Context (which should be a child of +// env.Ctx), and return an error. To avoid boilerplate, the Env exposes a set +// of wrappers in wrappers.go for use in scripting: +// +// env.CreateBuffer("c/c.go", "") +// env.EditBuffer("c/c.go", fake.Edit{ +// Text: `package c`, +// }) +// +// These wrappers thread through Env.Ctx, and call t.Fatal on any errors. +// +// # Expressing expectations +// +// The general pattern for a regression test is to script interactions with the +// fake editor and sandbox, and assert that gopls behaves correctly after each +// state change. Unfortunately, this is complicated by the fact that state +// changes are communicated to gopls via unidirectional client->server +// notifications (didOpen, didChange, etc.), and resulting gopls behavior such +// as diagnostics, logs, or messages is communicated back via server->client +// notifications. Therefore, within regression tests we must be able to say "do +// this, and then eventually gopls should do that". To achieve this, the +// regtest package provides a framework for expressing conditions that must +// eventually be met, in terms of the Expectation type. +// +// To express the assertion that "eventually gopls must meet these +// expectations", use env.Await(...): +// +// env.RegexpReplace("x/x.go", `package x`, `package main`) +// env.Await(env.DiagnosticAtRegexp("x/main.go", `fmt`)) +// +// Await evaluates the provided expectations atomically, whenever the client +// receives a state-changing notification from gopls. See expectation.go for a +// full list of available expectations. +// +// A fundamental problem with this model is that if gopls never meets the +// provided expectations, the test runner will hang until the test timeout +// (which defaults to 10m). There are two ways to work around this poor +// behavior: +// +// 1. Use a precondition to define precisely when we expect conditions to be +// met. Gopls provides the OnceMet(precondition, expectations...) pattern +// to express ("once this precondition is met, the following expectations +// must all hold"). To instrument preconditions, gopls uses verbose +// progress notifications to inform the client about ongoing work (see +// CompletedWork). The most common precondition is to wait for gopls to be +// done processing all change notifications, for which the regtest package +// provides the AfterChange helper. For example: +// +// // We expect diagnostics to be cleared after gopls is done processing the +// // didSave notification. +// env.SaveBuffer("a/go.mod") +// env.AfterChange(EmptyDiagnostics("a/go.mod")) +// +// 2. Set a shorter timeout during development, if you expect to be breaking +// tests. By setting the environment variable GOPLS_REGTEST_TIMEOUT=5s, +// regression tests will time out after 5 seconds. +// +// # Tips & Tricks +// +// Here are some tips and tricks for working with regression tests: +// +// 1. Set the environment variable GOPLS_REGTEST_TIMEOUT=5s during development. +// 2. Run tests with -short. This will only run regression tests in the +// default gopls execution mode. +// 3. Use capture groups to narrow regexp positions. All regular-expression +// based positions (such as DiagnosticAtRegexp) will match the position of +// the first capture group, if any are provided. This can be used to +// identify a specific position in the code for a pattern that may occur in +// multiple places. For example `var (mu) sync.Mutex` matches the position +// of "mu" within the variable declaration. +// 4. Read diagnostics into a variable to implement more complicated +// assertions about diagnostic state in the editor. To do this, use the +// pattern OnceMet(precondition, ReadDiagnostics("file.go", &d)) to capture +// the current diagnostics as soon as the precondition is met. This is +// preferable to accessing the diagnostics directly, as it avoids races. +package regtest diff --git a/gopls/internal/lsp/regtest/env.go b/gopls/internal/lsp/regtest/env.go new file mode 100644 index 00000000000..67e72072477 --- /dev/null +++ b/gopls/internal/lsp/regtest/env.go @@ -0,0 +1,368 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "context" + "fmt" + "strings" + "sync" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/jsonrpc2/servertest" +) + +// Env holds the building blocks of an editor testing environment, providing +// wrapper methods that hide the boilerplate of plumbing contexts and checking +// errors. +type Env struct { + T testing.TB // TODO(rfindley): rename to TB + Ctx context.Context + + // Most tests should not need to access the scratch area, editor, server, or + // connection, but they are available if needed. + Sandbox *fake.Sandbox + Server servertest.Connector + + // Editor is owned by the Env, and shut down + Editor *fake.Editor + + Awaiter *Awaiter +} + +// An Awaiter keeps track of relevant LSP state, so that it may be asserted +// upon with Expectations. +// +// Wire it into a fake.Editor using Awaiter.Hooks(). +// +// TODO(rfindley): consider simply merging Awaiter with the fake.Editor. It +// probably is not worth its own abstraction. +type Awaiter struct { + workdir *fake.Workdir + + mu sync.Mutex + // For simplicity, each waiter gets a unique ID. + nextWaiterID int + state State + waiters map[int]*condition +} + +func NewAwaiter(workdir *fake.Workdir) *Awaiter { + return &Awaiter{ + workdir: workdir, + state: State{ + diagnostics: make(map[string]*protocol.PublishDiagnosticsParams), + work: make(map[protocol.ProgressToken]*workProgress), + }, + waiters: make(map[int]*condition), + } +} + +func (a *Awaiter) Hooks() fake.ClientHooks { + return fake.ClientHooks{ + OnDiagnostics: a.onDiagnostics, + OnLogMessage: a.onLogMessage, + OnWorkDoneProgressCreate: a.onWorkDoneProgressCreate, + OnProgress: a.onProgress, + OnShowMessage: a.onShowMessage, + OnShowMessageRequest: a.onShowMessageRequest, + OnRegistration: a.onRegistration, + OnUnregistration: a.onUnregistration, + } +} + +// State encapsulates the server state TODO: explain more +type State struct { + // diagnostics are a map of relative path->diagnostics params + diagnostics map[string]*protocol.PublishDiagnosticsParams + logs []*protocol.LogMessageParams + showMessage []*protocol.ShowMessageParams + showMessageRequest []*protocol.ShowMessageRequestParams + + registrations []*protocol.RegistrationParams + registeredCapabilities map[string]protocol.Registration + unregistrations []*protocol.UnregistrationParams + + // outstandingWork is a map of token->work summary. All tokens are assumed to + // be string, though the spec allows for numeric tokens as well. When work + // completes, it is deleted from this map. + work map[protocol.ProgressToken]*workProgress +} + +// outstandingWork counts started but not complete work items by title. +func (s State) outstandingWork() map[string]uint64 { + outstanding := make(map[string]uint64) + for _, work := range s.work { + if !work.complete { + outstanding[work.title]++ + } + } + return outstanding +} + +// completedWork counts complete work items by title. +func (s State) completedWork() map[string]uint64 { + completed := make(map[string]uint64) + for _, work := range s.work { + if work.complete { + completed[work.title]++ + } + } + return completed +} + +// startedWork counts started (and possibly complete) work items. +func (s State) startedWork() map[string]uint64 { + started := make(map[string]uint64) + for _, work := range s.work { + started[work.title]++ + } + return started +} + +type workProgress struct { + title, msg, endMsg string + percent float64 + complete bool // seen 'end'. +} + +// This method, provided for debugging, accesses mutable fields without a lock, +// so it must not be called concurrent with any State mutation. +func (s State) String() string { + var b strings.Builder + b.WriteString("#### log messages (see RPC logs for full text):\n") + for _, msg := range s.logs { + summary := fmt.Sprintf("%v: %q", msg.Type, msg.Message) + if len(summary) > 60 { + summary = summary[:57] + "..." + } + // Some logs are quite long, and since they should be reproduced in the RPC + // logs on any failure we include here just a short summary. + fmt.Fprint(&b, "\t"+summary+"\n") + } + b.WriteString("\n") + b.WriteString("#### diagnostics:\n") + for name, params := range s.diagnostics { + fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version)) + for _, d := range params.Diagnostics { + fmt.Fprintf(&b, "\t\t(%d, %d) [%s]: %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Source, d.Message) + } + } + b.WriteString("\n") + b.WriteString("#### outstanding work:\n") + for token, state := range s.work { + if state.complete { + continue + } + name := state.title + if name == "" { + name = fmt.Sprintf("!NO NAME(token: %s)", token) + } + fmt.Fprintf(&b, "\t%s: %.2f\n", name, state.percent) + } + b.WriteString("#### completed work:\n") + for name, count := range s.completedWork() { + fmt.Fprintf(&b, "\t%s: %d\n", name, count) + } + return b.String() +} + +// A condition is satisfied when all expectations are simultaneously +// met. At that point, the 'met' channel is closed. On any failure, err is set +// and the failed channel is closed. +type condition struct { + expectations []Expectation + verdict chan Verdict +} + +func (a *Awaiter) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + pth := a.workdir.URIToPath(d.URI) + a.state.diagnostics[pth] = d + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.showMessage = append(a.state.showMessage, m) + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.showMessageRequest = append(a.state.showMessageRequest, m) + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.logs = append(a.state.logs, m) + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.work[m.Token] = &workProgress{} + return nil +} + +func (a *Awaiter) onProgress(_ context.Context, m *protocol.ProgressParams) error { + a.mu.Lock() + defer a.mu.Unlock() + work, ok := a.state.work[m.Token] + if !ok { + panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m)) + } + v := m.Value.(map[string]interface{}) + switch kind := v["kind"]; kind { + case "begin": + work.title = v["title"].(string) + if msg, ok := v["message"]; ok { + work.msg = msg.(string) + } + case "report": + if pct, ok := v["percentage"]; ok { + work.percent = pct.(float64) + } + if msg, ok := v["message"]; ok { + work.msg = msg.(string) + } + case "end": + work.complete = true + if msg, ok := v["message"]; ok { + work.endMsg = msg.(string) + } + } + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onRegistration(_ context.Context, m *protocol.RegistrationParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.registrations = append(a.state.registrations, m) + if a.state.registeredCapabilities == nil { + a.state.registeredCapabilities = make(map[string]protocol.Registration) + } + for _, reg := range m.Registrations { + a.state.registeredCapabilities[reg.Method] = reg + } + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error { + a.mu.Lock() + defer a.mu.Unlock() + + a.state.unregistrations = append(a.state.unregistrations, m) + a.checkConditionsLocked() + return nil +} + +func (a *Awaiter) checkConditionsLocked() { + for id, condition := range a.waiters { + if v, _ := checkExpectations(a.state, condition.expectations); v != Unmet { + delete(a.waiters, id) + condition.verdict <- v + } + } +} + +// checkExpectations reports whether s meets all expectations. +func checkExpectations(s State, expectations []Expectation) (Verdict, string) { + finalVerdict := Met + var summary strings.Builder + for _, e := range expectations { + v := e.Check(s) + if v > finalVerdict { + finalVerdict = v + } + fmt.Fprintf(&summary, "%v: %s\n", v, e.Description) + } + return finalVerdict, summary.String() +} + +// Await blocks until the given expectations are all simultaneously met. +// +// Generally speaking Await should be avoided because it blocks indefinitely if +// gopls ends up in a state where the expectations are never going to be met. +// Use AfterChange or OnceMet instead, so that the runner knows when to stop +// waiting. +func (e *Env) Await(expectations ...Expectation) { + e.T.Helper() + if err := e.Awaiter.Await(e.Ctx, expectations...); err != nil { + e.T.Fatal(err) + } +} + +// OnceMet blocks until the precondition is met by the state or becomes +// unmeetable. If it was met, OnceMet checks that the state meets all +// expectations in mustMeets. +func (e *Env) OnceMet(precondition Expectation, mustMeets ...Expectation) { + e.Await(OnceMet(precondition, mustMeets...)) +} + +// Await waits for all expectations to simultaneously be met. It should only be +// called from the main test goroutine. +func (a *Awaiter) Await(ctx context.Context, expectations ...Expectation) error { + a.mu.Lock() + // Before adding the waiter, we check if the condition is currently met or + // failed to avoid a race where the condition was realized before Await was + // called. + switch verdict, summary := checkExpectations(a.state, expectations); verdict { + case Met: + a.mu.Unlock() + return nil + case Unmeetable: + err := fmt.Errorf("unmeetable expectations:\n%s\nstate:\n%v", summary, a.state) + a.mu.Unlock() + return err + } + cond := &condition{ + expectations: expectations, + verdict: make(chan Verdict), + } + a.waiters[a.nextWaiterID] = cond + a.nextWaiterID++ + a.mu.Unlock() + + var err error + select { + case <-ctx.Done(): + err = ctx.Err() + case v := <-cond.verdict: + if v != Met { + err = fmt.Errorf("condition has final verdict %v", v) + } + } + a.mu.Lock() + defer a.mu.Unlock() + _, summary := checkExpectations(a.state, expectations) + + // Debugging an unmet expectation can be tricky, so we put some effort into + // nicely formatting the failure. + if err != nil { + return fmt.Errorf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, a.state) + } + return nil +} diff --git a/gopls/internal/lsp/regtest/env_test.go b/gopls/internal/lsp/regtest/env_test.go new file mode 100644 index 00000000000..e334faa905c --- /dev/null +++ b/gopls/internal/lsp/regtest/env_test.go @@ -0,0 +1,66 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "context" + "encoding/json" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +func TestProgressUpdating(t *testing.T) { + a := &Awaiter{ + state: State{ + work: make(map[protocol.ProgressToken]*workProgress), + }, + } + ctx := context.Background() + if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{ + Token: "foo", + }); err != nil { + t.Fatal(err) + } + if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{ + Token: "bar", + }); err != nil { + t.Fatal(err) + } + updates := []struct { + token string + value interface{} + }{ + {"foo", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "foo work"}}, + {"bar", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "bar work"}}, + {"foo", protocol.WorkDoneProgressEnd{Kind: "end"}}, + {"bar", protocol.WorkDoneProgressReport{Kind: "report", Percentage: 42}}, + } + for _, update := range updates { + params := &protocol.ProgressParams{ + Token: update.token, + Value: update.value, + } + data, err := json.Marshal(params) + if err != nil { + t.Fatal(err) + } + var unmarshaled protocol.ProgressParams + if err := json.Unmarshal(data, &unmarshaled); err != nil { + t.Fatal(err) + } + if err := a.onProgress(ctx, &unmarshaled); err != nil { + t.Fatal(err) + } + } + if !a.state.work["foo"].complete { + t.Error("work entry \"foo\" is incomplete, want complete") + } + got := *a.state.work["bar"] + want := workProgress{title: "bar work", percent: 42} + if got != want { + t.Errorf("work progress for \"bar\": %v, want %v", got, want) + } +} diff --git a/gopls/internal/lsp/regtest/expectation.go b/gopls/internal/lsp/regtest/expectation.go new file mode 100644 index 00000000000..9d9f023d92a --- /dev/null +++ b/gopls/internal/lsp/regtest/expectation.go @@ -0,0 +1,769 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +var ( + // InitialWorkspaceLoad is an expectation that the workspace initial load has + // completed. It is verified via workdone reporting. + InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1, false) +) + +// A Verdict is the result of checking an expectation against the current +// editor state. +type Verdict int + +// Order matters for the following constants: verdicts are sorted in order of +// decisiveness. +const ( + // Met indicates that an expectation is satisfied by the current state. + Met Verdict = iota + // Unmet indicates that an expectation is not currently met, but could be met + // in the future. + Unmet + // Unmeetable indicates that an expectation cannot be satisfied in the + // future. + Unmeetable +) + +func (v Verdict) String() string { + switch v { + case Met: + return "Met" + case Unmet: + return "Unmet" + case Unmeetable: + return "Unmeetable" + } + return fmt.Sprintf("unrecognized verdict %d", v) +} + +// An Expectation is an expected property of the state of the LSP client. +// The Check function reports whether the property is met. +// +// Expectations are combinators. By composing them, tests may express +// complex expectations in terms of simpler ones. +// +// TODO(rfindley): as expectations are combined, it becomes harder to identify +// why they failed. A better signature for Check would be +// +// func(State) (Verdict, string) +// +// returning a reason for the verdict that can be composed similarly to +// descriptions. +type Expectation struct { + Check func(State) Verdict + + // Description holds a noun-phrase identifying what the expectation checks. + // + // TODO(rfindley): revisit existing descriptions to ensure they compose nicely. + Description string +} + +// OnceMet returns an Expectation that, once the precondition is met, asserts +// that mustMeet is met. +func OnceMet(precondition Expectation, mustMeets ...Expectation) Expectation { + check := func(s State) Verdict { + switch pre := precondition.Check(s); pre { + case Unmeetable: + return Unmeetable + case Met: + for _, mustMeet := range mustMeets { + verdict := mustMeet.Check(s) + if verdict != Met { + return Unmeetable + } + } + return Met + default: + return Unmet + } + } + description := describeExpectations(mustMeets...) + return Expectation{ + Check: check, + Description: fmt.Sprintf("once %q is met, must have:\n%s", precondition.Description, description), + } +} + +func describeExpectations(expectations ...Expectation) string { + var descriptions []string + for _, e := range expectations { + descriptions = append(descriptions, e.Description) + } + return strings.Join(descriptions, "\n") +} + +// AnyOf returns an expectation that is satisfied when any of the given +// expectations is met. +func AnyOf(anyOf ...Expectation) Expectation { + check := func(s State) Verdict { + for _, e := range anyOf { + verdict := e.Check(s) + if verdict == Met { + return Met + } + } + return Unmet + } + description := describeExpectations(anyOf...) + return Expectation{ + Check: check, + Description: fmt.Sprintf("Any of:\n%s", description), + } +} + +// AllOf expects that all given expectations are met. +// +// TODO(rfindley): the problem with these types of combinators (OnceMet, AnyOf +// and AllOf) is that we lose the information of *why* they failed: the Awaiter +// is not smart enough to look inside. +// +// Refactor the API such that the Check function is responsible for explaining +// why an expectation failed. This should allow us to significantly improve +// test output: we won't need to summarize state at all, as the verdict +// explanation itself should describe clearly why the expectation not met. +func AllOf(allOf ...Expectation) Expectation { + check := func(s State) Verdict { + verdict := Met + for _, e := range allOf { + if v := e.Check(s); v > verdict { + verdict = v + } + } + return verdict + } + description := describeExpectations(allOf...) + return Expectation{ + Check: check, + Description: fmt.Sprintf("All of:\n%s", description), + } +} + +// ReadDiagnostics is an Expectation that stores the current diagnostics for +// fileName in into, whenever it is evaluated. +// +// It can be used in combination with OnceMet or AfterChange to capture the +// state of diagnostics when other expectations are satisfied. +func ReadDiagnostics(fileName string, into *protocol.PublishDiagnosticsParams) Expectation { + check := func(s State) Verdict { + diags, ok := s.diagnostics[fileName] + if !ok { + return Unmeetable + } + *into = *diags + return Met + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("read diagnostics for %q", fileName), + } +} + +// ReadAllDiagnostics is an expectation that stores all published diagnostics +// into the provided map, whenever it is evaluated. +// +// It can be used in combination with OnceMet or AfterChange to capture the +// state of diagnostics when other expectations are satisfied. +func ReadAllDiagnostics(into *map[string]*protocol.PublishDiagnosticsParams) Expectation { + check := func(s State) Verdict { + allDiags := make(map[string]*protocol.PublishDiagnosticsParams) + for name, diags := range s.diagnostics { + allDiags[name] = diags + } + *into = allDiags + return Met + } + return Expectation{ + Check: check, + Description: "read all diagnostics", + } +} + +// NoOutstandingWork asserts that there is no work initiated using the LSP +// $/progress API that has not completed. +func NoOutstandingWork() Expectation { + check := func(s State) Verdict { + if len(s.outstandingWork()) == 0 { + return Met + } + return Unmet + } + return Expectation{ + Check: check, + Description: "no outstanding work", + } +} + +// NoShownMessage asserts that the editor has not received a ShowMessage. +func NoShownMessage(subString string) Expectation { + check := func(s State) Verdict { + for _, m := range s.showMessage { + if strings.Contains(m.Message, subString) { + return Unmeetable + } + } + return Met + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("no ShowMessage received containing %q", subString), + } +} + +// ShownMessage asserts that the editor has received a ShowMessageRequest +// containing the given substring. +func ShownMessage(containing string) Expectation { + check := func(s State) Verdict { + for _, m := range s.showMessage { + if strings.Contains(m.Message, containing) { + return Met + } + } + return Unmet + } + return Expectation{ + Check: check, + Description: "received ShowMessage", + } +} + +// ShowMessageRequest asserts that the editor has received a ShowMessageRequest +// with an action item that has the given title. +func ShowMessageRequest(title string) Expectation { + check := func(s State) Verdict { + if len(s.showMessageRequest) == 0 { + return Unmet + } + // Only check the most recent one. + m := s.showMessageRequest[len(s.showMessageRequest)-1] + if len(m.Actions) == 0 || len(m.Actions) > 1 { + return Unmet + } + if m.Actions[0].Title == title { + return Met + } + return Unmet + } + return Expectation{ + Check: check, + Description: "received ShowMessageRequest", + } +} + +// DoneDiagnosingChanges expects that diagnostics are complete from common +// change notifications: didOpen, didChange, didSave, didChangeWatchedFiles, +// and didClose. +// +// This can be used when multiple notifications may have been sent, such as +// when a didChange is immediately followed by a didSave. It is insufficient to +// simply await NoOutstandingWork, because the LSP client has no control over +// when the server starts processing a notification. Therefore, we must keep +// track of +func (e *Env) DoneDiagnosingChanges() Expectation { + stats := e.Editor.Stats() + statsBySource := map[lsp.ModificationSource]uint64{ + lsp.FromDidOpen: stats.DidOpen, + lsp.FromDidChange: stats.DidChange, + lsp.FromDidSave: stats.DidSave, + lsp.FromDidChangeWatchedFiles: stats.DidChangeWatchedFiles, + lsp.FromDidClose: stats.DidClose, + } + + var expected []lsp.ModificationSource + for k, v := range statsBySource { + if v > 0 { + expected = append(expected, k) + } + } + + // Sort for stability. + sort.Slice(expected, func(i, j int) bool { + return expected[i] < expected[j] + }) + + var all []Expectation + for _, source := range expected { + all = append(all, CompletedWork(lsp.DiagnosticWorkTitle(source), statsBySource[source], true)) + } + + return AllOf(all...) +} + +// AfterChange expects that the given expectations will be met after all +// state-changing notifications have been processed by the server. +// +// It awaits the completion of all anticipated work before checking the given +// expectations. +func (e *Env) AfterChange(expectations ...Expectation) { + e.T.Helper() + e.OnceMet( + e.DoneDiagnosingChanges(), + expectations..., + ) +} + +// DoneWithOpen expects all didOpen notifications currently sent by the editor +// to be completely processed. +func (e *Env) DoneWithOpen() Expectation { + opens := e.Editor.Stats().DidOpen + return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens, true) +} + +// StartedChange expects that the server has at least started processing all +// didChange notifications sent from the client. +func (e *Env) StartedChange() Expectation { + changes := e.Editor.Stats().DidChange + return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes) +} + +// DoneWithChange expects all didChange notifications currently sent by the +// editor to be completely processed. +func (e *Env) DoneWithChange() Expectation { + changes := e.Editor.Stats().DidChange + return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes, true) +} + +// DoneWithSave expects all didSave notifications currently sent by the editor +// to be completely processed. +func (e *Env) DoneWithSave() Expectation { + saves := e.Editor.Stats().DidSave + return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves, true) +} + +// StartedChangeWatchedFiles expects that the server has at least started +// processing all didChangeWatchedFiles notifications sent from the client. +func (e *Env) StartedChangeWatchedFiles() Expectation { + changes := e.Editor.Stats().DidChangeWatchedFiles + return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes) +} + +// DoneWithChangeWatchedFiles expects all didChangeWatchedFiles notifications +// currently sent by the editor to be completely processed. +func (e *Env) DoneWithChangeWatchedFiles() Expectation { + changes := e.Editor.Stats().DidChangeWatchedFiles + return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes, true) +} + +// DoneWithClose expects all didClose notifications currently sent by the +// editor to be completely processed. +func (e *Env) DoneWithClose() Expectation { + changes := e.Editor.Stats().DidClose + return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes, true) +} + +// StartedWork expect a work item to have been started >= atLeast times. +// +// See CompletedWork. +func StartedWork(title string, atLeast uint64) Expectation { + check := func(s State) Verdict { + if s.startedWork()[title] >= atLeast { + return Met + } + return Unmet + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("started work %q at least %d time(s)", title, atLeast), + } +} + +// CompletedWork expects a work item to have been completed >= atLeast times. +// +// Since the Progress API doesn't include any hidden metadata, we must use the +// progress notification title to identify the work we expect to be completed. +func CompletedWork(title string, count uint64, atLeast bool) Expectation { + check := func(s State) Verdict { + completed := s.completedWork() + if completed[title] == count || atLeast && completed[title] > count { + return Met + } + return Unmet + } + desc := fmt.Sprintf("completed work %q %v times", title, count) + if atLeast { + desc = fmt.Sprintf("completed work %q at least %d time(s)", title, count) + } + return Expectation{ + Check: check, + Description: desc, + } +} + +type WorkStatus struct { + // Last seen message from either `begin` or `report` progress. + Msg string + // Message sent with `end` progress message. + EndMsg string +} + +// CompletedProgress expects that workDone progress is complete for the given +// progress token. When non-nil WorkStatus is provided, it will be filled +// when the expectation is met. +// +// If the token is not a progress token that the client has seen, this +// expectation is Unmeetable. +func CompletedProgress(token protocol.ProgressToken, into *WorkStatus) Expectation { + check := func(s State) Verdict { + work, ok := s.work[token] + if !ok { + return Unmeetable // TODO(rfindley): refactor to allow the verdict to explain this result + } + if work.complete { + if into != nil { + into.Msg = work.msg + into.EndMsg = work.endMsg + } + return Met + } + return Unmet + } + desc := fmt.Sprintf("completed work for token %v", token) + return Expectation{ + Check: check, + Description: desc, + } +} + +// OutstandingWork expects a work item to be outstanding. The given title must +// be an exact match, whereas the given msg must only be contained in the work +// item's message. +func OutstandingWork(title, msg string) Expectation { + check := func(s State) Verdict { + for _, work := range s.work { + if work.complete { + continue + } + if work.title == title && strings.Contains(work.msg, msg) { + return Met + } + } + return Unmet + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("outstanding work: %q containing %q", title, msg), + } +} + +// NoErrorLogs asserts that the client has not received any log messages of +// error severity. +func NoErrorLogs() Expectation { + return NoLogMatching(protocol.Error, "") +} + +// LogMatching asserts that the client has received a log message +// of type typ matching the regexp re a certain number of times. +// +// The count argument specifies the expected number of matching logs. If +// atLeast is set, this is a lower bound, otherwise there must be exactly count +// matching logs. +func LogMatching(typ protocol.MessageType, re string, count int, atLeast bool) Expectation { + rec, err := regexp.Compile(re) + if err != nil { + panic(err) + } + check := func(state State) Verdict { + var found int + for _, msg := range state.logs { + if msg.Type == typ && rec.Match([]byte(msg.Message)) { + found++ + } + } + // Check for an exact or "at least" match. + if found == count || (found >= count && atLeast) { + return Met + } + return Unmet + } + desc := fmt.Sprintf("log message matching %q expected %v times", re, count) + if atLeast { + desc = fmt.Sprintf("log message matching %q expected at least %v times", re, count) + } + return Expectation{ + Check: check, + Description: desc, + } +} + +// NoLogMatching asserts that the client has not received a log message +// of type typ matching the regexp re. If re is an empty string, any log +// message is considered a match. +func NoLogMatching(typ protocol.MessageType, re string) Expectation { + var r *regexp.Regexp + if re != "" { + var err error + r, err = regexp.Compile(re) + if err != nil { + panic(err) + } + } + check := func(state State) Verdict { + for _, msg := range state.logs { + if msg.Type != typ { + continue + } + if r == nil || r.Match([]byte(msg.Message)) { + return Unmeetable + } + } + return Met + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("no log message matching %q", re), + } +} + +// FileWatchMatching expects that a file registration matches re. +func FileWatchMatching(re string) Expectation { + return Expectation{ + Check: checkFileWatch(re, Met, Unmet), + Description: fmt.Sprintf("file watch matching %q", re), + } +} + +// NoFileWatchMatching expects that no file registration matches re. +func NoFileWatchMatching(re string) Expectation { + return Expectation{ + Check: checkFileWatch(re, Unmet, Met), + Description: fmt.Sprintf("no file watch matching %q", re), + } +} + +func checkFileWatch(re string, onMatch, onNoMatch Verdict) func(State) Verdict { + rec := regexp.MustCompile(re) + return func(s State) Verdict { + r := s.registeredCapabilities["workspace/didChangeWatchedFiles"] + watchers := jsonProperty(r.RegisterOptions, "watchers").([]interface{}) + for _, watcher := range watchers { + pattern := jsonProperty(watcher, "globPattern").(string) + if rec.MatchString(pattern) { + return onMatch + } + } + return onNoMatch + } +} + +// jsonProperty extracts a value from a path of JSON property names, assuming +// the default encoding/json unmarshaling to the empty interface (i.e.: that +// JSON objects are unmarshalled as map[string]interface{}) +// +// For example, if obj is unmarshalled from the following json: +// +// { +// "foo": { "bar": 3 } +// } +// +// Then jsonProperty(obj, "foo", "bar") will be 3. +func jsonProperty(obj interface{}, path ...string) interface{} { + if len(path) == 0 || obj == nil { + return obj + } + m := obj.(map[string]interface{}) + return jsonProperty(m[path[0]], path[1:]...) +} + +// RegistrationMatching asserts that the client has received a capability +// registration matching the given regexp. +// +// TODO(rfindley): remove this once TestWatchReplaceTargets has been revisited. +// +// Deprecated: use (No)FileWatchMatching +func RegistrationMatching(re string) Expectation { + rec := regexp.MustCompile(re) + check := func(s State) Verdict { + for _, p := range s.registrations { + for _, r := range p.Registrations { + if rec.Match([]byte(r.Method)) { + return Met + } + } + } + return Unmet + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("registration matching %q", re), + } +} + +// UnregistrationMatching asserts that the client has received an +// unregistration whose ID matches the given regexp. +func UnregistrationMatching(re string) Expectation { + rec := regexp.MustCompile(re) + check := func(s State) Verdict { + for _, p := range s.unregistrations { + for _, r := range p.Unregisterations { + if rec.Match([]byte(r.Method)) { + return Met + } + } + } + return Unmet + } + return Expectation{ + Check: check, + Description: fmt.Sprintf("unregistration matching %q", re), + } +} + +// Diagnostics asserts that there is at least one diagnostic matching the given +// filters. +func Diagnostics(filters ...DiagnosticFilter) Expectation { + check := func(s State) Verdict { + diags := flattenDiagnostics(s) + for _, filter := range filters { + var filtered []flatDiagnostic + for _, d := range diags { + if filter.check(d.name, d.diag) { + filtered = append(filtered, d) + } + } + if len(filtered) == 0 { + // TODO(rfindley): if/when expectations describe their own failure, we + // can provide more useful information here as to which filter caused + // the failure. + return Unmet + } + diags = filtered + } + return Met + } + var descs []string + for _, filter := range filters { + descs = append(descs, filter.desc) + } + return Expectation{ + Check: check, + Description: "any diagnostics " + strings.Join(descs, ", "), + } +} + +// NoDiagnostics asserts that there are no diagnostics matching the given +// filters. Notably, if no filters are supplied this assertion checks that +// there are no diagnostics at all, for any file. +func NoDiagnostics(filters ...DiagnosticFilter) Expectation { + check := func(s State) Verdict { + diags := flattenDiagnostics(s) + for _, filter := range filters { + var filtered []flatDiagnostic + for _, d := range diags { + if filter.check(d.name, d.diag) { + filtered = append(filtered, d) + } + } + diags = filtered + } + if len(diags) > 0 { + return Unmet + } + return Met + } + var descs []string + for _, filter := range filters { + descs = append(descs, filter.desc) + } + return Expectation{ + Check: check, + Description: "no diagnostics " + strings.Join(descs, ", "), + } +} + +type flatDiagnostic struct { + name string + diag protocol.Diagnostic +} + +func flattenDiagnostics(state State) []flatDiagnostic { + var result []flatDiagnostic + for name, diags := range state.diagnostics { + for _, diag := range diags.Diagnostics { + result = append(result, flatDiagnostic{name, diag}) + } + } + return result +} + +// -- Diagnostic filters -- + +// A DiagnosticFilter filters the set of diagnostics, for assertion with +// Diagnostics or NoDiagnostics. +type DiagnosticFilter struct { + desc string + check func(name string, _ protocol.Diagnostic) bool +} + +// ForFile filters to diagnostics matching the sandbox-relative file name. +func ForFile(name string) DiagnosticFilter { + return DiagnosticFilter{ + desc: fmt.Sprintf("for file %q", name), + check: func(diagName string, _ protocol.Diagnostic) bool { + return diagName == name + }, + } +} + +// FromSource filters to diagnostics matching the given diagnostics source. +func FromSource(source string) DiagnosticFilter { + return DiagnosticFilter{ + desc: fmt.Sprintf("with source %q", source), + check: func(_ string, d protocol.Diagnostic) bool { + return d.Source == source + }, + } +} + +// AtRegexp filters to diagnostics in the file with sandbox-relative path name, +// at the first position matching the given regexp pattern. +// +// TODO(rfindley): pass in the editor to expectations, so that they may depend +// on editor state and AtRegexp can be a function rather than a method. +func (e *Env) AtRegexp(name, pattern string) DiagnosticFilter { + loc := e.RegexpSearch(name, pattern) + return DiagnosticFilter{ + desc: fmt.Sprintf("at the first position matching %#q in %q", pattern, name), + check: func(diagName string, d protocol.Diagnostic) bool { + return diagName == name && d.Range.Start == loc.Range.Start + }, + } +} + +// AtPosition filters to diagnostics at location name:line:character, for a +// sandbox-relative path name. +// +// Line and character are 0-based, and character measures UTF-16 codes. +// +// Note: prefer the more readable AtRegexp. +func AtPosition(name string, line, character uint32) DiagnosticFilter { + pos := protocol.Position{Line: line, Character: character} + return DiagnosticFilter{ + desc: fmt.Sprintf("at %s:%d:%d", name, line, character), + check: func(diagName string, d protocol.Diagnostic) bool { + return diagName == name && d.Range.Start == pos + }, + } +} + +// WithMessage filters to diagnostics whose message contains the given +// substring. +func WithMessage(substring string) DiagnosticFilter { + return DiagnosticFilter{ + desc: fmt.Sprintf("with message containing %q", substring), + check: func(_ string, d protocol.Diagnostic) bool { + return strings.Contains(d.Message, substring) + }, + } +} diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go new file mode 100644 index 00000000000..691a608c91e --- /dev/null +++ b/gopls/internal/lsp/regtest/marker.go @@ -0,0 +1,940 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "go/token" + "io/fs" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "golang.org/x/tools/go/expect" + "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/txtar" +) + +var update = flag.Bool("update", false, "if set, update test data during marker tests") + +// RunMarkerTests runs "marker" tests in the given test data directory. +// +// A marker test uses the '//@' marker syntax of the x/tools/go/expect package +// to annotate source code with various information such as locations and +// arguments of LSP operations to be executed by the test. The syntax following +// '@' is parsed as a comma-separated list of ordinary Go function calls, for +// example +// +// //@foo(a, "b", 3),bar(0) +// +// and delegates to a corresponding function to perform LSP-related operations. +// See the Marker types documentation below for a list of supported markers. +// +// Each call argument is coerced to the type of the corresponding parameter of +// the designated function. The coercion logic may use the surrounding context, +// such as the position or nearby text. See the Argument coercion section below +// for the full set of special conversions. As a special case, the blank +// identifier '_' is treated as the zero value of the parameter type. +// +// The test runner collects test cases by searching the given directory for +// files with the .txt extension. Each file is interpreted as a txtar archive, +// which is extracted to a temporary directory. The relative path to the .txt +// file is used as the subtest name. The preliminary section of the file +// (before the first archive entry) is a free-form comment. +// +// These tests were inspired by (and in many places copied from) a previous +// iteration of the marker tests built on top of the packagestest framework. +// Key design decisions motivating this reimplementation are as follows: +// - The old tests had a single global session, causing interaction at a +// distance and several awkward workarounds. +// - The old tests could not be safely parallelized, because certain tests +// manipulated the server options +// - Relatedly, the old tests did not have a logic grouping of assertions into +// a single unit, resulting in clusters of files serving clusters of +// entangled assertions. +// - The old tests used locations in the source as test names and as the +// identity of golden content, meaning that a single edit could change the +// name of an arbitrary number of subtests, and making it difficult to +// manually edit golden content. +// - The old tests did not hew closely to LSP concepts, resulting in, for +// example, each marker implementation doing its own position +// transformations, and inventing its own mechanism for configuration. +// - The old tests had an ad-hoc session initialization process. The regtest +// environment has had more time devoted to its initialization, and has a +// more convenient API. +// - The old tests lacked documentation, and often had failures that were hard +// to understand. By starting from scratch, we can revisit these aspects. +// +// # Special files +// +// There are three types of file within the test archive that are given special +// treatment by the test runner: +// - "flags": this file is treated as a whitespace-separated list of flags +// that configure the MarkerTest instance. For example, -min_go=go1.18 sets +// the minimum required Go version for the test. +// TODO(rfindley): support flag values containing whitespace. +// - "settings.json": this file is parsed as JSON, and used as the +// session configuration (see gopls/doc/settings.md) +// - "env": this file is parsed as a list of VAR=VALUE fields specifying the +// editor environment. +// - Golden files: Within the archive, file names starting with '@' are +// treated as "golden" content, and are not written to disk, but instead are +// made available to test methods expecting an argument of type *Golden, +// using the identifier following '@'. For example, if the first parameter of +// Foo were of type *Golden, the test runner would convert the identifier a +// in the call @foo(a, "b", 3) into a *Golden by collecting golden file +// data starting with "@a/". +// +// # Marker types +// +// The following markers are supported within marker tests: +// - diag(location, regexp): specifies an expected diagnostic matching the +// given regexp at the given location. The test runner requires +// a 1:1 correspondence between observed diagnostics and diag annotations +// - def(src, dst location): perform a textDocument/definition request at +// the src location, and check the the result points to the dst location. +// - hover(src, dst location, g Golden): perform a textDocument/hover at the +// src location, and checks that the result is the dst location, with hover +// content matching "hover.md" in the golden data g. +// - loc(name, location): specifies the name for a location in the source. These +// locations may be referenced by other markers. +// +// # Argument conversion +// +// Marker arguments are first parsed by the go/expect package, which accepts +// the following tokens as defined by the Go spec: +// - string, int64, float64, and rune literals +// - true and false +// - nil +// - identifiers (type expect.Identifier) +// - regular expressions, denoted the two tokens re"abc" (type *regexp.Regexp) +// +// These values are passed as arguments to the corresponding parameter of the +// test function. Additional value conversions may occur for these argument -> +// parameter type pairs: +// - string->regexp: the argument is parsed as a regular expressions. +// - string->location: the argument is converted to the location of the first +// instance of the argument in the partial line preceding the note. +// - regexp->location: the argument is converted to the location of the first +// match for the argument in the partial line preceding the note. If the +// regular expression contains exactly one subgroup, the position of the +// subgroup is used rather than the position of the submatch. +// - name->location: the argument is replaced by the named location. +// - name->Golden: the argument is used to look up golden content prefixed by +// @. +// +// # Example +// +// Here is a complete example: +// +// -- a.go -- +// package a +// +// const abc = 0x2a //@hover("b", "abc", abc),hover(" =", "abc", abc) +// -- @abc/hover.md -- +// ```go +// const abc untyped int = 42 +// ``` +// +// @hover("b", "abc", abc),hover(" =", "abc", abc) +// +// In this example, the @hover annotation tells the test runner to run the +// hoverMarker function, which has parameters: +// +// (c *markerContext, src, dsc protocol.Location, g *Golden). +// +// The first argument holds the test context, including fake editor with open +// files, and sandboxed directory. +// +// Argument converters translate the "b" and "abc" arguments into locations by +// interpreting each one as a regular expression and finding the location of +// its first match on the preceding portion of the line, and the abc identifier +// into a dictionary of golden content containing "hover.md". Then the +// hoverMarker method executes a textDocument/hover LSP request at the src +// position, and ensures the result spans "abc", with the markdown content from +// hover.md. (Note that the markdown content includes the expect annotation as +// the doc comment.) +// +// The next hover on the same line asserts the same result, but initiates the +// hover immediately after "abc" in the source. This tests that we find the +// preceding identifier when hovering. +// +// # Updating golden files +// +// To update golden content in the test archive, it is easier to regenerate +// content automatically rather than edit it by hand. To do this, run the +// tests with the -update flag. Only tests that actually run will be updated. +// +// In some cases, golden content will vary by Go version (for example, gopls +// produces different markdown at Go versions before the 1.19 go/doc update). +// By convention, the golden content in test archives should match the output +// at Go tip. Each test function can normalize golden content for older Go +// versions. +// +// Note that -update does not cause missing @diag or @loc markers to be added. +// +// # TODO +// +// This API is a work-in-progress, as we migrate existing marker tests from +// internal/lsp/tests. +// +// Remaining TODO: +// - parallelize/optimize test execution +// - reorganize regtest packages (and rename to just 'test'?) +// +// Existing marker tests to port: +// - CallHierarchy +// - CodeLens +// - Diagnostics +// - CompletionItems +// - Completions +// - CompletionSnippets +// - UnimportedCompletions +// - DeepCompletions +// - FuzzyCompletions +// - CaseSensitiveCompletions +// - RankCompletions +// - FoldingRanges +// - Formats +// - Imports +// - SemanticTokens +// - SuggestedFixes +// - FunctionExtractions +// - MethodExtractions +// - Definitions +// - Implementations +// - Highlights +// - References +// - Renames +// - PrepareRenames +// - Symbols +// - InlayHints +// - WorkspaceSymbols +// - Signatures +// - Links +// - AddImport +// - SelectionRanges +func RunMarkerTests(t *testing.T, dir string) { + // The marker tests must be able to run go/packages.Load. + testenv.NeedsGoPackages(t) + + tests, err := loadMarkerTests(dir) + if err != nil { + t.Fatal(err) + } + + // Opt: use a shared cache. + // TODO: opt: use a memoize store with no eviction. + cache := cache.New(nil, nil) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // TODO(rfindley): it may be more useful to have full support for build + // constraints. + if test.minGoVersion != "" { + var go1point int + if _, err := fmt.Sscanf(test.minGoVersion, "go1.%d", &go1point); err != nil { + t.Fatalf("parsing -min_go version: %v", err) + } + testenv.NeedsGo1Point(t, 18) + } + test.executed = true + config := fake.EditorConfig{ + Settings: test.settings, + Env: test.env, + } + c := &markerContext{ + test: test, + env: newEnv(t, cache, test.files, config), + + locations: make(map[expect.Identifier]protocol.Location), + diags: make(map[protocol.Location][]protocol.Diagnostic), + } + // TODO(rfindley): make it easier to clean up the regtest environment. + defer c.env.Editor.Shutdown(context.Background()) // ignore error + defer c.env.Sandbox.Close() // ignore error + + // Open all files so that we operate consistently with LSP clients, and + // (pragmatically) so that we have a Mapper available via the fake + // editor. + // + // This also allows avoiding mutating the editor state in tests. + for file := range test.files { + c.env.OpenFile(file) + } + + // Pre-process locations. + var notes []*expect.Note + for _, note := range test.notes { + switch note.Name { + case "loc": + mi := markers[note.Name] + if err := runMarker(c, mi, note); err != nil { + t.Error(err) + } + default: + notes = append(notes, note) + } + } + + // Wait for the didOpen notifications to be processed, then collect + // diagnostics. + var diags map[string]*protocol.PublishDiagnosticsParams + c.env.AfterChange(ReadAllDiagnostics(&diags)) + for path, params := range diags { + uri := c.env.Sandbox.Workdir.URI(path) + for _, diag := range params.Diagnostics { + loc := protocol.Location{ + URI: uri, + Range: diag.Range, + } + c.diags[loc] = append(c.diags[loc], diag) + } + } + + // Invoke each remaining note function in the test. + for _, note := range notes { + mi, ok := markers[note.Name] + if !ok { + t.Errorf("%s: no marker function named %s", c.fmtPos(note.Pos), note.Name) + continue + } + if err := runMarker(c, mi, note); err != nil { + t.Error(err) + } + } + + // Any remaining (un-eliminated) diagnostics are an error. + for loc, diags := range c.diags { + for _, diag := range diags { + t.Errorf("%s: unexpected diagnostic: %q", c.fmtLoc(loc), diag.Message) + } + } + }) + } + + // If updateGolden is set, golden content was updated during text execution, + // so we can now update the test data. + // TODO(rfindley): even when -update is not set, compare updated content with + // actual content. + if *update { + if err := writeMarkerTests(dir, tests); err != nil { + t.Fatalf("failed to -update: %v", err) + } + } +} + +// runMarker calls mi.fn with the arguments coerced from note. +func runMarker(c *markerContext, mi markerInfo, note *expect.Note) error { + // The first converter corresponds to the *Env argument. All others + // must be coerced from the marker syntax. + if got, want := len(note.Args), len(mi.converters); got != want { + return fmt.Errorf("%s: got %d arguments to %s, expect %d", c.fmtPos(note.Pos), got, note.Name, want) + } + + args := []reflect.Value{reflect.ValueOf(c)} + for i, in := range note.Args { + // Special handling for the blank identifier: treat it as the zero + // value. + if ident, ok := in.(expect.Identifier); ok && ident == "_" { + zero := reflect.Zero(mi.paramTypes[i]) + args = append(args, zero) + continue + } + out, err := mi.converters[i](c, note, in) + if err != nil { + return fmt.Errorf("%s: converting argument #%d of %s (%v): %v", c.fmtPos(note.Pos), i, note.Name, in, err) + } + args = append(args, reflect.ValueOf(out)) + } + + mi.fn.Call(args) + return nil +} + +// Supported markers. +// +// Each marker func must accept an markerContext as its first argument, with +// subsequent arguments coerced from the marker arguments. +// +// Marker funcs should not mutate the test environment (e.g. via opening files +// or applying edits in the editor). +var markers = map[string]markerInfo{ + "def": makeMarker(defMarker), + "diag": makeMarker(diagMarker), + "hover": makeMarker(hoverMarker), + "loc": makeMarker(locMarker), +} + +// MarkerTest holds all the test data extracted from a test txtar archive. +// +// See the documentation for RunMarkerTests for more information on the archive +// format. +type MarkerTest struct { + name string // relative path to the txtar file in the testdata dir + fset *token.FileSet // fileset used for parsing notes + archive *txtar.Archive // original test archive + settings map[string]interface{} // gopls settings + env map[string]string // editor environment + files map[string][]byte // data files from the archive (excluding special files) + notes []*expect.Note // extracted notes from data files + golden map[string]*Golden // extracted golden content, by identifier name + + // executed tracks whether the test was executed. + // + // When -update is set, only tests that were actually executed are written. + executed bool + + // flags holds flags extracted from the special "flags" archive file. + flags []string + // Parsed flags values. + minGoVersion string +} + +// flagSet returns the flagset used for parsing the special "flags" file in the +// test archive. +func (t *MarkerTest) flagSet() *flag.FlagSet { + flags := flag.NewFlagSet(t.name, flag.ContinueOnError) + flags.StringVar(&t.minGoVersion, "min_go", "", "if set, the minimum go1.X version required for this test") + return flags +} + +// Golden holds extracted golden content for a single @ prefix. The +// +// When -update is set, golden captures the updated golden contents for later +// writing. +type Golden struct { + id string + data map[string][]byte + updated map[string][]byte +} + +// Get returns golden content for the given name, which corresponds to the +// relative path following the golden prefix @/. For example, to access +// the content of @foo/path/to/result.json from the Golden associated with +// @foo, name should be "path/to/result.json". +// +// If -update is set, the given update function will be called to get the +// updated golden content that should be written back to testdata. +func (g *Golden) Get(t testing.TB, name string, getUpdate func() []byte) []byte { + if *update { + d := getUpdate() + if existing, ok := g.updated[name]; ok { + // Multiple tests may reference the same golden data, but if they do they + // must agree about its expected content. + if diff := compare.Text(string(existing), string(d)); diff != "" { + t.Errorf("conflicting updates for golden data %s/%s:\n%s", g.id, name, diff) + } + } + if g.updated == nil { + g.updated = make(map[string][]byte) + } + g.updated[name] = d + return d + } + return g.data[name] +} + +// loadMarkerTests walks the given dir looking for .txt files, which it +// interprets as a txtar archive. +// +// See the documentation for RunMarkerTests for more details on the test data +// archive. +// +// TODO(rfindley): this test could sanity check the results. For example, it is +// too easy to write "// @" instead of "//@", which we will happy skip silently. +func loadMarkerTests(dir string) ([]*MarkerTest, error) { + var tests []*MarkerTest + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if strings.HasSuffix(path, ".txt") { + content, err := os.ReadFile(path) + if err != nil { + return err + } + archive := txtar.Parse(content) + name := strings.TrimPrefix(path, dir+string(filepath.Separator)) + test, err := loadMarkerTest(name, archive) + if err != nil { + return fmt.Errorf("%s: %v", path, err) + } + tests = append(tests, test) + } + return nil + }) + return tests, err +} + +func loadMarkerTest(name string, archive *txtar.Archive) (*MarkerTest, error) { + test := &MarkerTest{ + name: name, + fset: token.NewFileSet(), + archive: archive, + files: make(map[string][]byte), + golden: make(map[string]*Golden), + } + for _, file := range archive.Files { + switch { + case file.Name == "flags": + test.flags = strings.Fields(string(file.Data)) + if err := test.flagSet().Parse(test.flags); err != nil { + return nil, fmt.Errorf("parsing flags: %v", err) + } + + case file.Name == "settings.json": + if err := json.Unmarshal(file.Data, &test.settings); err != nil { + return nil, err + } + + case file.Name == "env": + test.env = make(map[string]string) + fields := strings.Fields(string(file.Data)) + for _, field := range fields { + // TODO: use strings.Cut once we are on 1.18+. + key, value, ok := cut(field, "=") + if !ok { + return nil, fmt.Errorf("env vars must be formatted as var=value, got %q", field) + } + test.env[key] = value + } + + case strings.HasPrefix(file.Name, "@"): // golden content + prefix, name, ok := cut(file.Name, "/") + if !ok { + return nil, fmt.Errorf("golden file path %q must contain '/'", file.Name) + } + goldenID := prefix[len("@"):] + if _, ok := test.golden[goldenID]; !ok { + test.golden[goldenID] = &Golden{ + id: goldenID, + data: make(map[string][]byte), + } + } + test.golden[goldenID].data[name] = file.Data + + default: // ordinary file content + notes, err := expect.Parse(test.fset, file.Name, file.Data) + if err != nil { + return nil, fmt.Errorf("parsing notes in %q: %v", file.Name, err) + } + test.notes = append(test.notes, notes...) + test.files[file.Name] = file.Data + } + } + + return test, nil +} + +// cut is a copy of strings.Cut. +// +// TODO: once we only support Go 1.18+, just use strings.Cut. +func cut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +// writeMarkerTests writes the updated golden content to the test data files. +func writeMarkerTests(dir string, tests []*MarkerTest) error { + for _, test := range tests { + if !test.executed { + continue + } + arch := &txtar.Archive{ + Comment: test.archive.Comment, + } + + // Special configuration files go first. + if len(test.flags) > 0 { + flags := strings.Join(test.flags, " ") + arch.Files = append(arch.Files, txtar.File{Name: "flags", Data: []byte(flags)}) + } + if len(test.settings) > 0 { + data, err := json.MarshalIndent(test.settings, "", "\t") + if err != nil { + return err + } + arch.Files = append(arch.Files, txtar.File{Name: "settings.json", Data: data}) + } + if len(test.env) > 0 { + var vars []string + for k, v := range test.env { + vars = append(vars, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(vars) + data := []byte(strings.Join(vars, "\n")) + arch.Files = append(arch.Files, txtar.File{Name: "env", Data: data}) + } + + // ...followed by ordinary files. Preserve the order they appeared in the + // original archive. + for _, file := range test.archive.Files { + if _, ok := test.files[file.Name]; ok { // ordinary file + arch.Files = append(arch.Files, file) + } + } + + // ...followed by golden files. + var goldenFiles []txtar.File + for id, golden := range test.golden { + for name, data := range golden.updated { + fullName := "@" + id + "/" + name + goldenFiles = append(goldenFiles, txtar.File{Name: fullName, Data: data}) + } + } + // Unlike ordinary files, golden content is usually not manually edited, so + // we sort lexically. + sort.Slice(goldenFiles, func(i, j int) bool { + return goldenFiles[i].Name < goldenFiles[j].Name + }) + arch.Files = append(arch.Files, goldenFiles...) + + data := txtar.Format(arch) + filename := filepath.Join(dir, test.name) + if err := os.WriteFile(filename, data, 0644); err != nil { + return err + } + } + return nil +} + +// newEnv creates a new environment for a marker test. +// +// TODO(rfindley): simplify and refactor the construction of testing +// environments across regtests, marker tests, and benchmarks. +func newEnv(t *testing.T, cache *cache.Cache, files map[string][]byte, config fake.EditorConfig) *Env { + sandbox, err := fake.NewSandbox(&fake.SandboxConfig{ + RootDir: t.TempDir(), + GOPROXY: "https://proxy.golang.org", + Files: files, + }) + if err != nil { + t.Fatal(err) + } + + // Put a debug instance in the context to prevent logging to stderr. + // See associated TODO in runner.go: we should revisit this pattern. + ctx := context.Background() + ctx = debug.WithInstance(ctx, "", "off") + + awaiter := NewAwaiter(sandbox.Workdir) + ss := lsprpc.NewStreamServer(cache, false, hooks.Options) + server := servertest.NewPipeServer(ss, jsonrpc2.NewRawStream) + editor, err := fake.NewEditor(sandbox, config).Connect(ctx, server, awaiter.Hooks()) + if err != nil { + sandbox.Close() // ignore error + t.Fatal(err) + } + if err := awaiter.Await(ctx, InitialWorkspaceLoad); err != nil { + sandbox.Close() // ignore error + t.Fatal(err) + } + return &Env{ + T: t, + Ctx: ctx, + Editor: editor, + Sandbox: sandbox, + Awaiter: awaiter, + } +} + +type markerInfo struct { + fn reflect.Value // the func to invoke + paramTypes []reflect.Type // parameter types, for zero values + converters []converter // to convert non-blank arguments +} + +type markerContext struct { + test *MarkerTest + env *Env + + // Collected information. + locations map[expect.Identifier]protocol.Location + diags map[protocol.Location][]protocol.Diagnostic +} + +// fmtLoc formats the given pos in the context of the test, using +// archive-relative paths for files and including the line number in the full +// archive file. +func (c markerContext) fmtPos(pos token.Pos) string { + file := c.test.fset.File(pos) + if file == nil { + c.env.T.Errorf("position %d not in test fileset", pos) + return "" + } + m := c.env.Editor.Mapper(file.Name()) + if m == nil { + c.env.T.Errorf("%s is not open", file.Name()) + return "" + } + loc, err := m.PosLocation(file, pos, pos) + if err != nil { + c.env.T.Errorf("Mapper(%s).PosLocation failed: %v", file.Name(), err) + } + return c.fmtLoc(loc) +} + +// fmtLoc formats the given location in the context of the test, using +// archive-relative paths for files and including the line number in the full +// archive file. +func (c markerContext) fmtLoc(loc protocol.Location) string { + if loc == (protocol.Location{}) { + return "" + } + lines := bytes.Count(c.test.archive.Comment, []byte("\n")) + var name string + for _, f := range c.test.archive.Files { + lines++ // -- separator -- + uri := c.env.Sandbox.Workdir.URI(f.Name) + if uri == loc.URI { + name = f.Name + break + } + lines += bytes.Count(f.Data, []byte("\n")) + } + if name == "" { + c.env.T.Errorf("unable to find %s in test archive", loc) + return "" + } + m := c.env.Editor.Mapper(name) + s, err := m.LocationSpan(loc) + if err != nil { + c.env.T.Errorf("error formatting location %s: %v", loc, err) + return "" + } + + innerSpan := fmt.Sprintf("%d:%d", s.Start().Line(), s.Start().Column()) // relative to the embedded file + outerSpan := fmt.Sprintf("%d:%d", lines+s.Start().Line(), s.Start().Column()) // relative to the archive file + if s.Start() != s.End() { + if s.End().Line() == s.Start().Line() { + innerSpan += fmt.Sprintf("-%d", s.End().Column()) + outerSpan += fmt.Sprintf("-%d", s.End().Column()) + } else { + innerSpan += fmt.Sprintf("-%d:%d", s.End().Line(), s.End().Column()) + innerSpan += fmt.Sprintf("-%d:%d", lines+s.End().Line(), s.End().Column()) + } + } + + return fmt.Sprintf("%s:%s (%s:%s)", name, innerSpan, c.test.name, outerSpan) +} + +// converter is the signature of argument converters. +type converter func(*markerContext, *expect.Note, interface{}) (interface{}, error) + +// makeMarker uses reflection to load markerInfo for the given func value. +func makeMarker(fn interface{}) markerInfo { + mi := markerInfo{ + fn: reflect.ValueOf(fn), + } + mtyp := mi.fn.Type() + if mtyp.NumIn() == 0 || mtyp.In(0) != markerContextType { + panic(fmt.Sprintf("marker function %#v must accept markerContext as its first argument", mi.fn)) + } + if mtyp.NumOut() != 0 { + panic(fmt.Sprintf("marker function %#v must not have results", mi.fn)) + } + for a := 1; a < mtyp.NumIn(); a++ { + in := mtyp.In(a) + mi.paramTypes = append(mi.paramTypes, in) + c := makeConverter(in) + mi.converters = append(mi.converters, c) + } + return mi +} + +// Types with special conversions. +var ( + goldenType = reflect.TypeOf(&Golden{}) + locationType = reflect.TypeOf(protocol.Location{}) + markerContextType = reflect.TypeOf(&markerContext{}) + regexpType = reflect.TypeOf(®exp.Regexp{}) +) + +func makeConverter(paramType reflect.Type) converter { + switch paramType { + case goldenType: + return goldenConverter + case locationType: + return locationConverter + default: + return func(_ *markerContext, _ *expect.Note, arg interface{}) (interface{}, error) { + if argType := reflect.TypeOf(arg); argType != paramType { + return nil, fmt.Errorf("cannot convert type %s to %s", argType, paramType) + } + return arg, nil + } + } +} + +// locationConverter converts a string argument into the protocol location +// corresponding to the first position of the string in the line preceding the +// note. +func locationConverter(c *markerContext, note *expect.Note, arg interface{}) (interface{}, error) { + switch arg := arg.(type) { + case string: + startOff, preceding, m, err := linePreceding(c, note.Pos) + if err != nil { + return protocol.Location{}, err + } + idx := bytes.Index(preceding, []byte(arg)) + if idx < 0 { + return nil, fmt.Errorf("substring %q not found in %q", arg, preceding) + } + off := startOff + idx + return m.OffsetLocation(off, off+len(arg)) + case *regexp.Regexp: + return findRegexpInLine(c, note.Pos, arg) + case expect.Identifier: + loc, ok := c.locations[arg] + if !ok { + return nil, fmt.Errorf("no location named %q", arg) + } + return loc, nil + default: + return nil, fmt.Errorf("cannot convert argument type %T to location (must be a string to match the preceding line)", arg) + } +} + +// findRegexpInLine searches the partial line preceding pos for a match for the +// regular expression re, returning a location spanning the first match. If re +// contains exactly one subgroup, the position of this subgroup match is +// returned rather than the position of the full match. +func findRegexpInLine(c *markerContext, pos token.Pos, re *regexp.Regexp) (protocol.Location, error) { + startOff, preceding, m, err := linePreceding(c, pos) + if err != nil { + return protocol.Location{}, err + } + + matches := re.FindSubmatchIndex(preceding) + if len(matches) == 0 { + return protocol.Location{}, fmt.Errorf("no match for regexp %q found in %q", re, string(preceding)) + } + var start, end int + switch len(matches) { + case 2: + // no subgroups: return the range of the regexp expression + start, end = matches[0], matches[1] + case 4: + // one subgroup: return its range + start, end = matches[2], matches[3] + default: + return protocol.Location{}, fmt.Errorf("invalid location regexp %q: expect either 0 or 1 subgroups, got %d", re, len(matches)/2-1) + } + + return m.OffsetLocation(start+startOff, end+startOff) +} + +func linePreceding(c *markerContext, pos token.Pos) (int, []byte, *protocol.Mapper, error) { + file := c.test.fset.File(pos) + posn := safetoken.Position(file, pos) + lineStart := file.LineStart(posn.Line) + startOff, endOff, err := safetoken.Offsets(file, lineStart, pos) + if err != nil { + return 0, nil, nil, err + } + m := c.env.Editor.Mapper(file.Name()) + return startOff, m.Content[startOff:endOff], m, nil +} + +// goldenConverter convers an identifier into the Golden directory of content +// prefixed by @ in the test archive file. +func goldenConverter(c *markerContext, note *expect.Note, arg interface{}) (interface{}, error) { + switch arg := arg.(type) { + case expect.Identifier: + golden := c.test.golden[string(arg)] + // If there was no golden content for this identifier, we must create one + // to handle the case where -update is set: we need a place to store + // the updated content. + if golden == nil { + golden = new(Golden) + c.test.golden[string(arg)] = golden + } + return golden, nil + default: + return nil, fmt.Errorf("invalid input type %T: golden key must be an identifier", arg) + } +} + +// defMarker implements the @godef marker, running textDocument/definition at +// the given src location and asserting that there is exactly one resulting +// location, matching dst. +// +// TODO(rfindley): support a variadic destination set. +func defMarker(c *markerContext, src, dst protocol.Location) { + got := c.env.GoToDefinition(src) + if got != dst { + c.env.T.Errorf("%s: definition location does not match:\n\tgot: %s\n\twant %s", c.fmtLoc(src), c.fmtLoc(got), c.fmtLoc(dst)) + } +} + +// hoverMarker implements the @hover marker, running textDocument/hover at the +// given src location and asserting that the resulting hover is over the dst +// location (typically a span surrounding src), and that the markdown content +// matches the golden content. +func hoverMarker(c *markerContext, src, dst protocol.Location, golden *Golden) { + content, gotDst := c.env.Hover(src) + if gotDst != dst { + c.env.T.Errorf("%s: hover location does not match:\n\tgot: %s\n\twant %s)", c.fmtLoc(src), c.fmtLoc(gotDst), c.fmtLoc(dst)) + } + gotMD := "" + if content != nil { + gotMD = content.Value + } + wantMD := "" + if golden != nil { + wantMD = string(golden.Get(c.env.T, "hover.md", func() []byte { return []byte(gotMD) })) + } + // Normalize newline termination: archive files can't express non-newline + // terminated files. + if strings.HasSuffix(wantMD, "\n") && !strings.HasSuffix(gotMD, "\n") { + gotMD += "\n" + } + if diff := tests.DiffMarkdown(wantMD, gotMD); diff != "" { + c.env.T.Errorf("%s: hover markdown mismatch (-want +got):\n%s", c.fmtLoc(src), diff) + } +} + +// locMarker implements the @loc hover marker. It is executed before other +// markers, so that locations are available. +func locMarker(c *markerContext, name expect.Identifier, loc protocol.Location) { + c.locations[name] = loc +} + +// diagMarker implements the @diag hover marker. It eliminates diagnostics from +// the observed set in the markerContext. +func diagMarker(c *markerContext, loc protocol.Location, re *regexp.Regexp) { + idx := -1 + diags := c.diags[loc] + for i, diag := range diags { + if re.MatchString(diag.Message) { + idx = i + break + } + } + if idx >= 0 { + c.diags[loc] = append(diags[:idx], diags[idx+1:]...) + } else { + c.env.T.Errorf("%s: no diagnostic matches %q", c.fmtLoc(loc), re) + } +} diff --git a/gopls/internal/lsp/regtest/options.go b/gopls/internal/lsp/regtest/options.go new file mode 100644 index 00000000000..3820e96b37f --- /dev/null +++ b/gopls/internal/lsp/regtest/options.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import "golang.org/x/tools/gopls/internal/lsp/fake" + +type runConfig struct { + editor fake.EditorConfig + sandbox fake.SandboxConfig + modes Mode + skipHooks bool +} + +// A RunOption augments the behavior of the test runner. +type RunOption interface { + set(*runConfig) +} + +type optionSetter func(*runConfig) + +func (f optionSetter) set(opts *runConfig) { + f(opts) +} + +// ProxyFiles configures a file proxy using the given txtar-encoded string. +func ProxyFiles(txt string) RunOption { + return optionSetter(func(opts *runConfig) { + opts.sandbox.ProxyFiles = fake.UnpackTxt(txt) + }) +} + +// Modes configures the execution modes that the test should run in. +// +// By default, modes are configured by the test runner. If this option is set, +// it overrides the set of default modes and the test runs in exactly these +// modes. +func Modes(modes Mode) RunOption { + return optionSetter(func(opts *runConfig) { + if opts.modes != 0 { + panic("modes set more than once") + } + opts.modes = modes + }) +} + +// WindowsLineEndings configures the editor to use windows line endings. +func WindowsLineEndings() RunOption { + return optionSetter(func(opts *runConfig) { + opts.editor.WindowsLineEndings = true + }) +} + +// Settings is a RunOption that sets user-provided configuration for the LSP +// server. +// +// As a special case, the env setting must not be provided via Settings: use +// EnvVars instead. +type Settings map[string]interface{} + +func (s Settings) set(opts *runConfig) { + if opts.editor.Settings == nil { + opts.editor.Settings = make(map[string]interface{}) + } + for k, v := range s { + opts.editor.Settings[k] = v + } +} + +// WorkspaceFolders configures the workdir-relative workspace folders to send +// to the LSP server. By default the editor sends a single workspace folder +// corresponding to the workdir root. To explicitly configure no workspace +// folders, use WorkspaceFolders with no arguments. +func WorkspaceFolders(relFolders ...string) RunOption { + if len(relFolders) == 0 { + // Use an empty non-nil slice to signal explicitly no folders. + relFolders = []string{} + } + return optionSetter(func(opts *runConfig) { + opts.editor.WorkspaceFolders = relFolders + }) +} + +// EnvVars sets environment variables for the LSP session. When applying these +// variables to the session, the special string $SANDBOX_WORKDIR is replaced by +// the absolute path to the sandbox working directory. +type EnvVars map[string]string + +func (e EnvVars) set(opts *runConfig) { + if opts.editor.Env == nil { + opts.editor.Env = make(map[string]string) + } + for k, v := range e { + opts.editor.Env[k] = v + } +} + +// InGOPATH configures the workspace working directory to be GOPATH, rather +// than a separate working directory for use with modules. +func InGOPATH() RunOption { + return optionSetter(func(opts *runConfig) { + opts.sandbox.InGoPath = true + }) +} diff --git a/gopls/internal/lsp/regtest/regtest.go b/gopls/internal/lsp/regtest/regtest.go new file mode 100644 index 00000000000..b2ef3575e5d --- /dev/null +++ b/gopls/internal/lsp/regtest/regtest.go @@ -0,0 +1,155 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "context" + "flag" + "fmt" + "go/token" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/tools/gopls/internal/lsp/cmd" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/internal/tool" +) + +var ( + runSubprocessTests = flag.Bool("enable_gopls_subprocess_tests", false, "run regtests against a gopls subprocess") + goplsBinaryPath = flag.String("gopls_test_binary", "", "path to the gopls binary for use as a remote, for use with the -enable_gopls_subprocess_tests flag") + regtestTimeout = flag.Duration("regtest_timeout", defaultRegtestTimeout(), "if nonzero, default timeout for each regtest; defaults to GOPLS_REGTEST_TIMEOUT") + skipCleanup = flag.Bool("regtest_skip_cleanup", false, "whether to skip cleaning up temp directories") + printGoroutinesOnFailure = flag.Bool("regtest_print_goroutines", false, "whether to print goroutines info on failure") + printLogs = flag.Bool("regtest_print_logs", false, "whether to print LSP logs") +) + +func defaultRegtestTimeout() time.Duration { + s := os.Getenv("GOPLS_REGTEST_TIMEOUT") + if s == "" { + return 0 + } + d, err := time.ParseDuration(s) + if err != nil { + fmt.Fprintf(os.Stderr, "invalid GOPLS_REGTEST_TIMEOUT %q: %v\n", s, err) + os.Exit(2) + } + return d +} + +var runner *Runner + +type regtestRunner interface { + Run(t *testing.T, files string, f TestFunc) +} + +func Run(t *testing.T, files string, f TestFunc) { + runner.Run(t, files, f) +} + +func WithOptions(opts ...RunOption) configuredRunner { + return configuredRunner{opts: opts} +} + +type configuredRunner struct { + opts []RunOption +} + +func (r configuredRunner) Run(t *testing.T, files string, f TestFunc) { + runner.Run(t, files, f, r.opts...) +} + +type RunMultiple []struct { + Name string + Runner regtestRunner +} + +func (r RunMultiple) Run(t *testing.T, files string, f TestFunc) { + for _, runner := range r { + t.Run(runner.Name, func(t *testing.T) { + runner.Runner.Run(t, files, f) + }) + } +} + +// DefaultModes returns the default modes to run for each regression test (they +// may be reconfigured by the tests themselves). +func DefaultModes() Mode { + modes := Default + if !testing.Short() { + modes |= Experimental | Forwarded + } + if *runSubprocessTests { + modes |= SeparateProcess + } + return modes +} + +// Main sets up and tears down the shared regtest state. +func Main(m *testing.M, hook func(*source.Options)) { + // golang/go#54461: enable additional debugging around hanging Go commands. + gocommand.DebugHangingGoCommands = true + + // If this magic environment variable is set, run gopls instead of the test + // suite. See the documentation for runTestAsGoplsEnvvar for more details. + if os.Getenv(runTestAsGoplsEnvvar) == "true" { + tool.Main(context.Background(), cmd.New("gopls", "", nil, hook), os.Args[1:]) + os.Exit(0) + } + + testenv.ExitIfSmallMachine() + + // Disable GOPACKAGESDRIVER, as it can cause spurious test failures. + os.Setenv("GOPACKAGESDRIVER", "off") + + flag.Parse() + + runner = &Runner{ + DefaultModes: DefaultModes(), + Timeout: *regtestTimeout, + PrintGoroutinesOnFailure: *printGoroutinesOnFailure, + SkipCleanup: *skipCleanup, + OptionsHook: hook, + fset: token.NewFileSet(), + store: memoize.NewStore(memoize.NeverEvict), + } + + runner.goplsPath = *goplsBinaryPath + if runner.goplsPath == "" { + var err error + runner.goplsPath, err = os.Executable() + if err != nil { + panic(fmt.Sprintf("finding test binary path: %v", err)) + } + } + + dir, err := ioutil.TempDir("", "gopls-regtest-") + if err != nil { + panic(fmt.Errorf("creating regtest temp directory: %v", err)) + } + runner.tempDir = dir + + var code int + defer func() { + if err := runner.Close(); err != nil { + fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err) + // Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on + // Windows due to file locking, but this is OK for our CI. + // + // Fail on go1.13+, except for windows and android which have shutdown problems. + if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" { + os.Exit(1) + } + } + os.Exit(code) + }() + code = m.Run() +} diff --git a/gopls/internal/lsp/regtest/runner.go b/gopls/internal/lsp/regtest/runner.go new file mode 100644 index 00000000000..20dac842316 --- /dev/null +++ b/gopls/internal/lsp/regtest/runner.go @@ -0,0 +1,438 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "bytes" + "context" + "fmt" + "go/token" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + "sync" + "testing" + "time" + + exec "golang.org/x/sys/execabs" + + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" + "golang.org/x/tools/internal/memoize" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/internal/xcontext" +) + +// Mode is a bitmask that defines for which execution modes a test should run. +// +// Each mode controls several aspects of gopls' configuration: +// - Which server options to use for gopls sessions +// - Whether to use a shared cache +// - Whether to use a shared server +// - Whether to run the server in-process or in a separate process +// +// The behavior of each mode with respect to these aspects is summarized below. +// TODO(rfindley, cleanup): rather than using arbitrary names for these modes, +// we can compose them explicitly out of the features described here, allowing +// individual tests more freedom in constructing problematic execution modes. +// For example, a test could assert on a certain behavior when running with +// experimental options on a separate process. Moreover, we could unify 'Modes' +// with 'Options', and use RunMultiple rather than a hard-coded loop through +// modes. +// +// Mode | Options | Shared Cache? | Shared Server? | In-process? +// --------------------------------------------------------------------------- +// Default | Default | Y | N | Y +// Forwarded | Default | Y | Y | Y +// SeparateProcess | Default | Y | Y | N +// Experimental | Experimental | N | N | Y +type Mode int + +const ( + // Default mode runs gopls with the default options, communicating over pipes + // to emulate the lsp sidecar execution mode, which communicates over + // stdin/stdout. + // + // It uses separate servers for each test, but a shared cache, to avoid + // duplicating work when processing GOROOT. + Default Mode = 1 << iota + + // Forwarded uses the default options, but forwards connections to a shared + // in-process gopls server. + Forwarded + + // SeparateProcess uses the default options, but forwards connection to an + // external gopls daemon. + // + // Only supported on GOOS=linux. + SeparateProcess + + // Experimental enables all of the experimental configurations that are + // being developed, and runs gopls in sidecar mode. + // + // It uses a separate cache for each test, to exercise races that may only + // appear with cache misses. + Experimental +) + +func (m Mode) String() string { + switch m { + case Default: + return "default" + case Forwarded: + return "forwarded" + case SeparateProcess: + return "separate process" + case Experimental: + return "experimental" + default: + return "unknown mode" + } +} + +// A Runner runs tests in gopls execution environments, as specified by its +// modes. For modes that share state (for example, a shared cache or common +// remote), any tests that execute on the same Runner will share the same +// state. +type Runner struct { + // Configuration + DefaultModes Mode // modes to run for each test + Timeout time.Duration // per-test timeout, if set + PrintGoroutinesOnFailure bool // whether to dump goroutines on test failure + SkipCleanup bool // if set, don't delete test data directories when the test exits + OptionsHook func(*source.Options) // if set, use these options when creating gopls sessions + + // Immutable state shared across test invocations + goplsPath string // path to the gopls executable (for SeparateProcess mode) + tempDir string // shared parent temp directory + fset *token.FileSet // shared FileSet + store *memoize.Store // shared store + + // Lazily allocated resources + tsOnce sync.Once + ts *servertest.TCPServer // shared in-process test server ("forwarded" mode) + + startRemoteOnce sync.Once + remoteSocket string // unix domain socket for shared daemon ("separate process" mode) + remoteErr error + cancelRemote func() +} + +type TestFunc func(t *testing.T, env *Env) + +// Run executes the test function in the default configured gopls execution +// modes. For each a test run, a new workspace is created containing the +// un-txtared files specified by filedata. +func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) { + // TODO(rfindley): this function has gotten overly complicated, and warrants + // refactoring. + t.Helper() + checkBuilder(t) + testenv.NeedsGoPackages(t) + + tests := []struct { + name string + mode Mode + getServer func(func(*source.Options)) jsonrpc2.StreamServer + }{ + {"default", Default, r.defaultServer}, + {"forwarded", Forwarded, r.forwardedServer}, + {"separate_process", SeparateProcess, r.separateProcessServer}, + {"experimental", Experimental, r.experimentalServer}, + } + + for _, tc := range tests { + tc := tc + var config runConfig + for _, opt := range opts { + opt.set(&config) + } + modes := r.DefaultModes + if config.modes != 0 { + modes = config.modes + } + if modes&tc.mode == 0 { + continue + } + + t.Run(tc.name, func(t *testing.T) { + // TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak + // goroutines in this test function. + // stacktest.NoLeak(t) + + ctx := context.Background() + if r.Timeout != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.Timeout) + defer cancel() + } else if d, ok := testenv.Deadline(t); ok { + timeout := time.Until(d) * 19 / 20 // Leave an arbitrary 5% for cleanup. + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + // TODO(rfindley): do we need an instance at all? Can it be removed? + ctx = debug.WithInstance(ctx, "", "off") + + rootDir := filepath.Join(r.tempDir, filepath.FromSlash(t.Name())) + if err := os.MkdirAll(rootDir, 0755); err != nil { + t.Fatal(err) + } + + files := fake.UnpackTxt(files) + if config.editor.WindowsLineEndings { + for name, data := range files { + files[name] = bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n")) + } + } + config.sandbox.Files = files + config.sandbox.RootDir = rootDir + sandbox, err := fake.NewSandbox(&config.sandbox) + if err != nil { + t.Fatal(err) + } + defer func() { + if !r.SkipCleanup { + if err := sandbox.Close(); err != nil { + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + t.Errorf("closing the sandbox: %v", err) + } + } + }() + + ss := tc.getServer(r.OptionsHook) + + framer := jsonrpc2.NewRawStream + ls := &loggingFramer{} + framer = ls.framer(jsonrpc2.NewRawStream) + ts := servertest.NewPipeServer(ss, framer) + + awaiter := NewAwaiter(sandbox.Workdir) + editor, err := fake.NewEditor(sandbox, config.editor).Connect(ctx, ts, awaiter.Hooks()) + if err != nil { + t.Fatal(err) + } + env := &Env{ + T: t, + Ctx: ctx, + Sandbox: sandbox, + Editor: editor, + Server: ts, + Awaiter: awaiter, + } + defer func() { + if t.Failed() && r.PrintGoroutinesOnFailure { + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + } + if t.Failed() || *printLogs { + ls.printBuffers(t.Name(), os.Stderr) + } + // For tests that failed due to a timeout, don't fail to shutdown + // because ctx is done. + // + // There is little point to setting an arbitrary timeout for closing + // the editor: in general we want to clean up before proceeding to the + // next test, and if there is a deadlock preventing closing it will + // eventually be handled by the `go test` timeout. + if err := editor.Close(xcontext.Detach(ctx)); err != nil { + t.Errorf("closing editor: %v", err) + } + }() + // Always await the initial workspace load. + env.Await(InitialWorkspaceLoad) + test(t, env) + }) + } +} + +// longBuilders maps builders that are skipped when -short is set to a +// (possibly empty) justification. +var longBuilders = map[string]string{ + "openbsd-amd64-64": "golang.org/issues/42789", + "openbsd-386-64": "golang.org/issues/42789", + "openbsd-386-68": "golang.org/issues/42789", + "openbsd-amd64-68": "golang.org/issues/42789", + "darwin-amd64-10_12": "", + "freebsd-amd64-race": "", + "illumos-amd64": "", + "netbsd-arm-bsiegert": "", + "solaris-amd64-oraclerel": "", + "windows-arm-zx2c4": "", +} + +func checkBuilder(t *testing.T) { + t.Helper() + builder := os.Getenv("GO_BUILDER_NAME") + if reason, ok := longBuilders[builder]; ok && testing.Short() { + if reason != "" { + t.Skipf("Skipping %s with -short due to %s", builder, reason) + } else { + t.Skipf("Skipping %s with -short", builder) + } + } +} + +type loggingFramer struct { + mu sync.Mutex + buf *safeBuffer +} + +// safeBuffer is a threadsafe buffer for logs. +type safeBuffer struct { + mu sync.Mutex + buf bytes.Buffer +} + +func (b *safeBuffer) Write(p []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Write(p) +} + +func (s *loggingFramer) framer(f jsonrpc2.Framer) jsonrpc2.Framer { + return func(nc net.Conn) jsonrpc2.Stream { + s.mu.Lock() + framed := false + if s.buf == nil { + s.buf = &safeBuffer{buf: bytes.Buffer{}} + framed = true + } + s.mu.Unlock() + stream := f(nc) + if framed { + return protocol.LoggingStream(stream, s.buf) + } + return stream + } +} + +func (s *loggingFramer) printBuffers(testname string, w io.Writer) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.buf == nil { + return + } + fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs for %q\n", testname) + s.buf.mu.Lock() + io.Copy(w, &s.buf.buf) + s.buf.mu.Unlock() + fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname) +} + +// defaultServer handles the Default execution mode. +func (r *Runner) defaultServer(optsHook func(*source.Options)) jsonrpc2.StreamServer { + return lsprpc.NewStreamServer(cache.New(r.fset, r.store), false, optsHook) +} + +// experimentalServer handles the Experimental execution mode. +func (r *Runner) experimentalServer(optsHook func(*source.Options)) jsonrpc2.StreamServer { + options := func(o *source.Options) { + optsHook(o) + o.EnableAllExperiments() + } + return lsprpc.NewStreamServer(cache.New(nil, nil), false, options) +} + +// forwardedServer handles the Forwarded execution mode. +func (r *Runner) forwardedServer(optsHook func(*source.Options)) jsonrpc2.StreamServer { + r.tsOnce.Do(func() { + ctx := context.Background() + ctx = debug.WithInstance(ctx, "", "off") + ss := lsprpc.NewStreamServer(cache.New(nil, nil), false, optsHook) + r.ts = servertest.NewTCPServer(ctx, ss, nil) + }) + return newForwarder("tcp", r.ts.Addr) +} + +// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running +// tests. It's a trick to allow tests to find a binary to use to start a gopls +// subprocess. +const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS" + +// separateProcessServer handles the SeparateProcess execution mode. +func (r *Runner) separateProcessServer(optsHook func(*source.Options)) jsonrpc2.StreamServer { + if runtime.GOOS != "linux" { + panic("separate process execution mode is only supported on linux") + } + + r.startRemoteOnce.Do(func() { + socketDir, err := ioutil.TempDir(r.tempDir, "gopls-regtest-socket") + if err != nil { + r.remoteErr = err + return + } + r.remoteSocket = filepath.Join(socketDir, "gopls-test-daemon") + + // The server should be killed by when the test runner exits, but to be + // conservative also set a listen timeout. + args := []string{"serve", "-listen", "unix;" + r.remoteSocket, "-listen.timeout", "1m"} + + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(ctx, r.goplsPath, args...) + cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true") + + // Start the external gopls process. This is still somewhat racy, as we + // don't know when gopls binds to the socket, but the gopls forwarder + // client has built-in retry behavior that should mostly mitigate this + // problem (and if it doesn't, we probably want to improve the retry + // behavior). + if err := cmd.Start(); err != nil { + cancel() + r.remoteSocket = "" + r.remoteErr = err + } else { + r.cancelRemote = cancel + // Spin off a goroutine to wait, so that we free up resources when the + // server exits. + go cmd.Wait() + } + }) + + return newForwarder("unix", r.remoteSocket) +} + +func newForwarder(network, address string) *lsprpc.Forwarder { + server, err := lsprpc.NewForwarder(network+";"+address, nil) + if err != nil { + // This should never happen, as we are passing an explicit address. + panic(fmt.Sprintf("internal error: unable to create forwarder: %v", err)) + } + return server +} + +// Close cleans up resource that have been allocated to this workspace. +func (r *Runner) Close() error { + var errmsgs []string + if r.ts != nil { + if err := r.ts.Close(); err != nil { + errmsgs = append(errmsgs, err.Error()) + } + } + if r.cancelRemote != nil { + r.cancelRemote() + } + if !r.SkipCleanup { + if err := os.RemoveAll(r.tempDir); err != nil { + errmsgs = append(errmsgs, err.Error()) + } + } + if len(errmsgs) > 0 { + return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t")) + } + return nil +} diff --git a/gopls/internal/lsp/regtest/wrappers.go b/gopls/internal/lsp/regtest/wrappers.go new file mode 100644 index 00000000000..cdd39e5c7c9 --- /dev/null +++ b/gopls/internal/lsp/regtest/wrappers.go @@ -0,0 +1,476 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "encoding/json" + "path" + + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +// RemoveWorkspaceFile deletes a file on disk but does nothing in the +// editor. It calls t.Fatal on any error. +func (e *Env) RemoveWorkspaceFile(name string) { + e.T.Helper() + if err := e.Sandbox.Workdir.RemoveFile(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// ReadWorkspaceFile reads a file from the workspace, calling t.Fatal on any +// error. +func (e *Env) ReadWorkspaceFile(name string) string { + e.T.Helper() + content, err := e.Sandbox.Workdir.ReadFile(name) + if err != nil { + e.T.Fatal(err) + } + return string(content) +} + +// WriteWorkspaceFile writes a file to disk but does nothing in the editor. +// It calls t.Fatal on any error. +func (e *Env) WriteWorkspaceFile(name, content string) { + e.T.Helper() + if err := e.Sandbox.Workdir.WriteFile(e.Ctx, name, content); err != nil { + e.T.Fatal(err) + } +} + +// WriteWorkspaceFiles deletes a file on disk but does nothing in the +// editor. It calls t.Fatal on any error. +func (e *Env) WriteWorkspaceFiles(files map[string]string) { + e.T.Helper() + if err := e.Sandbox.Workdir.WriteFiles(e.Ctx, files); err != nil { + e.T.Fatal(err) + } +} + +// ListFiles lists relative paths to files in the given directory. +// It calls t.Fatal on any error. +func (e *Env) ListFiles(dir string) []string { + e.T.Helper() + paths, err := e.Sandbox.Workdir.ListFiles(dir) + if err != nil { + e.T.Fatal(err) + } + return paths +} + +// OpenFile opens a file in the editor, calling t.Fatal on any error. +func (e *Env) OpenFile(name string) { + e.T.Helper() + if err := e.Editor.OpenFile(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// CreateBuffer creates a buffer in the editor, calling t.Fatal on any error. +func (e *Env) CreateBuffer(name string, content string) { + e.T.Helper() + if err := e.Editor.CreateBuffer(e.Ctx, name, content); err != nil { + e.T.Fatal(err) + } +} + +// BufferText returns the current buffer contents for the file with the given +// relative path, calling t.Fatal if the file is not open in a buffer. +func (e *Env) BufferText(name string) string { + e.T.Helper() + text, ok := e.Editor.BufferText(name) + if !ok { + e.T.Fatalf("buffer %q is not open", name) + } + return text +} + +// CloseBuffer closes an editor buffer without saving, calling t.Fatal on any +// error. +func (e *Env) CloseBuffer(name string) { + e.T.Helper() + if err := e.Editor.CloseBuffer(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// EditBuffer applies edits to an editor buffer, calling t.Fatal on any error. +func (e *Env) EditBuffer(name string, edits ...protocol.TextEdit) { + e.T.Helper() + if err := e.Editor.EditBuffer(e.Ctx, name, edits); err != nil { + e.T.Fatal(err) + } +} + +func (e *Env) SetBufferContent(name string, content string) { + e.T.Helper() + if err := e.Editor.SetBufferContent(e.Ctx, name, content); err != nil { + e.T.Fatal(err) + } +} + +// RegexpSearch returns the starting position of the first match for re in the +// buffer specified by name, calling t.Fatal on any error. It first searches +// for the position in open buffers, then in workspace files. +func (e *Env) RegexpSearch(name, re string) protocol.Location { + e.T.Helper() + loc, err := e.Editor.RegexpSearch(name, re) + if err == fake.ErrUnknownBuffer { + loc, err = e.Sandbox.Workdir.RegexpSearch(name, re) + } + if err != nil { + e.T.Fatalf("RegexpSearch: %v, %v for %q", name, err, re) + } + return loc +} + +// RegexpReplace replaces the first group in the first match of regexpStr with +// the replace text, calling t.Fatal on any error. +func (e *Env) RegexpReplace(name, regexpStr, replace string) { + e.T.Helper() + if err := e.Editor.RegexpReplace(e.Ctx, name, regexpStr, replace); err != nil { + e.T.Fatalf("RegexpReplace: %v", err) + } +} + +// SaveBuffer saves an editor buffer, calling t.Fatal on any error. +func (e *Env) SaveBuffer(name string) { + e.T.Helper() + if err := e.Editor.SaveBuffer(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +func (e *Env) SaveBufferWithoutActions(name string) { + e.T.Helper() + if err := e.Editor.SaveBufferWithoutActions(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// GoToDefinition goes to definition in the editor, calling t.Fatal on any +// error. It returns the path and position of the resulting jump. +func (e *Env) GoToDefinition(loc protocol.Location) protocol.Location { + e.T.Helper() + loc, err := e.Editor.GoToDefinition(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return loc +} + +// FormatBuffer formats the editor buffer, calling t.Fatal on any error. +func (e *Env) FormatBuffer(name string) { + e.T.Helper() + if err := e.Editor.FormatBuffer(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// OrganizeImports processes the source.organizeImports codeAction, calling +// t.Fatal on any error. +func (e *Env) OrganizeImports(name string) { + e.T.Helper() + if err := e.Editor.OrganizeImports(e.Ctx, name); err != nil { + e.T.Fatal(err) + } +} + +// ApplyQuickFixes processes the quickfix codeAction, calling t.Fatal on any error. +func (e *Env) ApplyQuickFixes(path string, diagnostics []protocol.Diagnostic) { + e.T.Helper() + loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // zero Range => whole file + if err := e.Editor.ApplyQuickFixes(e.Ctx, loc, diagnostics); err != nil { + e.T.Fatal(err) + } +} + +// ApplyCodeAction applies the given code action. +func (e *Env) ApplyCodeAction(action protocol.CodeAction) { + e.T.Helper() + if err := e.Editor.ApplyCodeAction(e.Ctx, action); err != nil { + e.T.Fatal(err) + } +} + +// GetQuickFixes returns the available quick fix code actions. +func (e *Env) GetQuickFixes(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction { + e.T.Helper() + loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // zero Range => whole file + actions, err := e.Editor.GetQuickFixes(e.Ctx, loc, diagnostics) + if err != nil { + e.T.Fatal(err) + } + return actions +} + +// Hover in the editor, calling t.Fatal on any error. +func (e *Env) Hover(loc protocol.Location) (*protocol.MarkupContent, protocol.Location) { + e.T.Helper() + c, loc, err := e.Editor.Hover(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return c, loc +} + +func (e *Env) DocumentLink(name string) []protocol.DocumentLink { + e.T.Helper() + links, err := e.Editor.DocumentLink(e.Ctx, name) + if err != nil { + e.T.Fatal(err) + } + return links +} + +func (e *Env) DocumentHighlight(loc protocol.Location) []protocol.DocumentHighlight { + e.T.Helper() + highlights, err := e.Editor.DocumentHighlight(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return highlights +} + +// RunGenerate runs go:generate on the given dir, calling t.Fatal on any error. +// It waits for the generate command to complete and checks for file changes +// before returning. +func (e *Env) RunGenerate(dir string) { + e.T.Helper() + if err := e.Editor.RunGenerate(e.Ctx, dir); err != nil { + e.T.Fatal(err) + } + e.Await(NoOutstandingWork()) + // Ideally the fake.Workspace would handle all synthetic file watching, but + // we help it out here as we need to wait for the generate command to + // complete before checking the filesystem. + e.CheckForFileChanges() +} + +// RunGoCommand runs the given command in the sandbox's default working +// directory. +func (e *Env) RunGoCommand(verb string, args ...string) { + e.T.Helper() + if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, true); err != nil { + e.T.Fatal(err) + } +} + +// RunGoCommandInDir is like RunGoCommand, but executes in the given +// relative directory of the sandbox. +func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) { + e.T.Helper() + if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, true); err != nil { + e.T.Fatal(err) + } +} + +// GoVersion checks the version of the go command. +// It returns the X in Go 1.X. +func (e *Env) GoVersion() int { + e.T.Helper() + v, err := e.Sandbox.GoVersion(e.Ctx) + if err != nil { + e.T.Fatal(err) + } + return v +} + +// DumpGoSum prints the correct go.sum contents for dir in txtar format, +// for use in creating regtests. +func (e *Env) DumpGoSum(dir string) { + e.T.Helper() + + if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil { + e.T.Fatal(err) + } + sumFile := path.Join(dir, "/go.sum") + e.T.Log("\n\n-- " + sumFile + " --\n" + e.ReadWorkspaceFile(sumFile)) + e.T.Fatal("see contents above") +} + +// CheckForFileChanges triggers a manual poll of the workspace for any file +// changes since creation, or since last polling. It is a workaround for the +// lack of true file watching support in the fake workspace. +func (e *Env) CheckForFileChanges() { + e.T.Helper() + if err := e.Sandbox.Workdir.CheckForFileChanges(e.Ctx); err != nil { + e.T.Fatal(err) + } +} + +// CodeLens calls textDocument/codeLens for the given path, calling t.Fatal on +// any error. +func (e *Env) CodeLens(path string) []protocol.CodeLens { + e.T.Helper() + lens, err := e.Editor.CodeLens(e.Ctx, path) + if err != nil { + e.T.Fatal(err) + } + return lens +} + +// ExecuteCodeLensCommand executes the command for the code lens matching the +// given command name. +func (e *Env) ExecuteCodeLensCommand(path string, cmd command.Command, result interface{}) { + e.T.Helper() + lenses := e.CodeLens(path) + var lens protocol.CodeLens + var found bool + for _, l := range lenses { + if l.Command.Command == cmd.ID() { + lens = l + found = true + } + } + if !found { + e.T.Fatalf("found no command with the ID %s", cmd.ID()) + } + e.ExecuteCommand(&protocol.ExecuteCommandParams{ + Command: lens.Command.Command, + Arguments: lens.Command.Arguments, + }, result) +} + +func (e *Env) ExecuteCommand(params *protocol.ExecuteCommandParams, result interface{}) { + e.T.Helper() + response, err := e.Editor.ExecuteCommand(e.Ctx, params) + if err != nil { + e.T.Fatal(err) + } + if result == nil { + return + } + // Hack: The result of an executeCommand request will be unmarshaled into + // maps. Re-marshal and unmarshal into the type we expect. + // + // This could be improved by generating a jsonrpc2 command client from the + // command.Interface, but that should only be done if we're consolidating + // this part of the tsprotocol generation. + data, err := json.Marshal(response) + if err != nil { + e.T.Fatal(err) + } + if err := json.Unmarshal(data, result); err != nil { + e.T.Fatal(err) + } +} + +// InlayHints calls textDocument/inlayHints for the given path, calling t.Fatal on +// any error. +func (e *Env) InlayHints(path string) []protocol.InlayHint { + e.T.Helper() + hints, err := e.Editor.InlayHint(e.Ctx, path) + if err != nil { + e.T.Fatal(err) + } + return hints +} + +// Symbol calls workspace/symbol +func (e *Env) Symbol(query string) []protocol.SymbolInformation { + e.T.Helper() + ans, err := e.Editor.Symbols(e.Ctx, query) + if err != nil { + e.T.Fatal(err) + } + return ans +} + +// References wraps Editor.References, calling t.Fatal on any error. +func (e *Env) References(loc protocol.Location) []protocol.Location { + e.T.Helper() + locations, err := e.Editor.References(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return locations +} + +// Rename wraps Editor.Rename, calling t.Fatal on any error. +func (e *Env) Rename(loc protocol.Location, newName string) { + e.T.Helper() + if err := e.Editor.Rename(e.Ctx, loc, newName); err != nil { + e.T.Fatal(err) + } +} + +// Implementations wraps Editor.Implementations, calling t.Fatal on any error. +func (e *Env) Implementations(loc protocol.Location) []protocol.Location { + e.T.Helper() + locations, err := e.Editor.Implementations(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return locations +} + +// RenameFile wraps Editor.RenameFile, calling t.Fatal on any error. +func (e *Env) RenameFile(oldPath, newPath string) { + e.T.Helper() + if err := e.Editor.RenameFile(e.Ctx, oldPath, newPath); err != nil { + e.T.Fatal(err) + } +} + +// SignatureHelp wraps Editor.SignatureHelp, calling t.Fatal on error +func (e *Env) SignatureHelp(loc protocol.Location) *protocol.SignatureHelp { + e.T.Helper() + sighelp, err := e.Editor.SignatureHelp(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return sighelp +} + +// Completion executes a completion request on the server. +func (e *Env) Completion(loc protocol.Location) *protocol.CompletionList { + e.T.Helper() + completions, err := e.Editor.Completion(e.Ctx, loc) + if err != nil { + e.T.Fatal(err) + } + return completions +} + +// AcceptCompletion accepts a completion for the given item at the given +// position. +func (e *Env) AcceptCompletion(loc protocol.Location, item protocol.CompletionItem) { + e.T.Helper() + if err := e.Editor.AcceptCompletion(e.Ctx, loc, item); err != nil { + e.T.Fatal(err) + } +} + +// CodeAction calls testDocument/codeAction for the given path, and calls +// t.Fatal if there are errors. +func (e *Env) CodeAction(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction { + e.T.Helper() + loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // no Range => whole file + actions, err := e.Editor.CodeAction(e.Ctx, loc, diagnostics) + if err != nil { + e.T.Fatal(err) + } + return actions +} + +// ChangeConfiguration updates the editor config, calling t.Fatal on any error. +func (e *Env) ChangeConfiguration(newConfig fake.EditorConfig) { + e.T.Helper() + if err := e.Editor.ChangeConfiguration(e.Ctx, newConfig); err != nil { + e.T.Fatal(err) + } +} + +// ChangeWorkspaceFolders updates the editor workspace folders, calling t.Fatal +// on any error. +func (e *Env) ChangeWorkspaceFolders(newFolders ...string) { + e.T.Helper() + if err := e.Editor.ChangeWorkspaceFolders(e.Ctx, newFolders); err != nil { + e.T.Fatal(err) + } +} diff --git a/gopls/internal/lsp/rename.go b/gopls/internal/lsp/rename.go new file mode 100644 index 00000000000..359d9acd011 --- /dev/null +++ b/gopls/internal/lsp/rename.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "path/filepath" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + // Because we don't handle directory renaming within source.Rename, source.Rename returns + // boolean value isPkgRenaming to determine whether an DocumentChanges of type RenameFile should + // be added to the return protocol.WorkspaceEdit value. + edits, isPkgRenaming, err := source.Rename(ctx, snapshot, fh, params.Position, params.NewName) + if err != nil { + return nil, err + } + + var docChanges []protocol.DocumentChanges + for uri, e := range edits { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + docChanges = append(docChanges, documentChanges(fh, e)...) + } + if isPkgRenaming { + uri := params.TextDocument.URI.SpanURI() + oldBase := filepath.Dir(span.URI.Filename(uri)) + newURI := filepath.Join(filepath.Dir(oldBase), params.NewName) + docChanges = append(docChanges, protocol.DocumentChanges{ + RenameFile: &protocol.RenameFile{ + Kind: "rename", + OldURI: protocol.URIFromPath(oldBase), + NewURI: protocol.URIFromPath(newURI), + }, + }) + } + return &protocol.WorkspaceEdit{ + DocumentChanges: docChanges, + }, nil +} + +// prepareRename implements the textDocument/prepareRename handler. It may +// return (nil, nil) if there is no rename at the cursor position, but it is +// not desirable to display an error to the user. +// +// TODO(rfindley): why wouldn't we want to show an error to the user, if the +// user initiated a rename request at the cursor? +func (s *Server) prepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + // Do not return errors here, as it adds clutter. + // Returning a nil result means there is not a valid rename. + item, usererr, err := source.PrepareRename(ctx, snapshot, fh, params.Position) + if err != nil { + // Return usererr here rather than err, to avoid cluttering the UI with + // internal error details. + return nil, usererr + } + return &protocol.PrepareRename2Gn{ + Range: item.Range, + Placeholder: item.Text, + }, nil +} diff --git a/gopls/internal/lsp/reset_golden.sh b/gopls/internal/lsp/reset_golden.sh new file mode 100755 index 00000000000..ff7f4d08208 --- /dev/null +++ b/gopls/internal/lsp/reset_golden.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Updates the *.golden files ... to match the tests' current behavior. + +set -eu + +GO117BIN="go1.17.9" + +command -v $GO117BIN >/dev/null 2>&1 || { + go install golang.org/dl/$GO117BIN@latest + $GO117BIN download +} + +find ./internal/lsp/testdata -name *.golden ! -name summary*.txt.golden -delete +# Here we intentionally do not run the ./internal/lsp/source tests with +# -golden. Eventually these tests will be deleted, and in the meantime they are +# redundant with the ./internal/lsp tests. +# +# Note: go1.17.9 tests must be run *before* go tests, as by convention the +# golden output should match the output of gopls built with the most recent +# version of Go. If output differs at 1.17, tests must be tolerant of the 1.17 +# output. +$GO117BIN test ./internal/lsp -golden +go test ./internal/lsp -golden +$GO117BIN test ./test -golden +go test ./test -golden diff --git a/gopls/internal/lsp/safetoken/safetoken.go b/gopls/internal/lsp/safetoken/safetoken.go new file mode 100644 index 00000000000..29cc1b1c664 --- /dev/null +++ b/gopls/internal/lsp/safetoken/safetoken.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package safetoken provides wrappers around methods in go/token, +// that return errors rather than panicking. +// +// It also provides a central place for workarounds in the underlying +// packages. The use of this package's functions instead of methods of +// token.File (such as Offset, Position, and PositionFor) is mandatory +// throughout the gopls codebase and enforced by a static check. +package safetoken + +import ( + "fmt" + "go/token" +) + +// Offset returns f.Offset(pos), but first checks that the file +// contains the pos. +// +// The definition of "contains" here differs from that of token.File +// in order to work around a bug in the parser (issue #57490): during +// error recovery, the parser may create syntax nodes whose computed +// End position is 1 byte beyond EOF, which would cause +// token.File.Offset to panic. The workaround is that this function +// accepts a Pos that is exactly 1 byte beyond EOF and maps it to the +// EOF offset. +func Offset(f *token.File, pos token.Pos) (int, error) { + if !inRange(f, pos) { + // Accept a Pos that is 1 byte beyond EOF, + // and map it to the EOF offset. + // (Workaround for #57490.) + if int(pos) == f.Base()+f.Size()+1 { + return f.Size(), nil + } + + return -1, fmt.Errorf("pos %d is not in range [%d:%d] of file %s", + pos, f.Base(), f.Base()+f.Size(), f.Name()) + } + return int(pos) - f.Base(), nil +} + +// Offsets returns Offset(start) and Offset(end). +func Offsets(f *token.File, start, end token.Pos) (int, int, error) { + startOffset, err := Offset(f, start) + if err != nil { + return 0, 0, fmt.Errorf("start: %v", err) + } + endOffset, err := Offset(f, end) + if err != nil { + return 0, 0, fmt.Errorf("end: %v", err) + } + return startOffset, endOffset, nil +} + +// Pos returns f.Pos(offset), but first checks that the offset is +// non-negative and not larger than the size of the file. +func Pos(f *token.File, offset int) (token.Pos, error) { + if !(0 <= offset && offset <= f.Size()) { + return token.NoPos, fmt.Errorf("offset %d is not in range for file %s of size %d", offset, f.Name(), f.Size()) + } + return token.Pos(f.Base() + offset), nil +} + +// inRange reports whether file f contains position pos, +// according to the invariants of token.File. +// +// This function is not public because of the ambiguity it would +// create w.r.t. the definition of "contains". Use Offset instead. +func inRange(f *token.File, pos token.Pos) bool { + return token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) +} + +// Position returns the Position for the pos value in the given file. +// +// p must be NoPos, a valid Pos in the range of f, or exactly 1 byte +// beyond the end of f. (See [Offset] for explanation.) +// Any other value causes a panic. +// +// Line directives (//line comments) are ignored. +func Position(f *token.File, pos token.Pos) token.Position { + // Work around issue #57490. + if int(pos) == f.Base()+f.Size()+1 { + pos-- + } + + // TODO(adonovan): centralize the workaround for + // golang/go#41029 (newline at EOF) here too. + + return f.PositionFor(pos, false) +} + +// StartPosition converts a start Pos in the FileSet into a Position. +// +// Call this function only if start represents the start of a token or +// parse tree, such as the result of Node.Pos(). If start is the end of +// an interval, such as Node.End(), call EndPosition instead, as it +// may need the correction described at [Position]. +func StartPosition(fset *token.FileSet, start token.Pos) (_ token.Position) { + if f := fset.File(start); f != nil { + return Position(f, start) + } + return +} + +// EndPosition converts an end Pos in the FileSet into a Position. +// +// Call this function only if pos represents the end of +// a non-empty interval, such as the result of Node.End(). +func EndPosition(fset *token.FileSet, end token.Pos) (_ token.Position) { + if f := fset.File(end); f != nil && int(end) > f.Base() { + return Position(f, end) + } + + // Work around issue #57490. + if f := fset.File(end - 1); f != nil { + return Position(f, end) + } + + return +} diff --git a/gopls/internal/lsp/safetoken/safetoken_test.go b/gopls/internal/lsp/safetoken/safetoken_test.go new file mode 100644 index 00000000000..afd569472ac --- /dev/null +++ b/gopls/internal/lsp/safetoken/safetoken_test.go @@ -0,0 +1,121 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package safetoken_test + +import ( + "fmt" + "go/parser" + "go/token" + "go/types" + "os" + "testing" + + "golang.org/x/tools/go/packages" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/internal/testenv" +) + +func TestWorkaroundIssue57490(t *testing.T) { + // During error recovery the parser synthesizes various close + // tokens at EOF, causing the End position of incomplete + // syntax nodes, computed as Rbrace+len("}"), to be beyond EOF. + src := `package p; func f() { var x struct` + fset := token.NewFileSet() + file, _ := parser.ParseFile(fset, "a.go", src, 0) + tf := fset.File(file.Pos()) + + // Add another file to the FileSet. + file2, _ := parser.ParseFile(fset, "b.go", "package q", 0) + + // This is the ambiguity of #57490... + if file.End() != file2.Pos() { + t.Errorf("file.End() %d != %d file2.Pos()", file.End(), file2.Pos()) + } + // ...which causes these statements to panic. + if false { + tf.Offset(file.End()) // panic: invalid Pos value 36 (should be in [1, 35]) + tf.Position(file.End()) // panic: invalid Pos value 36 (should be in [1, 35]) + } + + // The offset of the EOF position is the file size. + offset, err := safetoken.Offset(tf, file.End()-1) + if err != nil || offset != tf.Size() { + t.Errorf("Offset(EOF) = (%d, %v), want token.File.Size %d", offset, err, tf.Size()) + } + + // The offset of the file.End() position, 1 byte beyond EOF, + // is also the size of the file. + offset, err = safetoken.Offset(tf, file.End()) + if err != nil || offset != tf.Size() { + t.Errorf("Offset(ast.File.End()) = (%d, %v), want token.File.Size %d", offset, err, tf.Size()) + } + + if got, want := safetoken.Position(tf, file.End()).String(), "a.go:1:35"; got != want { + t.Errorf("Position(ast.File.End()) = %s, want %s", got, want) + } + + if got, want := safetoken.EndPosition(fset, file.End()).String(), "a.go:1:35"; got != want { + t.Errorf("EndPosition(ast.File.End()) = %s, want %s", got, want) + } + + // Note that calling StartPosition on an end may yield the wrong file: + if got, want := safetoken.StartPosition(fset, file.End()).String(), "b.go:1:1"; got != want { + t.Errorf("StartPosition(ast.File.End()) = %s, want %s", got, want) + } +} + +// To reduce the risk of panic, or bugs for which this package +// provides a workaround, this test statically reports references to +// forbidden methods of token.File or FileSet throughout gopls and +// suggests alternatives. +func TestGoplsSourceDoesNotCallTokenFileMethods(t *testing.T) { + testenv.NeedsGoPackages(t) + + pkgs, err := packages.Load(&packages.Config{ + Mode: packages.NeedName | packages.NeedModule | packages.NeedCompiledGoFiles | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps, + }, "go/token", "golang.org/x/tools/gopls/...") + if err != nil { + t.Fatal(err) + } + var tokenPkg *packages.Package + for _, pkg := range pkgs { + if pkg.PkgPath == "go/token" { + tokenPkg = pkg + break + } + } + if tokenPkg == nil { + t.Fatal("missing package go/token") + } + + File := tokenPkg.Types.Scope().Lookup("File") + FileSet := tokenPkg.Types.Scope().Lookup("FileSet") + + alternative := make(map[types.Object]string) + setAlternative := func(recv types.Object, old, new string) { + oldMethod, _, _ := types.LookupFieldOrMethod(recv.Type(), true, recv.Pkg(), old) + alternative[oldMethod] = new + } + setAlternative(File, "Offset", "safetoken.Offset") + setAlternative(File, "Position", "safetoken.Position") + setAlternative(File, "PositionFor", "safetoken.Position") + setAlternative(FileSet, "Position", "safetoken.StartPosition or EndPosition") + setAlternative(FileSet, "PositionFor", "safetoken.StartPosition or EndPosition") + + for _, pkg := range pkgs { + switch pkg.PkgPath { + case "go/token", "golang.org/x/tools/gopls/internal/lsp/safetoken": + continue // allow calls within these packages + } + + for ident, obj := range pkg.TypesInfo.Uses { + if alt, ok := alternative[obj]; ok { + posn := safetoken.StartPosition(pkg.Fset, ident.Pos()) + fmt.Fprintf(os.Stderr, "%s: forbidden use of %v; use %s instead.\n", posn, obj, alt) + t.Fail() + } + } + } +} diff --git a/gopls/internal/lsp/selection_range.go b/gopls/internal/lsp/selection_range.go new file mode 100644 index 00000000000..5b3fd31e9f1 --- /dev/null +++ b/gopls/internal/lsp/selection_range.go @@ -0,0 +1,69 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +// selectionRange defines the textDocument/selectionRange feature, +// which, given a list of positions within a file, +// reports a linked list of enclosing syntactic blocks, innermost first. +// +// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_selectionRange. +// +// This feature can be used by a client to implement "expand selection" in a +// language-aware fashion. Multiple input positions are supported to allow +// for multiple cursors, and the entire path up to the whole document is +// returned for each cursor to avoid multiple round-trips when the user is +// likely to issue this command multiple times in quick succession. +func (s *Server) selectionRange(ctx context.Context, params *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) { + ctx, done := event.Start(ctx, "lsp.Server.documentSymbol") + defer done() + + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return nil, err + } + + pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) + if err != nil { + return nil, err + } + + result := make([]protocol.SelectionRange, len(params.Positions)) + for i, protocolPos := range params.Positions { + pos, err := pgf.PositionPos(protocolPos) + if err != nil { + return nil, err + } + + path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) + + tail := &result[i] // tail of the Parent linked list, built head first + + for j, node := range path { + rng, err := pgf.NodeRange(node) + if err != nil { + return nil, err + } + + // Add node to tail. + if j > 0 { + tail.Parent = &protocol.SelectionRange{} + tail = tail.Parent + } + tail.Range = rng + } + } + + return result, nil +} diff --git a/internal/lsp/semantic.go b/gopls/internal/lsp/semantic.go similarity index 85% rename from internal/lsp/semantic.go rename to gopls/internal/lsp/semantic.go index 286d2fd160d..a3a1bcaaed2 100644 --- a/internal/lsp/semantic.go +++ b/gopls/internal/lsp/semantic.go @@ -18,11 +18,11 @@ import ( "strings" "time" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" "golang.org/x/tools/internal/typeparams" ) @@ -76,10 +76,11 @@ func (s *Server) computeSemanticTokens(ctx context.Context, td protocol.TextDocu // this is a little cumbersome to avoid both exporting 'encoded' and its methods // and to avoid import cycles e := &encoded{ - ctx: ctx, - rng: rng, - tokTypes: s.session.Options().SemanticTypes, - tokMods: s.session.Options().SemanticMods, + ctx: ctx, + metadataSource: snapshot, + rng: rng, + tokTypes: s.session.Options().SemanticTypes, + tokMods: s.session.Options().SemanticMods, } add := func(line, start uint32, len uint32) { e.add(line, start, len, tokMacro, nil) @@ -92,29 +93,28 @@ func (s *Server) computeSemanticTokens(ctx context.Context, td protocol.TextDocu if kind != source.Go { return nil, nil } - pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckFull, source.WidestPackage) + pkg, pgf, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.TypecheckFull, source.NarrowestPackage) if err != nil { return nil, err } - pgf, err := pkg.File(fh.URI()) - if err != nil { - return nil, err - } - // ignore pgf.ParseErr. Do what we can. + if rng == nil && len(pgf.Src) > maxFullFileSize { err := fmt.Errorf("semantic tokens: file %s too large for full (%d>%d)", fh.URI().Filename(), len(pgf.Src), maxFullFileSize) return nil, err } e := &encoded{ - ctx: ctx, - pgf: pgf, - rng: rng, - ti: pkg.GetTypesInfo(), - pkg: pkg, - fset: snapshot.FileSet(), - tokTypes: s.session.Options().SemanticTypes, - tokMods: s.session.Options().SemanticMods, + ctx: ctx, + metadataSource: snapshot, + pgf: pgf, + rng: rng, + ti: pkg.GetTypesInfo(), + pkg: pkg, + fset: pkg.FileSet(), + tokTypes: s.session.Options().SemanticTypes, + tokMods: s.session.Options().SemanticMods, + noStrings: vv.Options().NoSemanticString, + noNumbers: vv.Options().NoSemanticNumber, } if err := e.init(); err != nil { // e.init should never return an error, unless there's some @@ -176,25 +176,17 @@ const ( ) func (e *encoded) token(start token.Pos, leng int, typ tokenType, mods []string) { - if !start.IsValid() { - // This is not worth reporting + // This is not worth reporting. TODO(pjw): does it still happen? return } if start >= e.end || start+token.Pos(leng) <= e.start { return } - // want a line and column from start (in LSP coordinates) - // [//line directives should be ignored] - rng := source.NewMappedRange(e.fset, e.pgf.Mapper, start, start+token.Pos(leng)) - lspRange, err := rng.Range() + // want a line and column from start (in LSP coordinates). Ignore line directives. + lspRange, err := e.pgf.PosRange(start, start+token.Pos(leng)) if err != nil { - // possibly a //line directive. TODO(pjw): fix this somehow - // "column mapper is for file...instead of..." - // "line is beyond end of file..." - // see line 116 of internal/span/token.go which uses Position not PositionFor - // (it is too verbose to print the error on every token. some other RPC will fail) - // event.Error(e.ctx, "failed to convert to range", err) + event.Error(e.ctx, "failed to convert to range", err) return } if lspRange.End.Line != lspRange.Start.Line { @@ -223,7 +215,12 @@ type encoded struct { // the generated data items []semItem - ctx context.Context + noStrings bool + noNumbers bool + + ctx context.Context + // metadataSource is used to resolve imports + metadataSource source.MetadataSource tokTypes, tokMods []string pgf *source.ParsedGoFile rng *protocol.Range @@ -246,14 +243,12 @@ func (e *encoded) strStack() string { } if len(e.stack) > 0 { loc := e.stack[len(e.stack)-1].Pos() - if !safetoken.InRange(e.pgf.Tok, loc) { + if _, err := safetoken.Offset(e.pgf.Tok, loc); err != nil { msg = append(msg, fmt.Sprintf("invalid position %v for %s", loc, e.pgf.URI)) - } else if safetoken.InRange(e.pgf.Tok, loc) { - add := e.pgf.Tok.PositionFor(loc, false) + } else { + add := safetoken.Position(e.pgf.Tok, loc) nm := filepath.Base(add.Filename) msg = append(msg, fmt.Sprintf("(%s:%d,col:%d)", nm, add.Line, add.Column)) - } else { - msg = append(msg, fmt.Sprintf("(loc %d out of range)", loc)) } } msg = append(msg, "]") @@ -291,7 +286,7 @@ func (e *encoded) inspector(n ast.Node) bool { e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil) case *ast.BasicLit: if strings.Contains(x.Value, "\n") { - // has to be a string + // has to be a string. e.multiline(x.Pos(), x.End(), x.Value, tokString) break } @@ -299,11 +294,6 @@ func (e *encoded) inspector(n ast.Node) bool { what := tokNumber if x.Kind == token.STRING { what = tokString - if _, ok := e.stack[len(e.stack)-2].(*ast.Field); ok { - // struct tags (this is probably pointless, as the - // TextMate grammar will treat all the other comments the same) - what = tokComment - } } e.token(x.Pos(), ln, what, nil) case *ast.BinaryExpr: @@ -379,7 +369,7 @@ func (e *encoded) inspector(n ast.Node) bool { case *ast.IncDecStmt: e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil) case *ast.IndexExpr: - case *typeparams.IndexListExpr: // accommodate generics + case *typeparams.IndexListExpr: case *ast.InterfaceType: e.token(x.Interface, len("interface"), tokKeyword, nil) case *ast.KeyValueExpr: @@ -426,7 +416,7 @@ func (e *encoded) inspector(n ast.Node) bool { return true // not going to see these case *ast.File, *ast.Package: - e.unexpected(fmt.Sprintf("implement %T %s", x, e.pgf.Tok.PositionFor(x.Pos(), false))) + e.unexpected(fmt.Sprintf("implement %T %s", x, safetoken.Position(e.pgf.Tok, x.Pos()))) // other things we knowingly ignore case *ast.Comment, *ast.CommentGroup: pop() @@ -523,11 +513,14 @@ func (e *encoded) ident(x *ast.Ident) { case *types.Var: if isSignature(y) { tok(x.Pos(), len(x.Name), tokFunction, nil) - } else if _, ok := y.Type().(*typeparams.TypeParam); ok { - tok(x.Pos(), len(x.Name), tokTypeParam, nil) + } else if e.isParam(use.Pos()) { + // variable, unless use.pos is the pos of a Field in an ancestor FuncDecl + // or FuncLit and then it's a parameter + tok(x.Pos(), len(x.Name), tokParameter, nil) } else { tok(x.Pos(), len(x.Name), tokVariable, nil) } + default: // can't happen if use == nil { @@ -542,10 +535,31 @@ func (e *encoded) ident(x *ast.Ident) { } } -func isSignature(use types.Object) bool { - if true { - return false //PJW: fix after generics seem ok +func (e *encoded) isParam(pos token.Pos) bool { + for i := len(e.stack) - 1; i >= 0; i-- { + switch n := e.stack[i].(type) { + case *ast.FuncDecl: + for _, f := range n.Type.Params.List { + for _, id := range f.Names { + if id.Pos() == pos { + return true + } + } + } + case *ast.FuncLit: + for _, f := range n.Type.Params.List { + for _, id := range f.Names { + if id.Pos() == pos { + return true + } + } + } + } } + return false +} + +func isSignature(use types.Object) bool { if _, ok := use.(*types.Var); !ok { return false } @@ -577,7 +591,7 @@ func (e *encoded) unkIdent(x *ast.Ident) (tokenType, []string) { *ast.IfStmt, /* condition */ *ast.KeyValueExpr: // either key or value return tokVariable, nil - case *typeparams.IndexListExpr: // generic? + case *typeparams.IndexListExpr: return tokVariable, nil case *ast.Ellipsis: return tokType, nil @@ -639,7 +653,7 @@ func (e *encoded) unkIdent(x *ast.Ident) (tokenType, []string) { if nd.Tok != token.DEFINE { def = nil } - return tokVariable, def + return tokVariable, def // '_' in _ = ... } } // RHS, = x @@ -697,7 +711,7 @@ func isDeprecated(n *ast.CommentGroup) bool { func (e *encoded) definitionFor(x *ast.Ident, def types.Object) (tokenType, []string) { // PJW: def == types.Label? probably a nothing - // PJW: look into replaceing these syntactic tests with types more generally + // PJW: look into replacing these syntactic tests with types more generally mods := []string{"definition"} for i := len(e.stack) - 1; i >= 0; i-- { s := e.stack[i] @@ -727,12 +741,19 @@ func (e *encoded) definitionFor(x *ast.Ident, def types.Object) (tokenType, []st return tokFunction, mods } // if x < ... < FieldList < FuncDecl, this is the receiver, a variable + // PJW: maybe not. it might be a typeparameter in the type of the receiver if _, ok := e.stack[i+1].(*ast.FieldList); ok { + if _, ok := def.(*types.TypeName); ok { + return tokTypeParam, mods + } return tokVariable, nil } // if x < ... < FieldList < FuncType < FuncDecl, this is a param return tokParameter, mods - case *ast.FuncType: + case *ast.FuncType: // is it in the TypeParams? + if isTypeParam(x, y) { + return tokTypeParam, mods + } return tokParameter, mods case *ast.InterfaceType: return tokMethod, mods @@ -759,11 +780,26 @@ func (e *encoded) definitionFor(x *ast.Ident, def types.Object) (tokenType, []st } } // can't happen - msg := fmt.Sprintf("failed to find the decl for %s", e.pgf.Tok.PositionFor(x.Pos(), false)) + msg := fmt.Sprintf("failed to find the decl for %s", safetoken.Position(e.pgf.Tok, x.Pos())) e.unexpected(msg) return "", []string{""} } +func isTypeParam(x *ast.Ident, y *ast.FuncType) bool { + tp := typeparams.ForFuncType(y) + if tp == nil { + return false + } + for _, p := range tp.List { + for _, n := range p.Names { + if x == n { + return true + } + } + } + return false +} + func (e *encoded) multiline(start, end token.Pos, val string, tok tokenType) { f := e.fset.File(start) // the hard part is finding the lengths of lines. include the \n @@ -774,8 +810,8 @@ func (e *encoded) multiline(start, end token.Pos, val string, tok tokenType) { } return int(f.LineStart(line+1) - n) } - spos := e.fset.PositionFor(start, false) - epos := e.fset.PositionFor(end, false) + spos := safetoken.StartPosition(e.fset, start) + epos := safetoken.EndPosition(e.fset, end) sline := spos.Line eline := epos.Line // first line is from spos.Column to end @@ -798,7 +834,7 @@ func (e *encoded) findKeyword(keyword string, start, end token.Pos) token.Pos { return start + token.Pos(idx) } //(in unparsable programs: type _ <-<-chan int) - e.unexpected(fmt.Sprintf("not found:%s %v", keyword, e.fset.PositionFor(start, false))) + e.unexpected(fmt.Sprintf("not found:%s %v", keyword, safetoken.StartPosition(e.fset, start))) return token.NoPos } @@ -832,29 +868,36 @@ func (e *encoded) Data() []uint32 { var j int var last semItem for i := 0; i < len(e.items); i++ { - typ, ok := typeMap[e.items[i].typeStr] + item := e.items[i] + typ, ok := typeMap[item.typeStr] if !ok { continue // client doesn't want typeStr } + if item.typeStr == tokString && e.noStrings { + continue + } + if item.typeStr == tokNumber && e.noNumbers { + continue + } if j == 0 { x[0] = e.items[0].line } else { - x[j] = e.items[i].line - last.line + x[j] = item.line - last.line } - x[j+1] = e.items[i].start + x[j+1] = item.start if j > 0 && x[j] == 0 { - x[j+1] = e.items[i].start - last.start + x[j+1] = item.start - last.start } - x[j+2] = e.items[i].len + x[j+2] = item.len x[j+3] = uint32(typ) mask := 0 - for _, s := range e.items[i].mods { + for _, s := range item.mods { // modMap[s] is 0 if the client doesn't want this modifier mask |= modMap[s] } x[j+4] = uint32(mask) j += 5 - last = e.items[i] + last = item } return x[:j] } @@ -868,31 +911,29 @@ func (e *encoded) importSpec(d *ast.ImportSpec) { } return // don't mark anything for . or _ } - val := d.Path.Value - if len(val) < 2 || val[0] != '"' || val[len(val)-1] != '"' { - // avoid panics on imports without a properly quoted string + importPath := source.UnquoteImportPath(d) + if importPath == "" { return } - nm := val[1 : len(val)-1] // remove surrounding "s // Import strings are implementation defined. Try to match with parse information. - x, err := e.pkg.GetImport(nm) - if err != nil { - // unexpected, but impact is that maybe some import is not colored + depID := e.pkg.Metadata().DepsByImpPath[importPath] + if depID == "" { return } - // expect that nm is x.PkgPath and that x.Name() is a component of it - if x.PkgPath() != nm { - // don't know how or what to color (if this can happen at all) + depMD := e.metadataSource.Metadata(depID) + if depMD == nil { + // unexpected, but impact is that maybe some import is not colored return } - // this is not a precise test: imagine "github.com/nasty/v/v2" - j := strings.LastIndex(nm, x.Name()) + // Check whether the original literal contains the package's declared name. + j := strings.LastIndex(d.Path.Value, string(depMD.Name)) if j == -1 { - // name doesn't show up, for whatever reason, so nothing to report + // Package name does not match import path, so there is nothing to report. return } - start := d.Path.Pos() + 1 + token.Pos(j) // skip the initial quote - e.token(start, len(x.Name()), tokNamespace, nil) + // Report virtual declaration at the position of the substring. + start := d.Path.Pos() + token.Pos(j) + e.token(start, len(depMD.Name), tokNamespace, nil) } // log unexpected state @@ -910,6 +951,7 @@ func SemType(n int) string { if n >= 0 && n < len(tokTypes) { return tokTypes[n] } + // not found for some reason return fmt.Sprintf("?%d[%d,%d]?", n, len(tokTypes), len(tokMods)) } diff --git a/internal/lsp/server.go b/gopls/internal/lsp/server.go similarity index 81% rename from internal/lsp/server.go rename to gopls/internal/lsp/server.go index fb820cccfea..3d42df11948 100644 --- a/internal/lsp/server.go +++ b/gopls/internal/lsp/server.go @@ -10,31 +10,29 @@ import ( "fmt" "sync" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/progress" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp/progress" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" ) const concurrentAnalyses = 1 // NewServer creates an LSP server and binds it to handle incoming client // messages on on the supplied stream. -func NewServer(session source.Session, client protocol.ClientCloser) *Server { - tracker := progress.NewTracker(client) - session.SetProgressTracker(tracker) +func NewServer(session *cache.Session, client protocol.ClientCloser) *Server { return &Server{ diagnostics: map[span.URI]*fileReports{}, - gcOptimizationDetails: make(map[string]struct{}), + gcOptimizationDetails: make(map[source.PackageID]struct{}), watchedGlobPatterns: make(map[string]struct{}), changedFiles: make(map[span.URI]struct{}), session: session, client: client, diagnosticsSema: make(chan struct{}, concurrentAnalyses), - progress: tracker, + progress: progress.NewTracker(client), diagDebouncer: newDebouncer(), - watchedFileDebouncer: newDebouncer(), } } @@ -70,7 +68,7 @@ type Server struct { // notifications generated before serverInitialized notifications []*protocol.ShowMessageParams - session source.Session + session *cache.Session tempDir string @@ -96,7 +94,7 @@ type Server struct { // optimization details to be included in the diagnostics. The key is the // ID of the package. gcOptimizationDetailsMu sync.Mutex - gcOptimizationDetails map[string]struct{} + gcOptimizationDetails map[source.PackageID]struct{} // diagnosticsSema limits the concurrency of diagnostics runs, which can be // expensive. @@ -107,23 +105,13 @@ type Server struct { // diagDebouncer is used for debouncing diagnostics. diagDebouncer *debouncer - // watchedFileDebouncer is used for batching didChangeWatchedFiles notifications. - watchedFileDebouncer *debouncer - fileChangeMu sync.Mutex - pendingOnDiskChanges []*pendingModificationSet - // When the workspace fails to load, we show its status through a progress // report with an error message. criticalErrorStatusMu sync.Mutex criticalErrorStatus *progress.WorkDone } -type pendingModificationSet struct { - diagnoseDone chan struct{} - changes []source.FileModification -} - -func (s *Server) workDoneProgressCancel(params *protocol.WorkDoneProgressCancelParams) error { +func (s *Server) workDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error { return s.progress.Cancel(params.Token) } @@ -131,6 +119,9 @@ func (s *Server) nonstandardRequest(ctx context.Context, method string, params i switch method { case "gopls/diagnoseFiles": paramMap := params.(map[string]interface{}) + // TODO(adonovan): opt: parallelize FileDiagnostics(URI...), either + // by calling it in multiple goroutines or, better, by making + // the relevant APIs accept a set of URIs/packages. for _, file := range paramMap["files"].([]interface{}) { snapshot, fh, ok, release, err := s.beginFileRequest(ctx, protocol.DocumentURI(file.(string)), source.UnknownKind) defer release() @@ -145,7 +136,7 @@ func (s *Server) nonstandardRequest(ctx context.Context, method string, params i if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{ URI: protocol.URIFromSpanURI(fh.URI()), Diagnostics: toProtocolDiagnostics(diagnostics), - Version: fileID.Version, + Version: fileID.Version(), }); err != nil { return nil, err } diff --git a/internal/lsp/server_gen.go b/gopls/internal/lsp/server_gen.go similarity index 89% rename from internal/lsp/server_gen.go rename to gopls/internal/lsp/server_gen.go index 93b2f9913b8..2c6e9954d23 100644 --- a/internal/lsp/server_gen.go +++ b/gopls/internal/lsp/server_gen.go @@ -9,7 +9,7 @@ package lsp import ( "context" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) func (s *Server) CodeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) { @@ -20,10 +20,6 @@ func (s *Server) CodeLens(ctx context.Context, params *protocol.CodeLensParams) return s.codeLens(ctx, params) } -func (s *Server) CodeLensRefresh(context.Context) error { - return notImplemented("CodeLensRefresh") -} - func (s *Server) ColorPresentation(context.Context, *protocol.ColorPresentationParams) ([]protocol.ColorPresentation, error) { return nil, notImplemented("ColorPresentation") } @@ -32,11 +28,11 @@ func (s *Server) Completion(ctx context.Context, params *protocol.CompletionPara return s.completion(ctx, params) } -func (s *Server) Declaration(context.Context, *protocol.DeclarationParams) (protocol.Declaration, error) { +func (s *Server) Declaration(context.Context, *protocol.DeclarationParams) (*protocol.Or_textDocument_declaration, error) { return nil, notImplemented("Declaration") } -func (s *Server) Definition(ctx context.Context, params *protocol.DefinitionParams) (protocol.Definition, error) { +func (s *Server) Definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) { return s.definition(ctx, params) } @@ -44,10 +40,6 @@ func (s *Server) Diagnostic(context.Context, *string) (*string, error) { return nil, notImplemented("Diagnostic") } -func (s *Server) DiagnosticRefresh(context.Context) error { - return notImplemented("DiagnosticRefresh") -} - func (s *Server) DiagnosticWorkspace(context.Context, *protocol.WorkspaceDiagnosticParams) (*protocol.WorkspaceDiagnosticReport, error) { return nil, notImplemented("DiagnosticWorkspace") } @@ -144,7 +136,7 @@ func (s *Server) Hover(ctx context.Context, params *protocol.HoverParams) (*prot return s.hover(ctx, params) } -func (s *Server) Implementation(ctx context.Context, params *protocol.ImplementationParams) (protocol.Definition, error) { +func (s *Server) Implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) { return s.implementation(ctx, params) } @@ -160,30 +152,18 @@ func (s *Server) Initialized(ctx context.Context, params *protocol.InitializedPa return s.initialized(ctx, params) } -func (s *Server) InlayHint(context.Context, *protocol.InlayHintParams) ([]protocol.InlayHint, error) { - return nil, notImplemented("InlayHint") -} - -func (s *Server) InlayHintRefresh(context.Context) error { - return notImplemented("InlayHintRefresh") +func (s *Server) InlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) { + return s.inlayHint(ctx, params) } func (s *Server) InlineValue(context.Context, *protocol.InlineValueParams) ([]protocol.InlineValue, error) { return nil, notImplemented("InlineValue") } -func (s *Server) InlineValueRefresh(context.Context) error { - return notImplemented("InlineValueRefresh") -} - func (s *Server) LinkedEditingRange(context.Context, *protocol.LinkedEditingRangeParams) (*protocol.LinkedEditingRanges, error) { return nil, notImplemented("LinkedEditingRange") } -func (s *Server) LogTrace(context.Context, *protocol.LogTraceParams) error { - return notImplemented("LogTrace") -} - func (s *Server) Moniker(context.Context, *protocol.MonikerParams) ([]protocol.Moniker, error) { return nil, notImplemented("Moniker") } @@ -212,6 +192,10 @@ func (s *Server) PrepareTypeHierarchy(context.Context, *protocol.TypeHierarchyPr return nil, notImplemented("PrepareTypeHierarchy") } +func (s *Server) Progress(context.Context, *protocol.ProgressParams) error { + return notImplemented("Progress") +} + func (s *Server) RangeFormatting(context.Context, *protocol.DocumentRangeFormattingParams) ([]protocol.TextEdit, error) { return nil, notImplemented("RangeFormatting") } @@ -248,8 +232,8 @@ func (s *Server) ResolveWorkspaceSymbol(context.Context, *protocol.WorkspaceSymb return nil, notImplemented("ResolveWorkspaceSymbol") } -func (s *Server) SelectionRange(context.Context, *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) { - return nil, notImplemented("SelectionRange") +func (s *Server) SelectionRange(ctx context.Context, params *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) { + return s.selectionRange(ctx, params) } func (s *Server) SemanticTokensFull(ctx context.Context, p *protocol.SemanticTokensParams) (*protocol.SemanticTokens, error) { @@ -264,10 +248,6 @@ func (s *Server) SemanticTokensRange(ctx context.Context, p *protocol.SemanticTo return s.semanticTokensRange(ctx, p) } -func (s *Server) SemanticTokensRefresh(ctx context.Context) error { - return s.semanticTokensRefresh(ctx) -} - func (s *Server) SetTrace(context.Context, *protocol.SetTraceParams) error { return notImplemented("SetTrace") } @@ -292,7 +272,7 @@ func (s *Server) Symbol(ctx context.Context, params *protocol.WorkspaceSymbolPar return s.symbol(ctx, params) } -func (s *Server) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) (protocol.Definition, error) { +func (s *Server) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) { return s.typeDefinition(ctx, params) } @@ -317,5 +297,5 @@ func (s *Server) WillSaveWaitUntil(context.Context, *protocol.WillSaveTextDocume } func (s *Server) WorkDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error { - return s.workDoneProgressCancel(params) + return s.workDoneProgressCancel(ctx, params) } diff --git a/gopls/internal/lsp/signature_help.go b/gopls/internal/lsp/signature_help.go new file mode 100644 index 00000000000..b623f78ea74 --- /dev/null +++ b/gopls/internal/lsp/signature_help.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" +) + +func (s *Server) signatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + info, activeParameter, err := source.SignatureHelp(ctx, snapshot, fh, params.Position) + if err != nil { + event.Error(ctx, "no signature help", err, tag.Position.Of(params.Position)) + return nil, nil // sic? There could be many reasons for failure. + } + return &protocol.SignatureHelp{ + Signatures: []protocol.SignatureInformation{*info}, + ActiveParameter: uint32(activeParameter), + }, nil +} diff --git a/internal/lsp/snippet/snippet_builder.go b/gopls/internal/lsp/snippet/snippet_builder.go similarity index 100% rename from internal/lsp/snippet/snippet_builder.go rename to gopls/internal/lsp/snippet/snippet_builder.go diff --git a/internal/lsp/snippet/snippet_builder_test.go b/gopls/internal/lsp/snippet/snippet_builder_test.go similarity index 100% rename from internal/lsp/snippet/snippet_builder_test.go rename to gopls/internal/lsp/snippet/snippet_builder_test.go diff --git a/gopls/internal/lsp/source/add_import.go b/gopls/internal/lsp/source/add_import.go new file mode 100644 index 00000000000..cd8ec7ab70b --- /dev/null +++ b/gopls/internal/lsp/source/add_import.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/imports" +) + +// AddImport adds a single import statement to the given file +func AddImport(ctx context.Context, snapshot Snapshot, fh FileHandle, importPath string) ([]protocol.TextEdit, error) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + return ComputeOneImportFixEdits(snapshot, pgf, &imports.ImportFix{ + StmtInfo: imports.ImportInfo{ + ImportPath: importPath, + }, + FixType: imports.AddImport, + }) +} diff --git a/internal/lsp/source/api_json.go b/gopls/internal/lsp/source/api_json.go similarity index 77% rename from internal/lsp/source/api_json.go rename to gopls/internal/lsp/source/api_json.go index 0695efc2fa5..3fa7ca2227d 100755 --- a/internal/lsp/source/api_json.go +++ b/gopls/internal/lsp/source/api_json.go @@ -22,8 +22,8 @@ var GeneratedAPIJSON = &APIJSON{ { Name: "directoryFilters", Type: "[]string", - Doc: "directoryFilters can be used to exclude unwanted directories from the\nworkspace. By default, all directories are included. Filters are an\noperator, `+` to include and `-` to exclude, followed by a path prefix\nrelative to the workspace folder. They are evaluated in order, and\nthe last filter that applies to a path controls whether it is included.\nThe path prefix can be empty, so an initial `-` excludes everything.\n\nExamples:\n\nExclude node_modules: `-node_modules`\n\nInclude only project_a: `-` (exclude everything), `+project_a`\n\nInclude only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`\n", - Default: "[\"-node_modules\"]", + Doc: "directoryFilters can be used to exclude unwanted directories from the\nworkspace. By default, all directories are included. Filters are an\noperator, `+` to include and `-` to exclude, followed by a path prefix\nrelative to the workspace folder. They are evaluated in order, and\nthe last filter that applies to a path controls whether it is included.\nThe path prefix can be empty, so an initial `-` excludes everything.\n\nDirectoryFilters also supports the `**` operator to match 0 or more directories.\n\nExamples:\n\nExclude node_modules at current depth: `-node_modules`\n\nExclude node_modules at any depth: `-**/node_modules`\n\nInclude only project_a: `-` (exclude everything), `+project_a`\n\nInclude only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`\n", + Default: "[\"-**/node_modules\"]", Hierarchy: "build", }, { @@ -56,22 +56,6 @@ var GeneratedAPIJSON = &APIJSON{ Status: "experimental", Hierarchy: "build", }, - { - Name: "experimentalWorkspaceModule", - Type: "bool", - Doc: "experimentalWorkspaceModule opts a user into the experimental support\nfor multi-module workspaces.\n", - Default: "false", - Status: "experimental", - Hierarchy: "build", - }, - { - Name: "experimentalPackageCacheKey", - Type: "bool", - Doc: "experimentalPackageCacheKey controls whether to use a coarser cache key\nfor package type information to increase cache hits. This setting removes\nthe user's environment, build flags, and working directory from the cache\nkey, which should be a safe change as all relevant inputs into the type\nchecking pass are already hashed into the key. This is temporarily guarded\nby an experiment because caching behavior is subtle and difficult to\ncomprehensively test.\n", - Default: "true", - Status: "experimental", - Hierarchy: "build", - }, { Name: "allowModfileModifications", Type: "bool", @@ -89,11 +73,10 @@ var GeneratedAPIJSON = &APIJSON{ Hierarchy: "build", }, { - Name: "experimentalUseInvalidMetadata", - Type: "bool", - Doc: "experimentalUseInvalidMetadata enables gopls to fall back on outdated\npackage metadata to provide editor features if the go command fails to\nload packages for some reason (like an invalid go.mod file). This will\neventually be the default behavior, and this setting will be removed.\n", - Default: "false", - Status: "experimental", + Name: "standaloneTags", + Type: "[]string", + Doc: "standaloneTags specifies a set of build constraints that identify\nindividual Go source files that make up the entire main package of an\nexecutable.\n\nA common example of standalone main files is the convention of using the\ndirective `//go:build ignore` to denote files that are not intended to be\nincluded in any package, for example because they are invoked directly by\nthe developer using `go run`.\n\nGopls considers a file to be a standalone main file if and only if it has\npackage name \"main\" and has a build directive of the exact form\n\"//go:build tag\" or \"// +build tag\", where tag is among the list of tags\nconfigured by this setting. Notably, if the build constraint is more\ncomplicated than a simple tag (such as the composite constraint\n`//go:build tag && go1.18`), the file is not considered to be a standalone\nmain file.\n\nThis setting is only supported when gopls is built with Go 1.16 or later.\n", + Default: "[\"ignore\"]", Hierarchy: "build", }, { @@ -116,7 +99,7 @@ var GeneratedAPIJSON = &APIJSON{ { Name: "linkTarget", Type: "string", - Doc: "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n", + Doc: "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n\nModules matching the GOPRIVATE environment variable will not have\ndocumentation links in hover.\n", Default: "\"pkg.go.dev\"", Hierarchy: "ui.documentation", }, @@ -214,7 +197,7 @@ var GeneratedAPIJSON = &APIJSON{ { Name: "analyses", Type: "map[string]bool", - Doc: "analyses specify analyses that the user would like to enable or disable.\nA map of the names of analysis passes that should be enabled/disabled.\nA full list of analyzers that gopls uses can be found\n[here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).\n\nExample Usage:\n\n```json5\n...\n\"analyses\": {\n \"unreachable\": false, // Disable the unreachable analyzer.\n \"unusedparams\": true // Enable the unusedparams analyzer.\n}\n...\n```\n", + Doc: "analyses specify analyses that the user would like to enable or disable.\nA map of the names of analysis passes that should be enabled/disabled.\nA full list of analyzers that gopls uses can be found in\n[analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).\n\nExample Usage:\n\n```json5\n...\n\"analyses\": {\n \"unreachable\": false, // Disable the unreachable analyzer.\n \"unusedparams\": true // Enable the unusedparams analyzer.\n}\n...\n```\n", EnumKeys: EnumKeys{ ValueType: "bool", Keys: []EnumKey{ @@ -245,7 +228,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "\"buildtag\"", - Doc: "check that +build tags are well-formed and correctly located", + Doc: "check //go:build and // +build directives", Default: "true", }, { @@ -268,6 +251,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.", Default: "true", }, + { + Name: "\"directive\"", + Doc: "check Go toolchain directives such as //go:debug\n\nThis analyzer checks for problems with known Go toolchain directives\nin all Go source files in a package directory, even those excluded by\n//go:build constraints, and all non-Go source files too.\n\nFor //go:debug (see https://go.dev/doc/godebug), the analyzer checks\nthat the directives are placed only in Go source files, only above the\npackage comment, and only in package main or *_test.go files.\n\nSupport for other known directives may be added in the future.\n\nThis analyzer does not check //go:build, which is handled by the\nbuildtag analyzer.\n", + Default: "true", + }, { Name: "\"embed\"", Doc: "check for //go:embed directive import\n\nThis analyzer checks that the embed package is imported when source code contains //go:embed comment directives.\nThe embed package must be imported for //go:embed directives to function.import _ \"embed\".", @@ -280,7 +268,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "\"fieldalignment\"", - Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n", + Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n", Default: "false", }, { @@ -300,7 +288,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "\"loopclosure\"", - Doc: "check references to loop variables from within nested functions\n\nThis analyzer checks for references to loop variables from within a\nfunction literal inside the loop body. It checks only instances where\nthe function literal is called in a defer or go statement that is the\nlast statement in the loop body, as otherwise we would need whole\nprogram analysis.\n\nFor example:\n\n\tfor i, v := range s {\n\t\tgo func() {\n\t\t\tprintln(i, v) // not what you might expect\n\t\t}()\n\t}\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines", + Doc: "check references to loop variables from within nested functions\n\nThis analyzer reports places where a function literal references the\niteration variable of an enclosing loop, and the loop calls the function\nin such a way (e.g. with go or defer) that it may outlive the loop\niteration and possibly observe the wrong value of the variable.\n\nIn this example, all the deferred functions run after the loop has\ncompleted, so all observe the final value of v.\n\n for _, v := range list {\n defer func() {\n use(v) // incorrect\n }()\n }\n\nOne fix is to create a new variable for each iteration of the loop:\n\n for _, v := range list {\n v := v // new var per iteration\n defer func() {\n use(v) // ok\n }()\n }\n\nThe next example uses a go statement and has a similar problem.\nIn addition, it has a data race because the loop updates v\nconcurrent with the goroutines accessing it.\n\n for _, v := range elem {\n go func() {\n use(v) // incorrect, and a data race\n }()\n }\n\nA fix is the same as before. The checker also reports problems\nin goroutines started by golang.org/x/sync/errgroup.Group.\nA hard-to-spot variant of this form is common in parallel tests:\n\n func Test(t *testing.T) {\n for _, test := range tests {\n t.Run(test.name, func(t *testing.T) {\n t.Parallel()\n use(test) // incorrect, and a data race\n })\n }\n }\n\nThe t.Parallel() call causes the rest of the function to execute\nconcurrent with the loop.\n\nThe analyzer reports references only in the last statement,\nas it is not deep enough to understand the effects of subsequent\nstatements that might render the reference benign.\n(\"Last statement\" is defined recursively in compound\nstatements such as if, switch, and select.)\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines", Default: "true", }, { @@ -378,6 +366,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.", Default: "true", }, + { + Name: "\"timeformat\"", + Doc: "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n", + Default: "true", + }, { Name: "\"unmarshal\"", Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.", @@ -433,6 +426,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n", Default: "true", }, + { + Name: "\"unusedvariable\"", + Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n", + Default: "false", + }, { Name: "\"fillstruct\"", Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n", @@ -451,7 +449,7 @@ var GeneratedAPIJSON = &APIJSON{ { Name: "staticcheck", Type: "bool", - Doc: "staticcheck enables additional analyses from staticcheck.io.\n", + Doc: "staticcheck enables additional analyses from staticcheck.io.\nThese analyses are documented on\n[Staticcheck's website](https://staticcheck.io/docs/checks/).\n", Default: "false", Status: "experimental", Hierarchy: "ui.diagnostic", @@ -489,6 +487,24 @@ var GeneratedAPIJSON = &APIJSON{ Status: "experimental", Hierarchy: "ui.diagnostic", }, + { + Name: "vulncheck", + Type: "enum", + Doc: "vulncheck enables vulnerability scanning.\n", + EnumValues: []EnumValue{ + { + Value: "\"Imports\"", + Doc: "`\"Imports\"`: In Imports mode, `gopls` will report vulnerabilities that affect packages\ndirectly and indirectly used by the analyzed main module.\n", + }, + { + Value: "\"Off\"", + Doc: "`\"Off\"`: Disable vulnerability analysis.\n", + }, + }, + Default: "\"Off\"", + Status: "experimental", + Hierarchy: "ui.diagnostic", + }, { Name: "diagnosticsDelay", Type: "time.Duration", @@ -498,12 +514,49 @@ var GeneratedAPIJSON = &APIJSON{ Hierarchy: "ui.diagnostic", }, { - Name: "experimentalWatchedFileDelay", - Type: "time.Duration", - Doc: "experimentalWatchedFileDelay controls the amount of time that gopls waits\nfor additional workspace/didChangeWatchedFiles notifications to arrive,\nbefore processing all such notifications in a single batch. This is\nintended for use by LSP clients that don't support their own batching of\nfile system notifications.\n\nThis option must be set to a valid duration string, for example `\"100ms\"`.\n", - Default: "\"0s\"", + Name: "hints", + Type: "map[string]bool", + Doc: "hints specify inlay hints that users want to see. A full list of hints\nthat gopls uses can be found in\n[inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).\n", + EnumKeys: EnumKeys{Keys: []EnumKey{ + { + Name: "\"assignVariableTypes\"", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", + Default: "false", + }, + { + Name: "\"compositeLiteralFields\"", + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", + Default: "false", + }, + { + Name: "\"compositeLiteralTypes\"", + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", + Default: "false", + }, + { + Name: "\"constantValues\"", + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", + Default: "false", + }, + { + Name: "\"functionTypeParameters\"", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", + Default: "false", + }, + { + Name: "\"parameterNames\"", + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", + Default: "false", + }, + { + Name: "\"rangeVariableTypes\"", + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", + Default: "false", + }, + }}, + Default: "{}", Status: "experimental", - Hierarchy: "ui.diagnostic", + Hierarchy: "ui.inlayhint", }, { Name: "codelenses", @@ -527,6 +580,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "Regenerates cgo definitions.", Default: "true", }, + { + Name: "\"run_govulncheck\"", + Doc: "Run vulnerability check (`govulncheck`).", + Default: "false", + }, { Name: "\"test\"", Doc: "Runs `go test` for a specific set of test or benchmark functions.", @@ -560,6 +618,22 @@ var GeneratedAPIJSON = &APIJSON{ Status: "experimental", Hierarchy: "ui", }, + { + Name: "noSemanticString", + Type: "bool", + Doc: "noSemanticString turns off the sending of the semantic token 'string'\n", + Default: "false", + Status: "experimental", + Hierarchy: "ui", + }, + { + Name: "noSemanticNumber", + Type: "bool", + Doc: "noSemanticNumber turns off the sending of the semantic token 'number'\n", + Default: "false", + Status: "experimental", + Hierarchy: "ui", + }, { Name: "local", Type: "string", @@ -614,6 +688,13 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "Runs `go mod edit -go=version` for a module.", ArgDoc: "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The version to pass to `go mod edit -go`.\n\t\"Version\": string,\n}", }, + { + Command: "gopls.fetch_vulncheck_result", + Title: "Get known vulncheck result", + Doc: "Fetch the result of latest vulnerability check (`govulncheck`).", + ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}", + ResultDoc: "map[golang.org/x/tools/gopls/internal/lsp/protocol.DocumentURI]*golang.org/x/tools/gopls/internal/govulncheck.Result", + }, { Command: "gopls.gc_details", Title: "Toggle gc_details", @@ -626,12 +707,6 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "Runs `go generate` for a given directory.", ArgDoc: "{\n\t// URI for the directory to generate.\n\t\"Dir\": string,\n\t// Whether to generate recursively (go generate ./...)\n\t\"Recursive\": bool,\n}", }, - { - Command: "gopls.generate_gopls_mod", - Title: "Generate gopls.mod", - Doc: "(Re)generate the gopls.mod file for a workspace.", - ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}", - }, { Command: "gopls.go_get_package", Title: "go get a package", @@ -664,19 +739,25 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "Removes a dependency from the go.mod file of a module.", ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}", }, + { + Command: "gopls.reset_go_mod_diagnostics", + Title: "Reset go.mod diagnostics", + Doc: "Reset diagnostics in the go.mod file of a module.", + ArgDoc: "{\n\t\"URIArg\": {\n\t\t\"URI\": string,\n\t},\n\t// Optional: source of the diagnostics to reset.\n\t// If not set, all resettable go.mod diagnostics will be cleared.\n\t\"DiagnosticSource\": string,\n}", + }, + { + Command: "gopls.run_govulncheck", + Title: "Run govulncheck.", + Doc: "Run vulnerability check (`govulncheck`).", + ArgDoc: "{\n\t// Any document in the directory from which govulncheck will run.\n\t\"URI\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}", + ResultDoc: "{\n\t// Token holds the progress token for LSP workDone reporting of the vulncheck\n\t// invocation.\n\t\"Token\": interface{},\n}", + }, { Command: "gopls.run_tests", Title: "Run test(s)", Doc: "Runs `go test` for a specific set of test or benchmark functions.", ArgDoc: "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}", }, - { - Command: "gopls.run_vulncheck_exp", - Title: "Run vulncheck (experimental)", - Doc: "Run vulnerability check (`govulncheck`).", - ArgDoc: "{\n\t// Dir is the directory from which vulncheck will run from.\n\t\"Dir\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}", - ResultDoc: "{\n\t\"Vuln\": []{\n\t\t\"ID\": string,\n\t\t\"Details\": string,\n\t\t\"Aliases\": []string,\n\t\t\"Symbol\": string,\n\t\t\"PkgPath\": string,\n\t\t\"ModPath\": string,\n\t\t\"URL\": string,\n\t\t\"CurrentVersion\": string,\n\t\t\"FixedVersion\": string,\n\t\t\"CallStacks\": [][]golang.org/x/tools/internal/lsp/command.StackEntry,\n\t\t\"CallStackSummaries\": []string,\n\t},\n}", - }, { Command: "gopls.start_debugging", Title: "Start the gopls debug server", @@ -737,6 +818,11 @@ var GeneratedAPIJSON = &APIJSON{ Title: "Regenerate cgo", Doc: "Regenerates cgo definitions.", }, + { + Lens: "run_govulncheck", + Title: "Run govulncheck.", + Doc: "Run vulnerability check (`govulncheck`).", + }, { Lens: "test", Title: "Run test(s) (legacy)", @@ -786,7 +872,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "buildtag", - Doc: "check that +build tags are well-formed and correctly located", + Doc: "check //go:build and // +build directives", Default: true, }, { @@ -809,6 +895,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.", Default: true, }, + { + Name: "directive", + Doc: "check Go toolchain directives such as //go:debug\n\nThis analyzer checks for problems with known Go toolchain directives\nin all Go source files in a package directory, even those excluded by\n//go:build constraints, and all non-Go source files too.\n\nFor //go:debug (see https://go.dev/doc/godebug), the analyzer checks\nthat the directives are placed only in Go source files, only above the\npackage comment, and only in package main or *_test.go files.\n\nSupport for other known directives may be added in the future.\n\nThis analyzer does not check //go:build, which is handled by the\nbuildtag analyzer.\n", + Default: true, + }, { Name: "embed", Doc: "check for //go:embed directive import\n\nThis analyzer checks that the embed package is imported when source code contains //go:embed comment directives.\nThe embed package must be imported for //go:embed directives to function.import _ \"embed\".", @@ -821,7 +912,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "fieldalignment", - Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n", + Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n", }, { Name: "httpresponse", @@ -840,7 +931,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "loopclosure", - Doc: "check references to loop variables from within nested functions\n\nThis analyzer checks for references to loop variables from within a\nfunction literal inside the loop body. It checks only instances where\nthe function literal is called in a defer or go statement that is the\nlast statement in the loop body, as otherwise we would need whole\nprogram analysis.\n\nFor example:\n\n\tfor i, v := range s {\n\t\tgo func() {\n\t\t\tprintln(i, v) // not what you might expect\n\t\t}()\n\t}\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines", + Doc: "check references to loop variables from within nested functions\n\nThis analyzer reports places where a function literal references the\niteration variable of an enclosing loop, and the loop calls the function\nin such a way (e.g. with go or defer) that it may outlive the loop\niteration and possibly observe the wrong value of the variable.\n\nIn this example, all the deferred functions run after the loop has\ncompleted, so all observe the final value of v.\n\n for _, v := range list {\n defer func() {\n use(v) // incorrect\n }()\n }\n\nOne fix is to create a new variable for each iteration of the loop:\n\n for _, v := range list {\n v := v // new var per iteration\n defer func() {\n use(v) // ok\n }()\n }\n\nThe next example uses a go statement and has a similar problem.\nIn addition, it has a data race because the loop updates v\nconcurrent with the goroutines accessing it.\n\n for _, v := range elem {\n go func() {\n use(v) // incorrect, and a data race\n }()\n }\n\nA fix is the same as before. The checker also reports problems\nin goroutines started by golang.org/x/sync/errgroup.Group.\nA hard-to-spot variant of this form is common in parallel tests:\n\n func Test(t *testing.T) {\n for _, test := range tests {\n t.Run(test.name, func(t *testing.T) {\n t.Parallel()\n use(test) // incorrect, and a data race\n })\n }\n }\n\nThe t.Parallel() call causes the rest of the function to execute\nconcurrent with the loop.\n\nThe analyzer reports references only in the last statement,\nas it is not deep enough to understand the effects of subsequent\nstatements that might render the reference benign.\n(\"Last statement\" is defined recursively in compound\nstatements such as if, switch, and select.)\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines", Default: true, }, { @@ -916,6 +1007,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.", Default: true, }, + { + Name: "timeformat", + Doc: "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n", + Default: true, + }, { Name: "unmarshal", Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.", @@ -968,6 +1064,10 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n", Default: true, }, + { + Name: "unusedvariable", + Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n", + }, { Name: "fillstruct", Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n", @@ -979,4 +1079,34 @@ var GeneratedAPIJSON = &APIJSON{ Default: true, }, }, + Hints: []*HintJSON{ + { + Name: "assignVariableTypes", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", + }, + { + Name: "compositeLiteralFields", + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", + }, + { + Name: "compositeLiteralTypes", + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", + }, + { + Name: "constantValues", + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", + }, + { + Name: "functionTypeParameters", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", + }, + { + Name: "parameterNames", + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", + }, + { + Name: "rangeVariableTypes", + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", + }, + }, } diff --git a/gopls/internal/lsp/source/call_hierarchy.go b/gopls/internal/lsp/source/call_hierarchy.go new file mode 100644 index 00000000000..6d67dc03391 --- /dev/null +++ b/gopls/internal/lsp/source/call_hierarchy.go @@ -0,0 +1,311 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "path/filepath" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" +) + +// PrepareCallHierarchy returns an array of CallHierarchyItem for a file and the position within the file. +func PrepareCallHierarchy(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.CallHierarchyItem, error) { + ctx, done := event.Start(ctx, "source.PrepareCallHierarchy") + defer done() + + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + pos, err := pgf.PositionPos(pp) + if err != nil { + return nil, err + } + + obj := referencedObject(pkg, pgf, pos) + if obj == nil { + return nil, nil + } + + if _, ok := obj.Type().Underlying().(*types.Signature); !ok { + return nil, nil + } + + declLoc, err := mapPosition(ctx, pkg.FileSet(), snapshot, obj.Pos(), adjustedObjEnd(obj)) + if err != nil { + return nil, err + } + rng := declLoc.Range + + callHierarchyItem := protocol.CallHierarchyItem{ + Name: obj.Name(), + Kind: protocol.Function, + Tags: []protocol.SymbolTag{}, + Detail: fmt.Sprintf("%s • %s", obj.Pkg().Path(), filepath.Base(declLoc.URI.SpanURI().Filename())), + URI: declLoc.URI, + Range: rng, + SelectionRange: rng, + } + return []protocol.CallHierarchyItem{callHierarchyItem}, nil +} + +// IncomingCalls returns an array of CallHierarchyIncomingCall for a file and the position within the file. +func IncomingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyIncomingCall, error) { + ctx, done := event.Start(ctx, "source.IncomingCalls") + defer done() + + refs, err := referencesV2(ctx, snapshot, fh, pos, false) + if err != nil { + if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) { + return nil, nil + } + return nil, err + } + + // Group references by their enclosing function declaration. + incomingCalls := make(map[protocol.Location]*protocol.CallHierarchyIncomingCall) + for _, ref := range refs { + callItem, err := enclosingNodeCallItem(ctx, snapshot, ref.PkgPath, ref.Location) + if err != nil { + event.Error(ctx, "error getting enclosing node", err, tag.Method.Of(ref.Name)) + continue + } + loc := protocol.Location{ + URI: callItem.URI, + Range: callItem.Range, + } + call, ok := incomingCalls[loc] + if !ok { + call = &protocol.CallHierarchyIncomingCall{From: callItem} + incomingCalls[loc] = call + } + call.FromRanges = append(call.FromRanges, ref.Location.Range) + } + + // Flatten the map of pointers into a slice of values. + incomingCallItems := make([]protocol.CallHierarchyIncomingCall, 0, len(incomingCalls)) + for _, callItem := range incomingCalls { + incomingCallItems = append(incomingCallItems, *callItem) + } + return incomingCallItems, nil +} + +// enclosingNodeCallItem creates a CallHierarchyItem representing the function call at loc. +func enclosingNodeCallItem(ctx context.Context, snapshot Snapshot, pkgPath PackagePath, loc protocol.Location) (protocol.CallHierarchyItem, error) { + // Parse the file containing the reference. + fh, err := snapshot.GetFile(ctx, loc.URI.SpanURI()) + if err != nil { + return protocol.CallHierarchyItem{}, err + } + // TODO(adonovan): opt: before parsing, trim the bodies of functions + // that don't contain the reference, using either a scanner-based + // implementation such as https://go.dev/play/p/KUrObH1YkX8 + // (~31% speedup), or a byte-oriented implementation (2x speedup). + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return protocol.CallHierarchyItem{}, err + } + start, end, err := pgf.RangePos(loc.Range) + if err != nil { + return protocol.CallHierarchyItem{}, err + } + + // Find the enclosing function, if any, and the number of func literals in between. + var funcDecl *ast.FuncDecl + var funcLit *ast.FuncLit // innermost function literal + var litCount int + path, _ := astutil.PathEnclosingInterval(pgf.File, start, end) +outer: + for _, node := range path { + switch n := node.(type) { + case *ast.FuncDecl: + funcDecl = n + break outer + case *ast.FuncLit: + litCount++ + if litCount > 1 { + continue + } + funcLit = n + } + } + + nameIdent := path[len(path)-1].(*ast.File).Name + kind := protocol.Package + if funcDecl != nil { + nameIdent = funcDecl.Name + kind = protocol.Function + } + + nameStart, nameEnd := nameIdent.Pos(), nameIdent.End() + if funcLit != nil { + nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos() + kind = protocol.Function + } + rng, err := pgf.PosRange(nameStart, nameEnd) + if err != nil { + return protocol.CallHierarchyItem{}, err + } + + name := nameIdent.Name + for i := 0; i < litCount; i++ { + name += ".func()" + } + + return protocol.CallHierarchyItem{ + Name: name, + Kind: kind, + Tags: []protocol.SymbolTag{}, + Detail: fmt.Sprintf("%s • %s", pkgPath, filepath.Base(fh.URI().Filename())), + URI: loc.URI, + Range: rng, + SelectionRange: rng, + }, nil +} + +// OutgoingCalls returns an array of CallHierarchyOutgoingCall for a file and the position within the file. +func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.CallHierarchyOutgoingCall, error) { + ctx, done := event.Start(ctx, "source.OutgoingCalls") + defer done() + + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + pos, err := pgf.PositionPos(pp) + if err != nil { + return nil, err + } + + obj := referencedObject(pkg, pgf, pos) + if obj == nil { + return nil, nil + } + + if _, ok := obj.Type().Underlying().(*types.Signature); !ok { + return nil, nil + } + + // Skip builtins. + if obj.Pkg() == nil { + return nil, nil + } + + if !obj.Pos().IsValid() { + return nil, bug.Errorf("internal error: object %s.%s missing position", obj.Pkg().Path(), obj.Name()) + } + + declFile := pkg.FileSet().File(obj.Pos()) + if declFile == nil { + return nil, bug.Errorf("file not found for %d", obj.Pos()) + } + + uri := span.URIFromPath(declFile.Name()) + offset, err := safetoken.Offset(declFile, obj.Pos()) + if err != nil { + return nil, err + } + + // Use TypecheckFull as we want to inspect the body of the function declaration. + declPkg, declPGF, err := PackageForFile(ctx, snapshot, uri, TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + + declPos, err := safetoken.Pos(declPGF.Tok, offset) + if err != nil { + return nil, err + } + + declNode, _, _ := FindDeclInfo([]*ast.File{declPGF.File}, declPos) + if declNode == nil { + // TODO(rfindley): why don't we return an error here, or even bug.Errorf? + return nil, nil + // return nil, bug.Errorf("failed to find declaration for object %s.%s", obj.Pkg().Path(), obj.Name()) + } + + type callRange struct { + start, end token.Pos + } + callRanges := []callRange{} + ast.Inspect(declNode, func(n ast.Node) bool { + if call, ok := n.(*ast.CallExpr); ok { + var start, end token.Pos + switch n := call.Fun.(type) { + case *ast.SelectorExpr: + start, end = n.Sel.NamePos, call.Lparen + case *ast.Ident: + start, end = n.NamePos, call.Lparen + case *ast.FuncLit: + // while we don't add the function literal as an 'outgoing' call + // we still want to traverse into it + return true + default: + // ignore any other kind of call expressions + // for ex: direct function literal calls since that's not an 'outgoing' call + return false + } + callRanges = append(callRanges, callRange{start: start, end: end}) + } + return true + }) + + outgoingCalls := map[token.Pos]*protocol.CallHierarchyOutgoingCall{} + for _, callRange := range callRanges { + obj := referencedObject(declPkg, declPGF, callRange.start) + if obj == nil { + continue + } + + // ignore calls to builtin functions + if obj.Pkg() == nil { + continue + } + + outgoingCall, ok := outgoingCalls[obj.Pos()] + if !ok { + loc, err := mapPosition(ctx, declPkg.FileSet(), snapshot, obj.Pos(), obj.Pos()+token.Pos(len(obj.Name()))) + if err != nil { + return nil, err + } + outgoingCall = &protocol.CallHierarchyOutgoingCall{ + To: protocol.CallHierarchyItem{ + Name: obj.Name(), + Kind: protocol.Function, + Tags: []protocol.SymbolTag{}, + Detail: fmt.Sprintf("%s • %s", obj.Pkg().Path(), filepath.Base(loc.URI.SpanURI().Filename())), + URI: loc.URI, + Range: loc.Range, + SelectionRange: loc.Range, + }, + } + outgoingCalls[obj.Pos()] = outgoingCall + } + + rng, err := declPGF.PosRange(callRange.start, callRange.end) + if err != nil { + return nil, err + } + outgoingCall.FromRanges = append(outgoingCall.FromRanges, rng) + } + + outgoingCallItems := make([]protocol.CallHierarchyOutgoingCall, 0, len(outgoingCalls)) + for _, callItem := range outgoingCalls { + outgoingCallItems = append(outgoingCallItems, *callItem) + } + return outgoingCallItems, nil +} diff --git a/gopls/internal/lsp/source/code_lens.go b/gopls/internal/lsp/source/code_lens.go new file mode 100644 index 00000000000..dd66fe5f382 --- /dev/null +++ b/gopls/internal/lsp/source/code_lens.go @@ -0,0 +1,248 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "go/ast" + "go/token" + "go/types" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" +) + +type LensFunc func(context.Context, Snapshot, FileHandle) ([]protocol.CodeLens, error) + +// LensFuncs returns the supported lensFuncs for Go files. +func LensFuncs() map[command.Command]LensFunc { + return map[command.Command]LensFunc{ + command.Generate: goGenerateCodeLens, + command.Test: runTestCodeLens, + command.RegenerateCgo: regenerateCgoLens, + command.GCDetails: toggleDetailsCodeLens, + } +} + +var ( + testRe = regexp.MustCompile("^Test[^a-z]") + benchmarkRe = regexp.MustCompile("^Benchmark[^a-z]") +) + +func runTestCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { + codeLens := make([]protocol.CodeLens, 0) + + fns, err := TestsAndBenchmarks(ctx, snapshot, fh) + if err != nil { + return nil, err + } + puri := protocol.URIFromSpanURI(fh.URI()) + for _, fn := range fns.Tests { + cmd, err := command.NewTestCommand("run test", puri, []string{fn.Name}, nil) + if err != nil { + return nil, err + } + rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start} + codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) + } + + for _, fn := range fns.Benchmarks { + cmd, err := command.NewTestCommand("run benchmark", puri, nil, []string{fn.Name}) + if err != nil { + return nil, err + } + rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start} + codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) + } + + if len(fns.Benchmarks) > 0 { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + // add a code lens to the top of the file which runs all benchmarks in the file + rng, err := pgf.PosRange(pgf.File.Package, pgf.File.Package) + if err != nil { + return nil, err + } + var benches []string + for _, fn := range fns.Benchmarks { + benches = append(benches, fn.Name) + } + cmd, err := command.NewTestCommand("run file benchmarks", puri, nil, benches) + if err != nil { + return nil, err + } + codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) + } + return codeLens, nil +} + +type testFn struct { + Name string + Rng protocol.Range +} + +type testFns struct { + Tests []testFn + Benchmarks []testFn +} + +func TestsAndBenchmarks(ctx context.Context, snapshot Snapshot, fh FileHandle) (testFns, error) { + var out testFns + + if !strings.HasSuffix(fh.URI().Filename(), "_test.go") { + return out, nil + } + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return out, err + } + + for _, d := range pgf.File.Decls { + fn, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + + rng, err := pgf.NodeRange(fn) + if err != nil { + return out, err + } + + if matchTestFunc(fn, pkg, testRe, "T") { + out.Tests = append(out.Tests, testFn{fn.Name.Name, rng}) + } + + if matchTestFunc(fn, pkg, benchmarkRe, "B") { + out.Benchmarks = append(out.Benchmarks, testFn{fn.Name.Name, rng}) + } + } + + return out, nil +} + +func matchTestFunc(fn *ast.FuncDecl, pkg Package, nameRe *regexp.Regexp, paramID string) bool { + // Make sure that the function name matches a test function. + if !nameRe.MatchString(fn.Name.Name) { + return false + } + info := pkg.GetTypesInfo() + if info == nil { + return false + } + obj := info.ObjectOf(fn.Name) + if obj == nil { + return false + } + sig, ok := obj.Type().(*types.Signature) + if !ok { + return false + } + // Test functions should have only one parameter. + if sig.Params().Len() != 1 { + return false + } + + // Check the type of the only parameter + paramTyp, ok := sig.Params().At(0).Type().(*types.Pointer) + if !ok { + return false + } + named, ok := paramTyp.Elem().(*types.Named) + if !ok { + return false + } + namedObj := named.Obj() + if namedObj.Pkg().Path() != "testing" { + return false + } + return namedObj.Id() == paramID +} + +func goGenerateCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + const ggDirective = "//go:generate" + for _, c := range pgf.File.Comments { + for _, l := range c.List { + if !strings.HasPrefix(l.Text, ggDirective) { + continue + } + rng, err := pgf.PosRange(l.Pos(), l.Pos()+token.Pos(len(ggDirective))) + if err != nil { + return nil, err + } + dir := protocol.URIFromSpanURI(span.URIFromPath(filepath.Dir(fh.URI().Filename()))) + nonRecursiveCmd, err := command.NewGenerateCommand("run go generate", command.GenerateArgs{Dir: dir, Recursive: false}) + if err != nil { + return nil, err + } + recursiveCmd, err := command.NewGenerateCommand("run go generate ./...", command.GenerateArgs{Dir: dir, Recursive: true}) + if err != nil { + return nil, err + } + return []protocol.CodeLens{ + {Range: rng, Command: recursiveCmd}, + {Range: rng, Command: nonRecursiveCmd}, + }, nil + + } + } + return nil, nil +} + +func regenerateCgoLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + var c *ast.ImportSpec + for _, imp := range pgf.File.Imports { + if imp.Path.Value == `"C"` { + c = imp + } + } + if c == nil { + return nil, nil + } + rng, err := pgf.NodeRange(c) + if err != nil { + return nil, err + } + puri := protocol.URIFromSpanURI(fh.URI()) + cmd, err := command.NewRegenerateCgoCommand("regenerate cgo definitions", command.URIArg{URI: puri}) + if err != nil { + return nil, err + } + return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil +} + +func toggleDetailsCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + if !pgf.File.Package.IsValid() { + // Without a package name we have nowhere to put the codelens, so give up. + return nil, nil + } + rng, err := pgf.PosRange(pgf.File.Package, pgf.File.Package) + if err != nil { + return nil, err + } + puri := protocol.URIFromSpanURI(fh.URI()) + cmd, err := command.NewGCDetailsCommand("Toggle gc annotation details", puri) + if err != nil { + return nil, err + } + return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil +} diff --git a/internal/lsp/source/comment.go b/gopls/internal/lsp/source/comment.go similarity index 99% rename from internal/lsp/source/comment.go rename to gopls/internal/lsp/source/comment.go index 000d6136c80..ff6d11f4ff7 100644 --- a/internal/lsp/source/comment.go +++ b/gopls/internal/lsp/source/comment.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.19 +// +build !go1.19 + package source import ( @@ -226,7 +229,7 @@ func unindent(block []string) { prefix := block[0][0:indentLen(block[0])] for _, line := range block { if !isBlank(line) { - prefix = commonPrefix(prefix, line[0:indentLen(line)]) + prefix = commonPrefix(prefix, line) } } n := len(prefix) diff --git a/gopls/internal/lsp/source/comment_go118.go b/gopls/internal/lsp/source/comment_go118.go new file mode 100644 index 00000000000..ca4ab9d3e1c --- /dev/null +++ b/gopls/internal/lsp/source/comment_go118.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package source + +// Starting with go1.19, the formatting of comments has changed, and there +// is a new package (go/doc/comment) for processing them. +// As long as gopls has to compile under earlier versions, tests +// have to pass with both the old and new code, which produce +// slightly different results. (cmd/test/definition.go, source/comment_test.go, +// and source/source_test.go) Each of the test files checks the results +// with a function, tests.CheckSameMarkdown, that accepts both the old and the new +// results. (The old code escapes many characters the new code does not, +// and the new code sometimes adds a blank line.) + +// When gopls no longer needs to compile with go1.18, the old comment.go should +// be replaced by this file, the golden test files should be updated. +// (and checkSameMarkdown() could be replaced by a simple comparison.) + +import "go/doc/comment" + +// CommentToMarkdown converts comment text to formatted markdown. +// The comment was prepared by DocReader, +// so it is known not to have leading, trailing blank lines +// nor to have trailing spaces at the end of lines. +// The comment markers have already been removed. +func CommentToMarkdown(text string) string { + var p comment.Parser + doc := p.Parse(text) + var pr comment.Printer + // The default produces {#Hdr-...} tags for headings. + // vscode displays thems, which is undesirable. + // The godoc for comment.Printer says the tags + // avoid a security problem. + pr.HeadingID = func(*comment.Heading) string { return "" } + easy := pr.Markdown(doc) + return string(easy) +} diff --git a/gopls/internal/lsp/source/comment_go118_test.go b/gopls/internal/lsp/source/comment_go118_test.go new file mode 100644 index 00000000000..b48b2e753ce --- /dev/null +++ b/gopls/internal/lsp/source/comment_go118_test.go @@ -0,0 +1,371 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package source + +import ( + "bytes" + "reflect" + "strings" + "testing" +) + +// This file is a copy of go/doc/comment_test.go with the exception for +// the test cases for TestEmphasize and TestCommentEscape + +var headingTests = []struct { + line string + ok bool +}{ + {"Section", true}, + {"A typical usage", true}, + {"Ī”Ī›Īž is Greek", true}, + {"Foo 42", true}, + {"", false}, + {"section", false}, + {"A typical usage:", false}, + {"This code:", false}, + {"Ī“ is Greek", false}, + {"Foo §", false}, + {"Fermat's Last Sentence", true}, + {"Fermat's", true}, + {"'sX", false}, + {"Ted 'Too' Bar", false}, + {"Use n+m", false}, + {"Scanning:", false}, + {"N:M", false}, +} + +func TestIsHeading(t *testing.T) { + for _, tt := range headingTests { + if h := heading(tt.line); (len(h) > 0) != tt.ok { + t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok) + } + } +} + +var blocksTests = []struct { + in string + out []block + text string +}{ + { + in: `Para 1. +Para 1 line 2. + +Para 2. + +Section + +Para 3. + + pre + pre1 + +Para 4. + + pre + pre1 + + pre2 + +Para 5. + + + pre + + + pre1 + pre2 + +Para 6. + pre + pre2 +`, + out: []block{ + {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}}, + {opPara, []string{"Para 2.\n"}}, + {opHead, []string{"Section"}}, + {opPara, []string{"Para 3.\n"}}, + {opPre, []string{"pre\n", "pre1\n"}}, + {opPara, []string{"Para 4.\n"}}, + {opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}}, + {opPara, []string{"Para 5.\n"}}, + {opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}}, + {opPara, []string{"Para 6.\n"}}, + {opPre, []string{"pre\n", "pre2\n"}}, + }, + text: `. Para 1. Para 1 line 2. + +. Para 2. + + +. Section + +. Para 3. + +$ pre +$ pre1 + +. Para 4. + +$ pre +$ pre1 + +$ pre2 + +. Para 5. + +$ pre + + +$ pre1 +$ pre2 + +. Para 6. + +$ pre +$ pre2 +`, + }, + { + in: "Para.\n\tshould not be ``escaped''", + out: []block{ + {opPara, []string{"Para.\n"}}, + {opPre, []string{"should not be ``escaped''"}}, + }, + text: ". Para.\n\n$ should not be ``escaped''", + }, + { + in: "// A very long line of 46 char for line wrapping.", + out: []block{ + {opPara, []string{"// A very long line of 46 char for line wrapping."}}, + }, + text: `. // A very long line of 46 char for line +. // wrapping. +`, + }, + { + in: `/* A very long line of 46 char for line wrapping. +A very long line of 46 char for line wrapping. */`, + out: []block{ + {opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}}, + }, + text: `. /* A very long line of 46 char for line +. wrapping. A very long line of 46 char +. for line wrapping. */ +`, + }, +} + +func TestBlocks(t *testing.T) { + for i, tt := range blocksTests { + b := blocks(tt.in) + if !reflect.DeepEqual(b, tt.out) { + t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out) + } + } +} + +// This has been modified from go/doc to use markdown links instead of html ones +// and use markdown escaping instead oh html +var emphasizeTests = []struct { + in, out string +}{ + {"", ""}, + {"http://[::1]:8080/foo.txt", `[http\:\/\/\[\:\:1\]\:8080\/foo\.txt](http://[::1]:8080/foo.txt)`}, + {"before (https://www.google.com) after", `before \([https\:\/\/www\.google\.com](https://www.google.com)\) after`}, + {"before https://www.google.com:30/x/y/z:b::c. After", `before [https\:\/\/www\.google\.com\:30\/x\/y\/z\:b\:\:c](https://www.google.com:30/x/y/z:b::c)\. After`}, + {"http://www.google.com/path/:;!-/?query=%34b#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34b\#093124](http://www.google.com/path/:;!-/?query=%34b#093124)`}, + {"http://www.google.com/path/:;!-/?query=%34bar#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34bar\#093124](http://www.google.com/path/:;!-/?query=%34bar#093124)`}, + {"http://www.google.com/index.html! After", `[http\:\/\/www\.google\.com\/index\.html](http://www.google.com/index.html)\! After`}, + {"http://www.google.com/", `[http\:\/\/www\.google\.com\/](http://www.google.com/)`}, + {"https://www.google.com/", `[https\:\/\/www\.google\.com\/](https://www.google.com/)`}, + {"http://www.google.com/path.", `[http\:\/\/www\.google\.com\/path](http://www.google.com/path)\.`}, + {"http://en.wikipedia.org/wiki/Camellia_(cipher)", `[http\:\/\/en\.wikipedia\.org\/wiki\/Camellia\_\(cipher\)](http://en.wikipedia.org/wiki/Camellia_\(cipher\))`}, + {"(http://www.google.com/)", `\([http\:\/\/www\.google\.com\/](http://www.google.com/)\)`}, + {"http://gmail.com)", `[http\:\/\/gmail\.com](http://gmail.com)\)`}, + {"((http://gmail.com))", `\(\([http\:\/\/gmail\.com](http://gmail.com)\)\)`}, + {"http://gmail.com ((http://gmail.com)) ()", `[http\:\/\/gmail\.com](http://gmail.com) \(\([http\:\/\/gmail\.com](http://gmail.com)\)\) \(\)`}, + {"Foo bar http://example.com/ quux!", `Foo bar [http\:\/\/example\.com\/](http://example.com/) quux\!`}, + {"Hello http://example.com/%2f/ /world.", `Hello [http\:\/\/example\.com\/\%2f\/](http://example.com/%2f/) \/world\.`}, + {"Lorem http: ipsum //host/path", `Lorem http\: ipsum \/\/host\/path`}, + {"javascript://is/not/linked", `javascript\:\/\/is\/not\/linked`}, + {"http://foo", `[http\:\/\/foo](http://foo)`}, + {"art by [[https://www.example.com/person/][Person Name]]", `art by \[\[[https\:\/\/www\.example\.com\/person\/](https://www.example.com/person/)\]\[Person Name\]\]`}, + {"please visit (http://golang.org/)", `please visit \([http\:\/\/golang\.org\/](http://golang.org/)\)`}, + {"please visit http://golang.org/hello())", `please visit [http\:\/\/golang\.org\/hello\(\)](http://golang.org/hello\(\))\)`}, + {"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `[http\:\/\/git\.qemu\.org\/\?p\=qemu\.git\;a\=blob\;f\=qapi\-schema\.json\;hb\=HEAD](http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD)`}, + {"https://foo.bar/bal/x(])", `[https\:\/\/foo\.bar\/bal\/x\(](https://foo.bar/bal/x\()\]\)`}, + {"foo [ http://bar(])", `foo \[ [http\:\/\/bar\(](http://bar\()\]\)`}, +} + +func TestEmphasize(t *testing.T) { + for i, tt := range emphasizeTests { + var buf bytes.Buffer + emphasize(&buf, tt.in, true) + out := buf.String() + if out != tt.out { + t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out) + } + } +} + +func TestCommentEscape(t *testing.T) { + //ldquo -> ulquo and rdquo -> urquo + commentTests := []struct { + in, out string + }{ + {"typically invoked as ``go tool asm'',", "typically invoked as " + ulquo + "go tool asm" + urquo + ","}, + {"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ulquo + "go help test" + urquo + " and " + ulquo + "go help testflag" + urquo}} + for i, tt := range commentTests { + var buf strings.Builder + commentEscape(&buf, tt.in, true) + out := buf.String() + if out != tt.out { + t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out) + } + } +} + +func TestCommentToMarkdown(t *testing.T) { + tests := []struct { + in, out string + }{ + { + in: "F declaration.\n", + out: "F declaration\\.\n", + }, + { + in: ` +F declaration. Lorem ipsum dolor sit amet. +Etiam mattis eros at orci mollis molestie. +`, + out: ` +F declaration\. Lorem ipsum dolor sit amet\. +Etiam mattis eros at orci mollis molestie\. +`, + }, + { + in: ` +F declaration. + +Lorem ipsum dolor sit amet. +Sed id dui turpis. + + + + +Aenean tempus velit non auctor eleifend. +Aenean efficitur a sem id ultricies. + + +Phasellus efficitur mauris et viverra bibendum. +`, + out: ` +F declaration\. + +Lorem ipsum dolor sit amet\. +Sed id dui turpis\. + +Aenean tempus velit non auctor eleifend\. +Aenean efficitur a sem id ultricies\. + +Phasellus efficitur mauris et viverra bibendum\. +`, + }, + { + in: ` +F declaration. + +Aenean tempus velit non auctor eleifend. + +Section + +Lorem ipsum dolor sit amet, consectetur adipiscing elit. + + func foo() {} + + + func bar() {} + +Fusce lorem lacus. + + func foo() {} + + func bar() {} + +Maecenas in lobortis lectus. + + func foo() {} + + func bar() {} + +Phasellus efficitur mauris et viverra bibendum. +`, + out: ` +F declaration\. + +Aenean tempus velit non auctor eleifend\. + +### Section + +Lorem ipsum dolor sit amet, consectetur adipiscing elit\. + + func foo() {} + + + func bar() {} + +Fusce lorem lacus\. + + func foo() {} + + func bar() {} + +Maecenas in lobortis lectus\. + + func foo() {} + + func bar() {} + +Phasellus efficitur mauris et viverra bibendum\. +`, + }, + { + in: ` +F declaration. + + func foo() { + fmt.Println("foo") + } + func bar() { + fmt.Println("bar") + } +`, + out: ` +F declaration\. + + func foo() { + fmt.Println("foo") + } + func bar() { + fmt.Println("bar") + } +`, + }, + } + for i, tt := range tests { + // Comments start with new lines for better readability. So, we should trim them. + tt.in = strings.TrimPrefix(tt.in, "\n") + tt.out = strings.TrimPrefix(tt.out, "\n") + + if out := CommentToMarkdown(tt.in); out != tt.out { + t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out) + } + } +} diff --git a/internal/lsp/source/completion/builtin.go b/gopls/internal/lsp/source/completion/builtin.go similarity index 100% rename from internal/lsp/source/completion/builtin.go rename to gopls/internal/lsp/source/completion/builtin.go diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go new file mode 100644 index 00000000000..b78699f8043 --- /dev/null +++ b/gopls/internal/lsp/source/completion/completion.go @@ -0,0 +1,3027 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package completion provides core functionality for code completion in Go +// editors and tools. +package completion + +import ( + "context" + "fmt" + "go/ast" + "go/constant" + "go/scanner" + "go/token" + "go/types" + "math" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/fuzzy" + "golang.org/x/tools/internal/imports" + "golang.org/x/tools/internal/typeparams" +) + +type CompletionItem struct { + // Label is the primary text the user sees for this completion item. + Label string + + // Detail is supplemental information to present to the user. + // This often contains the type or return type of the completion item. + Detail string + + // InsertText is the text to insert if this item is selected. + // Any of the prefix that has already been typed is not trimmed. + // The insert text does not contain snippets. + InsertText string + + Kind protocol.CompletionItemKind + Tags []protocol.CompletionItemTag + Deprecated bool // Deprecated, prefer Tags if available + + // An optional array of additional TextEdits that are applied when + // selecting this completion. + // + // Additional text edits should be used to change text unrelated to the current cursor position + // (for example adding an import statement at the top of the file if the completion item will + // insert an unqualified type). + AdditionalTextEdits []protocol.TextEdit + + // Depth is how many levels were searched to find this completion. + // For example when completing "foo<>", "fooBar" is depth 0, and + // "fooBar.Baz" is depth 1. + Depth int + + // Score is the internal relevance score. + // A higher score indicates that this completion item is more relevant. + Score float64 + + // snippet is the LSP snippet for the completion item. The LSP + // specification contains details about LSP snippets. For example, a + // snippet for a function with the following signature: + // + // func foo(a, b, c int) + // + // would be: + // + // foo(${1:a int}, ${2: b int}, ${3: c int}) + // + // If Placeholders is false in the CompletionOptions, the above + // snippet would instead be: + // + // foo(${1:}) + snippet *snippet.Builder + + // Documentation is the documentation for the completion item. + Documentation string + + // obj is the object from which this candidate was derived, if any. + // obj is for internal use only. + obj types.Object +} + +// completionOptions holds completion specific configuration. +type completionOptions struct { + unimported bool + documentation bool + fullDocumentation bool + placeholders bool + literal bool + snippets bool + postfix bool + matcher source.Matcher + budget time.Duration +} + +// Snippet is a convenience returns the snippet if available, otherwise +// the InsertText. +// used for an item, depending on if the callee wants placeholders or not. +func (i *CompletionItem) Snippet() string { + if i.snippet != nil { + return i.snippet.String() + } + return i.InsertText +} + +// Scoring constants are used for weighting the relevance of different candidates. +const ( + // stdScore is the base score for all completion items. + stdScore float64 = 1.0 + + // highScore indicates a very relevant completion item. + highScore float64 = 10.0 + + // lowScore indicates an irrelevant or not useful completion item. + lowScore float64 = 0.01 +) + +// matcher matches a candidate's label against the user input. The +// returned score reflects the quality of the match. A score of zero +// indicates no match, and a score of one means a perfect match. +type matcher interface { + Score(candidateLabel string) (score float32) +} + +// prefixMatcher implements case sensitive prefix matching. +type prefixMatcher string + +func (pm prefixMatcher) Score(candidateLabel string) float32 { + if strings.HasPrefix(candidateLabel, string(pm)) { + return 1 + } + return -1 +} + +// insensitivePrefixMatcher implements case insensitive prefix matching. +type insensitivePrefixMatcher string + +func (ipm insensitivePrefixMatcher) Score(candidateLabel string) float32 { + if strings.HasPrefix(strings.ToLower(candidateLabel), string(ipm)) { + return 1 + } + return -1 +} + +// completer contains the necessary information for a single completion request. +type completer struct { + snapshot source.Snapshot + pkg source.Package + qf types.Qualifier // for qualifying typed expressions + mq source.MetadataQualifier // for syntactic qualifying + opts *completionOptions + + // completionContext contains information about the trigger for this + // completion request. + completionContext completionContext + + // fh is a handle to the file associated with this completion request. + fh source.FileHandle + + // filename is the name of the file associated with this completion request. + filename string + + // file is the AST of the file associated with this completion request. + file *ast.File + + // (tokFile, pos) is the position at which the request was triggered. + tokFile *token.File + pos token.Pos + + // path is the path of AST nodes enclosing the position. + path []ast.Node + + // seen is the map that ensures we do not return duplicate results. + seen map[types.Object]bool + + // items is the list of completion items returned. + items []CompletionItem + + // completionCallbacks is a list of callbacks to collect completions that + // require expensive operations. This includes operations where we search + // through the entire module cache. + completionCallbacks []func(opts *imports.Options) error + + // surrounding describes the identifier surrounding the position. + surrounding *Selection + + // inference contains information we've inferred about ideal + // candidates such as the candidate's type. + inference candidateInference + + // enclosingFunc contains information about the function enclosing + // the position. + enclosingFunc *funcInfo + + // enclosingCompositeLiteral contains information about the composite literal + // enclosing the position. + enclosingCompositeLiteral *compLitInfo + + // deepState contains the current state of our deep completion search. + deepState deepCompletionState + + // matcher matches the candidates against the surrounding prefix. + matcher matcher + + // methodSetCache caches the types.NewMethodSet call, which is relatively + // expensive and can be called many times for the same type while searching + // for deep completions. + methodSetCache map[methodSetKey]*types.MethodSet + + // mapper converts the positions in the file from which the completion originated. + mapper *protocol.Mapper + + // startTime is when we started processing this completion request. It does + // not include any time the request spent in the queue. + startTime time.Time + + // scopes contains all scopes defined by nodes in our path, + // including nil values for nodes that don't defined a scope. It + // also includes our package scope and the universal scope at the + // end. + scopes []*types.Scope +} + +// funcInfo holds info about a function object. +type funcInfo struct { + // sig is the function declaration enclosing the position. + sig *types.Signature + + // body is the function's body. + body *ast.BlockStmt +} + +type compLitInfo struct { + // cl is the *ast.CompositeLit enclosing the position. + cl *ast.CompositeLit + + // clType is the type of cl. + clType types.Type + + // kv is the *ast.KeyValueExpr enclosing the position, if any. + kv *ast.KeyValueExpr + + // inKey is true if we are certain the position is in the key side + // of a key-value pair. + inKey bool + + // maybeInFieldName is true if inKey is false and it is possible + // we are completing a struct field name. For example, + // "SomeStruct{<>}" will be inKey=false, but maybeInFieldName=true + // because we _could_ be completing a field name. + maybeInFieldName bool +} + +type importInfo struct { + importPath string + name string +} + +type methodSetKey struct { + typ types.Type + addressable bool +} + +type completionContext struct { + // triggerCharacter is the character used to trigger completion at current + // position, if any. + triggerCharacter string + + // triggerKind is information about how a completion was triggered. + triggerKind protocol.CompletionTriggerKind + + // commentCompletion is true if we are completing a comment. + commentCompletion bool + + // packageCompletion is true if we are completing a package name. + packageCompletion bool +} + +// A Selection represents the cursor position and surrounding identifier. +type Selection struct { + content string + tokFile *token.File + start, end, cursor token.Pos // relative to rng.TokFile + mapper *protocol.Mapper +} + +func (p Selection) Content() string { + return p.content +} + +func (p Selection) Range() (protocol.Range, error) { + return p.mapper.PosRange(p.tokFile, p.start, p.end) +} + +func (p Selection) Prefix() string { + return p.content[:p.cursor-p.start] +} + +func (p Selection) Suffix() string { + return p.content[p.cursor-p.start:] +} + +func (c *completer) setSurrounding(ident *ast.Ident) { + if c.surrounding != nil { + return + } + if !(ident.Pos() <= c.pos && c.pos <= ident.End()) { + return + } + + c.surrounding = &Selection{ + content: ident.Name, + cursor: c.pos, + // Overwrite the prefix only. + tokFile: c.tokFile, + start: ident.Pos(), + end: ident.End(), + mapper: c.mapper, + } + + c.setMatcherFromPrefix(c.surrounding.Prefix()) +} + +func (c *completer) setMatcherFromPrefix(prefix string) { + switch c.opts.matcher { + case source.Fuzzy: + c.matcher = fuzzy.NewMatcher(prefix) + case source.CaseSensitive: + c.matcher = prefixMatcher(prefix) + default: + c.matcher = insensitivePrefixMatcher(strings.ToLower(prefix)) + } +} + +func (c *completer) getSurrounding() *Selection { + if c.surrounding == nil { + c.surrounding = &Selection{ + content: "", + cursor: c.pos, + tokFile: c.tokFile, + start: c.pos, + end: c.pos, + mapper: c.mapper, + } + } + return c.surrounding +} + +// candidate represents a completion candidate. +type candidate struct { + // obj is the types.Object to complete to. + obj types.Object + + // score is used to rank candidates. + score float64 + + // name is the deep object name path, e.g. "foo.bar" + name string + + // detail is additional information about this item. If not specified, + // defaults to type string for the object. + detail string + + // path holds the path from the search root (excluding the candidate + // itself) for a deep candidate. + path []types.Object + + // pathInvokeMask is a bit mask tracking whether each entry in path + // should be formatted with "()" (i.e. whether it is a function + // invocation). + pathInvokeMask uint16 + + // mods contains modifications that should be applied to the + // candidate when inserted. For example, "foo" may be inserted as + // "*foo" or "foo()". + mods []typeModKind + + // addressable is true if a pointer can be taken to the candidate. + addressable bool + + // convertTo is a type that this candidate should be cast to. For + // example, if convertTo is float64, "foo" should be formatted as + // "float64(foo)". + convertTo types.Type + + // imp is the import that needs to be added to this package in order + // for this candidate to be valid. nil if no import needed. + imp *importInfo +} + +func (c candidate) hasMod(mod typeModKind) bool { + for _, m := range c.mods { + if m == mod { + return true + } + } + return false +} + +// ErrIsDefinition is an error that informs the user they got no +// completions because they tried to complete the name of a new object +// being defined. +type ErrIsDefinition struct { + objStr string +} + +func (e ErrIsDefinition) Error() string { + msg := "this is a definition" + if e.objStr != "" { + msg += " of " + e.objStr + } + return msg +} + +// Completion returns a list of possible candidates for completion, given a +// a file and a position. +// +// The selection is computed based on the preceding identifier and can be used by +// the client to score the quality of the completion. For instance, some clients +// may tolerate imperfect matches as valid completion results, since users may make typos. +func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, protoPos protocol.Position, protoContext protocol.CompletionContext) ([]CompletionItem, *Selection, error) { + ctx, done := event.Start(ctx, "completion.Completion") + defer done() + + startTime := time.Now() + + pkg, pgf, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.TypecheckFull, source.NarrowestPackage) + if err != nil || pgf.File.Package == token.NoPos { + // If we can't parse this file or find position for the package + // keyword, it may be missing a package declaration. Try offering + // suggestions for the package declaration. + // Note that this would be the case even if the keyword 'package' is + // present but no package name exists. + items, surrounding, innerErr := packageClauseCompletions(ctx, snapshot, fh, protoPos) + if innerErr != nil { + // return the error for GetParsedFile since it's more relevant in this situation. + return nil, nil, fmt.Errorf("getting file %s for Completion: %w (package completions: %v)", fh.URI(), err, innerErr) + } + return items, surrounding, nil + } + pos, err := pgf.PositionPos(protoPos) + if err != nil { + return nil, nil, err + } + // Completion is based on what precedes the cursor. + // Find the path to the position before pos. + path, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1) + if path == nil { + return nil, nil, fmt.Errorf("cannot find node enclosing position") + } + + // Check if completion at this position is valid. If not, return early. + switch n := path[0].(type) { + case *ast.BasicLit: + // Skip completion inside literals except for ImportSpec + if len(path) > 1 { + if _, ok := path[1].(*ast.ImportSpec); ok { + break + } + } + return nil, nil, nil + case *ast.CallExpr: + if n.Ellipsis.IsValid() && pos > n.Ellipsis && pos <= n.Ellipsis+token.Pos(len("...")) { + // Don't offer completions inside or directly after "...". For + // example, don't offer completions at "<>" in "foo(bar...<>"). + return nil, nil, nil + } + case *ast.Ident: + // reject defining identifiers + if obj, ok := pkg.GetTypesInfo().Defs[n]; ok { + if v, ok := obj.(*types.Var); ok && v.IsField() && v.Embedded() { + // An anonymous field is also a reference to a type. + } else if pgf.File.Name == n { + // Don't skip completions if Ident is for package name. + break + } else { + objStr := "" + if obj != nil { + qual := types.RelativeTo(pkg.GetTypes()) + objStr = types.ObjectString(obj, qual) + } + ans, sel := definition(path, obj, pgf) + if ans != nil { + sort.Slice(ans, func(i, j int) bool { + return ans[i].Score > ans[j].Score + }) + return ans, sel, nil + } + return nil, nil, ErrIsDefinition{objStr: objStr} + } + } + } + + // Collect all surrounding scopes, innermost first. + scopes := source.CollectScopes(pkg.GetTypesInfo(), path, pos) + scopes = append(scopes, pkg.GetTypes().Scope(), types.Universe) + + opts := snapshot.View().Options() + c := &completer{ + pkg: pkg, + snapshot: snapshot, + qf: source.Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()), + mq: source.MetadataQualifierForFile(snapshot, pgf.File, pkg.Metadata()), + completionContext: completionContext{ + triggerCharacter: protoContext.TriggerCharacter, + triggerKind: protoContext.TriggerKind, + }, + fh: fh, + filename: fh.URI().Filename(), + tokFile: pgf.Tok, + file: pgf.File, + path: path, + pos: pos, + seen: make(map[types.Object]bool), + enclosingFunc: enclosingFunction(path, pkg.GetTypesInfo()), + enclosingCompositeLiteral: enclosingCompositeLiteral(path, pos, pkg.GetTypesInfo()), + deepState: deepCompletionState{ + enabled: opts.DeepCompletion, + }, + opts: &completionOptions{ + matcher: opts.Matcher, + unimported: opts.CompleteUnimported, + documentation: opts.CompletionDocumentation && opts.HoverKind != source.NoDocumentation, + fullDocumentation: opts.HoverKind == source.FullDocumentation, + placeholders: opts.UsePlaceholders, + literal: opts.LiteralCompletions && opts.InsertTextFormat == protocol.SnippetTextFormat, + budget: opts.CompletionBudget, + snippets: opts.InsertTextFormat == protocol.SnippetTextFormat, + postfix: opts.ExperimentalPostfixCompletions, + }, + // default to a matcher that always matches + matcher: prefixMatcher(""), + methodSetCache: make(map[methodSetKey]*types.MethodSet), + mapper: pgf.Mapper, + startTime: startTime, + scopes: scopes, + } + + var cancel context.CancelFunc + if c.opts.budget == 0 { + ctx, cancel = context.WithCancel(ctx) + } else { + // timeoutDuration is the completion budget remaining. If less than + // 10ms, set to 10ms + timeoutDuration := time.Until(c.startTime.Add(c.opts.budget)) + if timeoutDuration < 10*time.Millisecond { + timeoutDuration = 10 * time.Millisecond + } + ctx, cancel = context.WithTimeout(ctx, timeoutDuration) + } + defer cancel() + + if surrounding := c.containingIdent(pgf.Src); surrounding != nil { + c.setSurrounding(surrounding) + } + + c.inference = expectedCandidate(ctx, c) + + err = c.collectCompletions(ctx) + if err != nil { + return nil, nil, err + } + + // Deep search collected candidates and their members for more candidates. + c.deepSearch(ctx) + + for _, callback := range c.completionCallbacks { + if err := c.snapshot.RunProcessEnvFunc(ctx, callback); err != nil { + return nil, nil, err + } + } + + // Search candidates populated by expensive operations like + // unimportedMembers etc. for more completion items. + c.deepSearch(ctx) + + // Statement candidates offer an entire statement in certain contexts, as + // opposed to a single object. Add statement candidates last because they + // depend on other candidates having already been collected. + c.addStatementCandidates() + + c.sortItems() + return c.items, c.getSurrounding(), nil +} + +// collectCompletions adds possible completion candidates to either the deep +// search queue or completion items directly for different completion contexts. +func (c *completer) collectCompletions(ctx context.Context) error { + // Inside import blocks, return completions for unimported packages. + for _, importSpec := range c.file.Imports { + if !(importSpec.Path.Pos() <= c.pos && c.pos <= importSpec.Path.End()) { + continue + } + return c.populateImportCompletions(ctx, importSpec) + } + + // Inside comments, offer completions for the name of the relevant symbol. + for _, comment := range c.file.Comments { + if comment.Pos() < c.pos && c.pos <= comment.End() { + c.populateCommentCompletions(ctx, comment) + return nil + } + } + + // Struct literals are handled entirely separately. + if c.wantStructFieldCompletions() { + // If we are definitely completing a struct field name, deep completions + // don't make sense. + if c.enclosingCompositeLiteral.inKey { + c.deepState.enabled = false + } + return c.structLiteralFieldName(ctx) + } + + if lt := c.wantLabelCompletion(); lt != labelNone { + c.labels(lt) + return nil + } + + if c.emptySwitchStmt() { + // Empty switch statements only admit "default" and "case" keywords. + c.addKeywordItems(map[string]bool{}, highScore, CASE, DEFAULT) + return nil + } + + switch n := c.path[0].(type) { + case *ast.Ident: + if c.file.Name == n { + return c.packageNameCompletions(ctx, c.fh.URI(), n) + } else if sel, ok := c.path[1].(*ast.SelectorExpr); ok && sel.Sel == n { + // Is this the Sel part of a selector? + return c.selector(ctx, sel) + } + return c.lexical(ctx) + // The function name hasn't been typed yet, but the parens are there: + // recv.‸(arg) + case *ast.TypeAssertExpr: + // Create a fake selector expression. + return c.selector(ctx, &ast.SelectorExpr{X: n.X}) + case *ast.SelectorExpr: + return c.selector(ctx, n) + // At the file scope, only keywords are allowed. + case *ast.BadDecl, *ast.File: + c.addKeywordCompletions() + default: + // fallback to lexical completions + return c.lexical(ctx) + } + + return nil +} + +// containingIdent returns the *ast.Ident containing pos, if any. It +// synthesizes an *ast.Ident to allow completion in the face of +// certain syntax errors. +func (c *completer) containingIdent(src []byte) *ast.Ident { + // In the normal case, our leaf AST node is the identifier being completed. + if ident, ok := c.path[0].(*ast.Ident); ok { + return ident + } + + pos, tkn, lit := c.scanToken(src) + if !pos.IsValid() { + return nil + } + + fakeIdent := &ast.Ident{Name: lit, NamePos: pos} + + if _, isBadDecl := c.path[0].(*ast.BadDecl); isBadDecl { + // You don't get *ast.Idents at the file level, so look for bad + // decls and use the manually extracted token. + return fakeIdent + } else if c.emptySwitchStmt() { + // Only keywords are allowed in empty switch statements. + // *ast.Idents are not parsed, so we must use the manually + // extracted token. + return fakeIdent + } else if tkn.IsKeyword() { + // Otherwise, manually extract the prefix if our containing token + // is a keyword. This improves completion after an "accidental + // keyword", e.g. completing to "variance" in "someFunc(var<>)". + return fakeIdent + } + + return nil +} + +// scanToken scans pgh's contents for the token containing pos. +func (c *completer) scanToken(contents []byte) (token.Pos, token.Token, string) { + tok := c.pkg.FileSet().File(c.pos) + + var s scanner.Scanner + s.Init(tok, contents, nil, 0) + for { + tknPos, tkn, lit := s.Scan() + if tkn == token.EOF || tknPos >= c.pos { + return token.NoPos, token.ILLEGAL, "" + } + + if len(lit) > 0 && tknPos <= c.pos && c.pos <= tknPos+token.Pos(len(lit)) { + return tknPos, tkn, lit + } + } +} + +func (c *completer) sortItems() { + sort.SliceStable(c.items, func(i, j int) bool { + // Sort by score first. + if c.items[i].Score != c.items[j].Score { + return c.items[i].Score > c.items[j].Score + } + + // Then sort by label so order stays consistent. This also has the + // effect of preferring shorter candidates. + return c.items[i].Label < c.items[j].Label + }) +} + +// emptySwitchStmt reports whether pos is in an empty switch or select +// statement. +func (c *completer) emptySwitchStmt() bool { + block, ok := c.path[0].(*ast.BlockStmt) + if !ok || len(block.List) > 0 || len(c.path) == 1 { + return false + } + + switch c.path[1].(type) { + case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + return true + default: + return false + } +} + +// populateImportCompletions yields completions for an import path around the cursor. +// +// Completions are suggested at the directory depth of the given import path so +// that we don't overwhelm the user with a large list of possibilities. As an +// example, a completion for the prefix "golang" results in "golang.org/". +// Completions for "golang.org/" yield its subdirectories +// (i.e. "golang.org/x/"). The user is meant to accept completion suggestions +// until they reach a complete import path. +func (c *completer) populateImportCompletions(ctx context.Context, searchImport *ast.ImportSpec) error { + if !strings.HasPrefix(searchImport.Path.Value, `"`) { + return nil + } + + // deepSearch is not valuable for import completions. + c.deepState.enabled = false + + importPath := searchImport.Path.Value + + // Extract the text between the quotes (if any) in an import spec. + // prefix is the part of import path before the cursor. + prefixEnd := c.pos - searchImport.Path.Pos() + prefix := strings.Trim(importPath[:prefixEnd], `"`) + + // The number of directories in the import path gives us the depth at + // which to search. + depth := len(strings.Split(prefix, "/")) - 1 + + content := importPath + start, end := searchImport.Path.Pos(), searchImport.Path.End() + namePrefix, nameSuffix := `"`, `"` + // If a starting quote is present, adjust surrounding to either after the + // cursor or after the first slash (/), except if cursor is at the starting + // quote. Otherwise we provide a completion including the starting quote. + if strings.HasPrefix(importPath, `"`) && c.pos > searchImport.Path.Pos() { + content = content[1:] + start++ + if depth > 0 { + // Adjust textEdit start to replacement range. For ex: if current + // path was "golang.or/x/to<>ols/internal/", where <> is the cursor + // position, start of the replacement range would be after + // "golang.org/x/". + path := strings.SplitAfter(prefix, "/") + numChars := len(strings.Join(path[:len(path)-1], "")) + content = content[numChars:] + start += token.Pos(numChars) + } + namePrefix = "" + } + + // We won't provide an ending quote if one is already present, except if + // cursor is after the ending quote but still in import spec. This is + // because cursor has to be in our textEdit range. + if strings.HasSuffix(importPath, `"`) && c.pos < searchImport.Path.End() { + end-- + content = content[:len(content)-1] + nameSuffix = "" + } + + c.surrounding = &Selection{ + content: content, + cursor: c.pos, + tokFile: c.tokFile, + start: start, + end: end, + mapper: c.mapper, + } + + seenImports := make(map[string]struct{}) + for _, importSpec := range c.file.Imports { + if importSpec.Path.Value == importPath { + continue + } + seenImportPath, err := strconv.Unquote(importSpec.Path.Value) + if err != nil { + return err + } + seenImports[seenImportPath] = struct{}{} + } + + var mu sync.Mutex // guard c.items locally, since searchImports is called in parallel + seen := make(map[string]struct{}) + searchImports := func(pkg imports.ImportFix) { + path := pkg.StmtInfo.ImportPath + if _, ok := seenImports[path]; ok { + return + } + + // Any package path containing fewer directories than the search + // prefix is not a match. + pkgDirList := strings.Split(path, "/") + if len(pkgDirList) < depth+1 { + return + } + pkgToConsider := strings.Join(pkgDirList[:depth+1], "/") + + name := pkgDirList[depth] + // if we're adding an opening quote to completion too, set name to full + // package path since we'll need to overwrite that range. + if namePrefix == `"` { + name = pkgToConsider + } + + score := pkg.Relevance + if len(pkgDirList)-1 == depth { + score *= highScore + } else { + // For incomplete package paths, add a terminal slash to indicate that the + // user should keep triggering completions. + name += "/" + pkgToConsider += "/" + } + + if _, ok := seen[pkgToConsider]; ok { + return + } + seen[pkgToConsider] = struct{}{} + + mu.Lock() + defer mu.Unlock() + + name = namePrefix + name + nameSuffix + obj := types.NewPkgName(0, nil, name, types.NewPackage(pkgToConsider, name)) + c.deepState.enqueue(candidate{ + obj: obj, + detail: fmt.Sprintf("%q", pkgToConsider), + score: score, + }) + } + + c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + return imports.GetImportPaths(ctx, searchImports, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) + }) + return nil +} + +// populateCommentCompletions yields completions for comments preceding or in declarations. +func (c *completer) populateCommentCompletions(ctx context.Context, comment *ast.CommentGroup) { + // If the completion was triggered by a period, ignore it. These types of + // completions will not be useful in comments. + if c.completionContext.triggerCharacter == "." { + return + } + + // Using the comment position find the line after + file := c.pkg.FileSet().File(comment.End()) + if file == nil { + return + } + + // Deep completion doesn't work properly in comments since we don't + // have a type object to complete further. + c.deepState.enabled = false + c.completionContext.commentCompletion = true + + // Documentation isn't useful in comments, since it might end up being the + // comment itself. + c.opts.documentation = false + + commentLine := file.Line(comment.End()) + + // comment is valid, set surrounding as word boundaries around cursor + c.setSurroundingForComment(comment) + + // Using the next line pos, grab and parse the exported symbol on that line + for _, n := range c.file.Decls { + declLine := file.Line(n.Pos()) + // if the comment is not in, directly above or on the same line as a declaration + if declLine != commentLine && declLine != commentLine+1 && + !(n.Pos() <= comment.Pos() && comment.End() <= n.End()) { + continue + } + switch node := n.(type) { + // handle const, vars, and types + case *ast.GenDecl: + for _, spec := range node.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + for _, name := range spec.Names { + if name.String() == "_" { + continue + } + obj := c.pkg.GetTypesInfo().ObjectOf(name) + c.deepState.enqueue(candidate{obj: obj, score: stdScore}) + } + case *ast.TypeSpec: + // add TypeSpec fields to completion + switch typeNode := spec.Type.(type) { + case *ast.StructType: + c.addFieldItems(ctx, typeNode.Fields) + case *ast.FuncType: + c.addFieldItems(ctx, typeNode.Params) + c.addFieldItems(ctx, typeNode.Results) + case *ast.InterfaceType: + c.addFieldItems(ctx, typeNode.Methods) + } + + if spec.Name.String() == "_" { + continue + } + + obj := c.pkg.GetTypesInfo().ObjectOf(spec.Name) + // Type name should get a higher score than fields but not highScore by default + // since field near a comment cursor gets a highScore + score := stdScore * 1.1 + // If type declaration is on the line after comment, give it a highScore. + if declLine == commentLine+1 { + score = highScore + } + + c.deepState.enqueue(candidate{obj: obj, score: score}) + } + } + // handle functions + case *ast.FuncDecl: + c.addFieldItems(ctx, node.Recv) + c.addFieldItems(ctx, node.Type.Params) + c.addFieldItems(ctx, node.Type.Results) + + // collect receiver struct fields + if node.Recv != nil { + for _, fields := range node.Recv.List { + for _, name := range fields.Names { + obj := c.pkg.GetTypesInfo().ObjectOf(name) + if obj == nil { + continue + } + + recvType := obj.Type().Underlying() + if ptr, ok := recvType.(*types.Pointer); ok { + recvType = ptr.Elem() + } + recvStruct, ok := recvType.Underlying().(*types.Struct) + if !ok { + continue + } + for i := 0; i < recvStruct.NumFields(); i++ { + field := recvStruct.Field(i) + c.deepState.enqueue(candidate{obj: field, score: lowScore}) + } + } + } + } + + if node.Name.String() == "_" { + continue + } + + obj := c.pkg.GetTypesInfo().ObjectOf(node.Name) + if obj == nil || obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() { + continue + } + + c.deepState.enqueue(candidate{obj: obj, score: highScore}) + } + } +} + +// sets word boundaries surrounding a cursor for a comment +func (c *completer) setSurroundingForComment(comments *ast.CommentGroup) { + var cursorComment *ast.Comment + for _, comment := range comments.List { + if c.pos >= comment.Pos() && c.pos <= comment.End() { + cursorComment = comment + break + } + } + // if cursor isn't in the comment + if cursorComment == nil { + return + } + + // index of cursor in comment text + cursorOffset := int(c.pos - cursorComment.Pos()) + start, end := cursorOffset, cursorOffset + for start > 0 && isValidIdentifierChar(cursorComment.Text[start-1]) { + start-- + } + for end < len(cursorComment.Text) && isValidIdentifierChar(cursorComment.Text[end]) { + end++ + } + + c.surrounding = &Selection{ + content: cursorComment.Text[start:end], + cursor: c.pos, + tokFile: c.tokFile, + start: token.Pos(int(cursorComment.Slash) + start), + end: token.Pos(int(cursorComment.Slash) + end), + mapper: c.mapper, + } + c.setMatcherFromPrefix(c.surrounding.Prefix()) +} + +// isValidIdentifierChar returns true if a byte is a valid go identifier +// character, i.e. unicode letter or digit or underscore. +func isValidIdentifierChar(char byte) bool { + charRune := rune(char) + return unicode.In(charRune, unicode.Letter, unicode.Digit) || char == '_' +} + +// adds struct fields, interface methods, function declaration fields to completion +func (c *completer) addFieldItems(ctx context.Context, fields *ast.FieldList) { + if fields == nil { + return + } + + cursor := c.surrounding.cursor + for _, field := range fields.List { + for _, name := range field.Names { + if name.String() == "_" { + continue + } + obj := c.pkg.GetTypesInfo().ObjectOf(name) + if obj == nil { + continue + } + + // if we're in a field comment/doc, score that field as more relevant + score := stdScore + if field.Comment != nil && field.Comment.Pos() <= cursor && cursor <= field.Comment.End() { + score = highScore + } else if field.Doc != nil && field.Doc.Pos() <= cursor && cursor <= field.Doc.End() { + score = highScore + } + + c.deepState.enqueue(candidate{obj: obj, score: score}) + } + } +} + +func (c *completer) wantStructFieldCompletions() bool { + clInfo := c.enclosingCompositeLiteral + if clInfo == nil { + return false + } + + return clInfo.isStruct() && (clInfo.inKey || clInfo.maybeInFieldName) +} + +func (c *completer) wantTypeName() bool { + return !c.completionContext.commentCompletion && c.inference.typeName.wantTypeName +} + +// See https://golang.org/issue/36001. Unimported completions are expensive. +const ( + maxUnimportedPackageNames = 5 + unimportedMemberTarget = 100 +) + +// selector finds completions for the specified selector expression. +func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { + c.inference.objChain = objChain(c.pkg.GetTypesInfo(), sel.X) + + // Is sel a qualified identifier? + if id, ok := sel.X.(*ast.Ident); ok { + if pkgName, ok := c.pkg.GetTypesInfo().Uses[id].(*types.PkgName); ok { + // If this package path is not a known dep, it means that it resolves to + // an import path that couldn't be resolved by go/packages. + // + // Try to complete from the package cache. + pkgPath := source.PackagePath(pkgName.Imported().Path()) + if _, ok := c.pkg.Metadata().DepsByPkgPath[pkgPath]; !ok && c.opts.unimported { + if err := c.unimportedMembers(ctx, id); err != nil { + return err + } + } + c.packageMembers(pkgName.Imported(), stdScore, nil, func(cand candidate) { + c.deepState.enqueue(cand) + }) + return nil + } + } + + // Invariant: sel is a true selector. + tv, ok := c.pkg.GetTypesInfo().Types[sel.X] + if ok { + c.methodsAndFields(tv.Type, tv.Addressable(), nil, func(cand candidate) { + c.deepState.enqueue(cand) + }) + + c.addPostfixSnippetCandidates(ctx, sel) + + return nil + } + + // Try unimported packages. + if id, ok := sel.X.(*ast.Ident); ok && c.opts.unimported { + if err := c.unimportedMembers(ctx, id); err != nil { + return err + } + } + return nil +} + +func (c *completer) unimportedMembers(ctx context.Context, id *ast.Ident) error { + // Try loaded packages first. They're relevant, fast, and fully typed. + known, err := c.snapshot.CachedImportPaths(ctx) + if err != nil { + return err + } + + var paths []string + for path, pkg := range known { + if pkg.Name() != id.Name { + continue + } + paths = append(paths, string(path)) + } + + var relevances map[string]float64 + if len(paths) != 0 { + if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { + var err error + relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) + return err + }); err != nil { + return err + } + } + sort.Slice(paths, func(i, j int) bool { + return relevances[paths[i]] > relevances[paths[j]] + }) + + for _, path := range paths { + pkg := known[source.PackagePath(path)] + if pkg.Name() != id.Name { + continue + } + imp := &importInfo{ + importPath: path, + } + if imports.ImportPathToAssumedName(path) != pkg.Name() { + imp.name = pkg.Name() + } + c.packageMembers(pkg, unimportedScore(relevances[path]), imp, func(cand candidate) { + c.deepState.enqueue(cand) + }) + if len(c.items) >= unimportedMemberTarget { + return nil + } + } + + ctx, cancel := context.WithCancel(ctx) + + var mu sync.Mutex + add := func(pkgExport imports.PackageExport) { + mu.Lock() + defer mu.Unlock() + // TODO(adonovan): what if the actual package has a vendor/ prefix? + if _, ok := known[source.PackagePath(pkgExport.Fix.StmtInfo.ImportPath)]; ok { + return // We got this one above. + } + + // Continue with untyped proposals. + pkg := types.NewPackage(pkgExport.Fix.StmtInfo.ImportPath, pkgExport.Fix.IdentName) + for _, export := range pkgExport.Exports { + score := unimportedScore(pkgExport.Fix.Relevance) + c.deepState.enqueue(candidate{ + obj: types.NewVar(0, pkg, export, nil), + score: score, + imp: &importInfo{ + importPath: pkgExport.Fix.StmtInfo.ImportPath, + name: pkgExport.Fix.StmtInfo.Name, + }, + }) + } + if len(c.items) >= unimportedMemberTarget { + cancel() + } + } + + c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + defer cancel() + return imports.GetPackageExports(ctx, add, id.Name, c.filename, c.pkg.GetTypes().Name(), opts.Env) + }) + return nil +} + +// unimportedScore returns a score for an unimported package that is generally +// lower than other candidates. +func unimportedScore(relevance float64) float64 { + return (stdScore + .1*relevance) / 2 +} + +func (c *completer) packageMembers(pkg *types.Package, score float64, imp *importInfo, cb func(candidate)) { + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + cb(candidate{ + obj: obj, + score: score, + imp: imp, + addressable: isVar(obj), + }) + } +} + +func (c *completer) methodsAndFields(typ types.Type, addressable bool, imp *importInfo, cb func(candidate)) { + mset := c.methodSetCache[methodSetKey{typ, addressable}] + if mset == nil { + if addressable && !types.IsInterface(typ) && !isPointer(typ) { + // Add methods of *T, which includes methods with receiver T. + mset = types.NewMethodSet(types.NewPointer(typ)) + } else { + // Add methods of T. + mset = types.NewMethodSet(typ) + } + c.methodSetCache[methodSetKey{typ, addressable}] = mset + } + + if isStarTestingDotF(typ) && addressable { + // is that a sufficient test? (or is more care needed?) + if c.fuzz(typ, mset, imp, cb, c.pkg.FileSet()) { + return + } + } + + for i := 0; i < mset.Len(); i++ { + cb(candidate{ + obj: mset.At(i).Obj(), + score: stdScore, + imp: imp, + addressable: addressable || isPointer(typ), + }) + } + + // Add fields of T. + eachField(typ, func(v *types.Var) { + cb(candidate{ + obj: v, + score: stdScore - 0.01, + imp: imp, + addressable: addressable || isPointer(typ), + }) + }) +} + +// isStarTestingDotF reports whether typ is *testing.F. +func isStarTestingDotF(typ types.Type) bool { + ptr, _ := typ.(*types.Pointer) + if ptr == nil { + return false + } + named, _ := ptr.Elem().(*types.Named) + if named == nil { + return false + } + obj := named.Obj() + // obj.Pkg is nil for the error type. + return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == "F" +} + +// lexical finds completions in the lexical environment. +func (c *completer) lexical(ctx context.Context) error { + var ( + builtinIota = types.Universe.Lookup("iota") + builtinNil = types.Universe.Lookup("nil") + + // TODO(rfindley): only allow "comparable" where it is valid (in constraint + // position or embedded in interface declarations). + // builtinComparable = types.Universe.Lookup("comparable") + ) + + // Track seen variables to avoid showing completions for shadowed variables. + // This works since we look at scopes from innermost to outermost. + seen := make(map[string]struct{}) + + // Process scopes innermost first. + for i, scope := range c.scopes { + if scope == nil { + continue + } + + Names: + for _, name := range scope.Names() { + declScope, obj := scope.LookupParent(name, c.pos) + if declScope != scope { + continue // Name was declared in some enclosing scope, or not at all. + } + + // If obj's type is invalid, find the AST node that defines the lexical block + // containing the declaration of obj. Don't resolve types for packages. + if !isPkgName(obj) && !typeIsValid(obj.Type()) { + // Match the scope to its ast.Node. If the scope is the package scope, + // use the *ast.File as the starting node. + var node ast.Node + if i < len(c.path) { + node = c.path[i] + } else if i == len(c.path) { // use the *ast.File for package scope + node = c.path[i-1] + } + if node != nil { + if resolved := resolveInvalid(c.pkg.FileSet(), obj, node, c.pkg.GetTypesInfo()); resolved != nil { + obj = resolved + } + } + } + + // Don't use LHS of decl in RHS. + for _, ident := range enclosingDeclLHS(c.path) { + if obj.Pos() == ident.Pos() { + continue Names + } + } + + // Don't suggest "iota" outside of const decls. + if obj == builtinIota && !c.inConstDecl() { + continue + } + + // Rank outer scopes lower than inner. + score := stdScore * math.Pow(.99, float64(i)) + + // Dowrank "nil" a bit so it is ranked below more interesting candidates. + if obj == builtinNil { + score /= 2 + } + + // If we haven't already added a candidate for an object with this name. + if _, ok := seen[obj.Name()]; !ok { + seen[obj.Name()] = struct{}{} + c.deepState.enqueue(candidate{ + obj: obj, + score: score, + addressable: isVar(obj), + }) + } + } + } + + if c.inference.objType != nil { + if named, _ := source.Deref(c.inference.objType).(*types.Named); named != nil { + // If we expected a named type, check the type's package for + // completion items. This is useful when the current file hasn't + // imported the type's package yet. + + if named.Obj() != nil && named.Obj().Pkg() != nil { + pkg := named.Obj().Pkg() + + // Make sure the package name isn't already in use by another + // object, and that this file doesn't import the package yet. + // TODO(adonovan): what if pkg.Path has vendor/ prefix? + if _, ok := seen[pkg.Name()]; !ok && pkg != c.pkg.GetTypes() && !alreadyImports(c.file, source.ImportPath(pkg.Path())) { + seen[pkg.Name()] = struct{}{} + obj := types.NewPkgName(0, nil, pkg.Name(), pkg) + imp := &importInfo{ + importPath: pkg.Path(), + } + if imports.ImportPathToAssumedName(pkg.Path()) != pkg.Name() { + imp.name = pkg.Name() + } + c.deepState.enqueue(candidate{ + obj: obj, + score: stdScore, + imp: imp, + }) + } + } + } + } + + if c.opts.unimported { + if err := c.unimportedPackages(ctx, seen); err != nil { + return err + } + } + + if c.inference.typeName.isTypeParam { + // If we are completing a type param, offer each structural type. + // This ensures we suggest "[]int" and "[]float64" for a constraint + // with type union "[]int | []float64". + if t, _ := c.inference.objType.(*types.Interface); t != nil { + terms, _ := typeparams.InterfaceTermSet(t) + for _, term := range terms { + c.injectType(ctx, term.Type()) + } + } + } else { + c.injectType(ctx, c.inference.objType) + } + + // Add keyword completion items appropriate in the current context. + c.addKeywordCompletions() + + return nil +} + +// injectType manufactures candidates based on the given type. This is +// intended for types not discoverable via lexical search, such as +// composite and/or generic types. For example, if the type is "[]int", +// this method makes sure you get candidates "[]int{}" and "[]int" +// (the latter applies when completing a type name). +func (c *completer) injectType(ctx context.Context, t types.Type) { + if t == nil { + return + } + + t = source.Deref(t) + + // If we have an expected type and it is _not_ a named type, handle + // it specially. Non-named types like "[]int" will never be + // considered via a lexical search, so we need to directly inject + // them. Also allow generic types since lexical search does not + // infer instantiated versions of them. + if named, _ := t.(*types.Named); named == nil || typeparams.ForNamed(named).Len() > 0 { + // If our expected type is "[]int", this will add a literal + // candidate of "[]int{}". + c.literal(ctx, t, nil) + + if _, isBasic := t.(*types.Basic); !isBasic { + // If we expect a non-basic type name (e.g. "[]int"), hack up + // a named type whose name is literally "[]int". This allows + // us to reuse our object based completion machinery. + fakeNamedType := candidate{ + obj: types.NewTypeName(token.NoPos, nil, types.TypeString(t, c.qf), t), + score: stdScore, + } + // Make sure the type name matches before considering + // candidate. This cuts down on useless candidates. + if c.matchingTypeName(&fakeNamedType) { + c.deepState.enqueue(fakeNamedType) + } + } + } +} + +func (c *completer) unimportedPackages(ctx context.Context, seen map[string]struct{}) error { + var prefix string + if c.surrounding != nil { + prefix = c.surrounding.Prefix() + } + + // Don't suggest unimported packages if we have absolutely nothing + // to go on. + if prefix == "" { + return nil + } + + count := 0 + + // TODO(adonovan): strength-reduce to a metadata query. + // All that's needed below is Package.{Name,Path}. + // Presumably that can be answered more thoroughly more quickly. + known, err := c.snapshot.CachedImportPaths(ctx) + if err != nil { + return err + } + var paths []string // actually PackagePaths + for path, pkg := range known { + if !strings.HasPrefix(pkg.Name(), prefix) { + continue + } + paths = append(paths, string(path)) + } + + var relevances map[string]float64 + if len(paths) != 0 { + if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { + var err error + relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) + return err + }); err != nil { + return err + } + } + + sort.Slice(paths, func(i, j int) bool { + if relevances[paths[i]] != relevances[paths[j]] { + return relevances[paths[i]] > relevances[paths[j]] + } + + // Fall back to lexical sort to keep truncated set of candidates + // in a consistent order. + return paths[i] < paths[j] + }) + + for _, path := range paths { + pkg := known[source.PackagePath(path)] + if _, ok := seen[pkg.Name()]; ok { + continue + } + imp := &importInfo{ + importPath: path, + } + if imports.ImportPathToAssumedName(path) != pkg.Name() { + imp.name = pkg.Name() + } + if count >= maxUnimportedPackageNames { + return nil + } + c.deepState.enqueue(candidate{ + // Pass an empty *types.Package to disable deep completions. + obj: types.NewPkgName(0, nil, pkg.Name(), types.NewPackage(path, string(pkg.Name()))), + score: unimportedScore(relevances[path]), + imp: imp, + }) + count++ + } + + ctx, cancel := context.WithCancel(ctx) + + var mu sync.Mutex + add := func(pkg imports.ImportFix) { + mu.Lock() + defer mu.Unlock() + if _, ok := seen[pkg.IdentName]; ok { + return + } + if _, ok := relevances[pkg.StmtInfo.ImportPath]; ok { + return + } + + if count >= maxUnimportedPackageNames { + cancel() + return + } + + // Do not add the unimported packages to seen, since we can have + // multiple packages of the same name as completion suggestions, since + // only one will be chosen. + obj := types.NewPkgName(0, nil, pkg.IdentName, types.NewPackage(pkg.StmtInfo.ImportPath, pkg.IdentName)) + c.deepState.enqueue(candidate{ + obj: obj, + score: unimportedScore(pkg.Relevance), + imp: &importInfo{ + importPath: pkg.StmtInfo.ImportPath, + name: pkg.StmtInfo.Name, + }, + }) + count++ + } + c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + defer cancel() + return imports.GetAllCandidates(ctx, add, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) + }) + return nil +} + +// alreadyImports reports whether f has an import with the specified path. +func alreadyImports(f *ast.File, path source.ImportPath) bool { + for _, s := range f.Imports { + if source.UnquoteImportPath(s) == path { + return true + } + } + return false +} + +func (c *completer) inConstDecl() bool { + for _, n := range c.path { + if decl, ok := n.(*ast.GenDecl); ok && decl.Tok == token.CONST { + return true + } + } + return false +} + +// structLiteralFieldName finds completions for struct field names inside a struct literal. +func (c *completer) structLiteralFieldName(ctx context.Context) error { + clInfo := c.enclosingCompositeLiteral + + // Mark fields of the composite literal that have already been set, + // except for the current field. + addedFields := make(map[*types.Var]bool) + for _, el := range clInfo.cl.Elts { + if kvExpr, ok := el.(*ast.KeyValueExpr); ok { + if clInfo.kv == kvExpr { + continue + } + + if key, ok := kvExpr.Key.(*ast.Ident); ok { + if used, ok := c.pkg.GetTypesInfo().Uses[key]; ok { + if usedVar, ok := used.(*types.Var); ok { + addedFields[usedVar] = true + } + } + } + } + } + + deltaScore := 0.0001 + switch t := clInfo.clType.(type) { + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + field := t.Field(i) + if !addedFields[field] { + c.deepState.enqueue(candidate{ + obj: field, + score: highScore - float64(i)*deltaScore, + }) + } + } + + // Add lexical completions if we aren't certain we are in the key part of a + // key-value pair. + if clInfo.maybeInFieldName { + return c.lexical(ctx) + } + default: + return c.lexical(ctx) + } + + return nil +} + +func (cl *compLitInfo) isStruct() bool { + _, ok := cl.clType.(*types.Struct) + return ok +} + +// enclosingCompositeLiteral returns information about the composite literal enclosing the +// position. +func enclosingCompositeLiteral(path []ast.Node, pos token.Pos, info *types.Info) *compLitInfo { + for _, n := range path { + switch n := n.(type) { + case *ast.CompositeLit: + // The enclosing node will be a composite literal if the user has just + // opened the curly brace (e.g. &x{<>) or the completion request is triggered + // from an already completed composite literal expression (e.g. &x{foo: 1, <>}) + // + // The position is not part of the composite literal unless it falls within the + // curly braces (e.g. "foo.Foo<>Struct{}"). + if !(n.Lbrace < pos && pos <= n.Rbrace) { + // Keep searching since we may yet be inside a composite literal. + // For example "Foo{B: Ba<>{}}". + break + } + + tv, ok := info.Types[n] + if !ok { + return nil + } + + clInfo := compLitInfo{ + cl: n, + clType: source.Deref(tv.Type).Underlying(), + } + + var ( + expr ast.Expr + hasKeys bool + ) + for _, el := range n.Elts { + // Remember the expression that the position falls in, if any. + if el.Pos() <= pos && pos <= el.End() { + expr = el + } + + if kv, ok := el.(*ast.KeyValueExpr); ok { + hasKeys = true + // If expr == el then we know the position falls in this expression, + // so also record kv as the enclosing *ast.KeyValueExpr. + if expr == el { + clInfo.kv = kv + break + } + } + } + + if clInfo.kv != nil { + // If in a *ast.KeyValueExpr, we know we are in the key if the position + // is to the left of the colon (e.g. "Foo{F<>: V}". + clInfo.inKey = pos <= clInfo.kv.Colon + } else if hasKeys { + // If we aren't in a *ast.KeyValueExpr but the composite literal has + // other *ast.KeyValueExprs, we must be on the key side of a new + // *ast.KeyValueExpr (e.g. "Foo{F: V, <>}"). + clInfo.inKey = true + } else { + switch clInfo.clType.(type) { + case *types.Struct: + if len(n.Elts) == 0 { + // If the struct literal is empty, next could be a struct field + // name or an expression (e.g. "Foo{<>}" could become "Foo{F:}" + // or "Foo{someVar}"). + clInfo.maybeInFieldName = true + } else if len(n.Elts) == 1 { + // If there is one expression and the position is in that expression + // and the expression is an identifier, we may be writing a field + // name or an expression (e.g. "Foo{F<>}"). + _, clInfo.maybeInFieldName = expr.(*ast.Ident) + } + case *types.Map: + // If we aren't in a *ast.KeyValueExpr we must be adding a new key + // to the map. + clInfo.inKey = true + } + } + + return &clInfo + default: + if breaksExpectedTypeInference(n, pos) { + return nil + } + } + } + + return nil +} + +// enclosingFunction returns the signature and body of the function +// enclosing the given position. +func enclosingFunction(path []ast.Node, info *types.Info) *funcInfo { + for _, node := range path { + switch t := node.(type) { + case *ast.FuncDecl: + if obj, ok := info.Defs[t.Name]; ok { + return &funcInfo{ + sig: obj.Type().(*types.Signature), + body: t.Body, + } + } + case *ast.FuncLit: + if typ, ok := info.Types[t]; ok { + if sig, _ := typ.Type.(*types.Signature); sig == nil { + // golang/go#49397: it should not be possible, but we somehow arrived + // here with a non-signature type, most likely due to AST mangling + // such that node.Type is not a FuncType. + return nil + } + return &funcInfo{ + sig: typ.Type.(*types.Signature), + body: t.Body, + } + } + } + } + return nil +} + +func (c *completer) expectedCompositeLiteralType() types.Type { + clInfo := c.enclosingCompositeLiteral + switch t := clInfo.clType.(type) { + case *types.Slice: + if clInfo.inKey { + return types.Typ[types.UntypedInt] + } + return t.Elem() + case *types.Array: + if clInfo.inKey { + return types.Typ[types.UntypedInt] + } + return t.Elem() + case *types.Map: + if clInfo.inKey { + return t.Key() + } + return t.Elem() + case *types.Struct: + // If we are completing a key (i.e. field name), there is no expected type. + if clInfo.inKey { + return nil + } + + // If we are in a key-value pair, but not in the key, then we must be on the + // value side. The expected type of the value will be determined from the key. + if clInfo.kv != nil { + if key, ok := clInfo.kv.Key.(*ast.Ident); ok { + for i := 0; i < t.NumFields(); i++ { + if field := t.Field(i); field.Name() == key.Name { + return field.Type() + } + } + } + } else { + // If we aren't in a key-value pair and aren't in the key, we must be using + // implicit field names. + + // The order of the literal fields must match the order in the struct definition. + // Find the element that the position belongs to and suggest that field's type. + if i := exprAtPos(c.pos, clInfo.cl.Elts); i < t.NumFields() { + return t.Field(i).Type() + } + } + } + return nil +} + +// typeMod represents an operator that changes the expected type. +type typeMod struct { + mod typeModKind + arrayLen int64 +} + +type typeModKind int + +const ( + dereference typeModKind = iota // pointer indirection: "*" + reference // adds level of pointer: "&" for values, "*" for type names + chanRead // channel read operator: "<-" + sliceType // make a slice type: "[]" in "[]int" + arrayType // make an array type: "[2]" in "[2]int" + invoke // make a function call: "()" in "foo()" + takeSlice // take slice of array: "[:]" in "foo[:]" + takeDotDotDot // turn slice into variadic args: "..." in "foo..." + index // index into slice/array: "[0]" in "foo[0]" +) + +type objKind int + +const ( + kindAny objKind = 0 + kindArray objKind = 1 << iota + kindSlice + kindChan + kindMap + kindStruct + kindString + kindInt + kindBool + kindBytes + kindPtr + kindFloat + kindComplex + kindError + kindStringer + kindFunc +) + +// penalizedObj represents an object that should be disfavored as a +// completion candidate. +type penalizedObj struct { + // objChain is the full "chain", e.g. "foo.bar().baz" becomes + // []types.Object{foo, bar, baz}. + objChain []types.Object + // penalty is score penalty in the range (0, 1). + penalty float64 +} + +// candidateInference holds information we have inferred about a type that can be +// used at the current position. +type candidateInference struct { + // objType is the desired type of an object used at the query position. + objType types.Type + + // objKind is a mask of expected kinds of types such as "map", "slice", etc. + objKind objKind + + // variadic is true if we are completing the initial variadic + // parameter. For example: + // append([]T{}, <>) // objType=T variadic=true + // append([]T{}, T{}, <>) // objType=T variadic=false + variadic bool + + // modifiers are prefixes such as "*", "&" or "<-" that influence how + // a candidate type relates to the expected type. + modifiers []typeMod + + // convertibleTo is a type our candidate type must be convertible to. + convertibleTo types.Type + + // typeName holds information about the expected type name at + // position, if any. + typeName typeNameInference + + // assignees are the types that would receive a function call's + // results at the position. For example: + // + // foo := 123 + // foo, bar := <> + // + // at "<>", the assignees are [int, ]. + assignees []types.Type + + // variadicAssignees is true if we could be completing an inner + // function call that fills out an outer function call's variadic + // params. For example: + // + // func foo(int, ...string) {} + // + // foo(<>) // variadicAssignees=true + // foo(bar<>) // variadicAssignees=true + // foo(bar, baz<>) // variadicAssignees=false + variadicAssignees bool + + // penalized holds expressions that should be disfavored as + // candidates. For example, it tracks expressions already used in a + // switch statement's other cases. Each expression is tracked using + // its entire object "chain" allowing differentiation between + // "a.foo" and "b.foo" when "a" and "b" are the same type. + penalized []penalizedObj + + // objChain contains the chain of objects representing the + // surrounding *ast.SelectorExpr. For example, if we are completing + // "foo.bar.ba<>", objChain will contain []types.Object{foo, bar}. + objChain []types.Object +} + +// typeNameInference holds information about the expected type name at +// position. +type typeNameInference struct { + // wantTypeName is true if we expect the name of a type. + wantTypeName bool + + // modifiers are prefixes such as "*", "&" or "<-" that influence how + // a candidate type relates to the expected type. + modifiers []typeMod + + // assertableFrom is a type that must be assertable to our candidate type. + assertableFrom types.Type + + // wantComparable is true if we want a comparable type. + wantComparable bool + + // seenTypeSwitchCases tracks types that have already been used by + // the containing type switch. + seenTypeSwitchCases []types.Type + + // compLitType is true if we are completing a composite literal type + // name, e.g "foo<>{}". + compLitType bool + + // isTypeParam is true if we are completing a type instantiation parameter + isTypeParam bool +} + +// expectedCandidate returns information about the expected candidate +// for an expression at the query position. +func expectedCandidate(ctx context.Context, c *completer) (inf candidateInference) { + inf.typeName = expectTypeName(c) + + if c.enclosingCompositeLiteral != nil { + inf.objType = c.expectedCompositeLiteralType() + } + +Nodes: + for i, node := range c.path { + switch node := node.(type) { + case *ast.BinaryExpr: + // Determine if query position comes from left or right of op. + e := node.X + if c.pos < node.OpPos { + e = node.Y + } + if tv, ok := c.pkg.GetTypesInfo().Types[e]; ok { + switch node.Op { + case token.LAND, token.LOR: + // Don't infer "bool" type for "&&" or "||". Often you want + // to compose a boolean expression from non-boolean + // candidates. + default: + inf.objType = tv.Type + } + break Nodes + } + case *ast.AssignStmt: + // Only rank completions if you are on the right side of the token. + if c.pos > node.TokPos { + i := exprAtPos(c.pos, node.Rhs) + if i >= len(node.Lhs) { + i = len(node.Lhs) - 1 + } + if tv, ok := c.pkg.GetTypesInfo().Types[node.Lhs[i]]; ok { + inf.objType = tv.Type + } + + // If we have a single expression on the RHS, record the LHS + // assignees so we can favor multi-return function calls with + // matching result values. + if len(node.Rhs) <= 1 { + for _, lhs := range node.Lhs { + inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(lhs)) + } + } else { + // Otherwise, record our single assignee, even if its type is + // not available. We use this info to downrank functions + // with the wrong number of result values. + inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(node.Lhs[i])) + } + } + return inf + case *ast.ValueSpec: + if node.Type != nil && c.pos > node.Type.End() { + inf.objType = c.pkg.GetTypesInfo().TypeOf(node.Type) + } + return inf + case *ast.CallExpr: + // Only consider CallExpr args if position falls between parens. + if node.Lparen < c.pos && c.pos <= node.Rparen { + // For type conversions like "int64(foo)" we can only infer our + // desired type is convertible to int64. + if typ := typeConversion(node, c.pkg.GetTypesInfo()); typ != nil { + inf.convertibleTo = typ + break Nodes + } + + sig, _ := c.pkg.GetTypesInfo().Types[node.Fun].Type.(*types.Signature) + + if sig != nil && typeparams.ForSignature(sig).Len() > 0 { + // If we are completing a generic func call, re-check the call expression. + // This allows type param inference to work in cases like: + // + // func foo[T any](T) {} + // foo[int](<>) // <- get "int" completions instead of "T" + // + // TODO: remove this after https://go.dev/issue/52503 + info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)} + types.CheckExpr(c.pkg.FileSet(), c.pkg.GetTypes(), node.Fun.Pos(), node.Fun, info) + sig, _ = info.Types[node.Fun].Type.(*types.Signature) + } + + if sig != nil { + inf = c.expectedCallParamType(inf, node, sig) + } + + if funIdent, ok := node.Fun.(*ast.Ident); ok { + obj := c.pkg.GetTypesInfo().ObjectOf(funIdent) + + if obj != nil && obj.Parent() == types.Universe { + // Defer call to builtinArgType so we can provide it the + // inferred type from its parent node. + defer func() { + inf = c.builtinArgType(obj, node, inf) + inf.objKind = c.builtinArgKind(ctx, obj, node) + }() + + // The expected type of builtin arguments like append() is + // the expected type of the builtin call itself. For + // example: + // + // var foo []int = append(<>) + // + // To find the expected type at <> we "skip" the append() + // node and get the expected type one level up, which is + // []int. + continue Nodes + } + } + + return inf + } + case *ast.ReturnStmt: + if c.enclosingFunc != nil { + sig := c.enclosingFunc.sig + // Find signature result that corresponds to our return statement. + if resultIdx := exprAtPos(c.pos, node.Results); resultIdx < len(node.Results) { + if resultIdx < sig.Results().Len() { + inf.objType = sig.Results().At(resultIdx).Type() + } + } + } + return inf + case *ast.CaseClause: + if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, node).(*ast.SwitchStmt); ok { + if tv, ok := c.pkg.GetTypesInfo().Types[swtch.Tag]; ok { + inf.objType = tv.Type + + // Record which objects have already been used in the case + // statements so we don't suggest them again. + for _, cc := range swtch.Body.List { + for _, caseExpr := range cc.(*ast.CaseClause).List { + // Don't record the expression we are currently completing. + if caseExpr.Pos() < c.pos && c.pos <= caseExpr.End() { + continue + } + + if objs := objChain(c.pkg.GetTypesInfo(), caseExpr); len(objs) > 0 { + inf.penalized = append(inf.penalized, penalizedObj{objChain: objs, penalty: 0.1}) + } + } + } + } + } + return inf + case *ast.SliceExpr: + // Make sure position falls within the brackets (e.g. "foo[a:<>]"). + if node.Lbrack < c.pos && c.pos <= node.Rbrack { + inf.objType = types.Typ[types.UntypedInt] + } + return inf + case *ast.IndexExpr: + // Make sure position falls within the brackets (e.g. "foo[<>]"). + if node.Lbrack < c.pos && c.pos <= node.Rbrack { + if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok { + switch t := tv.Type.Underlying().(type) { + case *types.Map: + inf.objType = t.Key() + case *types.Slice, *types.Array: + inf.objType = types.Typ[types.UntypedInt] + } + + if ct := expectedConstraint(tv.Type, 0); ct != nil { + inf.objType = ct + inf.typeName.wantTypeName = true + inf.typeName.isTypeParam = true + } + } + } + return inf + case *typeparams.IndexListExpr: + if node.Lbrack < c.pos && c.pos <= node.Rbrack { + if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok { + if ct := expectedConstraint(tv.Type, exprAtPos(c.pos, node.Indices)); ct != nil { + inf.objType = ct + inf.typeName.wantTypeName = true + inf.typeName.isTypeParam = true + } + } + } + return inf + case *ast.SendStmt: + // Make sure we are on right side of arrow (e.g. "foo <- <>"). + if c.pos > node.Arrow+1 { + if tv, ok := c.pkg.GetTypesInfo().Types[node.Chan]; ok { + if ch, ok := tv.Type.Underlying().(*types.Chan); ok { + inf.objType = ch.Elem() + } + } + } + return inf + case *ast.RangeStmt: + if source.NodeContains(node.X, c.pos) { + inf.objKind |= kindSlice | kindArray | kindMap | kindString + if node.Value == nil { + inf.objKind |= kindChan + } + } + return inf + case *ast.StarExpr: + inf.modifiers = append(inf.modifiers, typeMod{mod: dereference}) + case *ast.UnaryExpr: + switch node.Op { + case token.AND: + inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) + case token.ARROW: + inf.modifiers = append(inf.modifiers, typeMod{mod: chanRead}) + } + case *ast.DeferStmt, *ast.GoStmt: + inf.objKind |= kindFunc + return inf + default: + if breaksExpectedTypeInference(node, c.pos) { + return inf + } + } + } + + return inf +} + +func (c *completer) expectedCallParamType(inf candidateInference, node *ast.CallExpr, sig *types.Signature) candidateInference { + numParams := sig.Params().Len() + if numParams == 0 { + return inf + } + + exprIdx := exprAtPos(c.pos, node.Args) + + // If we have one or zero arg expressions, we may be + // completing to a function call that returns multiple + // values, in turn getting passed in to the surrounding + // call. Record the assignees so we can favor function + // calls that return matching values. + if len(node.Args) <= 1 && exprIdx == 0 { + for i := 0; i < sig.Params().Len(); i++ { + inf.assignees = append(inf.assignees, sig.Params().At(i).Type()) + } + + // Record that we may be completing into variadic parameters. + inf.variadicAssignees = sig.Variadic() + } + + // Make sure not to run past the end of expected parameters. + if exprIdx >= numParams { + inf.objType = sig.Params().At(numParams - 1).Type() + } else { + inf.objType = sig.Params().At(exprIdx).Type() + } + + if sig.Variadic() && exprIdx >= (numParams-1) { + // If we are completing a variadic param, deslice the variadic type. + inf.objType = deslice(inf.objType) + // Record whether we are completing the initial variadic param. + inf.variadic = exprIdx == numParams-1 && len(node.Args) <= numParams + + // Check if we can infer object kind from printf verb. + inf.objKind |= printfArgKind(c.pkg.GetTypesInfo(), node, exprIdx) + } + + // If our expected type is an uninstantiated generic type param, + // swap to the constraint which will do a decent job filtering + // candidates. + if tp, _ := inf.objType.(*typeparams.TypeParam); tp != nil { + inf.objType = tp.Constraint() + } + + return inf +} + +func expectedConstraint(t types.Type, idx int) types.Type { + var tp *typeparams.TypeParamList + if named, _ := t.(*types.Named); named != nil { + tp = typeparams.ForNamed(named) + } else if sig, _ := t.Underlying().(*types.Signature); sig != nil { + tp = typeparams.ForSignature(sig) + } + if tp == nil || idx >= tp.Len() { + return nil + } + return tp.At(idx).Constraint() +} + +// objChain decomposes e into a chain of objects if possible. For +// example, "foo.bar().baz" will yield []types.Object{foo, bar, baz}. +// If any part can't be turned into an object, return nil. +func objChain(info *types.Info, e ast.Expr) []types.Object { + var objs []types.Object + + for e != nil { + switch n := e.(type) { + case *ast.Ident: + obj := info.ObjectOf(n) + if obj == nil { + return nil + } + objs = append(objs, obj) + e = nil + case *ast.SelectorExpr: + obj := info.ObjectOf(n.Sel) + if obj == nil { + return nil + } + objs = append(objs, obj) + e = n.X + case *ast.CallExpr: + if len(n.Args) > 0 { + return nil + } + e = n.Fun + default: + return nil + } + } + + // Reverse order so the layout matches the syntactic order. + for i := 0; i < len(objs)/2; i++ { + objs[i], objs[len(objs)-1-i] = objs[len(objs)-1-i], objs[i] + } + + return objs +} + +// applyTypeModifiers applies the list of type modifiers to a type. +// It returns nil if the modifiers could not be applied. +func (ci candidateInference) applyTypeModifiers(typ types.Type, addressable bool) types.Type { + for _, mod := range ci.modifiers { + switch mod.mod { + case dereference: + // For every "*" indirection operator, remove a pointer layer + // from candidate type. + if ptr, ok := typ.Underlying().(*types.Pointer); ok { + typ = ptr.Elem() + } else { + return nil + } + case reference: + // For every "&" address operator, add another pointer layer to + // candidate type, if the candidate is addressable. + if addressable { + typ = types.NewPointer(typ) + } else { + return nil + } + case chanRead: + // For every "<-" operator, remove a layer of channelness. + if ch, ok := typ.(*types.Chan); ok { + typ = ch.Elem() + } else { + return nil + } + } + } + + return typ +} + +// applyTypeNameModifiers applies the list of type modifiers to a type name. +func (ci candidateInference) applyTypeNameModifiers(typ types.Type) types.Type { + for _, mod := range ci.typeName.modifiers { + switch mod.mod { + case reference: + typ = types.NewPointer(typ) + case arrayType: + typ = types.NewArray(typ, mod.arrayLen) + case sliceType: + typ = types.NewSlice(typ) + } + } + return typ +} + +// matchesVariadic returns true if we are completing a variadic +// parameter and candType is a compatible slice type. +func (ci candidateInference) matchesVariadic(candType types.Type) bool { + return ci.variadic && ci.objType != nil && assignableTo(candType, types.NewSlice(ci.objType)) +} + +// findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or +// *ast.TypeSwitchStmt. path should start from the case clause's first ancestor. +func findSwitchStmt(path []ast.Node, pos token.Pos, c *ast.CaseClause) ast.Stmt { + // Make sure position falls within a "case <>:" clause. + if exprAtPos(pos, c.List) >= len(c.List) { + return nil + } + // A case clause is always nested within a block statement in a switch statement. + if len(path) < 2 { + return nil + } + if _, ok := path[0].(*ast.BlockStmt); !ok { + return nil + } + switch s := path[1].(type) { + case *ast.SwitchStmt: + return s + case *ast.TypeSwitchStmt: + return s + default: + return nil + } +} + +// breaksExpectedTypeInference reports if an expression node's type is unrelated +// to its child expression node types. For example, "Foo{Bar: x.Baz(<>)}" should +// expect a function argument, not a composite literal value. +func breaksExpectedTypeInference(n ast.Node, pos token.Pos) bool { + switch n := n.(type) { + case *ast.CompositeLit: + // Doesn't break inference if pos is in type name. + // For example: "Foo<>{Bar: 123}" + return !source.NodeContains(n.Type, pos) + case *ast.CallExpr: + // Doesn't break inference if pos is in func name. + // For example: "Foo<>(123)" + return !source.NodeContains(n.Fun, pos) + case *ast.FuncLit, *ast.IndexExpr, *ast.SliceExpr: + return true + default: + return false + } +} + +// expectTypeName returns information about the expected type name at position. +func expectTypeName(c *completer) typeNameInference { + var inf typeNameInference + +Nodes: + for i, p := range c.path { + switch n := p.(type) { + case *ast.FieldList: + // Expect a type name if pos is in a FieldList. This applies to + // FuncType params/results, FuncDecl receiver, StructType, and + // InterfaceType. We don't need to worry about the field name + // because completion bails out early if pos is in an *ast.Ident + // that defines an object. + inf.wantTypeName = true + break Nodes + case *ast.CaseClause: + // Expect type names in type switch case clauses. + if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, n).(*ast.TypeSwitchStmt); ok { + // The case clause types must be assertable from the type switch parameter. + ast.Inspect(swtch.Assign, func(n ast.Node) bool { + if ta, ok := n.(*ast.TypeAssertExpr); ok { + inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(ta.X) + return false + } + return true + }) + inf.wantTypeName = true + + // Track the types that have already been used in this + // switch's case statements so we don't recommend them. + for _, e := range swtch.Body.List { + for _, typeExpr := range e.(*ast.CaseClause).List { + // Skip if type expression contains pos. We don't want to + // count it as already used if the user is completing it. + if typeExpr.Pos() < c.pos && c.pos <= typeExpr.End() { + continue + } + + if t := c.pkg.GetTypesInfo().TypeOf(typeExpr); t != nil { + inf.seenTypeSwitchCases = append(inf.seenTypeSwitchCases, t) + } + } + } + + break Nodes + } + return typeNameInference{} + case *ast.TypeAssertExpr: + // Expect type names in type assert expressions. + if n.Lparen < c.pos && c.pos <= n.Rparen { + // The type in parens must be assertable from the expression type. + inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(n.X) + inf.wantTypeName = true + break Nodes + } + return typeNameInference{} + case *ast.StarExpr: + inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) + case *ast.CompositeLit: + // We want a type name if position is in the "Type" part of a + // composite literal (e.g. "Foo<>{}"). + if n.Type != nil && n.Type.Pos() <= c.pos && c.pos <= n.Type.End() { + inf.wantTypeName = true + inf.compLitType = true + + if i < len(c.path)-1 { + // Track preceding "&" operator. Technically it applies to + // the composite literal and not the type name, but if + // affects our type completion nonetheless. + if u, ok := c.path[i+1].(*ast.UnaryExpr); ok && u.Op == token.AND { + inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) + } + } + } + break Nodes + case *ast.ArrayType: + // If we are inside the "Elt" part of an array type, we want a type name. + if n.Elt.Pos() <= c.pos && c.pos <= n.Elt.End() { + inf.wantTypeName = true + if n.Len == nil { + // No "Len" expression means a slice type. + inf.modifiers = append(inf.modifiers, typeMod{mod: sliceType}) + } else { + // Try to get the array type using the constant value of "Len". + tv, ok := c.pkg.GetTypesInfo().Types[n.Len] + if ok && tv.Value != nil && tv.Value.Kind() == constant.Int { + if arrayLen, ok := constant.Int64Val(tv.Value); ok { + inf.modifiers = append(inf.modifiers, typeMod{mod: arrayType, arrayLen: arrayLen}) + } + } + } + + // ArrayTypes can be nested, so keep going if our parent is an + // ArrayType. + if i < len(c.path)-1 { + if _, ok := c.path[i+1].(*ast.ArrayType); ok { + continue Nodes + } + } + + break Nodes + } + case *ast.MapType: + inf.wantTypeName = true + if n.Key != nil { + inf.wantComparable = source.NodeContains(n.Key, c.pos) + } else { + // If the key is empty, assume we are completing the key if + // pos is directly after the "map[". + inf.wantComparable = c.pos == n.Pos()+token.Pos(len("map[")) + } + break Nodes + case *ast.ValueSpec: + inf.wantTypeName = source.NodeContains(n.Type, c.pos) + break Nodes + case *ast.TypeSpec: + inf.wantTypeName = source.NodeContains(n.Type, c.pos) + default: + if breaksExpectedTypeInference(p, c.pos) { + return typeNameInference{} + } + } + } + + return inf +} + +func (c *completer) fakeObj(T types.Type) *types.Var { + return types.NewVar(token.NoPos, c.pkg.GetTypes(), "", T) +} + +// derivableTypes iterates types you can derive from t. For example, +// from "foo" we might derive "&foo", and "foo()". +func derivableTypes(t types.Type, addressable bool, f func(t types.Type, addressable bool, mod typeModKind) bool) bool { + switch t := t.Underlying().(type) { + case *types.Signature: + // If t is a func type with a single result, offer the result type. + if t.Results().Len() == 1 && f(t.Results().At(0).Type(), false, invoke) { + return true + } + case *types.Array: + if f(t.Elem(), true, index) { + return true + } + // Try converting array to slice. + if f(types.NewSlice(t.Elem()), false, takeSlice) { + return true + } + case *types.Pointer: + if f(t.Elem(), false, dereference) { + return true + } + case *types.Slice: + if f(t.Elem(), true, index) { + return true + } + case *types.Map: + if f(t.Elem(), false, index) { + return true + } + case *types.Chan: + if f(t.Elem(), false, chanRead) { + return true + } + } + + // Check if c is addressable and a pointer to c matches our type inference. + if addressable && f(types.NewPointer(t), false, reference) { + return true + } + + return false +} + +// anyCandType reports whether f returns true for any candidate type +// derivable from c. It searches up to three levels of type +// modification. For example, given "foo" we could discover "***foo" +// or "*foo()". +func (c *candidate) anyCandType(f func(t types.Type, addressable bool) bool) bool { + if c.obj == nil || c.obj.Type() == nil { + return false + } + + const maxDepth = 3 + + var searchTypes func(t types.Type, addressable bool, mods []typeModKind) bool + searchTypes = func(t types.Type, addressable bool, mods []typeModKind) bool { + if f(t, addressable) { + if len(mods) > 0 { + newMods := make([]typeModKind, len(mods)+len(c.mods)) + copy(newMods, mods) + copy(newMods[len(mods):], c.mods) + c.mods = newMods + } + return true + } + + if len(mods) == maxDepth { + return false + } + + return derivableTypes(t, addressable, func(t types.Type, addressable bool, mod typeModKind) bool { + return searchTypes(t, addressable, append(mods, mod)) + }) + } + + return searchTypes(c.obj.Type(), c.addressable, make([]typeModKind, 0, maxDepth)) +} + +// matchingCandidate reports whether cand matches our type inferences. +// It mutates cand's score in certain cases. +func (c *completer) matchingCandidate(cand *candidate) bool { + if c.completionContext.commentCompletion { + return false + } + + // Bail out early if we are completing a field name in a composite literal. + if v, ok := cand.obj.(*types.Var); ok && v.IsField() && c.wantStructFieldCompletions() { + return true + } + + if isTypeName(cand.obj) { + return c.matchingTypeName(cand) + } else if c.wantTypeName() { + // If we want a type, a non-type object never matches. + return false + } + + if c.inference.candTypeMatches(cand) { + return true + } + + candType := cand.obj.Type() + if candType == nil { + return false + } + + if sig, ok := candType.Underlying().(*types.Signature); ok { + if c.inference.assigneesMatch(cand, sig) { + // Invoke the candidate if its results are multi-assignable. + cand.mods = append(cand.mods, invoke) + return true + } + } + + // Default to invoking *types.Func candidates. This is so function + // completions in an empty statement (or other cases with no expected type) + // are invoked by default. + if isFunc(cand.obj) { + cand.mods = append(cand.mods, invoke) + } + + return false +} + +// candTypeMatches reports whether cand makes a good completion +// candidate given the candidate inference. cand's score may be +// mutated to downrank the candidate in certain situations. +func (ci *candidateInference) candTypeMatches(cand *candidate) bool { + var ( + expTypes = make([]types.Type, 0, 2) + variadicType types.Type + ) + if ci.objType != nil { + expTypes = append(expTypes, ci.objType) + + if ci.variadic { + variadicType = types.NewSlice(ci.objType) + expTypes = append(expTypes, variadicType) + } + } + + return cand.anyCandType(func(candType types.Type, addressable bool) bool { + // Take into account any type modifiers on the expected type. + candType = ci.applyTypeModifiers(candType, addressable) + if candType == nil { + return false + } + + if ci.convertibleTo != nil && convertibleTo(candType, ci.convertibleTo) { + return true + } + + for _, expType := range expTypes { + if isEmptyInterface(expType) { + continue + } + + matches := ci.typeMatches(expType, candType) + if !matches { + // If candType doesn't otherwise match, consider if we can + // convert candType directly to expType. + if considerTypeConversion(candType, expType, cand.path) { + cand.convertTo = expType + // Give a major score penalty so we always prefer directly + // assignable candidates, all else equal. + cand.score *= 0.5 + return true + } + + continue + } + + if expType == variadicType { + cand.mods = append(cand.mods, takeDotDotDot) + } + + // Lower candidate score for untyped conversions. This avoids + // ranking untyped constants above candidates with an exact type + // match. Don't lower score of builtin constants, e.g. "true". + if isUntyped(candType) && !types.Identical(candType, expType) && cand.obj.Parent() != types.Universe { + // Bigger penalty for deep completions into other packages to + // avoid random constants from other packages popping up all + // the time. + if len(cand.path) > 0 && isPkgName(cand.path[0]) { + cand.score *= 0.5 + } else { + cand.score *= 0.75 + } + } + + return true + } + + // If we don't have a specific expected type, fall back to coarser + // object kind checks. + if ci.objType == nil || isEmptyInterface(ci.objType) { + // If we were able to apply type modifiers to our candidate type, + // count that as a match. For example: + // + // var foo chan int + // <-fo<> + // + // We were able to apply the "<-" type modifier to "foo", so "foo" + // matches. + if len(ci.modifiers) > 0 { + return true + } + + // If we didn't have an exact type match, check if our object kind + // matches. + if ci.kindMatches(candType) { + if ci.objKind == kindFunc { + cand.mods = append(cand.mods, invoke) + } + return true + } + } + + return false + }) +} + +// considerTypeConversion returns true if we should offer a completion +// automatically converting "from" to "to". +func considerTypeConversion(from, to types.Type, path []types.Object) bool { + // Don't offer to convert deep completions from other packages. + // Otherwise there are many random package level consts/vars that + // pop up as candidates all the time. + if len(path) > 0 && isPkgName(path[0]) { + return false + } + + if _, ok := from.(*typeparams.TypeParam); ok { + return false + } + + if !convertibleTo(from, to) { + return false + } + + // Don't offer to convert ints to strings since that probably + // doesn't do what the user wants. + if isBasicKind(from, types.IsInteger) && isBasicKind(to, types.IsString) { + return false + } + + return true +} + +// typeMatches reports whether an object of candType makes a good +// completion candidate given the expected type expType. +func (ci *candidateInference) typeMatches(expType, candType types.Type) bool { + // Handle untyped values specially since AssignableTo gives false negatives + // for them (see https://golang.org/issue/32146). + if candBasic, ok := candType.Underlying().(*types.Basic); ok { + if expBasic, ok := expType.Underlying().(*types.Basic); ok { + // Note that the candidate and/or the expected can be untyped. + // In "fo<> == 100" the expected type is untyped, and the + // candidate could also be an untyped constant. + + // Sort by is_untyped and then by is_int to simplify below logic. + a, b := candBasic.Info(), expBasic.Info() + if a&types.IsUntyped == 0 || (b&types.IsInteger > 0 && b&types.IsUntyped > 0) { + a, b = b, a + } + + // If at least one is untyped... + if a&types.IsUntyped > 0 { + switch { + // Untyped integers are compatible with floats. + case a&types.IsInteger > 0 && b&types.IsFloat > 0: + return true + + // Check if their constant kind (bool|int|float|complex|string) matches. + // This doesn't take into account the constant value, so there will be some + // false positives due to integer sign and overflow. + case a&types.IsConstType == b&types.IsConstType: + return true + } + } + } + } + + // AssignableTo covers the case where the types are equal, but also handles + // cases like assigning a concrete type to an interface type. + return assignableTo(candType, expType) +} + +// kindMatches reports whether candType's kind matches our expected +// kind (e.g. slice, map, etc.). +func (ci *candidateInference) kindMatches(candType types.Type) bool { + return ci.objKind > 0 && ci.objKind&candKind(candType) > 0 +} + +// assigneesMatch reports whether an invocation of sig matches the +// number and type of any assignees. +func (ci *candidateInference) assigneesMatch(cand *candidate, sig *types.Signature) bool { + if len(ci.assignees) == 0 { + return false + } + + // Uniresult functions are always usable and are handled by the + // normal, non-assignees type matching logic. + if sig.Results().Len() == 1 { + return false + } + + // Don't prefer completing into func(...interface{}) calls since all + // functions would match. + if ci.variadicAssignees && len(ci.assignees) == 1 && isEmptyInterface(deslice(ci.assignees[0])) { + return false + } + + var numberOfResultsCouldMatch bool + if ci.variadicAssignees { + numberOfResultsCouldMatch = sig.Results().Len() >= len(ci.assignees)-1 + } else { + numberOfResultsCouldMatch = sig.Results().Len() == len(ci.assignees) + } + + // If our signature doesn't return the right number of values, it's + // not a match, so downrank it. For example: + // + // var foo func() (int, int) + // a, b, c := <> // downrank "foo()" since it only returns two values + if !numberOfResultsCouldMatch { + cand.score /= 2 + return false + } + + // If at least one assignee has a valid type, and all valid + // assignees match the corresponding sig result value, the signature + // is a match. + allMatch := false + for i := 0; i < sig.Results().Len(); i++ { + var assignee types.Type + + // If we are completing into variadic parameters, deslice the + // expected variadic type. + if ci.variadicAssignees && i >= len(ci.assignees)-1 { + assignee = ci.assignees[len(ci.assignees)-1] + if elem := deslice(assignee); elem != nil { + assignee = elem + } + } else { + assignee = ci.assignees[i] + } + + if assignee == nil || assignee == types.Typ[types.Invalid] { + continue + } + + allMatch = ci.typeMatches(assignee, sig.Results().At(i).Type()) + if !allMatch { + break + } + } + return allMatch +} + +func (c *completer) matchingTypeName(cand *candidate) bool { + if !c.wantTypeName() { + return false + } + + typeMatches := func(candType types.Type) bool { + // Take into account any type name modifier prefixes. + candType = c.inference.applyTypeNameModifiers(candType) + + if from := c.inference.typeName.assertableFrom; from != nil { + // Don't suggest the starting type in type assertions. For example, + // if "foo" is an io.Writer, don't suggest "foo.(io.Writer)". + if types.Identical(from, candType) { + return false + } + + if intf, ok := from.Underlying().(*types.Interface); ok { + if !types.AssertableTo(intf, candType) { + return false + } + } + } + + if c.inference.typeName.wantComparable && !types.Comparable(candType) { + return false + } + + // Skip this type if it has already been used in another type + // switch case. + for _, seen := range c.inference.typeName.seenTypeSwitchCases { + if types.Identical(candType, seen) { + return false + } + } + + // We can expect a type name and have an expected type in cases like: + // + // var foo []int + // foo = []i<> + // + // Where our expected type is "[]int", and we expect a type name. + if c.inference.objType != nil { + return assignableTo(candType, c.inference.objType) + } + + // Default to saying any type name is a match. + return true + } + + t := cand.obj.Type() + + if typeMatches(t) { + return true + } + + if !types.IsInterface(t) && typeMatches(types.NewPointer(t)) { + if c.inference.typeName.compLitType { + // If we are completing a composite literal type as in + // "foo<>{}", to make a pointer we must prepend "&". + cand.mods = append(cand.mods, reference) + } else { + // If we are completing a normal type name such as "foo<>", to + // make a pointer we must prepend "*". + cand.mods = append(cand.mods, dereference) + } + return true + } + + return false +} + +var ( + // "interface { Error() string }" (i.e. error) + errorIntf = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + + // "interface { String() string }" (i.e. fmt.Stringer) + stringerIntf = types.NewInterfaceType([]*types.Func{ + types.NewFunc(token.NoPos, nil, "String", types.NewSignature( + nil, + nil, + types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])), + false, + )), + }, nil).Complete() + + byteType = types.Universe.Lookup("byte").Type() +) + +// candKind returns the objKind of candType, if any. +func candKind(candType types.Type) objKind { + var kind objKind + + switch t := candType.Underlying().(type) { + case *types.Array: + kind |= kindArray + if t.Elem() == byteType { + kind |= kindBytes + } + case *types.Slice: + kind |= kindSlice + if t.Elem() == byteType { + kind |= kindBytes + } + case *types.Chan: + kind |= kindChan + case *types.Map: + kind |= kindMap + case *types.Pointer: + kind |= kindPtr + + // Some builtins handle array pointers as arrays, so just report a pointer + // to an array as an array. + if _, isArray := t.Elem().Underlying().(*types.Array); isArray { + kind |= kindArray + } + case *types.Basic: + switch info := t.Info(); { + case info&types.IsString > 0: + kind |= kindString + case info&types.IsInteger > 0: + kind |= kindInt + case info&types.IsFloat > 0: + kind |= kindFloat + case info&types.IsComplex > 0: + kind |= kindComplex + case info&types.IsBoolean > 0: + kind |= kindBool + } + case *types.Signature: + return kindFunc + } + + if types.Implements(candType, errorIntf) { + kind |= kindError + } + + if types.Implements(candType, stringerIntf) { + kind |= kindStringer + } + + return kind +} + +// innermostScope returns the innermost scope for c.pos. +func (c *completer) innermostScope() *types.Scope { + for _, s := range c.scopes { + if s != nil { + return s + } + } + return nil +} diff --git a/internal/lsp/source/completion/deep_completion.go b/gopls/internal/lsp/source/completion/deep_completion.go similarity index 100% rename from internal/lsp/source/completion/deep_completion.go rename to gopls/internal/lsp/source/completion/deep_completion.go diff --git a/internal/lsp/source/completion/deep_completion_test.go b/gopls/internal/lsp/source/completion/deep_completion_test.go similarity index 100% rename from internal/lsp/source/completion/deep_completion_test.go rename to gopls/internal/lsp/source/completion/deep_completion_test.go diff --git a/gopls/internal/lsp/source/completion/definition.go b/gopls/internal/lsp/source/completion/definition.go new file mode 100644 index 00000000000..a0160a1a1e3 --- /dev/null +++ b/gopls/internal/lsp/source/completion/definition.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "go/ast" + "go/types" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +// some function definitions in test files can be completed +// So far, TestFoo(t *testing.T), TestMain(m *testing.M) +// BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F) + +// path[0] is known to be *ast.Ident +func definition(path []ast.Node, obj types.Object, pgf *source.ParsedGoFile) ([]CompletionItem, *Selection) { + if _, ok := obj.(*types.Func); !ok { + return nil, nil // not a function at all + } + if !strings.HasSuffix(pgf.URI.Filename(), "_test.go") { + return nil, nil // not a test file + } + + name := path[0].(*ast.Ident).Name + if len(name) == 0 { + // can't happen + return nil, nil + } + start := path[0].Pos() + end := path[0].End() + sel := &Selection{ + content: "", + cursor: start, + tokFile: pgf.Tok, + start: start, + end: end, + mapper: pgf.Mapper, + } + var ans []CompletionItem + var hasParens bool + n, ok := path[1].(*ast.FuncDecl) + if !ok { + return nil, nil // can't happen + } + if n.Recv != nil { + return nil, nil // a method, not a function + } + t := n.Type.Params + if t.Closing != t.Opening { + hasParens = true + } + + // Always suggest TestMain, if possible + if strings.HasPrefix("TestMain", name) { + if hasParens { + ans = append(ans, defItem("TestMain", obj)) + } else { + ans = append(ans, defItem("TestMain(m *testing.M)", obj)) + } + } + + // If a snippet is possible, suggest it + if strings.HasPrefix("Test", name) { + if hasParens { + ans = append(ans, defItem("Test", obj)) + } else { + ans = append(ans, defSnippet("Test", "(t *testing.T)", obj)) + } + return ans, sel + } else if strings.HasPrefix("Benchmark", name) { + if hasParens { + ans = append(ans, defItem("Benchmark", obj)) + } else { + ans = append(ans, defSnippet("Benchmark", "(b *testing.B)", obj)) + } + return ans, sel + } else if strings.HasPrefix("Fuzz", name) { + if hasParens { + ans = append(ans, defItem("Fuzz", obj)) + } else { + ans = append(ans, defSnippet("Fuzz", "(f *testing.F)", obj)) + } + return ans, sel + } + + // Fill in the argument for what the user has already typed + if got := defMatches(name, "Test", path, "(t *testing.T)"); got != "" { + ans = append(ans, defItem(got, obj)) + } else if got := defMatches(name, "Benchmark", path, "(b *testing.B)"); got != "" { + ans = append(ans, defItem(got, obj)) + } else if got := defMatches(name, "Fuzz", path, "(f *testing.F)"); got != "" { + ans = append(ans, defItem(got, obj)) + } + return ans, sel +} + +// defMatches returns text for defItem, never for defSnippet +func defMatches(name, pat string, path []ast.Node, arg string) string { + if !strings.HasPrefix(name, pat) { + return "" + } + c, _ := utf8.DecodeRuneInString(name[len(pat):]) + if unicode.IsLower(c) { + return "" + } + fd, ok := path[1].(*ast.FuncDecl) + if !ok { + // we don't know what's going on + return "" + } + fp := fd.Type.Params + if len(fp.List) > 0 { + // signature already there, nothing to suggest + return "" + } + if fp.Opening != fp.Closing { + // nothing: completion works on words, not easy to insert arg + return "" + } + // suggesting signature too + return name + arg +} + +func defSnippet(prefix, suffix string, obj types.Object) CompletionItem { + var sn snippet.Builder + sn.WriteText(prefix) + sn.WritePlaceholder(func(b *snippet.Builder) { b.WriteText("Xxx") }) + sn.WriteText(suffix + " {\n\t") + sn.WriteFinalTabstop() + sn.WriteText("\n}") + return CompletionItem{ + Label: prefix + "Xxx" + suffix, + Detail: "tab, type the rest of the name, then tab", + Kind: protocol.FunctionCompletion, + Depth: 0, + Score: 10, + snippet: &sn, + Documentation: prefix + " test function", + obj: obj, + } +} +func defItem(val string, obj types.Object) CompletionItem { + return CompletionItem{ + Label: val, + InsertText: val, + Kind: protocol.FunctionCompletion, + Depth: 0, + Score: 9, // prefer the snippets when available + Documentation: "complete the function name", + obj: obj, + } +} diff --git a/gopls/internal/lsp/source/completion/format.go b/gopls/internal/lsp/source/completion/format.go new file mode 100644 index 00000000000..732943f2417 --- /dev/null +++ b/gopls/internal/lsp/source/completion/format.go @@ -0,0 +1,338 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "context" + "errors" + "fmt" + "go/ast" + "go/doc" + "go/types" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/imports" + "golang.org/x/tools/internal/typeparams" +) + +var ( + errNoMatch = errors.New("not a surrounding match") + errLowScore = errors.New("not a high scoring candidate") +) + +// item formats a candidate to a CompletionItem. +func (c *completer) item(ctx context.Context, cand candidate) (CompletionItem, error) { + obj := cand.obj + + // if the object isn't a valid match against the surrounding, return early. + matchScore := c.matcher.Score(cand.name) + if matchScore <= 0 { + return CompletionItem{}, errNoMatch + } + cand.score *= float64(matchScore) + + // Ignore deep candidates that won't be in the MaxDeepCompletions anyway. + if len(cand.path) != 0 && !c.deepState.isHighScore(cand.score) { + return CompletionItem{}, errLowScore + } + + // Handle builtin types separately. + if obj.Parent() == types.Universe { + return c.formatBuiltin(ctx, cand) + } + + var ( + label = cand.name + detail = types.TypeString(obj.Type(), c.qf) + insert = label + kind = protocol.TextCompletion + snip snippet.Builder + protocolEdits []protocol.TextEdit + ) + if obj.Type() == nil { + detail = "" + } + if isTypeName(obj) && c.wantTypeParams() { + x := cand.obj.(*types.TypeName) + if named, ok := x.Type().(*types.Named); ok { + tp := typeparams.ForNamed(named) + label += source.FormatTypeParams(tp) + insert = label // maintain invariant above (label == insert) + } + } + + snip.WriteText(insert) + + switch obj := obj.(type) { + case *types.TypeName: + detail, kind = source.FormatType(obj.Type(), c.qf) + case *types.Const: + kind = protocol.ConstantCompletion + case *types.Var: + if _, ok := obj.Type().(*types.Struct); ok { + detail = "struct{...}" // for anonymous structs + } else if obj.IsField() { + var err error + detail, err = source.FormatVarType(ctx, c.snapshot, c.pkg, c.file, obj, c.qf, c.mq) + if err != nil { + return CompletionItem{}, err + } + } + if obj.IsField() { + kind = protocol.FieldCompletion + c.structFieldSnippet(cand, detail, &snip) + } else { + kind = protocol.VariableCompletion + } + if obj.Type() == nil { + break + } + case *types.Func: + sig, ok := obj.Type().Underlying().(*types.Signature) + if !ok { + break + } + kind = protocol.FunctionCompletion + if sig != nil && sig.Recv() != nil { + kind = protocol.MethodCompletion + } + case *types.PkgName: + kind = protocol.ModuleCompletion + detail = fmt.Sprintf("%q", obj.Imported().Path()) + case *types.Label: + kind = protocol.ConstantCompletion + detail = "label" + } + + var prefix string + for _, mod := range cand.mods { + switch mod { + case reference: + prefix = "&" + prefix + case dereference: + prefix = "*" + prefix + case chanRead: + prefix = "<-" + prefix + } + } + + var ( + suffix string + funcType = obj.Type() + ) +Suffixes: + for _, mod := range cand.mods { + switch mod { + case invoke: + if sig, ok := funcType.Underlying().(*types.Signature); ok { + s, err := source.NewSignature(ctx, c.snapshot, c.pkg, c.file, sig, nil, c.qf, c.mq) + if err != nil { + return CompletionItem{}, err + } + c.functionCallSnippet("", s.TypeParams(), s.Params(), &snip) + if sig.Results().Len() == 1 { + funcType = sig.Results().At(0).Type() + } + detail = "func" + s.Format() + } + + if !c.opts.snippets { + // Without snippets the candidate will not include "()". Don't + // add further suffixes since they will be invalid. For + // example, with snippets "foo()..." would become "foo..." + // without snippets if we added the dotDotDot. + break Suffixes + } + case takeSlice: + suffix += "[:]" + case takeDotDotDot: + suffix += "..." + case index: + snip.WriteText("[") + snip.WritePlaceholder(nil) + snip.WriteText("]") + } + } + + // If this candidate needs an additional import statement, + // add the additional text edits needed. + if cand.imp != nil { + addlEdits, err := c.importEdits(cand.imp) + + if err != nil { + return CompletionItem{}, err + } + + protocolEdits = append(protocolEdits, addlEdits...) + if kind != protocol.ModuleCompletion { + if detail != "" { + detail += " " + } + detail += fmt.Sprintf("(from %q)", cand.imp.importPath) + } + } + + if cand.convertTo != nil { + typeName := types.TypeString(cand.convertTo, c.qf) + + switch cand.convertTo.(type) { + // We need extra parens when casting to these types. For example, + // we need "(*int)(foo)", not "*int(foo)". + case *types.Pointer, *types.Signature: + typeName = "(" + typeName + ")" + } + + prefix = typeName + "(" + prefix + suffix = ")" + } + + if prefix != "" { + // If we are in a selector, add an edit to place prefix before selector. + if sel := enclosingSelector(c.path, c.pos); sel != nil { + edits, err := c.editText(sel.Pos(), sel.Pos(), prefix) + if err != nil { + return CompletionItem{}, err + } + protocolEdits = append(protocolEdits, edits...) + } else { + // If there is no selector, just stick the prefix at the start. + insert = prefix + insert + snip.PrependText(prefix) + } + } + + if suffix != "" { + insert += suffix + snip.WriteText(suffix) + } + + detail = strings.TrimPrefix(detail, "untyped ") + // override computed detail with provided detail, if something is provided. + if cand.detail != "" { + detail = cand.detail + } + item := CompletionItem{ + Label: label, + InsertText: insert, + AdditionalTextEdits: protocolEdits, + Detail: detail, + Kind: kind, + Score: cand.score, + Depth: len(cand.path), + snippet: &snip, + obj: obj, + } + // If the user doesn't want documentation for completion items. + if !c.opts.documentation { + return item, nil + } + pos := safetoken.StartPosition(c.pkg.FileSet(), obj.Pos()) + + // We ignore errors here, because some types, like "unsafe" or "error", + // may not have valid positions that we can use to get documentation. + if !pos.IsValid() { + return item, nil + } + + comment, err := source.HoverDocForObject(ctx, c.snapshot, c.pkg, obj) + if err != nil { + event.Error(ctx, fmt.Sprintf("failed to find Hover for %q", obj.Name()), err) + return item, nil + } + if c.opts.fullDocumentation { + item.Documentation = comment.Text() + } else { + item.Documentation = doc.Synopsis(comment.Text()) + } + // The desired pattern is `^// Deprecated`, but the prefix has been removed + // TODO(rfindley): It doesn't look like this does the right thing for + // multi-line comments. + if strings.HasPrefix(comment.Text(), "Deprecated") { + if c.snapshot.View().Options().CompletionTags { + item.Tags = []protocol.CompletionItemTag{protocol.ComplDeprecated} + } else if c.snapshot.View().Options().CompletionDeprecated { + item.Deprecated = true + } + } + + return item, nil +} + +// importEdits produces the text edits necessary to add the given import to the current file. +func (c *completer) importEdits(imp *importInfo) ([]protocol.TextEdit, error) { + if imp == nil { + return nil, nil + } + + pgf, err := c.pkg.File(span.URIFromPath(c.filename)) + if err != nil { + return nil, err + } + + return source.ComputeOneImportFixEdits(c.snapshot, pgf, &imports.ImportFix{ + StmtInfo: imports.ImportInfo{ + ImportPath: imp.importPath, + Name: imp.name, + }, + // IdentName is unused on this path and is difficult to get. + FixType: imports.AddImport, + }) +} + +func (c *completer) formatBuiltin(ctx context.Context, cand candidate) (CompletionItem, error) { + obj := cand.obj + item := CompletionItem{ + Label: obj.Name(), + InsertText: obj.Name(), + Score: cand.score, + } + switch obj.(type) { + case *types.Const: + item.Kind = protocol.ConstantCompletion + case *types.Builtin: + item.Kind = protocol.FunctionCompletion + sig, err := source.NewBuiltinSignature(ctx, c.snapshot, obj.Name()) + if err != nil { + return CompletionItem{}, err + } + item.Detail = "func" + sig.Format() + item.snippet = &snippet.Builder{} + c.functionCallSnippet(obj.Name(), sig.TypeParams(), sig.Params(), item.snippet) + case *types.TypeName: + if types.IsInterface(obj.Type()) { + item.Kind = protocol.InterfaceCompletion + } else { + item.Kind = protocol.ClassCompletion + } + case *types.Nil: + item.Kind = protocol.VariableCompletion + } + return item, nil +} + +// decide if the type params (if any) should be part of the completion +// which only possible for types.Named and types.Signature +// (so far, only in receivers, e.g.; func (s *GENERIC[K, V])..., which is a types.Named) +func (c *completer) wantTypeParams() bool { + // Need to be lexically in a receiver, and a child of an IndexListExpr + // (but IndexListExpr only exists with go1.18) + start := c.path[0].Pos() + for i, nd := range c.path { + if fd, ok := nd.(*ast.FuncDecl); ok { + if i > 0 && fd.Recv != nil && start < fd.Recv.End() { + return true + } else { + return false + } + } + } + return false +} diff --git a/internal/lsp/source/completion/fuzz.go b/gopls/internal/lsp/source/completion/fuzz.go similarity index 98% rename from internal/lsp/source/completion/fuzz.go rename to gopls/internal/lsp/source/completion/fuzz.go index 92349ab9343..d7912ceabc6 100644 --- a/internal/lsp/source/completion/fuzz.go +++ b/gopls/internal/lsp/source/completion/fuzz.go @@ -11,7 +11,7 @@ import ( "go/types" "strings" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) // golang/go#51089 diff --git a/gopls/internal/lsp/source/completion/keywords.go b/gopls/internal/lsp/source/completion/keywords.go new file mode 100644 index 00000000000..a068ca2d57c --- /dev/null +++ b/gopls/internal/lsp/source/completion/keywords.go @@ -0,0 +1,154 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "go/ast" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +const ( + BREAK = "break" + CASE = "case" + CHAN = "chan" + CONST = "const" + CONTINUE = "continue" + DEFAULT = "default" + DEFER = "defer" + ELSE = "else" + FALLTHROUGH = "fallthrough" + FOR = "for" + FUNC = "func" + GO = "go" + GOTO = "goto" + IF = "if" + IMPORT = "import" + INTERFACE = "interface" + MAP = "map" + PACKAGE = "package" + RANGE = "range" + RETURN = "return" + SELECT = "select" + STRUCT = "struct" + SWITCH = "switch" + TYPE = "type" + VAR = "var" +) + +// addKeywordCompletions offers keyword candidates appropriate at the position. +func (c *completer) addKeywordCompletions() { + seen := make(map[string]bool) + + if c.wantTypeName() && c.inference.objType == nil { + // If we want a type name but don't have an expected obj type, + // include "interface", "struct", "func", "chan", and "map". + + // "interface" and "struct" are more common declaring named types. + // Give them a higher score if we are in a type declaration. + structIntf, funcChanMap := stdScore, highScore + if len(c.path) > 1 { + if _, namedDecl := c.path[1].(*ast.TypeSpec); namedDecl { + structIntf, funcChanMap = highScore, stdScore + } + } + + c.addKeywordItems(seen, structIntf, STRUCT, INTERFACE) + c.addKeywordItems(seen, funcChanMap, FUNC, CHAN, MAP) + } + + // If we are at the file scope, only offer decl keywords. We don't + // get *ast.Idents at the file scope because non-keyword identifiers + // turn into *ast.BadDecl, not *ast.Ident. + if len(c.path) == 1 || isASTFile(c.path[1]) { + c.addKeywordItems(seen, stdScore, TYPE, CONST, VAR, FUNC, IMPORT) + return + } else if _, ok := c.path[0].(*ast.Ident); !ok { + // Otherwise only offer keywords if the client is completing an identifier. + return + } + + if len(c.path) > 2 { + // Offer "range" if we are in ast.ForStmt.Init. This is what the + // AST looks like before "range" is typed, e.g. "for i := r<>". + if loop, ok := c.path[2].(*ast.ForStmt); ok && source.NodeContains(loop.Init, c.pos) { + c.addKeywordItems(seen, stdScore, RANGE) + } + } + + // Only suggest keywords if we are beginning a statement. + switch n := c.path[1].(type) { + case *ast.BlockStmt, *ast.ExprStmt: + // OK - our ident must be at beginning of statement. + case *ast.CommClause: + // Make sure we aren't in the Comm statement. + if !n.Colon.IsValid() || c.pos <= n.Colon { + return + } + case *ast.CaseClause: + // Make sure we aren't in the case List. + if !n.Colon.IsValid() || c.pos <= n.Colon { + return + } + default: + return + } + + // Filter out keywords depending on scope + // Skip the first one because we want to look at the enclosing scopes + path := c.path[1:] + for i, n := range path { + switch node := n.(type) { + case *ast.CaseClause: + // only recommend "fallthrough" and "break" within the bodies of a case clause + if c.pos > node.Colon { + c.addKeywordItems(seen, stdScore, BREAK) + // "fallthrough" is only valid in switch statements. + // A case clause is always nested within a block statement in a switch statement, + // that block statement is nested within either a TypeSwitchStmt or a SwitchStmt. + if i+2 >= len(path) { + continue + } + if _, ok := path[i+2].(*ast.SwitchStmt); ok { + c.addKeywordItems(seen, stdScore, FALLTHROUGH) + } + } + case *ast.CommClause: + if c.pos > node.Colon { + c.addKeywordItems(seen, stdScore, BREAK) + } + case *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.SwitchStmt: + c.addKeywordItems(seen, stdScore, CASE, DEFAULT) + case *ast.ForStmt, *ast.RangeStmt: + c.addKeywordItems(seen, stdScore, BREAK, CONTINUE) + // This is a bit weak, functions allow for many keywords + case *ast.FuncDecl: + if node.Body != nil && c.pos > node.Body.Lbrace { + c.addKeywordItems(seen, stdScore, DEFER, RETURN, FOR, GO, SWITCH, SELECT, IF, ELSE, VAR, CONST, GOTO, TYPE) + } + } + } +} + +// addKeywordItems dedupes and adds completion items for the specified +// keywords with the specified score. +func (c *completer) addKeywordItems(seen map[string]bool, score float64, kws ...string) { + for _, kw := range kws { + if seen[kw] { + continue + } + seen[kw] = true + + if matchScore := c.matcher.Score(kw); matchScore > 0 { + c.items = append(c.items, CompletionItem{ + Label: kw, + Kind: protocol.KeywordCompletion, + InsertText: kw, + Score: score * float64(matchScore), + }) + } + } +} diff --git a/internal/lsp/source/completion/labels.go b/gopls/internal/lsp/source/completion/labels.go similarity index 100% rename from internal/lsp/source/completion/labels.go rename to gopls/internal/lsp/source/completion/labels.go diff --git a/gopls/internal/lsp/source/completion/literal.go b/gopls/internal/lsp/source/completion/literal.go new file mode 100644 index 00000000000..6777f7333ed --- /dev/null +++ b/gopls/internal/lsp/source/completion/literal.go @@ -0,0 +1,592 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "context" + "fmt" + "go/types" + "strings" + "unicode" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/typeparams" +) + +// literal generates composite literal, function literal, and make() +// completion items. +func (c *completer) literal(ctx context.Context, literalType types.Type, imp *importInfo) { + if !c.opts.literal { + return + } + + expType := c.inference.objType + + if c.inference.matchesVariadic(literalType) { + // Don't offer literal slice candidates for variadic arguments. + // For example, don't offer "[]interface{}{}" in "fmt.Print(<>)". + return + } + + // Avoid literal candidates if the expected type is an empty + // interface. It isn't very useful to suggest a literal candidate of + // every possible type. + if expType != nil && isEmptyInterface(expType) { + return + } + + // We handle unnamed literal completions explicitly before searching + // for candidates. Avoid named-type literal completions for + // unnamed-type expected type since that results in duplicate + // candidates. For example, in + // + // type mySlice []int + // var []int = <> + // + // don't offer "mySlice{}" since we have already added a candidate + // of "[]int{}". + if _, named := literalType.(*types.Named); named && expType != nil { + if _, named := source.Deref(expType).(*types.Named); !named { + return + } + } + + // Check if an object of type literalType would match our expected type. + cand := candidate{ + obj: c.fakeObj(literalType), + } + + switch literalType.Underlying().(type) { + // These literal types are addressable (e.g. "&[]int{}"), others are + // not (e.g. can't do "&(func(){})"). + case *types.Struct, *types.Array, *types.Slice, *types.Map: + cand.addressable = true + } + + if !c.matchingCandidate(&cand) || cand.convertTo != nil { + return + } + + var ( + qf = c.qf + sel = enclosingSelector(c.path, c.pos) + ) + + // Don't qualify the type name if we are in a selector expression + // since the package name is already present. + if sel != nil { + qf = func(_ *types.Package) string { return "" } + } + + snip, typeName := c.typeNameSnippet(literalType, qf) + + // A type name of "[]int" doesn't work very will with the matcher + // since "[" isn't a valid identifier prefix. Here we strip off the + // slice (and array) prefix yielding just "int". + matchName := typeName + switch t := literalType.(type) { + case *types.Slice: + matchName = types.TypeString(t.Elem(), qf) + case *types.Array: + matchName = types.TypeString(t.Elem(), qf) + } + + addlEdits, err := c.importEdits(imp) + if err != nil { + event.Error(ctx, "error adding import for literal candidate", err) + return + } + + // If prefix matches the type name, client may want a composite literal. + if score := c.matcher.Score(matchName); score > 0 { + if cand.hasMod(reference) { + if sel != nil { + // If we are in a selector we must place the "&" before the selector. + // For example, "foo.B<>" must complete to "&foo.Bar{}", not + // "foo.&Bar{}". + edits, err := c.editText(sel.Pos(), sel.Pos(), "&") + if err != nil { + event.Error(ctx, "error making edit for literal pointer completion", err) + return + } + addlEdits = append(addlEdits, edits...) + } else { + // Otherwise we can stick the "&" directly before the type name. + typeName = "&" + typeName + snip.PrependText("&") + } + } + + switch t := literalType.Underlying().(type) { + case *types.Struct, *types.Array, *types.Slice, *types.Map: + c.compositeLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) + case *types.Signature: + // Add a literal completion for a signature type that implements + // an interface. For example, offer "http.HandlerFunc()" when + // expected type is "http.Handler". + if expType != nil && types.IsInterface(expType) { + c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) + } + case *types.Basic: + // Add a literal completion for basic types that implement our + // expected interface (e.g. named string type http.Dir + // implements http.FileSystem), or are identical to our expected + // type (i.e. yielding a type conversion such as "float64()"). + if expType != nil && (types.IsInterface(expType) || types.Identical(expType, literalType)) { + c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) + } + } + } + + // If prefix matches "make", client may want a "make()" + // invocation. We also include the type name to allow for more + // flexible fuzzy matching. + if score := c.matcher.Score("make." + matchName); !cand.hasMod(reference) && score > 0 { + switch literalType.Underlying().(type) { + case *types.Slice: + // The second argument to "make()" for slices is required, so default to "0". + c.makeCall(snip.Clone(), typeName, "0", float64(score), addlEdits) + case *types.Map, *types.Chan: + // Maps and channels don't require the second argument, so omit + // to keep things simple for now. + c.makeCall(snip.Clone(), typeName, "", float64(score), addlEdits) + } + } + + // If prefix matches "func", client may want a function literal. + if score := c.matcher.Score("func"); !cand.hasMod(reference) && score > 0 && (expType == nil || !types.IsInterface(expType)) { + switch t := literalType.Underlying().(type) { + case *types.Signature: + c.functionLiteral(ctx, t, float64(score)) + } + } +} + +// literalCandidateScore is the base score for literal candidates. +// Literal candidates match the expected type so they should be high +// scoring, but we want them ranked below lexical objects of the +// correct type, so scale down highScore. +const literalCandidateScore = highScore / 2 + +// functionLiteral adds a function literal completion item for the +// given signature. +func (c *completer) functionLiteral(ctx context.Context, sig *types.Signature, matchScore float64) { + snip := &snippet.Builder{} + snip.WriteText("func(") + + // First we generate names for each param and keep a seen count so + // we know if we need to uniquify param names. For example, + // "func(int)" will become "func(i int)", but "func(int, int64)" + // will become "func(i1 int, i2 int64)". + var ( + paramNames = make([]string, sig.Params().Len()) + paramNameCount = make(map[string]int) + hasTypeParams bool + ) + for i := 0; i < sig.Params().Len(); i++ { + var ( + p = sig.Params().At(i) + name = p.Name() + ) + + if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { + hasTypeParams = true + } + + if name == "" { + // If the param has no name in the signature, guess a name based + // on the type. Use an empty qualifier to ignore the package. + // For example, we want to name "http.Request" "r", not "hr". + typeName, err := source.FormatVarType(ctx, c.snapshot, c.pkg, c.file, p, + func(p *types.Package) string { return "" }, + func(source.PackageName, source.ImportPath, source.PackagePath) string { return "" }) + if err != nil { + // In general, the only error we should encounter while formatting is + // context cancellation. + if ctx.Err() == nil { + event.Error(ctx, "formatting var type", err) + } + return + } + name = abbreviateTypeName(typeName) + } + paramNames[i] = name + if name != "_" { + paramNameCount[name]++ + } + } + + for n, c := range paramNameCount { + // Any names we saw more than once will need a unique suffix added + // on. Reset the count to 1 to act as the suffix for the first + // name. + if c >= 2 { + paramNameCount[n] = 1 + } else { + delete(paramNameCount, n) + } + } + + for i := 0; i < sig.Params().Len(); i++ { + if hasTypeParams && !c.opts.placeholders { + // If there are type params in the args then the user must + // choose the concrete types. If placeholders are disabled just + // drop them between the parens and let them fill things in. + snip.WritePlaceholder(nil) + break + } + + if i > 0 { + snip.WriteText(", ") + } + + var ( + p = sig.Params().At(i) + name = paramNames[i] + ) + + // Uniquify names by adding on an incrementing numeric suffix. + if idx, found := paramNameCount[name]; found { + paramNameCount[name]++ + name = fmt.Sprintf("%s%d", name, idx) + } + + if name != p.Name() && c.opts.placeholders { + // If we didn't use the signature's param name verbatim then we + // may have chosen a poor name. Give the user a placeholder so + // they can easily fix the name. + snip.WritePlaceholder(func(b *snippet.Builder) { + b.WriteText(name) + }) + } else { + snip.WriteText(name) + } + + // If the following param's type is identical to this one, omit + // this param's type string. For example, emit "i, j int" instead + // of "i int, j int". + if i == sig.Params().Len()-1 || !types.Identical(p.Type(), sig.Params().At(i+1).Type()) { + snip.WriteText(" ") + typeStr, err := source.FormatVarType(ctx, c.snapshot, c.pkg, c.file, p, c.qf, c.mq) + if err != nil { + // In general, the only error we should encounter while formatting is + // context cancellation. + if ctx.Err() == nil { + event.Error(ctx, "formatting var type", err) + } + return + } + if sig.Variadic() && i == sig.Params().Len()-1 { + typeStr = strings.Replace(typeStr, "[]", "...", 1) + } + + if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { + snip.WritePlaceholder(func(snip *snippet.Builder) { + snip.WriteText(typeStr) + }) + } else { + snip.WriteText(typeStr) + } + } + } + snip.WriteText(")") + + results := sig.Results() + if results.Len() > 0 { + snip.WriteText(" ") + } + + resultsNeedParens := results.Len() > 1 || + results.Len() == 1 && results.At(0).Name() != "" + + var resultHasTypeParams bool + for i := 0; i < results.Len(); i++ { + if tp, _ := results.At(i).Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { + resultHasTypeParams = true + } + } + + if resultsNeedParens { + snip.WriteText("(") + } + for i := 0; i < results.Len(); i++ { + if resultHasTypeParams && !c.opts.placeholders { + // Leave an empty tabstop if placeholders are disabled and there + // are type args that need specificying. + snip.WritePlaceholder(nil) + break + } + + if i > 0 { + snip.WriteText(", ") + } + r := results.At(i) + if name := r.Name(); name != "" { + snip.WriteText(name + " ") + } + + text, err := source.FormatVarType(ctx, c.snapshot, c.pkg, c.file, r, c.qf, c.mq) + if err != nil { + // In general, the only error we should encounter while formatting is + // context cancellation. + if ctx.Err() == nil { + event.Error(ctx, "formatting var type", err) + } + return + } + if tp, _ := r.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { + snip.WritePlaceholder(func(snip *snippet.Builder) { + snip.WriteText(text) + }) + } else { + snip.WriteText(text) + } + } + if resultsNeedParens { + snip.WriteText(")") + } + + snip.WriteText(" {") + snip.WriteFinalTabstop() + snip.WriteText("}") + + c.items = append(c.items, CompletionItem{ + Label: "func(...) {}", + Score: matchScore * literalCandidateScore, + Kind: protocol.VariableCompletion, + snippet: snip, + }) +} + +// conventionalAcronyms contains conventional acronyms for type names +// in lower case. For example, "ctx" for "context" and "err" for "error". +var conventionalAcronyms = map[string]string{ + "context": "ctx", + "error": "err", + "tx": "tx", + "responsewriter": "w", +} + +// abbreviateTypeName abbreviates type names into acronyms. For +// example, "fooBar" is abbreviated "fb". Care is taken to ignore +// non-identifier runes. For example, "[]int" becomes "i", and +// "struct { i int }" becomes "s". +func abbreviateTypeName(s string) string { + var ( + b strings.Builder + useNextUpper bool + ) + + // Trim off leading non-letters. We trim everything between "[" and + // "]" to handle array types like "[someConst]int". + var inBracket bool + s = strings.TrimFunc(s, func(r rune) bool { + if inBracket { + inBracket = r != ']' + return true + } + + if r == '[' { + inBracket = true + } + + return !unicode.IsLetter(r) + }) + + if acr, ok := conventionalAcronyms[strings.ToLower(s)]; ok { + return acr + } + + for i, r := range s { + // Stop if we encounter a non-identifier rune. + if !unicode.IsLetter(r) && !unicode.IsNumber(r) { + break + } + + if i == 0 { + b.WriteRune(unicode.ToLower(r)) + } + + if unicode.IsUpper(r) { + if useNextUpper { + b.WriteRune(unicode.ToLower(r)) + useNextUpper = false + } + } else { + useNextUpper = true + } + } + + return b.String() +} + +// compositeLiteral adds a composite literal completion item for the given typeName. +func (c *completer) compositeLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) { + snip.WriteText("{") + // Don't put the tab stop inside the composite literal curlies "{}" + // for structs that have no accessible fields. + if strct, ok := T.(*types.Struct); !ok || fieldsAccessible(strct, c.pkg.GetTypes()) { + snip.WriteFinalTabstop() + } + snip.WriteText("}") + + nonSnippet := typeName + "{}" + + c.items = append(c.items, CompletionItem{ + Label: nonSnippet, + InsertText: nonSnippet, + Score: matchScore * literalCandidateScore, + Kind: protocol.VariableCompletion, + AdditionalTextEdits: edits, + snippet: snip, + }) +} + +// basicLiteral adds a literal completion item for the given basic +// type name typeName. +func (c *completer) basicLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) { + // Never give type conversions like "untyped int()". + if isUntyped(T) { + return + } + + snip.WriteText("(") + snip.WriteFinalTabstop() + snip.WriteText(")") + + nonSnippet := typeName + "()" + + c.items = append(c.items, CompletionItem{ + Label: nonSnippet, + InsertText: nonSnippet, + Detail: T.String(), + Score: matchScore * literalCandidateScore, + Kind: protocol.VariableCompletion, + AdditionalTextEdits: edits, + snippet: snip, + }) +} + +// makeCall adds a completion item for a "make()" call given a specific type. +func (c *completer) makeCall(snip *snippet.Builder, typeName string, secondArg string, matchScore float64, edits []protocol.TextEdit) { + // Keep it simple and don't add any placeholders for optional "make()" arguments. + + snip.PrependText("make(") + if secondArg != "" { + snip.WriteText(", ") + snip.WritePlaceholder(func(b *snippet.Builder) { + if c.opts.placeholders { + b.WriteText(secondArg) + } + }) + } + snip.WriteText(")") + + var nonSnippet strings.Builder + nonSnippet.WriteString("make(" + typeName) + if secondArg != "" { + nonSnippet.WriteString(", ") + nonSnippet.WriteString(secondArg) + } + nonSnippet.WriteByte(')') + + c.items = append(c.items, CompletionItem{ + Label: nonSnippet.String(), + InsertText: nonSnippet.String(), + Score: matchScore * literalCandidateScore, + Kind: protocol.FunctionCompletion, + AdditionalTextEdits: edits, + snippet: snip, + }) +} + +// Create a snippet for a type name where type params become placeholders. +func (c *completer) typeNameSnippet(literalType types.Type, qf types.Qualifier) (*snippet.Builder, string) { + var ( + snip snippet.Builder + typeName string + named, _ = literalType.(*types.Named) + ) + + if named != nil && named.Obj() != nil && typeparams.ForNamed(named).Len() > 0 && !c.fullyInstantiated(named) { + // We are not "fully instantiated" meaning we have type params that must be specified. + if pkg := qf(named.Obj().Pkg()); pkg != "" { + typeName = pkg + "." + } + + // We do this to get "someType" instead of "someType[T]". + typeName += named.Obj().Name() + snip.WriteText(typeName + "[") + + if c.opts.placeholders { + for i := 0; i < typeparams.ForNamed(named).Len(); i++ { + if i > 0 { + snip.WriteText(", ") + } + snip.WritePlaceholder(func(snip *snippet.Builder) { + snip.WriteText(types.TypeString(typeparams.ForNamed(named).At(i), qf)) + }) + } + } else { + snip.WritePlaceholder(nil) + } + snip.WriteText("]") + typeName += "[...]" + } else { + // We don't have unspecified type params so use default type formatting. + typeName = types.TypeString(literalType, qf) + snip.WriteText(typeName) + } + + return &snip, typeName +} + +// fullyInstantiated reports whether all of t's type params have +// specified type args. +func (c *completer) fullyInstantiated(t *types.Named) bool { + tps := typeparams.ForNamed(t) + tas := typeparams.NamedTypeArgs(t) + + if tps.Len() != tas.Len() { + return false + } + + for i := 0; i < tas.Len(); i++ { + switch ta := tas.At(i).(type) { + case *typeparams.TypeParam: + // A *TypeParam only counts as specified if it is currently in + // scope (i.e. we are in a generic definition). + if !c.typeParamInScope(ta) { + return false + } + case *types.Named: + if !c.fullyInstantiated(ta) { + return false + } + } + } + return true +} + +// typeParamInScope returns whether tp's object is in scope at c.pos. +// This tells you whether you are in a generic definition and can +// assume tp has been specified. +func (c *completer) typeParamInScope(tp *typeparams.TypeParam) bool { + obj := tp.Obj() + if obj == nil { + return false + } + + scope := c.innermostScope() + if scope == nil { + return false + } + + _, foundObj := scope.LookupParent(obj.Name(), c.pos) + return obj == foundObj +} diff --git a/gopls/internal/lsp/source/completion/package.go b/gopls/internal/lsp/source/completion/package.go new file mode 100644 index 00000000000..f3bc30688c3 --- /dev/null +++ b/gopls/internal/lsp/source/completion/package.go @@ -0,0 +1,351 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "bytes" + "context" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/fuzzy" +) + +// packageClauseCompletions offers completions for a package declaration when +// one is not present in the given file. +func packageClauseCompletions(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) ([]CompletionItem, *Selection, error) { + // We know that the AST for this file will be empty due to the missing + // package declaration, but parse it anyway to get a mapper. + // TODO(adonovan): opt: there's no need to parse just to get a mapper. + pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) + if err != nil { + return nil, nil, err + } + + offset, err := pgf.Mapper.PositionOffset(position) + if err != nil { + return nil, nil, err + } + surrounding, err := packageCompletionSurrounding(pgf, offset) + if err != nil { + return nil, nil, fmt.Errorf("invalid position for package completion: %w", err) + } + + packageSuggestions, err := packageSuggestions(ctx, snapshot, fh.URI(), "") + if err != nil { + return nil, nil, err + } + + var items []CompletionItem + for _, pkg := range packageSuggestions { + insertText := fmt.Sprintf("package %s", pkg.name) + items = append(items, CompletionItem{ + Label: insertText, + Kind: protocol.ModuleCompletion, + InsertText: insertText, + Score: pkg.score, + }) + } + + return items, surrounding, nil +} + +// packageCompletionSurrounding returns surrounding for package completion if a +// package completions can be suggested at a given cursor offset. A valid location +// for package completion is above any declarations or import statements. +func packageCompletionSurrounding(pgf *source.ParsedGoFile, offset int) (*Selection, error) { + m := pgf.Mapper + // If the file lacks a package declaration, the parser will return an empty + // AST. As a work-around, try to parse an expression from the file contents. + fset := token.NewFileSet() + expr, _ := parser.ParseExprFrom(fset, m.URI.Filename(), pgf.Src, parser.Mode(0)) + if expr == nil { + return nil, fmt.Errorf("unparseable file (%s)", m.URI) + } + tok := fset.File(expr.Pos()) + cursor := tok.Pos(offset) + + // If we were able to parse out an identifier as the first expression from + // the file, it may be the beginning of a package declaration ("pack "). + // We can offer package completions if the cursor is in the identifier. + if name, ok := expr.(*ast.Ident); ok { + if cursor >= name.Pos() && cursor <= name.End() { + if !strings.HasPrefix(PACKAGE, name.Name) { + return nil, fmt.Errorf("cursor in non-matching ident") + } + return &Selection{ + content: name.Name, + cursor: cursor, + tokFile: tok, + start: name.Pos(), + end: name.End(), + mapper: m, + }, nil + } + } + + // The file is invalid, but it contains an expression that we were able to + // parse. We will use this expression to construct the cursor's + // "surrounding". + + // First, consider the possibility that we have a valid "package" keyword + // with an empty package name ("package "). "package" is parsed as an + // *ast.BadDecl since it is a keyword. This logic would allow "package" to + // appear on any line of the file as long as it's the first code expression + // in the file. + lines := strings.Split(string(pgf.Src), "\n") + cursorLine := tok.Line(cursor) + if cursorLine <= 0 || cursorLine > len(lines) { + return nil, fmt.Errorf("invalid line number") + } + if safetoken.StartPosition(fset, expr.Pos()).Line == cursorLine { + words := strings.Fields(lines[cursorLine-1]) + if len(words) > 0 && words[0] == PACKAGE { + content := PACKAGE + // Account for spaces if there are any. + if len(words) > 1 { + content += " " + } + + start := expr.Pos() + end := token.Pos(int(expr.Pos()) + len(content) + 1) + // We have verified that we have a valid 'package' keyword as our + // first expression. Ensure that cursor is in this keyword or + // otherwise fallback to the general case. + if cursor >= start && cursor <= end { + return &Selection{ + content: content, + cursor: cursor, + tokFile: tok, + start: start, + end: end, + mapper: m, + }, nil + } + } + } + + // If the cursor is after the start of the expression, no package + // declaration will be valid. + if cursor > expr.Pos() { + return nil, fmt.Errorf("cursor after expression") + } + + // If the cursor is in a comment, don't offer any completions. + if cursorInComment(tok, cursor, m.Content) { + return nil, fmt.Errorf("cursor in comment") + } + + // The surrounding range in this case is the cursor. + return &Selection{ + content: "", + tokFile: tok, + start: cursor, + end: cursor, + cursor: cursor, + mapper: m, + }, nil +} + +func cursorInComment(file *token.File, cursor token.Pos, src []byte) bool { + var s scanner.Scanner + s.Init(file, src, func(_ token.Position, _ string) {}, scanner.ScanComments) + for { + pos, tok, lit := s.Scan() + if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) { + return tok == token.COMMENT + } + if tok == token.EOF { + break + } + } + return false +} + +// packageNameCompletions returns name completions for a package clause using +// the current name as prefix. +func (c *completer) packageNameCompletions(ctx context.Context, fileURI span.URI, name *ast.Ident) error { + cursor := int(c.pos - name.NamePos) + if cursor < 0 || cursor > len(name.Name) { + return errors.New("cursor is not in package name identifier") + } + + c.completionContext.packageCompletion = true + + prefix := name.Name[:cursor] + packageSuggestions, err := packageSuggestions(ctx, c.snapshot, fileURI, prefix) + if err != nil { + return err + } + + for _, pkg := range packageSuggestions { + c.deepState.enqueue(pkg) + } + return nil +} + +// packageSuggestions returns a list of packages from workspace packages that +// have the given prefix and are used in the same directory as the given +// file. This also includes test packages for these packages (_test) and +// the directory name itself. +func packageSuggestions(ctx context.Context, snapshot source.Snapshot, fileURI span.URI, prefix string) (packages []candidate, err error) { + active, err := snapshot.ActiveMetadata(ctx) + if err != nil { + return nil, err + } + + toCandidate := func(name string, score float64) candidate { + obj := types.NewPkgName(0, nil, name, types.NewPackage("", name)) + return candidate{obj: obj, name: name, detail: name, score: score} + } + + matcher := fuzzy.NewMatcher(prefix) + + // Always try to suggest a main package + defer func() { + if score := float64(matcher.Score("main")); score > 0 { + packages = append(packages, toCandidate("main", score*lowScore)) + } + }() + + dirPath := filepath.Dir(fileURI.Filename()) + dirName := filepath.Base(dirPath) + if !isValidDirName(dirName) { + return packages, nil + } + pkgName := convertDirNameToPkgName(dirName) + + seenPkgs := make(map[source.PackageName]struct{}) + + // The `go` command by default only allows one package per directory but we + // support multiple package suggestions since gopls is build system agnostic. + for _, m := range active { + if m.Name == "main" || m.Name == "" { + continue + } + if _, ok := seenPkgs[m.Name]; ok { + continue + } + + // Only add packages that are previously used in the current directory. + var relevantPkg bool + for _, uri := range m.CompiledGoFiles { + if filepath.Dir(uri.Filename()) == dirPath { + relevantPkg = true + break + } + } + if !relevantPkg { + continue + } + + // Add a found package used in current directory as a high relevance + // suggestion and the test package for it as a medium relevance + // suggestion. + if score := float64(matcher.Score(string(m.Name))); score > 0 { + packages = append(packages, toCandidate(string(m.Name), score*highScore)) + } + seenPkgs[m.Name] = struct{}{} + + testPkgName := m.Name + "_test" + if _, ok := seenPkgs[testPkgName]; ok || strings.HasSuffix(string(m.Name), "_test") { + continue + } + if score := float64(matcher.Score(string(testPkgName))); score > 0 { + packages = append(packages, toCandidate(string(testPkgName), score*stdScore)) + } + seenPkgs[testPkgName] = struct{}{} + } + + // Add current directory name as a low relevance suggestion. + if _, ok := seenPkgs[pkgName]; !ok { + if score := float64(matcher.Score(string(pkgName))); score > 0 { + packages = append(packages, toCandidate(string(pkgName), score*lowScore)) + } + + testPkgName := pkgName + "_test" + if score := float64(matcher.Score(string(testPkgName))); score > 0 { + packages = append(packages, toCandidate(string(testPkgName), score*lowScore)) + } + } + + return packages, nil +} + +// isValidDirName checks whether the passed directory name can be used in +// a package path. Requirements for a package path can be found here: +// https://golang.org/ref/mod#go-mod-file-ident. +func isValidDirName(dirName string) bool { + if dirName == "" { + return false + } + + for i, ch := range dirName { + if isLetter(ch) || isDigit(ch) { + continue + } + if i == 0 { + // Directory name can start only with '_'. '.' is not allowed in module paths. + // '-' and '~' are not allowed because elements of package paths must be + // safe command-line arguments. + if ch == '_' { + continue + } + } else { + // Modules path elements can't end with '.' + if isAllowedPunctuation(ch) && (i != len(dirName)-1 || ch != '.') { + continue + } + } + + return false + } + return true +} + +// convertDirNameToPkgName converts a valid directory name to a valid package name. +// It leaves only letters and digits. All letters are mapped to lower case. +func convertDirNameToPkgName(dirName string) source.PackageName { + var buf bytes.Buffer + for _, ch := range dirName { + switch { + case isLetter(ch): + buf.WriteRune(unicode.ToLower(ch)) + + case buf.Len() != 0 && isDigit(ch): + buf.WriteRune(ch) + } + } + return source.PackageName(buf.String()) +} + +// isLetter and isDigit allow only ASCII characters because +// "Each path element is a non-empty string made of up ASCII letters, +// ASCII digits, and limited ASCII punctuation" +// (see https://golang.org/ref/mod#go-mod-file-ident). + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +func isAllowedPunctuation(ch rune) bool { + return ch == '_' || ch == '-' || ch == '~' || ch == '.' +} diff --git a/internal/lsp/source/completion/package_test.go b/gopls/internal/lsp/source/completion/package_test.go similarity index 95% rename from internal/lsp/source/completion/package_test.go rename to gopls/internal/lsp/source/completion/package_test.go index 6436984fdc9..614359fa5dc 100644 --- a/internal/lsp/source/completion/package_test.go +++ b/gopls/internal/lsp/source/completion/package_test.go @@ -4,7 +4,11 @@ package completion -import "testing" +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/source" +) func TestIsValidDirName(t *testing.T) { tests := []struct { @@ -51,7 +55,7 @@ func TestIsValidDirName(t *testing.T) { func TestConvertDirNameToPkgName(t *testing.T) { tests := []struct { dirName string - pkgName string + pkgName source.PackageName }{ {dirName: "a", pkgName: "a"}, {dirName: "abcdef", pkgName: "abcdef"}, diff --git a/internal/lsp/source/completion/postfix_snippets.go b/gopls/internal/lsp/source/completion/postfix_snippets.go similarity index 94% rename from internal/lsp/source/completion/postfix_snippets.go rename to gopls/internal/lsp/source/completion/postfix_snippets.go index d7f0d90da9e..0737ec2461f 100644 --- a/internal/lsp/source/completion/postfix_snippets.go +++ b/gopls/internal/lsp/source/completion/postfix_snippets.go @@ -16,11 +16,11 @@ import ( "sync" "text/template" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" ) // Postfix snippets are artificial methods that allow the user to @@ -149,6 +149,14 @@ for {{.VarName .KeyType "k"}}, {{.VarName .ElemType "v"}} := range {{.X}} { {{$keysVar}} = append({{$keysVar}}, {{$k}}) } {{end}}`, +}, { + label: "range", + details: "range over channel", + body: `{{if and (eq .Kind "chan") .StmtOK -}} +for {{.VarName .ElemType "e"}} := range {{.X}} { + {{.Cursor}} +} +{{- end}}`, }, { label: "var", details: "assign to variables", @@ -263,7 +271,8 @@ func (a *postfixTmplArgs) VarName(t types.Type, nonNamedDefault string) string { } var name string - if types.Implements(t, errorIntf) { + // go/types predicates are undefined on types.Typ[types.Invalid]. + if !types.Identical(t, types.Typ[types.Invalid]) && types.Implements(t, errorIntf) { name = "err" } else if _, isNamed := source.Deref(t).(*types.Named); !isNamed { name = nonNamedDefault @@ -314,7 +323,7 @@ func (c *completer) addPostfixSnippetCandidates(ctx context.Context, sel *ast.Se return } - tokFile := c.snapshot.FileSet().File(c.pos) + tokFile := c.pkg.FileSet().File(c.pos) // Only replace sel with a statement if sel is already a statement. var stmtOK bool @@ -371,7 +380,7 @@ func (c *completer) addPostfixSnippetCandidates(ctx context.Context, sel *ast.Se } tmplArgs := postfixTmplArgs{ - X: source.FormatNode(c.snapshot.FileSet(), sel.X), + X: source.FormatNode(c.pkg.FileSet(), sel.X), StmtOK: stmtOK, Obj: exprObj(c.pkg.GetTypesInfo(), sel.X), Type: selType, @@ -434,7 +443,9 @@ func (c *completer) importIfNeeded(pkgPath string, scope *types.Scope) (string, // Check if file already imports pkgPath. for _, s := range c.file.Imports { - if source.ImportPath(s) == pkgPath { + // TODO(adonovan): what if pkgPath has a vendor/ suffix? + // This may be the cause of go.dev/issue/56291. + if source.UnquoteImportPath(s) == source.ImportPath(pkgPath) { if s.Name == nil { return defaultName, nil, nil } diff --git a/internal/lsp/source/completion/printf.go b/gopls/internal/lsp/source/completion/printf.go similarity index 100% rename from internal/lsp/source/completion/printf.go rename to gopls/internal/lsp/source/completion/printf.go diff --git a/internal/lsp/source/completion/printf_test.go b/gopls/internal/lsp/source/completion/printf_test.go similarity index 100% rename from internal/lsp/source/completion/printf_test.go rename to gopls/internal/lsp/source/completion/printf_test.go diff --git a/internal/lsp/source/completion/snippet.go b/gopls/internal/lsp/source/completion/snippet.go similarity index 86% rename from internal/lsp/source/completion/snippet.go rename to gopls/internal/lsp/source/completion/snippet.go index 72c351f946e..f4ea767e9dc 100644 --- a/internal/lsp/source/completion/snippet.go +++ b/gopls/internal/lsp/source/completion/snippet.go @@ -7,10 +7,11 @@ package completion import ( "go/ast" - "golang.org/x/tools/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/snippet" ) -// structFieldSnippets calculates the snippet for struct literal field names. +// structFieldSnippet calculates the snippet for struct literal field names. func (c *completer) structFieldSnippet(cand candidate, detail string, snip *snippet.Builder) { if !c.wantStructFieldCompletions() { return @@ -39,16 +40,16 @@ func (c *completer) structFieldSnippet(cand candidate, detail string, snip *snip } }) - fset := c.snapshot.FileSet() + fset := c.pkg.FileSet() // If the cursor position is on a different line from the literal's opening brace, - // we are in a multiline literal. - if fset.Position(c.pos).Line != fset.Position(clInfo.cl.Lbrace).Line { + // we are in a multiline literal. Ignore line directives. + if safetoken.StartPosition(fset, c.pos).Line != safetoken.StartPosition(fset, clInfo.cl.Lbrace).Line { snip.WriteText(",") } } -// functionCallSnippets calculates the snippet for function calls. +// functionCallSnippet calculates the snippet for function calls. func (c *completer) functionCallSnippet(name string, tparams, params []string, snip *snippet.Builder) { // If there is no suffix then we need to reuse existing call parens // "()" if present. If there is an identifier suffix then we always diff --git a/internal/lsp/source/completion/statements.go b/gopls/internal/lsp/source/completion/statements.go similarity index 94% rename from internal/lsp/source/completion/statements.go rename to gopls/internal/lsp/source/completion/statements.go index d8e30a2d5b2..809cb808a60 100644 --- a/internal/lsp/source/completion/statements.go +++ b/gopls/internal/lsp/source/completion/statements.go @@ -10,9 +10,9 @@ import ( "go/token" "go/types" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/snippet" + "golang.org/x/tools/gopls/internal/lsp/source" ) // addStatementCandidates adds full statement completion candidates @@ -52,7 +52,7 @@ func (c *completer) addAssignAppend() { // needsLHS is true if we need to prepend the LHS slice name and // "=" to our candidate. needsLHS = false - fset = c.snapshot.FileSet() + fset = c.pkg.FileSet() ) switch n := c.path[1].(type) { @@ -213,7 +213,7 @@ func (c *completer) addErrCheck() { var ( // errVar is e.g. "err" in "foo, err := bar()". - errVar = source.FormatNode(c.snapshot.FileSet(), lastAssignee) + errVar = source.FormatNode(c.pkg.FileSet(), lastAssignee) // Whether we need to include the "if" keyword in our candidate. needsIf = true @@ -330,24 +330,31 @@ func getTestVar(enclosingFunc *funcInfo, pkg source.Package) string { return "" } + var testingPkg *types.Package + for _, p := range pkg.GetTypes().Imports() { + if p.Path() == "testing" { + testingPkg = p + break + } + } + if testingPkg == nil { + return "" + } + tbObj := testingPkg.Scope().Lookup("TB") + if tbObj == nil { + return "" + } + iface, ok := tbObj.Type().Underlying().(*types.Interface) + if !ok { + return "" + } + sig := enclosingFunc.sig for i := 0; i < sig.Params().Len(); i++ { param := sig.Params().At(i) if param.Name() == "_" { continue } - testingPkg, err := pkg.GetImport("testing") - if err != nil { - continue - } - tbObj := testingPkg.GetTypes().Scope().Lookup("TB") - if tbObj == nil { - continue - } - iface, ok := tbObj.Type().Underlying().(*types.Interface) - if !ok { - continue - } if !types.Implements(param.Type(), iface) { continue } diff --git a/gopls/internal/lsp/source/completion/util.go b/gopls/internal/lsp/source/completion/util.go new file mode 100644 index 00000000000..4b6ec09a092 --- /dev/null +++ b/gopls/internal/lsp/source/completion/util.go @@ -0,0 +1,344 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package completion + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/typeparams" +) + +// exprAtPos returns the index of the expression containing pos. +func exprAtPos(pos token.Pos, args []ast.Expr) int { + for i, expr := range args { + if expr.Pos() <= pos && pos <= expr.End() { + return i + } + } + return len(args) +} + +// eachField invokes fn for each field that can be selected from a +// value of type T. +func eachField(T types.Type, fn func(*types.Var)) { + // TODO(adonovan): this algorithm doesn't exclude ambiguous + // selections that match more than one field/method. + // types.NewSelectionSet should do that for us. + + // for termination on recursive types + var seen typeutil.Map + + var visit func(T types.Type) + visit = func(T types.Type) { + if T, ok := source.Deref(T).Underlying().(*types.Struct); ok { + if seen.At(T) != nil { + return + } + + for i := 0; i < T.NumFields(); i++ { + f := T.Field(i) + fn(f) + if f.Anonymous() { + seen.Set(T, true) + visit(f.Type()) + } + } + } + } + visit(T) +} + +// typeIsValid reports whether typ doesn't contain any Invalid types. +func typeIsValid(typ types.Type) bool { + // Check named types separately, because we don't want + // to call Underlying() on them to avoid problems with recursive types. + if _, ok := typ.(*types.Named); ok { + return true + } + + switch typ := typ.Underlying().(type) { + case *types.Basic: + return typ.Kind() != types.Invalid + case *types.Array: + return typeIsValid(typ.Elem()) + case *types.Slice: + return typeIsValid(typ.Elem()) + case *types.Pointer: + return typeIsValid(typ.Elem()) + case *types.Map: + return typeIsValid(typ.Key()) && typeIsValid(typ.Elem()) + case *types.Chan: + return typeIsValid(typ.Elem()) + case *types.Signature: + return typeIsValid(typ.Params()) && typeIsValid(typ.Results()) + case *types.Tuple: + for i := 0; i < typ.Len(); i++ { + if !typeIsValid(typ.At(i).Type()) { + return false + } + } + return true + case *types.Struct, *types.Interface: + // Don't bother checking structs, interfaces for validity. + return true + default: + return false + } +} + +// resolveInvalid traverses the node of the AST that defines the scope +// containing the declaration of obj, and attempts to find a user-friendly +// name for its invalid type. The resulting Object and its Type are fake. +func resolveInvalid(fset *token.FileSet, obj types.Object, node ast.Node, info *types.Info) types.Object { + var resultExpr ast.Expr + ast.Inspect(node, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.ValueSpec: + for _, name := range n.Names { + if info.Defs[name] == obj { + resultExpr = n.Type + } + } + return false + case *ast.Field: // This case handles parameters and results of a FuncDecl or FuncLit. + for _, name := range n.Names { + if info.Defs[name] == obj { + resultExpr = n.Type + } + } + return false + default: + return true + } + }) + // Construct a fake type for the object and return a fake object with this type. + typename := source.FormatNode(fset, resultExpr) + typ := types.NewNamed(types.NewTypeName(token.NoPos, obj.Pkg(), typename, nil), types.Typ[types.Invalid], nil) + return types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), typ) +} + +func isPointer(T types.Type) bool { + _, ok := T.(*types.Pointer) + return ok +} + +func isVar(obj types.Object) bool { + _, ok := obj.(*types.Var) + return ok +} + +func isTypeName(obj types.Object) bool { + _, ok := obj.(*types.TypeName) + return ok +} + +func isFunc(obj types.Object) bool { + _, ok := obj.(*types.Func) + return ok +} + +func isEmptyInterface(T types.Type) bool { + intf, _ := T.(*types.Interface) + return intf != nil && intf.NumMethods() == 0 && typeparams.IsMethodSet(intf) +} + +func isUntyped(T types.Type) bool { + if basic, ok := T.(*types.Basic); ok { + return basic.Info()&types.IsUntyped > 0 + } + return false +} + +func isPkgName(obj types.Object) bool { + _, ok := obj.(*types.PkgName) + return ok +} + +func isASTFile(n ast.Node) bool { + _, ok := n.(*ast.File) + return ok +} + +func deslice(T types.Type) types.Type { + if slice, ok := T.Underlying().(*types.Slice); ok { + return slice.Elem() + } + return nil +} + +// isSelector returns the enclosing *ast.SelectorExpr when pos is in the +// selector. +func enclosingSelector(path []ast.Node, pos token.Pos) *ast.SelectorExpr { + if len(path) == 0 { + return nil + } + + if sel, ok := path[0].(*ast.SelectorExpr); ok { + return sel + } + + if _, ok := path[0].(*ast.Ident); ok && len(path) > 1 { + if sel, ok := path[1].(*ast.SelectorExpr); ok && pos >= sel.Sel.Pos() { + return sel + } + } + + return nil +} + +// enclosingDeclLHS returns LHS idents from containing value spec or +// assign statement. +func enclosingDeclLHS(path []ast.Node) []*ast.Ident { + for _, n := range path { + switch n := n.(type) { + case *ast.ValueSpec: + return n.Names + case *ast.AssignStmt: + ids := make([]*ast.Ident, 0, len(n.Lhs)) + for _, e := range n.Lhs { + if id, ok := e.(*ast.Ident); ok { + ids = append(ids, id) + } + } + return ids + } + } + + return nil +} + +// exprObj returns the types.Object associated with the *ast.Ident or +// *ast.SelectorExpr e. +func exprObj(info *types.Info, e ast.Expr) types.Object { + var ident *ast.Ident + switch expr := e.(type) { + case *ast.Ident: + ident = expr + case *ast.SelectorExpr: + ident = expr.Sel + default: + return nil + } + + return info.ObjectOf(ident) +} + +// typeConversion returns the type being converted to if call is a type +// conversion expression. +func typeConversion(call *ast.CallExpr, info *types.Info) types.Type { + // Type conversion (e.g. "float64(foo)"). + if fun, _ := exprObj(info, call.Fun).(*types.TypeName); fun != nil { + return fun.Type() + } + + return nil +} + +// fieldsAccessible returns whether s has at least one field accessible by p. +func fieldsAccessible(s *types.Struct, p *types.Package) bool { + for i := 0; i < s.NumFields(); i++ { + f := s.Field(i) + if f.Exported() || f.Pkg() == p { + return true + } + } + return false +} + +// prevStmt returns the statement that precedes the statement containing pos. +// For example: +// +// foo := 1 +// bar(1 + 2<>) +// +// If "<>" is pos, prevStmt returns "foo := 1" +func prevStmt(pos token.Pos, path []ast.Node) ast.Stmt { + var blockLines []ast.Stmt + for i := 0; i < len(path) && blockLines == nil; i++ { + switch n := path[i].(type) { + case *ast.BlockStmt: + blockLines = n.List + case *ast.CommClause: + blockLines = n.Body + case *ast.CaseClause: + blockLines = n.Body + } + } + + for i := len(blockLines) - 1; i >= 0; i-- { + if blockLines[i].End() < pos { + return blockLines[i] + } + } + + return nil +} + +// formatZeroValue produces Go code representing the zero value of T. It +// returns the empty string if T is invalid. +func formatZeroValue(T types.Type, qf types.Qualifier) string { + switch u := T.Underlying().(type) { + case *types.Basic: + switch { + case u.Info()&types.IsNumeric > 0: + return "0" + case u.Info()&types.IsString > 0: + return `""` + case u.Info()&types.IsBoolean > 0: + return "false" + default: + return "" + } + case *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Slice, *types.Signature: + return "nil" + default: + return types.TypeString(T, qf) + "{}" + } +} + +// isBasicKind returns whether t is a basic type of kind k. +func isBasicKind(t types.Type, k types.BasicInfo) bool { + b, _ := t.Underlying().(*types.Basic) + return b != nil && b.Info()&k > 0 +} + +func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) { + start, end, err := safetoken.Offsets(c.tokFile, from, to) + if err != nil { + return nil, err // can't happen: from/to came from c + } + return source.ToProtocolEdits(c.mapper, []diff.Edit{{ + Start: start, + End: end, + New: newText, + }}) +} + +// assignableTo is like types.AssignableTo, but returns false if +// either type is invalid. +func assignableTo(x, to types.Type) bool { + if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] { + return false + } + + return types.AssignableTo(x, to) +} + +// convertibleTo is like types.ConvertibleTo, but returns false if +// either type is invalid. +func convertibleTo(x, to types.Type) bool { + if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] { + return false + } + + return types.ConvertibleTo(x, to) +} diff --git a/internal/lsp/source/completion/util_test.go b/gopls/internal/lsp/source/completion/util_test.go similarity index 100% rename from internal/lsp/source/completion/util_test.go rename to gopls/internal/lsp/source/completion/util_test.go diff --git a/gopls/internal/lsp/source/definition.go b/gopls/internal/lsp/source/definition.go new file mode 100644 index 00000000000..d9dd446b451 --- /dev/null +++ b/gopls/internal/lsp/source/definition.go @@ -0,0 +1,220 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" +) + +// Definition handles the textDocument/definition request for Go files. +func Definition(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Location, error) { + ctx, done := event.Start(ctx, "source.Definition") + defer done() + + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + pos, err := pgf.PositionPos(position) + if err != nil { + return nil, err + } + + // Handle the case where the cursor is in an import. + importLocations, err := importDefinition(ctx, snapshot, pkg, pgf, pos) + if err != nil { + return nil, err + } + if len(importLocations) > 0 { + return importLocations, nil + } + + // Handle the case where the cursor is in the package name. + // We use "<= End" to accept a query immediately after the package name. + if pgf.File != nil && pgf.File.Name.Pos() <= pos && pos <= pgf.File.Name.End() { + // If there's no package documentation, just use current file. + declFile := pgf + for _, pgf := range pkg.CompiledGoFiles() { + if pgf.File.Name != nil && pgf.File.Doc != nil { + declFile = pgf + break + } + } + loc, err := declFile.NodeLocation(declFile.File.Name) + if err != nil { + return nil, err + } + return []protocol.Location{loc}, nil + } + + // The general case: the cursor is on an identifier. + obj := referencedObject(pkg, pgf, pos) + if obj == nil { + return nil, nil + } + + // Handle built-in identifiers. + if obj.Parent() == types.Universe { + builtin, err := snapshot.BuiltinFile(ctx) + if err != nil { + return nil, err + } + // Note that builtinObj is an ast.Object, not types.Object :) + builtinObj := builtin.File.Scope.Lookup(obj.Name()) + if builtinObj == nil { + // Every builtin should have documentation. + return nil, bug.Errorf("internal error: no builtin object for %s", obj.Name()) + } + decl, ok := builtinObj.Decl.(ast.Node) + if !ok { + return nil, bug.Errorf("internal error: no declaration for %s", obj.Name()) + } + // The builtin package isn't in the dependency graph, so the usual + // utilities won't work here. + loc, err := builtin.PosLocation(decl.Pos(), decl.Pos()+token.Pos(len(obj.Name()))) + if err != nil { + return nil, err + } + return []protocol.Location{loc}, nil + } + + // Finally, map the object position. + var locs []protocol.Location + if !obj.Pos().IsValid() { + return nil, bug.Errorf("internal error: no position for %v", obj.Name()) + } + loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, obj.Pos(), adjustedObjEnd(obj)) + if err != nil { + return nil, err + } + locs = append(locs, loc) + return locs, nil +} + +// referencedObject returns the object referenced at the specified position, +// which must be within the file pgf, for the purposes of definition/hover/call +// hierarchy operations. It may return nil if no object was found at the given +// position. +// +// It differs from types.Info.ObjectOf in several ways: +// - It adjusts positions to do a better job of finding associated +// identifiers. For example it finds 'foo' from the cursor position _*foo +// - It handles type switch implicits, choosing the first one. +// - For embedded fields, it returns the type name object rather than the var +// (field) object. +// +// TODO(rfindley): this function exists to preserve the pre-existing behavior +// of source.Identifier. Eliminate this helper in favor of sharing +// functionality with objectsAt, after choosing suitable primitives. +func referencedObject(pkg Package, pgf *ParsedGoFile, pos token.Pos) types.Object { + path := pathEnclosingObjNode(pgf.File, pos) + if len(path) == 0 { + return nil + } + var obj types.Object + info := pkg.GetTypesInfo() + switch n := path[0].(type) { + case *ast.Ident: + // If leaf represents an implicit type switch object or the type + // switch "assign" variable, expand to all of the type switch's + // implicit objects. + if implicits, _ := typeSwitchImplicits(info, path); len(implicits) > 0 { + obj = implicits[0] + } else { + obj = info.ObjectOf(n) + } + // If the original position was an embedded field, we want to jump + // to the field's type definition, not the field's definition. + if v, ok := obj.(*types.Var); ok && v.Embedded() { + // types.Info.Uses contains the embedded field's *types.TypeName. + if typeName := info.Uses[n]; typeName != nil { + obj = typeName + } + } + } + return obj +} + +// importDefinition returns locations defining a package referenced by the +// import spec containing pos. +// +// If pos is not inside an import spec, it returns nil, nil. +func importDefinition(ctx context.Context, s Snapshot, pkg Package, pgf *ParsedGoFile, pos token.Pos) ([]protocol.Location, error) { + var imp *ast.ImportSpec + for _, spec := range pgf.File.Imports { + // We use "<= End" to accept a query immediately after an ImportSpec. + if spec.Path.Pos() <= pos && pos <= spec.Path.End() { + imp = spec + } + } + if imp == nil { + return nil, nil + } + + importPath := UnquoteImportPath(imp) + impID := pkg.Metadata().DepsByImpPath[importPath] + if impID == "" { + return nil, fmt.Errorf("failed to resolve import %q", importPath) + } + impMetadata := s.Metadata(impID) + if impMetadata == nil { + return nil, fmt.Errorf("missing information for package %q", impID) + } + + var locs []protocol.Location + for _, f := range impMetadata.CompiledGoFiles { + fh, err := s.GetFile(ctx, f) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + pgf, err := s.ParseGo(ctx, fh, ParseHeader) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + loc, err := pgf.NodeLocation(pgf.File) + if err != nil { + return nil, err + } + locs = append(locs, loc) + } + + if len(locs) == 0 { + return nil, fmt.Errorf("package %q has no readable files", impID) // incl. unsafe + } + + return locs, nil +} + +// TODO(rfindley): avoid the duplicate column mapping here, by associating a +// column mapper with each file handle. +func mapPosition(ctx context.Context, fset *token.FileSet, s FileSource, start, end token.Pos) (protocol.Location, error) { + file := fset.File(start) + uri := span.URIFromPath(file.Name()) + fh, err := s.GetFile(ctx, uri) + if err != nil { + return protocol.Location{}, err + } + content, err := fh.Read() + if err != nil { + return protocol.Location{}, err + } + m := protocol.NewMapper(fh.URI(), content) + return m.PosLocation(file, start, end) +} diff --git a/gopls/internal/lsp/source/diagnostics.go b/gopls/internal/lsp/source/diagnostics.go new file mode 100644 index 00000000000..13f2e2d6d2d --- /dev/null +++ b/gopls/internal/lsp/source/diagnostics.go @@ -0,0 +1,145 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" +) + +type SuggestedFix struct { + Title string + Edits map[span.URI][]protocol.TextEdit + Command *protocol.Command + ActionKind protocol.CodeActionKind +} + +type RelatedInformation struct { + // TODO(adonovan): replace these two fields by a protocol.Location. + URI span.URI + Range protocol.Range + Message string +} + +// Analyze reports go/analysis-framework diagnostics in the specified package. +func Analyze(ctx context.Context, snapshot Snapshot, pkgid PackageID, includeConvenience bool) (map[span.URI][]*Diagnostic, error) { + // Exit early if the context has been canceled. This also protects us + // from a race on Options, see golang/go#36699. + if ctx.Err() != nil { + return nil, ctx.Err() + } + + options := snapshot.View().Options() + categories := []map[string]*Analyzer{ + options.DefaultAnalyzers, + options.StaticcheckAnalyzers, + options.TypeErrorAnalyzers, + } + if includeConvenience { // e.g. for codeAction + categories = append(categories, options.ConvenienceAnalyzers) // e.g. fillstruct + } + + var analyzers []*Analyzer + for _, cat := range categories { + for _, a := range cat { + analyzers = append(analyzers, a) + } + } + + analysisDiagnostics, err := snapshot.Analyze(ctx, pkgid, analyzers) + if err != nil { + return nil, err + } + + // Report diagnostics and errors from root analyzers. + reports := make(map[span.URI][]*Diagnostic) + for _, diag := range analysisDiagnostics { + reports[diag.URI] = append(reports[diag.URI], diag) + } + return reports, nil +} + +// FileDiagnostics reports diagnostics in the specified file, +// as used by the "gopls check" command. +// +// TODO(adonovan): factor in common with (*Server).codeAction, which +// executes { PackageForFile; Analyze } too? +// +// TODO(adonovan): opt: this function is called in a loop from the +// "gopls/diagnoseFiles" nonstandard request handler. It would be more +// efficient to compute the set of packages and TypeCheck and +// Analyze them all at once. +func FileDiagnostics(ctx context.Context, snapshot Snapshot, uri span.URI) (FileHandle, []*Diagnostic, error) { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, nil, err + } + pkg, _, err := PackageForFile(ctx, snapshot, uri, TypecheckFull, NarrowestPackage) + if err != nil { + return nil, nil, err + } + pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, uri) + if err != nil { + return nil, nil, err + } + adiags, err := Analyze(ctx, snapshot, pkg.Metadata().ID, false) + if err != nil { + return nil, nil, err + } + var fileDiags []*Diagnostic // combine load/parse/type + analysis diagnostics + CombineDiagnostics(pkgDiags, adiags[uri], &fileDiags, &fileDiags) + return fh, fileDiags, nil +} + +// CombineDiagnostics combines and filters list/parse/type diagnostics from +// tdiags with adiags, and appends the two lists to *outT and *outA, +// respectively. +// +// Type-error analyzers produce diagnostics that are redundant +// with type checker diagnostics, but more detailed (e.g. fixes). +// Rather than report two diagnostics for the same problem, +// we combine them by augmenting the type-checker diagnostic +// and discarding the analyzer diagnostic. +// +// If an analysis diagnostic has the same range and message as +// a list/parse/type diagnostic, the suggested fix information +// (et al) of the latter is merged into a copy of the former. +// This handles the case where a type-error analyzer suggests +// a fix to a type error, and avoids duplication. +// +// The use of out-slices, though irregular, allows the caller to +// easily choose whether to keep the results separate or combined. +// +// The arguments are not modified. +func CombineDiagnostics(tdiags []*Diagnostic, adiags []*Diagnostic, outT, outA *[]*Diagnostic) { + + // Build index of (list+parse+)type errors. + type key struct { + Range protocol.Range + message string + } + index := make(map[key]int) // maps (Range,Message) to index in tdiags slice + for i, diag := range tdiags { + index[key{diag.Range, diag.Message}] = i + } + + // Filter out analysis diagnostics that match type errors, + // retaining their suggested fix (etc) fields. + for _, diag := range adiags { + if i, ok := index[key{diag.Range, diag.Message}]; ok { + copy := *tdiags[i] + copy.SuggestedFixes = diag.SuggestedFixes + copy.Tags = diag.Tags + tdiags[i] = © + continue + } + + *outA = append(*outA, diag) + } + + *outT = append(*outT, tdiags...) +} diff --git a/internal/lsp/source/extract.go b/gopls/internal/lsp/source/extract.go similarity index 85% rename from internal/lsp/source/extract.go rename to gopls/internal/lsp/source/extract.go index 90999d821a6..56e8a5e236a 100644 --- a/internal/lsp/source/extract.go +++ b/gopls/internal/lsp/source/extract.go @@ -12,20 +12,22 @@ import ( "go/parser" "go/token" "go/types" + "sort" "strings" - "unicode" + "text/scanner" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/safetoken" "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/bug" ) -func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { - expr, path, ok, err := CanExtractVariable(rng, file) +func extractVariable(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + tokFile := fset.File(file.Pos()) + expr, path, ok, err := CanExtractVariable(start, end, file) if !ok { - return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", fset.Position(rng.Start), err) + return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", safetoken.StartPosition(fset, start), err) } // Create new AST node for extracted code. @@ -60,11 +62,7 @@ func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast. if insertBeforeStmt == nil { return nil, fmt.Errorf("cannot find location to insert extraction") } - tok := fset.File(expr.Pos()) - if tok == nil { - return nil, fmt.Errorf("no file for pos %v", fset.Position(file.Pos())) - } - indent, err := calculateIndentation(src, tok, insertBeforeStmt) + indent, err := calculateIndentation(src, tokFile, insertBeforeStmt) if err != nil { return nil, err } @@ -90,8 +88,8 @@ func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast. NewText: []byte(assignment), }, { - Pos: rng.Start, - End: rng.End, + Pos: start, + End: end, NewText: []byte(lhs), }, }, @@ -100,11 +98,11 @@ func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast. // CanExtractVariable reports whether the code in the given range can be // extracted to a variable. -func CanExtractVariable(rng span.Range, file *ast.File) (ast.Expr, []ast.Node, bool, error) { - if rng.Start == rng.End { +func CanExtractVariable(start, end token.Pos, file *ast.File) (ast.Expr, []ast.Node, bool, error) { + if start == end { return nil, nil, false, fmt.Errorf("start and end are equal") } - path, _ := astutil.PathEnclosingInterval(file, rng.Start, rng.End) + path, _ := astutil.PathEnclosingInterval(file, start, end) if len(path) == 0 { return nil, nil, false, fmt.Errorf("no path enclosing interval") } @@ -114,7 +112,7 @@ func CanExtractVariable(rng span.Range, file *ast.File) (ast.Expr, []ast.Node, b } } node := path[0] - if rng.Start != node.Pos() || rng.End != node.End() { + if start != node.Pos() || end != node.End() { return nil, nil, false, fmt.Errorf("range does not map to an AST node") } expr, ok := node.(ast.Expr) @@ -135,11 +133,7 @@ func CanExtractVariable(rng span.Range, file *ast.File) (ast.Expr, []ast.Node, b // line of code on which the insertion occurs. func calculateIndentation(content []byte, tok *token.File, insertBeforeStmt ast.Node) (string, error) { line := tok.Line(insertBeforeStmt.Pos()) - lineOffset, err := safetoken.Offset(tok, tok.LineStart(line)) - if err != nil { - return "", err - } - stmtOffset, err := safetoken.Offset(tok, insertBeforeStmt.Pos()) + lineOffset, stmtOffset, err := safetoken.Offsets(tok, tok.LineStart(line), insertBeforeStmt.Pos()) if err != nil { return "", err } @@ -195,13 +189,13 @@ type returnVariable struct { } // extractMethod refactors the selected block of code into a new method. -func extractMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { - return extractFunctionMethod(fset, rng, src, file, pkg, info, true) +func extractMethod(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + return extractFunctionMethod(fset, start, end, src, file, pkg, info, true) } // extractFunction refactors the selected block of code into a new function. -func extractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { - return extractFunctionMethod(fset, rng, src, file, pkg, info, false) +func extractFunction(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + return extractFunctionMethod(fset, start, end, src, file, pkg, info, false) } // extractFunctionMethod refactors the selected block of code into a new function/method. @@ -212,17 +206,22 @@ func extractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast. // and return values of the extracted function/method. Lastly, we construct the call // of the function/method and insert this call as well as the extracted function/method into // their proper locations. -func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info, isMethod bool) (*analysis.SuggestedFix, error) { +func extractFunctionMethod(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info, isMethod bool) (*analysis.SuggestedFix, error) { errorPrefix := "extractFunction" if isMethod { errorPrefix = "extractMethod" } - p, ok, methodOk, err := CanExtractFunction(fset, rng, src, file) + + tok := fset.File(file.Pos()) + if tok == nil { + return nil, bug.Errorf("no file for position") + } + p, ok, methodOk, err := CanExtractFunction(tok, start, end, src, file) if (!ok && !isMethod) || (!methodOk && isMethod) { return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix, - fset.Position(rng.Start), err) + safetoken.StartPosition(fset, start), err) } - tok, path, rng, outer, start := p.tok, p.path, p.rng, p.outer, p.start + tok, path, start, end, outer, node := p.tok, p.path, p.start, p.end, p.outer, p.node fileScope := info.Scopes[file] if fileScope == nil { return nil, fmt.Errorf("%s: file scope is empty", errorPrefix) @@ -237,13 +236,13 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // non-nested return statements are guaranteed to execute. var retStmts []*ast.ReturnStmt var hasNonNestedReturn bool - startParent := findParent(outer, start) + startParent := findParent(outer, node) ast.Inspect(outer, func(n ast.Node) bool { if n == nil { return false } - if n.Pos() < rng.Start || n.End() > rng.End { - return n.Pos() <= rng.End + if n.Pos() < start || n.End() > end { + return n.Pos() <= end } ret, ok := n.(*ast.ReturnStmt) if !ok { @@ -261,7 +260,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // we must determine the signature of the extracted function. We will then replace // the block with an assignment statement that calls the extracted function with // the appropriate parameters and return values. - variables, err := collectFreeVars(info, file, fileScope, pkgScope, rng, path[0]) + variables, err := collectFreeVars(info, file, fileScope, pkgScope, start, end, path[0]) if err != nil { return nil, err } @@ -344,7 +343,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file if v.obj.Parent() == nil { return nil, fmt.Errorf("parent nil") } - isUsed, firstUseAfter := objUsed(info, span.NewRange(fset, rng.End, v.obj.Parent().End()), v.obj) + isUsed, firstUseAfter := objUsed(info, end, v.obj.Parent().End(), v.obj) if v.assigned && isUsed && !varOverridden(info, firstUseAfter, v.obj, v.free, outer) { returnTypes = append(returnTypes, &ast.Field{Type: typ}) returns = append(returns, identifier) @@ -401,11 +400,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // We put the selection in a constructed file. We can then traverse and edit // the extracted selection without modifying the original AST. - startOffset, err := safetoken.Offset(tok, rng.Start) - if err != nil { - return nil, err - } - endOffset, err := safetoken.Offset(tok, rng.End) + startOffset, endOffset, err := safetoken.Offsets(tok, start, end) if err != nil { return nil, err } @@ -504,7 +499,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // statements in the selection. Update the type signature of the extracted // function and construct the if statement that will be inserted in the enclosing // function. - retVars, ifReturn, err = generateReturnInfo(enclosing, pkg, path, file, info, fset, rng.Start, hasNonNestedReturn) + retVars, ifReturn, err = generateReturnInfo(enclosing, pkg, path, file, info, fset, start, hasNonNestedReturn) if err != nil { return nil, err } @@ -539,7 +534,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file funName = name } else { name = "newFunction" - funName, _ = generateAvailableIdentifier(rng.Start, file, path, info, name, 0) + funName, _ = generateAvailableIdentifier(start, file, path, info, name, 0) } extractedFunCall := generateFuncCall(hasNonNestedReturn, hasReturnValues, params, append(returns, getNames(retVars)...), funName, sym, receiverName) @@ -592,7 +587,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // Find all the comments within the range and print them to be put somewhere. // TODO(suzmue): print these in the extracted function at the correct place. for _, cg := range file.Comments { - if cg.Pos().IsValid() && cg.Pos() < rng.End && cg.Pos() >= rng.Start { + if cg.Pos().IsValid() && cg.Pos() < end && cg.Pos() >= start { for _, c := range cg.List { fmt.Fprintln(&commentBuf, c.Text) } @@ -601,17 +596,13 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file // We're going to replace the whole enclosing function, // so preserve the text before and after the selected block. - outerStart, err := safetoken.Offset(tok, outer.Pos()) - if err != nil { - return nil, err - } - outerEnd, err := safetoken.Offset(tok, outer.End()) + outerStart, outerEnd, err := safetoken.Offsets(tok, outer.Pos(), outer.End()) if err != nil { return nil, err } before := src[outerStart:startOffset] after := src[endOffset:outerEnd] - indent, err := calculateIndentation(src, tok, start) + indent, err := calculateIndentation(src, tok, node) if err != nil { return nil, err } @@ -647,47 +638,83 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file }, nil } -// adjustRangeForWhitespace adjusts the given range to exclude unnecessary leading or -// trailing whitespace characters from selection. In the following example, each line -// of the if statement is indented once. There are also two extra spaces after the -// closing bracket before the line break. +// adjustRangeForCommentsAndWhiteSpace adjusts the given range to exclude unnecessary leading or +// trailing whitespace characters from selection as well as leading or trailing comments. +// In the following example, each line of the if statement is indented once. There are also two +// extra spaces after the sclosing bracket before the line break and a comment. // // \tif (true) { // \t _ = 1 -// \t} \n +// \t} // hello \n // // By default, a valid range begins at 'if' and ends at the first whitespace character // after the '}'. But, users are likely to highlight full lines rather than adjusting // their cursors for whitespace. To support this use case, we must manually adjust the // ranges to match the correct AST node. In this particular example, we would adjust -// rng.Start forward by one byte, and rng.End backwards by two bytes. -func adjustRangeForWhitespace(rng span.Range, tok *token.File, content []byte) (span.Range, error) { - offset, err := safetoken.Offset(tok, rng.Start) - if err != nil { - return span.Range{}, err - } - for offset < len(content) { - if !unicode.IsSpace(rune(content[offset])) { - break +// rng.Start forward to the start of 'if' and rng.End backward to after '}'. +func adjustRangeForCommentsAndWhiteSpace(tok *token.File, start, end token.Pos, content []byte, file *ast.File) (token.Pos, token.Pos, error) { + // Adjust the end of the range to after leading whitespace and comments. + prevStart := token.NoPos + startComment := sort.Search(len(file.Comments), func(i int) bool { + // Find the index for the first comment that ends after range start. + return file.Comments[i].End() > start + }) + for prevStart != start { + prevStart = start + // If start is within a comment, move start to the end + // of the comment group. + if startComment < len(file.Comments) && file.Comments[startComment].Pos() <= start && start < file.Comments[startComment].End() { + start = file.Comments[startComment].End() + startComment++ + } + // Move forwards to find a non-whitespace character. + offset, err := safetoken.Offset(tok, start) + if err != nil { + return 0, 0, err + } + for offset < len(content) && isGoWhiteSpace(content[offset]) { + offset++ } - // Move forwards one byte to find a non-whitespace character. - offset += 1 + start = tok.Pos(offset) } - rng.Start = tok.Pos(offset) - // Move backwards to find a non-whitespace character. - offset, err = safetoken.Offset(tok, rng.End) - if err != nil { - return span.Range{}, err - } - for o := offset - 1; 0 <= o && o < len(content); o-- { - if !unicode.IsSpace(rune(content[o])) { - break + // Adjust the end of the range to before trailing whitespace and comments. + prevEnd := token.NoPos + endComment := sort.Search(len(file.Comments), func(i int) bool { + // Find the index for the first comment that ends after the range end. + return file.Comments[i].End() >= end + }) + // Search will return n if not found, so we need to adjust if there are no + // comments that would match. + if endComment == len(file.Comments) { + endComment = -1 + } + for prevEnd != end { + prevEnd = end + // If end is within a comment, move end to the start + // of the comment group. + if endComment >= 0 && file.Comments[endComment].Pos() < end && end <= file.Comments[endComment].End() { + end = file.Comments[endComment].Pos() + endComment-- + } + // Move backwards to find a non-whitespace character. + offset, err := safetoken.Offset(tok, end) + if err != nil { + return 0, 0, err + } + for offset > 0 && isGoWhiteSpace(content[offset-1]) { + offset-- } - offset = o + end = tok.Pos(offset) } - rng.End = tok.Pos(offset) - return rng, nil + + return start, end, nil +} + +// isGoWhiteSpace returns true if b is a considered white space in +// Go as defined by scanner.GoWhitespace. +func isGoWhiteSpace(b byte) bool { + return uint64(scanner.GoWhitespace)&(1< not free } return obj, true @@ -775,7 +802,7 @@ func collectFreeVars(info *types.Info, file *ast.File, fileScope, pkgScope *type if n == nil { return false } - if rng.Start <= n.Pos() && n.End() <= rng.End { + if start <= n.Pos() && n.End() <= end { var obj types.Object var isFree, prune bool switch n := n.(type) { @@ -801,7 +828,7 @@ func collectFreeVars(info *types.Info, file *ast.File, fileScope, pkgScope *type } } } - return n.Pos() <= rng.End + return n.Pos() <= end }) // Find identifiers that are initialized or whose values are altered at some @@ -818,8 +845,8 @@ func collectFreeVars(info *types.Info, file *ast.File, fileScope, pkgScope *type if n == nil { return false } - if n.Pos() < rng.Start || n.End() > rng.End { - return n.Pos() <= rng.End + if n.Pos() < start || n.End() > end { + return n.Pos() <= end } switch n := n.(type) { case *ast.AssignStmt: @@ -932,29 +959,25 @@ func referencesObj(info *types.Info, expr ast.Expr, obj types.Object) bool { } type fnExtractParams struct { - tok *token.File - path []ast.Node - rng span.Range - outer *ast.FuncDecl - start ast.Node + tok *token.File + start, end token.Pos + path []ast.Node + outer *ast.FuncDecl + node ast.Node } // CanExtractFunction reports whether the code in the given range can be // extracted to a function. -func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) { - if rng.Start == rng.End { +func CanExtractFunction(tok *token.File, start, end token.Pos, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) { + if start == end { return nil, false, false, fmt.Errorf("start and end are equal") } - tok := fset.File(file.Pos()) - if tok == nil { - return nil, false, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos())) - } var err error - rng, err = adjustRangeForWhitespace(rng, tok, src) + start, end, err = adjustRangeForCommentsAndWhiteSpace(tok, start, end, src, file) if err != nil { return nil, false, false, err } - path, _ := astutil.PathEnclosingInterval(file, rng.Start, rng.End) + path, _ := astutil.PathEnclosingInterval(file, start, end) if len(path) == 0 { return nil, false, false, fmt.Errorf("no path enclosing interval") } @@ -978,52 +1001,53 @@ func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *a } // Find the nodes at the start and end of the selection. - var start, end ast.Node + var startNode, endNode ast.Node ast.Inspect(outer, func(n ast.Node) bool { if n == nil { return false } // Do not override 'start' with a node that begins at the same location // but is nested further from 'outer'. - if start == nil && n.Pos() == rng.Start && n.End() <= rng.End { - start = n + if startNode == nil && n.Pos() == start && n.End() <= end { + startNode = n } - if end == nil && n.End() == rng.End && n.Pos() >= rng.Start { - end = n + if endNode == nil && n.End() == end && n.Pos() >= start { + endNode = n } - return n.Pos() <= rng.End + return n.Pos() <= end }) - if start == nil || end == nil { + if startNode == nil || endNode == nil { return nil, false, false, fmt.Errorf("range does not map to AST nodes") } // If the region is a blockStmt, use the first and last nodes in the block // statement. // { ... } => { ... } - if blockStmt, ok := start.(*ast.BlockStmt); ok { + if blockStmt, ok := startNode.(*ast.BlockStmt); ok { if len(blockStmt.List) == 0 { return nil, false, false, fmt.Errorf("range maps to empty block statement") } - start, end = blockStmt.List[0], blockStmt.List[len(blockStmt.List)-1] - rng.Start, rng.End = start.Pos(), end.End() + startNode, endNode = blockStmt.List[0], blockStmt.List[len(blockStmt.List)-1] + start, end = startNode.Pos(), endNode.End() } return &fnExtractParams{ tok: tok, + start: start, + end: end, path: path, - rng: rng, outer: outer, - start: start, + node: startNode, }, true, outer.Recv != nil, nil } // objUsed checks if the object is used within the range. It returns the first // occurrence of the object in the range, if it exists. -func objUsed(info *types.Info, rng span.Range, obj types.Object) (bool, *ast.Ident) { +func objUsed(info *types.Info, start, end token.Pos, obj types.Object) (bool, *ast.Ident) { var firstUse *ast.Ident for id, objUse := range info.Uses { if obj != objUse { continue } - if id.Pos() < rng.Start || id.End() > rng.End { + if id.Pos() < start || id.End() > end { continue } if firstUse == nil || id.Pos() < firstUse.Pos() { @@ -1083,7 +1107,7 @@ func varOverridden(info *types.Info, firstUse *ast.Ident, obj types.Object, isFr return isOverriden } -// parseExtraction generates an AST file from the given text. We then return the portion of the +// parseBlockStmt generates an AST file from the given text. We then return the portion of the // file that represents the text. func parseBlockStmt(fset *token.FileSet, src []byte) (*ast.BlockStmt, error) { text := "package main\nfunc _() { " + string(src) + " }" diff --git a/gopls/internal/lsp/source/fix.go b/gopls/internal/lsp/source/fix.go new file mode 100644 index 00000000000..d5eca7658ba --- /dev/null +++ b/gopls/internal/lsp/source/fix.go @@ -0,0 +1,135 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct" + "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" +) + +type ( + // SuggestedFixFunc is a function used to get the suggested fixes for a given + // gopls command, some of which are provided by go/analysis.Analyzers. Some of + // the analyzers in internal/lsp/analysis are not efficient enough to include + // suggested fixes with their diagnostics, so we have to compute them + // separately. Such analyzers should provide a function with a signature of + // SuggestedFixFunc. + SuggestedFixFunc func(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error) + singleFileFixFunc func(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) +) + +const ( + FillStruct = "fill_struct" + StubMethods = "stub_methods" + UndeclaredName = "undeclared_name" + ExtractVariable = "extract_variable" + ExtractFunction = "extract_function" + ExtractMethod = "extract_method" +) + +// suggestedFixes maps a suggested fix command id to its handler. +var suggestedFixes = map[string]SuggestedFixFunc{ + FillStruct: singleFile(fillstruct.SuggestedFix), + UndeclaredName: singleFile(undeclaredname.SuggestedFix), + ExtractVariable: singleFile(extractVariable), + ExtractFunction: singleFile(extractFunction), + ExtractMethod: singleFile(extractMethod), + StubMethods: stubSuggestedFixFunc, +} + +// singleFile calls analyzers that expect inputs for a single file +func singleFile(sf singleFileFixFunc) SuggestedFixFunc { + return func(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error) { + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, nil, err + } + start, end, err := pgf.RangePos(pRng) + if err != nil { + return nil, nil, err + } + fix, err := sf(pkg.FileSet(), start, end, pgf.Src, pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()) + return pkg.FileSet(), fix, err + } +} + +func SuggestedFixFromCommand(cmd protocol.Command, kind protocol.CodeActionKind) SuggestedFix { + return SuggestedFix{ + Title: cmd.Title, + Command: &cmd, + ActionKind: kind, + } +} + +// ApplyFix applies the command's suggested fix to the given file and +// range, returning the resulting edits. +func ApplyFix(ctx context.Context, fix string, snapshot Snapshot, fh FileHandle, pRng protocol.Range) ([]protocol.TextDocumentEdit, error) { + handler, ok := suggestedFixes[fix] + if !ok { + return nil, fmt.Errorf("no suggested fix function for %s", fix) + } + fset, suggestion, err := handler(ctx, snapshot, fh, pRng) + if err != nil { + return nil, err + } + if suggestion == nil { + return nil, nil + } + editsPerFile := map[span.URI]*protocol.TextDocumentEdit{} + for _, edit := range suggestion.TextEdits { + tokFile := fset.File(edit.Pos) + if tokFile == nil { + return nil, bug.Errorf("no file for edit position") + } + end := edit.End + if !end.IsValid() { + end = edit.Pos + } + fh, err := snapshot.GetFile(ctx, span.URIFromPath(tokFile.Name())) + if err != nil { + return nil, err + } + te, ok := editsPerFile[fh.URI()] + if !ok { + te = &protocol.TextDocumentEdit{ + TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ + Version: fh.Version(), + TextDocumentIdentifier: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(fh.URI()), + }, + }, + } + editsPerFile[fh.URI()] = te + } + content, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewMapper(fh.URI(), content) + rng, err := m.PosRange(tokFile, edit.Pos, end) + if err != nil { + return nil, err + } + te.Edits = append(te.Edits, protocol.TextEdit{ + Range: rng, + NewText: string(edit.NewText), + }) + } + var edits []protocol.TextDocumentEdit + for _, edit := range editsPerFile { + edits = append(edits, *edit) + } + return edits, nil +} diff --git a/gopls/internal/lsp/source/folding_range.go b/gopls/internal/lsp/source/folding_range.go new file mode 100644 index 00000000000..41f7b5bf5e3 --- /dev/null +++ b/gopls/internal/lsp/source/folding_range.go @@ -0,0 +1,193 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "go/ast" + "go/token" + "sort" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/bug" +) + +// FoldingRangeInfo holds range and kind info of folding for an ast.Node +type FoldingRangeInfo struct { + MappedRange protocol.MappedRange + Kind protocol.FoldingRangeKind +} + +// FoldingRange gets all of the folding range for f. +func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFoldingOnly bool) (ranges []*FoldingRangeInfo, err error) { + // TODO(suzmue): consider limiting the number of folding ranges returned, and + // implement a way to prioritize folding ranges in that case. + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + + // With parse errors, we wouldn't be able to produce accurate folding info. + // LSP protocol (3.16) currently does not have a way to handle this case + // (https://github.com/microsoft/language-server-protocol/issues/1200). + // We cannot return an error either because we are afraid some editors + // may not handle errors nicely. As a workaround, we now return an empty + // result and let the client handle this case by double check the file + // contents (i.e. if the file is not empty and the folding range result + // is empty, raise an internal error). + if pgf.ParseErr != nil { + return nil, nil + } + + // Get folding ranges for comments separately as they are not walked by ast.Inspect. + ranges = append(ranges, commentsFoldingRange(pgf)...) + + visit := func(n ast.Node) bool { + rng := foldingRangeFunc(pgf, n, lineFoldingOnly) + if rng != nil { + ranges = append(ranges, rng) + } + return true + } + // Walk the ast and collect folding ranges. + ast.Inspect(pgf.File, visit) + + sort.Slice(ranges, func(i, j int) bool { + irng := ranges[i].MappedRange.Range() + jrng := ranges[j].MappedRange.Range() + return protocol.CompareRange(irng, jrng) < 0 + }) + + return ranges, nil +} + +// foldingRangeFunc calculates the line folding range for ast.Node n +func foldingRangeFunc(pgf *ParsedGoFile, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo { + // TODO(suzmue): include trailing empty lines before the closing + // parenthesis/brace. + var kind protocol.FoldingRangeKind + var start, end token.Pos + switch n := n.(type) { + case *ast.BlockStmt: + // Fold between positions of or lines between "{" and "}". + var startList, endList token.Pos + if num := len(n.List); num != 0 { + startList, endList = n.List[0].Pos(), n.List[num-1].End() + } + start, end = validLineFoldingRange(pgf.Tok, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly) + case *ast.CaseClause: + // Fold from position of ":" to end. + start, end = n.Colon+1, n.End() + case *ast.CommClause: + // Fold from position of ":" to end. + start, end = n.Colon+1, n.End() + case *ast.CallExpr: + // Fold from position of "(" to position of ")". + start, end = n.Lparen+1, n.Rparen + case *ast.FieldList: + // Fold between positions of or lines between opening parenthesis/brace and closing parenthesis/brace. + var startList, endList token.Pos + if num := len(n.List); num != 0 { + startList, endList = n.List[0].Pos(), n.List[num-1].End() + } + start, end = validLineFoldingRange(pgf.Tok, n.Opening, n.Closing, startList, endList, lineFoldingOnly) + case *ast.GenDecl: + // If this is an import declaration, set the kind to be protocol.Imports. + if n.Tok == token.IMPORT { + kind = protocol.Imports + } + // Fold between positions of or lines between "(" and ")". + var startSpecs, endSpecs token.Pos + if num := len(n.Specs); num != 0 { + startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End() + } + start, end = validLineFoldingRange(pgf.Tok, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly) + case *ast.BasicLit: + // Fold raw string literals from position of "`" to position of "`". + if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' { + start, end = n.Pos(), n.End() + } + case *ast.CompositeLit: + // Fold between positions of or lines between "{" and "}". + var startElts, endElts token.Pos + if num := len(n.Elts); num != 0 { + startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End() + } + start, end = validLineFoldingRange(pgf.Tok, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly) + } + + // Check that folding positions are valid. + if !start.IsValid() || !end.IsValid() { + return nil + } + // in line folding mode, do not fold if the start and end lines are the same. + if lineFoldingOnly && pgf.Tok.Line(start) == pgf.Tok.Line(end) { + return nil + } + mrng, err := pgf.PosMappedRange(start, end) + if err != nil { + bug.Errorf("%w", err) // can't happen + } + return &FoldingRangeInfo{ + MappedRange: mrng, + Kind: kind, + } +} + +// validLineFoldingRange returns start and end token.Pos for folding range if the range is valid. +// returns token.NoPos otherwise, which fails token.IsValid check +func validLineFoldingRange(tokFile *token.File, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) { + if lineFoldingOnly { + if !open.IsValid() || !close.IsValid() { + return token.NoPos, token.NoPos + } + + // Don't want to fold if the start/end is on the same line as the open/close + // as an example, the example below should *not* fold: + // var x = [2]string{"d", + // "e" } + if tokFile.Line(open) == tokFile.Line(start) || + tokFile.Line(close) == tokFile.Line(end) { + return token.NoPos, token.NoPos + } + + return open + 1, end + } + return open + 1, close +} + +// commentsFoldingRange returns the folding ranges for all comment blocks in file. +// The folding range starts at the end of the first line of the comment block, and ends at the end of the +// comment block and has kind protocol.Comment. +func commentsFoldingRange(pgf *ParsedGoFile) (comments []*FoldingRangeInfo) { + tokFile := pgf.Tok + for _, commentGrp := range pgf.File.Comments { + startGrpLine, endGrpLine := tokFile.Line(commentGrp.Pos()), tokFile.Line(commentGrp.End()) + if startGrpLine == endGrpLine { + // Don't fold single line comments. + continue + } + + firstComment := commentGrp.List[0] + startPos, endLinePos := firstComment.Pos(), firstComment.End() + startCmmntLine, endCmmntLine := tokFile.Line(startPos), tokFile.Line(endLinePos) + if startCmmntLine != endCmmntLine { + // If the first comment spans multiple lines, then we want to have the + // folding range start at the end of the first line. + endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0])) + } + mrng, err := pgf.PosMappedRange(endLinePos, commentGrp.End()) + if err != nil { + bug.Errorf("%w", err) // can't happen + } + comments = append(comments, &FoldingRangeInfo{ + // Fold from the end of the first line comment to the end of the comment block. + MappedRange: mrng, + Kind: protocol.Comment, + }) + } + return comments +} diff --git a/gopls/internal/lsp/source/format.go b/gopls/internal/lsp/source/format.go new file mode 100644 index 00000000000..932671686ba --- /dev/null +++ b/gopls/internal/lsp/source/format.go @@ -0,0 +1,391 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package source provides core features for use by Go editors and tools. +package source + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "strings" + "text/scanner" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/imports" +) + +// Format formats a file with a given range. +func Format(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.TextEdit, error) { + ctx, done := event.Start(ctx, "source.Format") + defer done() + + // Generated files shouldn't be edited. So, don't format them + if IsGenerated(ctx, snapshot, fh.URI()) { + return nil, fmt.Errorf("can't format %q: file is generated", fh.URI().Filename()) + } + + fset := snapshot.FileSet() + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, err + } + // Even if this file has parse errors, it might still be possible to format it. + // Using format.Node on an AST with errors may result in code being modified. + // Attempt to format the source of this file instead. + if pgf.ParseErr != nil { + formatted, err := formatSource(ctx, fh) + if err != nil { + return nil, err + } + return computeTextEdits(ctx, snapshot, pgf, string(formatted)) + } + + // format.Node changes slightly from one release to another, so the version + // of Go used to build the LSP server will determine how it formats code. + // This should be acceptable for all users, who likely be prompted to rebuild + // the LSP server on each Go release. + buf := &bytes.Buffer{} + if err := format.Node(buf, fset, pgf.File); err != nil { + return nil, err + } + formatted := buf.String() + + // Apply additional formatting, if any is supported. Currently, the only + // supported additional formatter is gofumpt. + if format := snapshot.View().Options().GofumptFormat; snapshot.View().Options().Gofumpt && format != nil { + // gofumpt can customize formatting based on language version and module + // path, if available. + // + // Try to derive this information, but fall-back on the default behavior. + // + // TODO: under which circumstances can we fail to find module information? + // Can this, for example, result in inconsistent formatting across saves, + // due to pending calls to packages.Load? + var langVersion, modulePath string + mds, err := snapshot.MetadataForFile(ctx, fh.URI()) + if err == nil && len(mds) > 0 { + if mi := mds[0].Module; mi != nil { + langVersion = mi.GoVersion + modulePath = mi.Path + } + } + b, err := format(ctx, langVersion, modulePath, buf.Bytes()) + if err != nil { + return nil, err + } + formatted = string(b) + } + return computeTextEdits(ctx, snapshot, pgf, formatted) +} + +func formatSource(ctx context.Context, fh FileHandle) ([]byte, error) { + _, done := event.Start(ctx, "source.formatSource") + defer done() + + data, err := fh.Read() + if err != nil { + return nil, err + } + return format.Source(data) +} + +type ImportFix struct { + Fix *imports.ImportFix + Edits []protocol.TextEdit +} + +// AllImportsFixes formats f for each possible fix to the imports. +// In addition to returning the result of applying all edits, +// it returns a list of fixes that could be applied to the file, with the +// corresponding TextEdits that would be needed to apply that fix. +func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { + ctx, done := event.Start(ctx, "source.AllImportsFixes") + defer done() + + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, nil, err + } + if err := snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { + allFixEdits, editsPerFix, err = computeImportEdits(snapshot, pgf, opts) + return err + }); err != nil { + return nil, nil, fmt.Errorf("AllImportsFixes: %v", err) + } + return allFixEdits, editsPerFix, nil +} + +// computeImportEdits computes a set of edits that perform one or all of the +// necessary import fixes. +func computeImportEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { + filename := pgf.URI.Filename() + + // Build up basic information about the original file. + allFixes, err := imports.FixImports(filename, pgf.Src, options) + if err != nil { + return nil, nil, err + } + + allFixEdits, err = computeFixEdits(snapshot, pgf, options, allFixes) + if err != nil { + return nil, nil, err + } + + // Apply all of the import fixes to the file. + // Add the edits for each fix to the result. + for _, fix := range allFixes { + edits, err := computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix}) + if err != nil { + return nil, nil, err + } + editsPerFix = append(editsPerFix, &ImportFix{ + Fix: fix, + Edits: edits, + }) + } + return allFixEdits, editsPerFix, nil +} + +// ComputeOneImportFixEdits returns text edits for a single import fix. +func ComputeOneImportFixEdits(snapshot Snapshot, pgf *ParsedGoFile, fix *imports.ImportFix) ([]protocol.TextEdit, error) { + options := &imports.Options{ + LocalPrefix: snapshot.View().Options().Local, + // Defaults. + AllErrors: true, + Comments: true, + Fragment: true, + FormatOnly: false, + TabIndent: true, + TabWidth: 8, + } + return computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix}) +} + +func computeFixEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options, fixes []*imports.ImportFix) ([]protocol.TextEdit, error) { + // trim the original data to match fixedData + left, err := importPrefix(pgf.Src) + if err != nil { + return nil, err + } + extra := !strings.Contains(left, "\n") // one line may have more than imports + if extra { + left = string(pgf.Src) + } + if len(left) > 0 && left[len(left)-1] != '\n' { + left += "\n" + } + // Apply the fixes and re-parse the file so that we can locate the + // new imports. + flags := parser.ImportsOnly + if extra { + // used all of origData above, use all of it here too + flags = 0 + } + fixedData, err := imports.ApplyFixes(fixes, "", pgf.Src, options, flags) + if err != nil { + return nil, err + } + if fixedData == nil || fixedData[len(fixedData)-1] != '\n' { + fixedData = append(fixedData, '\n') // ApplyFixes may miss the newline, go figure. + } + edits := snapshot.View().Options().ComputeEdits(left, string(fixedData)) + return protocolEditsFromSource([]byte(left), edits) +} + +// importPrefix returns the prefix of the given file content through the final +// import statement. If there are no imports, the prefix is the package +// statement and any comment groups below it. +func importPrefix(src []byte) (string, error) { + fset := token.NewFileSet() + // do as little parsing as possible + f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly|parser.ParseComments) + if err != nil { // This can happen if 'package' is misspelled + return "", fmt.Errorf("importPrefix: failed to parse: %s", err) + } + tok := fset.File(f.Pos()) + var importEnd int + for _, d := range f.Decls { + if x, ok := d.(*ast.GenDecl); ok && x.Tok == token.IMPORT { + if e, err := safetoken.Offset(tok, d.End()); err != nil { + return "", fmt.Errorf("importPrefix: %s", err) + } else if e > importEnd { + importEnd = e + } + } + } + + maybeAdjustToLineEnd := func(pos token.Pos, isCommentNode bool) int { + offset, err := safetoken.Offset(tok, pos) + if err != nil { + return -1 + } + + // Don't go past the end of the file. + if offset > len(src) { + offset = len(src) + } + // The go/ast package does not account for different line endings, and + // specifically, in the text of a comment, it will strip out \r\n line + // endings in favor of \n. To account for these differences, we try to + // return a position on the next line whenever possible. + switch line := tok.Line(tok.Pos(offset)); { + case line < tok.LineCount(): + nextLineOffset, err := safetoken.Offset(tok, tok.LineStart(line+1)) + if err != nil { + return -1 + } + // If we found a position that is at the end of a line, move the + // offset to the start of the next line. + if offset+1 == nextLineOffset { + offset = nextLineOffset + } + case isCommentNode, offset+1 == tok.Size(): + // If the last line of the file is a comment, or we are at the end + // of the file, the prefix is the entire file. + offset = len(src) + } + return offset + } + if importEnd == 0 { + pkgEnd := f.Name.End() + importEnd = maybeAdjustToLineEnd(pkgEnd, false) + } + for _, cgroup := range f.Comments { + for _, c := range cgroup.List { + if end, err := safetoken.Offset(tok, c.End()); err != nil { + return "", err + } else if end > importEnd { + startLine := safetoken.Position(tok, c.Pos()).Line + endLine := safetoken.Position(tok, c.End()).Line + + // Work around golang/go#41197 by checking if the comment might + // contain "\r", and if so, find the actual end position of the + // comment by scanning the content of the file. + startOffset, err := safetoken.Offset(tok, c.Pos()) + if err != nil { + return "", err + } + if startLine != endLine && bytes.Contains(src[startOffset:], []byte("\r")) { + if commentEnd := scanForCommentEnd(src[startOffset:]); commentEnd > 0 { + end = startOffset + commentEnd + } + } + importEnd = maybeAdjustToLineEnd(tok.Pos(end), true) + } + } + } + if importEnd > len(src) { + importEnd = len(src) + } + return string(src[:importEnd]), nil +} + +// scanForCommentEnd returns the offset of the end of the multi-line comment +// at the start of the given byte slice. +func scanForCommentEnd(src []byte) int { + var s scanner.Scanner + s.Init(bytes.NewReader(src)) + s.Mode ^= scanner.SkipComments + + t := s.Scan() + if t == scanner.Comment { + return s.Pos().Offset + } + return 0 +} + +func computeTextEdits(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile, formatted string) ([]protocol.TextEdit, error) { + _, done := event.Start(ctx, "source.computeTextEdits") + defer done() + + edits := snapshot.View().Options().ComputeEdits(string(pgf.Src), formatted) + return ToProtocolEdits(pgf.Mapper, edits) +} + +// protocolEditsFromSource converts text edits to LSP edits using the original +// source. +func protocolEditsFromSource(src []byte, edits []diff.Edit) ([]protocol.TextEdit, error) { + m := protocol.NewMapper("", src) + var result []protocol.TextEdit + for _, edit := range edits { + rng, err := m.OffsetRange(edit.Start, edit.End) + if err != nil { + return nil, err + } + + if rng.Start == rng.End && edit.New == "" { + // Degenerate case, which may result from a diff tool wanting to delete + // '\r' in line endings. Filter it out. + continue + } + result = append(result, protocol.TextEdit{ + Range: rng, + NewText: edit.New, + }) + } + return result, nil +} + +// ToProtocolEdits converts diff.Edits to LSP TextEdits. +// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textEditArray +func ToProtocolEdits(m *protocol.Mapper, edits []diff.Edit) ([]protocol.TextEdit, error) { + // LSP doesn't require TextEditArray to be sorted: + // this is the receiver's concern. But govim, and perhaps + // other clients have historically relied on the order. + edits = append([]diff.Edit(nil), edits...) + diff.SortEdits(edits) + + result := make([]protocol.TextEdit, len(edits)) + for i, edit := range edits { + rng, err := m.OffsetRange(edit.Start, edit.End) + if err != nil { + return nil, err + } + result[i] = protocol.TextEdit{ + Range: rng, + NewText: edit.New, + } + } + return result, nil +} + +// FromProtocolEdits converts LSP TextEdits to diff.Edits. +// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textEditArray +func FromProtocolEdits(m *protocol.Mapper, edits []protocol.TextEdit) ([]diff.Edit, error) { + if edits == nil { + return nil, nil + } + result := make([]diff.Edit, len(edits)) + for i, edit := range edits { + start, end, err := m.RangeOffsets(edit.Range) + if err != nil { + return nil, err + } + result[i] = diff.Edit{ + Start: start, + End: end, + New: edit.NewText, + } + } + return result, nil +} + +// ApplyProtocolEdits applies the patch (edits) to m.Content and returns the result. +// It also returns the edits converted to diff-package form. +func ApplyProtocolEdits(m *protocol.Mapper, edits []protocol.TextEdit) (string, []diff.Edit, error) { + diffEdits, err := FromProtocolEdits(m, edits) + if err != nil { + return "", nil, err + } + out, err := diff.Apply(string(m.Content), diffEdits) + return out, diffEdits, err +} diff --git a/gopls/internal/lsp/source/format_test.go b/gopls/internal/lsp/source/format_test.go new file mode 100644 index 00000000000..fac80c3115b --- /dev/null +++ b/gopls/internal/lsp/source/format_test.go @@ -0,0 +1,75 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/tests/compare" +) + +func TestImportPrefix(t *testing.T) { + for i, tt := range []struct { + input, want string + }{ + {"package foo", "package foo"}, + {"package foo\n", "package foo\n"}, + {"package foo\n\nfunc f(){}\n", "package foo\n"}, + {"package foo\n\nimport \"fmt\"\n", "package foo\n\nimport \"fmt\""}, + {"package foo\nimport (\n\"fmt\"\n)\n", "package foo\nimport (\n\"fmt\"\n)"}, + {"\n\n\npackage foo\n", "\n\n\npackage foo\n"}, + {"// hi \n\npackage foo //xx\nfunc _(){}\n", "// hi \n\npackage foo //xx\n"}, + {"package foo //hi\n", "package foo //hi\n"}, + {"//hi\npackage foo\n//a\n\n//b\n", "//hi\npackage foo\n//a\n\n//b\n"}, + { + "package a\n\nimport (\n \"fmt\"\n)\n//hi\n", + "package a\n\nimport (\n \"fmt\"\n)\n//hi\n", + }, + {`package a /*hi*/`, `package a /*hi*/`}, + {"package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n", "package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n"}, + {"package x; import \"os\"; func f() {}\n\n", "package x; import \"os\""}, + {"package x; func f() {fmt.Println()}\n\n", "package x"}, + } { + got, err := importPrefix([]byte(tt.input)) + if err != nil { + t.Fatal(err) + } + if d := compare.Text(tt.want, got); d != "" { + t.Errorf("%d: failed for %q:\n%s", i, tt.input, d) + } + } +} + +func TestCRLFFile(t *testing.T) { + for i, tt := range []struct { + input, want string + }{ + { + input: `package main + +/* +Hi description +*/ +func Hi() { +} +`, + want: `package main + +/* +Hi description +*/`, + }, + } { + got, err := importPrefix([]byte(strings.ReplaceAll(tt.input, "\n", "\r\n"))) + if err != nil { + t.Fatal(err) + } + want := strings.ReplaceAll(tt.want, "\n", "\r\n") + if d := compare.Text(want, got); d != "" { + t.Errorf("%d: failed for %q:\n%s", i, tt.input, d) + } + } +} diff --git a/internal/lsp/source/gc_annotations.go b/gopls/internal/lsp/source/gc_annotations.go similarity index 93% rename from internal/lsp/source/gc_annotations.go rename to gopls/internal/lsp/source/gc_annotations.go index 3616bbfb1cf..72159e6f46e 100644 --- a/internal/lsp/source/gc_annotations.go +++ b/gopls/internal/lsp/source/gc_annotations.go @@ -14,9 +14,9 @@ import ( "path/filepath" "strings" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" ) type Annotation string @@ -35,11 +35,11 @@ const ( Bounds Annotation = "bounds" ) -func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, pkg Package) (map[VersionedFileIdentity][]*Diagnostic, error) { - if len(pkg.CompiledGoFiles()) == 0 { +func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, m *Metadata) (map[span.URI][]*Diagnostic, error) { + if len(m.CompiledGoFiles) == 0 { return nil, nil } - pkgDir := filepath.Dir(pkg.CompiledGoFiles()[0].URI.Filename()) + pkgDir := filepath.Dir(m.CompiledGoFiles[0].Filename()) outDir := filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.details", os.Getpid())) if err := os.MkdirAll(outDir, 0700); err != nil { @@ -74,7 +74,7 @@ func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, pkg Package) if err != nil { return nil, err } - reports := make(map[VersionedFileIdentity][]*Diagnostic) + reports := make(map[span.URI][]*Diagnostic) opts := snapshot.View().Options() var parseError error for _, fn := range files { @@ -93,7 +93,7 @@ func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, pkg Package) // outside the package can never be taken back. continue } - reports[fh.VersionedFileIdentity()] = diagnostics + reports[fh.URI()] = diagnostics } return reports, parseError } diff --git a/gopls/internal/lsp/source/highlight.go b/gopls/internal/lsp/source/highlight.go new file mode 100644 index 00000000000..e2f6c84e8cb --- /dev/null +++ b/gopls/internal/lsp/source/highlight.go @@ -0,0 +1,493 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/event" +) + +func Highlight(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Range, error) { + ctx, done := event.Start(ctx, "source.Highlight") + defer done() + + // We always want fully parsed files for highlight, regardless + // of whether the file belongs to a workspace package. + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, fmt.Errorf("getting package for Highlight: %w", err) + } + + pos, err := pgf.PositionPos(position) + if err != nil { + return nil, err + } + path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) + if len(path) == 0 { + return nil, fmt.Errorf("no enclosing position found for %v:%v", position.Line, position.Character) + } + // If start == end for astutil.PathEnclosingInterval, the 1-char interval + // following start is used instead. As a result, we might not get an exact + // match so we should check the 1-char interval to the left of the passed + // in position to see if that is an exact match. + if _, ok := path[0].(*ast.Ident); !ok { + if p, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1); p != nil { + switch p[0].(type) { + case *ast.Ident, *ast.SelectorExpr: + path = p // use preceding ident/selector + } + } + } + result, err := highlightPath(path, pgf.File, pkg.GetTypesInfo()) + if err != nil { + return nil, err + } + var ranges []protocol.Range + for rng := range result { + rng, err := pgf.PosRange(rng.start, rng.end) + if err != nil { + return nil, err + } + ranges = append(ranges, rng) + } + return ranges, nil +} + +func highlightPath(path []ast.Node, file *ast.File, info *types.Info) (map[posRange]struct{}, error) { + result := make(map[posRange]struct{}) + switch node := path[0].(type) { + case *ast.BasicLit: + if len(path) > 1 { + if _, ok := path[1].(*ast.ImportSpec); ok { + err := highlightImportUses(path, info, result) + return result, err + } + } + highlightFuncControlFlow(path, result) + case *ast.ReturnStmt, *ast.FuncDecl, *ast.FuncType: + highlightFuncControlFlow(path, result) + case *ast.Ident: + // Check if ident is inside return or func decl. + highlightFuncControlFlow(path, result) + highlightIdentifier(node, file, info, result) + case *ast.ForStmt, *ast.RangeStmt: + highlightLoopControlFlow(path, result) + case *ast.SwitchStmt: + highlightSwitchFlow(path, result) + case *ast.BranchStmt: + // BREAK can exit a loop, switch or select, while CONTINUE exit a loop so + // these need to be handled separately. They can also be embedded in any + // other loop/switch/select if they have a label. TODO: add support for + // GOTO and FALLTHROUGH as well. + if node.Label != nil { + highlightLabeledFlow(node, result) + } else { + switch node.Tok { + case token.BREAK: + highlightUnlabeledBreakFlow(path, result) + case token.CONTINUE: + highlightLoopControlFlow(path, result) + } + } + default: + // If the cursor is in an unidentified area, return empty results. + return nil, nil + } + return result, nil +} + +type posRange struct { + start, end token.Pos +} + +func highlightFuncControlFlow(path []ast.Node, result map[posRange]struct{}) { + var enclosingFunc ast.Node + var returnStmt *ast.ReturnStmt + var resultsList *ast.FieldList + inReturnList := false + +Outer: + // Reverse walk the path till we get to the func block. + for i, n := range path { + switch node := n.(type) { + case *ast.KeyValueExpr: + // If cursor is in a key: value expr, we don't want control flow highlighting + return + case *ast.CallExpr: + // If cursor is an arg in a callExpr, we don't want control flow highlighting. + if i > 0 { + for _, arg := range node.Args { + if arg == path[i-1] { + return + } + } + } + case *ast.Field: + inReturnList = true + case *ast.FuncLit: + enclosingFunc = n + resultsList = node.Type.Results + break Outer + case *ast.FuncDecl: + enclosingFunc = n + resultsList = node.Type.Results + break Outer + case *ast.ReturnStmt: + returnStmt = node + // If the cursor is not directly in a *ast.ReturnStmt, then + // we need to know if it is within one of the values that is being returned. + inReturnList = inReturnList || path[0] != returnStmt + } + } + // Cursor is not in a function. + if enclosingFunc == nil { + return + } + // If the cursor is on a "return" or "func" keyword, we should highlight all of the exit + // points of the function, including the "return" and "func" keywords. + highlightAllReturnsAndFunc := path[0] == returnStmt || path[0] == enclosingFunc + switch path[0].(type) { + case *ast.Ident, *ast.BasicLit: + // Cursor is in an identifier and not in a return statement or in the results list. + if returnStmt == nil && !inReturnList { + return + } + case *ast.FuncType: + highlightAllReturnsAndFunc = true + } + // The user's cursor may be within the return statement of a function, + // or within the result section of a function's signature. + // index := -1 + var nodes []ast.Node + if returnStmt != nil { + for _, n := range returnStmt.Results { + nodes = append(nodes, n) + } + } else if resultsList != nil { + for _, n := range resultsList.List { + nodes = append(nodes, n) + } + } + _, index := nodeAtPos(nodes, path[0].Pos()) + + // Highlight the correct argument in the function declaration return types. + if resultsList != nil && -1 < index && index < len(resultsList.List) { + rng := posRange{ + start: resultsList.List[index].Pos(), + end: resultsList.List[index].End(), + } + result[rng] = struct{}{} + } + // Add the "func" part of the func declaration. + if highlightAllReturnsAndFunc { + r := posRange{ + start: enclosingFunc.Pos(), + end: enclosingFunc.Pos() + token.Pos(len("func")), + } + result[r] = struct{}{} + } + ast.Inspect(enclosingFunc, func(n ast.Node) bool { + // Don't traverse any other functions. + switch n.(type) { + case *ast.FuncDecl, *ast.FuncLit: + return enclosingFunc == n + } + ret, ok := n.(*ast.ReturnStmt) + if !ok { + return true + } + var toAdd ast.Node + // Add the entire return statement, applies when highlight the word "return" or "func". + if highlightAllReturnsAndFunc { + toAdd = n + } + // Add the relevant field within the entire return statement. + if -1 < index && index < len(ret.Results) { + toAdd = ret.Results[index] + } + if toAdd != nil { + result[posRange{start: toAdd.Pos(), end: toAdd.End()}] = struct{}{} + } + return false + }) +} + +func highlightUnlabeledBreakFlow(path []ast.Node, result map[posRange]struct{}) { + // Reverse walk the path until we find closest loop, select, or switch. + for _, n := range path { + switch n.(type) { + case *ast.ForStmt, *ast.RangeStmt: + highlightLoopControlFlow(path, result) + return // only highlight the innermost statement + case *ast.SwitchStmt: + highlightSwitchFlow(path, result) + return + case *ast.SelectStmt: + // TODO: add highlight when breaking a select. + return + } + } +} + +func highlightLabeledFlow(node *ast.BranchStmt, result map[posRange]struct{}) { + obj := node.Label.Obj + if obj == nil || obj.Decl == nil { + return + } + label, ok := obj.Decl.(*ast.LabeledStmt) + if !ok { + return + } + switch label.Stmt.(type) { + case *ast.ForStmt, *ast.RangeStmt: + highlightLoopControlFlow([]ast.Node{label.Stmt, label}, result) + case *ast.SwitchStmt: + highlightSwitchFlow([]ast.Node{label.Stmt, label}, result) + } +} + +func labelFor(path []ast.Node) *ast.Ident { + if len(path) > 1 { + if n, ok := path[1].(*ast.LabeledStmt); ok { + return n.Label + } + } + return nil +} + +func highlightLoopControlFlow(path []ast.Node, result map[posRange]struct{}) { + var loop ast.Node + var loopLabel *ast.Ident + stmtLabel := labelFor(path) +Outer: + // Reverse walk the path till we get to the for loop. + for i := range path { + switch n := path[i].(type) { + case *ast.ForStmt, *ast.RangeStmt: + loopLabel = labelFor(path[i:]) + + if stmtLabel == nil || loopLabel == stmtLabel { + loop = n + break Outer + } + } + } + if loop == nil { + return + } + + // Add the for statement. + rng := posRange{ + start: loop.Pos(), + end: loop.Pos() + token.Pos(len("for")), + } + result[rng] = struct{}{} + + // Traverse AST to find branch statements within the same for-loop. + ast.Inspect(loop, func(n ast.Node) bool { + switch n.(type) { + case *ast.ForStmt, *ast.RangeStmt: + return loop == n + case *ast.SwitchStmt, *ast.SelectStmt: + return false + } + b, ok := n.(*ast.BranchStmt) + if !ok { + return true + } + if b.Label == nil || labelDecl(b.Label) == loopLabel { + result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} + } + return true + }) + + // Find continue statements in the same loop or switches/selects. + ast.Inspect(loop, func(n ast.Node) bool { + switch n.(type) { + case *ast.ForStmt, *ast.RangeStmt: + return loop == n + } + + if n, ok := n.(*ast.BranchStmt); ok && n.Tok == token.CONTINUE { + result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} + } + return true + }) + + // We don't need to check other for loops if we aren't looking for labeled statements. + if loopLabel == nil { + return + } + + // Find labeled branch statements in any loop. + ast.Inspect(loop, func(n ast.Node) bool { + b, ok := n.(*ast.BranchStmt) + if !ok { + return true + } + // statement with labels that matches the loop + if b.Label != nil && labelDecl(b.Label) == loopLabel { + result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} + } + return true + }) +} + +func highlightSwitchFlow(path []ast.Node, result map[posRange]struct{}) { + var switchNode ast.Node + var switchNodeLabel *ast.Ident + stmtLabel := labelFor(path) +Outer: + // Reverse walk the path till we get to the switch statement. + for i := range path { + switch n := path[i].(type) { + case *ast.SwitchStmt: + switchNodeLabel = labelFor(path[i:]) + if stmtLabel == nil || switchNodeLabel == stmtLabel { + switchNode = n + break Outer + } + } + } + // Cursor is not in a switch statement + if switchNode == nil { + return + } + + // Add the switch statement. + rng := posRange{ + start: switchNode.Pos(), + end: switchNode.Pos() + token.Pos(len("switch")), + } + result[rng] = struct{}{} + + // Traverse AST to find break statements within the same switch. + ast.Inspect(switchNode, func(n ast.Node) bool { + switch n.(type) { + case *ast.SwitchStmt: + return switchNode == n + case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt: + return false + } + + b, ok := n.(*ast.BranchStmt) + if !ok || b.Tok != token.BREAK { + return true + } + + if b.Label == nil || labelDecl(b.Label) == switchNodeLabel { + result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} + } + return true + }) + + // We don't need to check other switches if we aren't looking for labeled statements. + if switchNodeLabel == nil { + return + } + + // Find labeled break statements in any switch + ast.Inspect(switchNode, func(n ast.Node) bool { + b, ok := n.(*ast.BranchStmt) + if !ok || b.Tok != token.BREAK { + return true + } + + if b.Label != nil && labelDecl(b.Label) == switchNodeLabel { + result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} + } + + return true + }) +} + +func labelDecl(n *ast.Ident) *ast.Ident { + if n == nil { + return nil + } + if n.Obj == nil { + return nil + } + if n.Obj.Decl == nil { + return nil + } + stmt, ok := n.Obj.Decl.(*ast.LabeledStmt) + if !ok { + return nil + } + return stmt.Label +} + +func highlightImportUses(path []ast.Node, info *types.Info, result map[posRange]struct{}) error { + basicLit, ok := path[0].(*ast.BasicLit) + if !ok { + return fmt.Errorf("highlightImportUses called with an ast.Node of type %T", basicLit) + } + ast.Inspect(path[len(path)-1], func(node ast.Node) bool { + if imp, ok := node.(*ast.ImportSpec); ok && imp.Path == basicLit { + result[posRange{start: node.Pos(), end: node.End()}] = struct{}{} + return false + } + n, ok := node.(*ast.Ident) + if !ok { + return true + } + obj, ok := info.ObjectOf(n).(*types.PkgName) + if !ok { + return true + } + if !strings.Contains(basicLit.Value, obj.Name()) { + return true + } + result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} + return false + }) + return nil +} + +func highlightIdentifier(id *ast.Ident, file *ast.File, info *types.Info, result map[posRange]struct{}) { + // TODO(rfindley): idObj may be nil. Note that returning early in this case + // causes tests to fail (because the nObj == idObj check below was succeeded + // for nil == nil!) + // + // Revisit this. If ObjectOf is nil, there are type errors, and it seems + // reasonable for identifier highlighting not to work. + idObj := info.ObjectOf(id) + pkgObj, isImported := idObj.(*types.PkgName) + ast.Inspect(file, func(node ast.Node) bool { + if imp, ok := node.(*ast.ImportSpec); ok && isImported { + highlightImport(pkgObj, imp, result) + } + n, ok := node.(*ast.Ident) + if !ok { + return true + } + if n.Name != id.Name { + return false + } + if nObj := info.ObjectOf(n); nObj == idObj { + result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} + } + return false + }) +} + +func highlightImport(obj *types.PkgName, imp *ast.ImportSpec, result map[posRange]struct{}) { + if imp.Name != nil || imp.Path == nil { + return + } + if !strings.Contains(imp.Path.Value, obj.Name()) { + return + } + result[posRange{start: imp.Path.Pos(), end: imp.Path.End()}] = struct{}{} +} diff --git a/gopls/internal/lsp/source/hover.go b/gopls/internal/lsp/source/hover.go new file mode 100644 index 00000000000..e0b469a8815 --- /dev/null +++ b/gopls/internal/lsp/source/hover.go @@ -0,0 +1,1094 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/constant" + "go/doc" + "go/format" + "go/token" + "go/types" + "strconv" + "strings" + "time" + "unicode/utf8" + + "golang.org/x/text/unicode/runenames" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/typeparams" +) + +// HoverContext contains context extracted from the syntax and type information +// of a given node, for use in various summaries (hover, autocomplete, +// signature help). +type HoverContext struct { + // signatureSource is the object or node use to derive the hover signature. + // + // It may also hold a precomputed string. + // TODO(rfindley): pre-compute all signatures to avoid this indirection. + signatureSource interface{} + + // comment is the most relevant comment group associated with the hovered object. + Comment *ast.CommentGroup +} + +// HoverJSON contains information used by hover. It is also the JSON returned +// for the "structured" hover format +type HoverJSON struct { + // Synopsis is a single sentence synopsis of the symbol's documentation. + Synopsis string `json:"synopsis"` + + // FullDocumentation is the symbol's full documentation. + FullDocumentation string `json:"fullDocumentation"` + + // Signature is the symbol's signature. + Signature string `json:"signature"` + + // SingleLine is a single line describing the symbol. + // This is recommended only for use in clients that show a single line for hover. + SingleLine string `json:"singleLine"` + + // SymbolName is the types.Object.Name for the given symbol. + SymbolName string `json:"symbolName"` + + // LinkPath is the pkg.go.dev link for the given symbol. + // For example, the "go/ast" part of "pkg.go.dev/go/ast#Node". + LinkPath string `json:"linkPath"` + + // LinkAnchor is the pkg.go.dev link anchor for the given symbol. + // For example, the "Node" part of "pkg.go.dev/go/ast#Node". + LinkAnchor string `json:"linkAnchor"` +} + +func Hover(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) { + ident, err := Identifier(ctx, snapshot, fh, position) + if err != nil { + if hover, innerErr := hoverRune(ctx, snapshot, fh, position); innerErr == nil { + return hover, nil + } + return nil, nil + } + h, err := hoverIdentifier(ctx, ident) + if err != nil { + return nil, err + } + hover, err := formatHover(h, snapshot.View().Options()) + if err != nil { + return nil, err + } + return &protocol.Hover{ + Contents: protocol.MarkupContent{ + Kind: snapshot.View().Options().PreferredContentFormat, + Value: hover, + }, + Range: ident.MappedRange.Range(), + }, nil +} + +func hoverRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) { + ctx, done := event.Start(ctx, "source.hoverRune") + defer done() + + r, rng, err := findRune(ctx, snapshot, fh, position) + if err != nil { + return nil, err + } + + var desc string + runeName := runenames.Name(r) + if len(runeName) > 0 && runeName[0] == '<' { + // Check if the rune looks like an HTML tag. If so, trim the surrounding <> + // characters to work around https://github.com/microsoft/vscode/issues/124042. + runeName = strings.TrimRight(runeName[1:], ">") + } + if strconv.IsPrint(r) { + desc = fmt.Sprintf("'%s', U+%04X, %s", string(r), uint32(r), runeName) + } else { + desc = fmt.Sprintf("U+%04X, %s", uint32(r), runeName) + } + return &protocol.Hover{ + Contents: protocol.MarkupContent{ + Kind: snapshot.View().Options().PreferredContentFormat, + Value: desc, + }, + Range: rng, + }, nil +} + +// ErrNoRuneFound is the error returned when no rune is found at a particular position. +var ErrNoRuneFound = errors.New("no rune found") + +// findRune returns rune information for a position in a file. +func findRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (rune, protocol.Range, error) { + fh, err := snapshot.GetFile(ctx, fh.URI()) + if err != nil { + return 0, protocol.Range{}, err + } + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return 0, protocol.Range{}, err + } + pos, err := pgf.PositionPos(position) + if err != nil { + return 0, protocol.Range{}, err + } + + // Find the basic literal enclosing the given position, if there is one. + var lit *ast.BasicLit + ast.Inspect(pgf.File, func(n ast.Node) bool { + if n == nil || // pop + lit != nil || // found: terminate the search + !(n.Pos() <= pos && pos < n.End()) { // subtree does not contain pos: skip + return false + } + if n, ok := n.(*ast.BasicLit); ok { + lit = n // found! + } + return lit == nil // descend unless target is found + }) + if lit == nil { + return 0, protocol.Range{}, ErrNoRuneFound + } + + var r rune + var start, end token.Pos + switch lit.Kind { + case token.CHAR: + s, err := strconv.Unquote(lit.Value) + if err != nil { + // If the conversion fails, it's because of an invalid syntax, therefore + // there is no rune to be found. + return 0, protocol.Range{}, ErrNoRuneFound + } + r, _ = utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return 0, protocol.Range{}, fmt.Errorf("rune error") + } + start, end = lit.Pos(), lit.End() + case token.INT: + // It's an integer, scan only if it is a hex literal whose bitsize in + // ranging from 8 to 32. + if !(strings.HasPrefix(lit.Value, "0x") && len(lit.Value[2:]) >= 2 && len(lit.Value[2:]) <= 8) { + return 0, protocol.Range{}, ErrNoRuneFound + } + v, err := strconv.ParseUint(lit.Value[2:], 16, 32) + if err != nil { + return 0, protocol.Range{}, err + } + r = rune(v) + if r == utf8.RuneError { + return 0, protocol.Range{}, fmt.Errorf("rune error") + } + start, end = lit.Pos(), lit.End() + case token.STRING: + // It's a string, scan only if it contains a unicode escape sequence under or before the + // current cursor position. + var found bool + litOffset, err := safetoken.Offset(pgf.Tok, lit.Pos()) + if err != nil { + return 0, protocol.Range{}, err + } + offset, err := safetoken.Offset(pgf.Tok, pos) + if err != nil { + return 0, protocol.Range{}, err + } + for i := offset - litOffset; i > 0; i-- { + // Start at the cursor position and search backward for the beginning of a rune escape sequence. + rr, _ := utf8.DecodeRuneInString(lit.Value[i:]) + if rr == utf8.RuneError { + return 0, protocol.Range{}, fmt.Errorf("rune error") + } + if rr == '\\' { + // Got the beginning, decode it. + var tail string + r, _, tail, err = strconv.UnquoteChar(lit.Value[i:], '"') + if err != nil { + // If the conversion fails, it's because of an invalid syntax, therefore is no rune to be found. + return 0, protocol.Range{}, ErrNoRuneFound + } + // Only the rune escape sequence part of the string has to be highlighted, recompute the range. + runeLen := len(lit.Value) - (int(i) + len(tail)) + start = token.Pos(int(lit.Pos()) + int(i)) + end = token.Pos(int(start) + runeLen) + found = true + break + } + } + if !found { + // No escape sequence found + return 0, protocol.Range{}, ErrNoRuneFound + } + default: + return 0, protocol.Range{}, ErrNoRuneFound + } + rng, err := pgf.PosRange(start, end) + if err != nil { + return 0, protocol.Range{}, err + } + return r, rng, nil +} + +func hoverIdentifier(ctx context.Context, i *IdentifierInfo) (*HoverJSON, error) { + ctx, done := event.Start(ctx, "source.Hover") + defer done() + + hoverCtx, err := FindHoverContext(ctx, i.Snapshot, i.pkg, i.Declaration.obj, i.Declaration.node, i.Declaration.fullDecl) + if err != nil { + return nil, err + } + + h := &HoverJSON{ + FullDocumentation: hoverCtx.Comment.Text(), + Synopsis: doc.Synopsis(hoverCtx.Comment.Text()), + } + + fset := i.pkg.FileSet() + // Determine the symbol's signature. + switch x := hoverCtx.signatureSource.(type) { + case string: + h.Signature = x // a pre-computed signature + + case *ast.TypeSpec: + x2 := *x + // Don't duplicate comments when formatting type specs. + x2.Doc = nil + x2.Comment = nil + var b strings.Builder + b.WriteString("type ") + if err := format.Node(&b, fset, &x2); err != nil { + return nil, err + } + + // Display the declared methods accessible from the identifier. + // + // (The format.Node call above displays any struct fields, public + // or private, in syntactic form. We choose not to recursively + // enumerate any fields and methods promoted from them.) + obj := i.Type.Object + if obj != nil && !types.IsInterface(obj.Type()) { + sep := "\n\n" + for _, m := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + if (m.Obj().Exported() || m.Obj().Pkg() == i.pkg.GetTypes()) && len(m.Index()) == 1 { + b.WriteString(sep) + sep = "\n" + b.WriteString(objectString(m.Obj(), i.qf, nil)) + } + } + } + + h.Signature = b.String() + + case ast.Node: + var b strings.Builder + if err := format.Node(&b, fset, x); err != nil { + return nil, err + } + h.Signature = b.String() + + // Check if the variable is an integer whose value we can present in a more + // user-friendly way, i.e. `var hex = 0xe34e` becomes `var hex = 58190` + if spec, ok := x.(*ast.ValueSpec); ok && len(spec.Values) > 0 { + if lit, ok := spec.Values[0].(*ast.BasicLit); ok && len(spec.Names) > 0 { + val := constant.MakeFromLiteral(types.ExprString(lit), lit.Kind, 0) + h.Signature = fmt.Sprintf("var %s = %s", spec.Names[0], val) + } + } + + case types.Object: + // If the variable is implicitly declared in a type switch, we need to + // manually generate its object string. + if typ := i.Declaration.typeSwitchImplicit; typ != nil { + if v, ok := x.(*types.Var); ok { + h.Signature = fmt.Sprintf("var %s %s", v.Name(), types.TypeString(typ, i.qf)) + break + } + } + h.Signature = objectString(x, i.qf, i.Inferred) + } + if obj := i.Declaration.obj; obj != nil { + h.SingleLine = objectString(obj, i.qf, nil) + } + obj := i.Declaration.obj + if obj == nil { + return h, nil + } + + // Check if the identifier is test-only (and is therefore not part of a + // package's API). This is true if the request originated in a test package, + // and if the declaration is also found in the same test package. + if i.pkg != nil && obj.Pkg() != nil && i.pkg.Metadata().ForTest != "" { + if _, err := i.pkg.File(i.Declaration.MappedRange[0].URI()); err == nil { + return h, nil + } + } + + h.SymbolName, h.LinkPath, h.LinkAnchor = linkData(obj, i.enclosing) + + // See golang/go#36998: don't link to modules matching GOPRIVATE. + // + // The path returned by linkData is a package path. + if i.Snapshot.View().IsGoPrivatePath(h.LinkPath) { + h.LinkPath = "" + } else if mod, version, ok := moduleAtVersion(h.LinkPath, i); ok { + h.LinkPath = strings.Replace(h.LinkPath, mod, mod+"@"+version, 1) + } + + return h, nil +} + +// linkData returns the name, package path, and anchor to use in building links +// to obj. +// +// If obj is not visible in documentation, the returned name will be empty. +func linkData(obj types.Object, enclosing *types.TypeName) (name, packagePath, anchor string) { + // Package names simply link to the package. + if obj, ok := obj.(*types.PkgName); ok { + return obj.Name(), obj.Imported().Path(), "" + } + + // Builtins link to the special builtin package. + if obj.Parent() == types.Universe { + return obj.Name(), "builtin", obj.Name() + } + + // In all other cases, the object must be exported. + if !obj.Exported() { + return "", "", "" + } + + var recv types.Object // If non-nil, the field or method receiver base. + + switch obj := obj.(type) { + case *types.Var: + // If the object is a field, and we have an associated selector + // composite literal, or struct, we can determine the link. + if obj.IsField() && enclosing != nil { + recv = enclosing + } + case *types.Func: + typ, ok := obj.Type().(*types.Signature) + if !ok { + // Note: this should never happen. go/types guarantees that the type of + // *Funcs are Signatures. + // + // TODO(rfindley): given a 'debug' mode, we should panic here. + return "", "", "" + } + if r := typ.Recv(); r != nil { + if rtyp, _ := Deref(r.Type()).(*types.Named); rtyp != nil { + // If we have an unexported type, see if the enclosing type is + // exported (we may have an interface or struct we can link + // to). If not, don't show any link. + if !rtyp.Obj().Exported() { + if enclosing != nil { + recv = enclosing + } else { + return "", "", "" + } + } else { + recv = rtyp.Obj() + } + } + } + } + + if recv != nil && !recv.Exported() { + return "", "", "" + } + + // Either the object or its receiver must be in the package scope. + scopeObj := obj + if recv != nil { + scopeObj = recv + } + if scopeObj.Pkg() == nil || scopeObj.Pkg().Scope().Lookup(scopeObj.Name()) != scopeObj { + return "", "", "" + } + + // golang/go#52211: somehow we get here with a nil obj.Pkg + if obj.Pkg() == nil { + bug.Report("object with nil pkg", bug.Data{ + "name": obj.Name(), + "type": fmt.Sprintf("%T", obj), + }) + return "", "", "" + } + + packagePath = obj.Pkg().Path() + if recv != nil { + anchor = fmt.Sprintf("%s.%s", recv.Name(), obj.Name()) + name = fmt.Sprintf("(%s.%s).%s", obj.Pkg().Name(), recv.Name(), obj.Name()) + } else { + // For most cases, the link is "package/path#symbol". + anchor = obj.Name() + name = fmt.Sprintf("%s.%s", obj.Pkg().Name(), obj.Name()) + } + return name, packagePath, anchor +} + +func moduleAtVersion(path string, i *IdentifierInfo) (string, string, bool) { + // TODO(rfindley): moduleAtVersion should not be responsible for deciding + // whether or not the link target supports module version links. + if strings.ToLower(i.Snapshot.View().Options().LinkTarget) != "pkg.go.dev" { + return "", "", false + } + impID, ok := i.pkg.Metadata().DepsByPkgPath[PackagePath(path)] + if !ok { + return "", "", false + } + impMeta := i.Snapshot.Metadata(impID) + if impMeta == nil { + return "", "", false + } + module := impMeta.Module + if module == nil { + return "", "", false + } + if module.Path == "" || module.Version == "" { + return "", "", false + } + return module.Path, module.Version, true +} + +// objectString is a wrapper around the types.ObjectString function. +// It handles adding more information to the object string. +func objectString(obj types.Object, qf types.Qualifier, inferred *types.Signature) string { + // If the signature type was inferred, prefer the preferred signature with a + // comment showing the generic signature. + if sig, _ := obj.Type().(*types.Signature); sig != nil && typeparams.ForSignature(sig).Len() > 0 && inferred != nil { + obj2 := types.NewFunc(obj.Pos(), obj.Pkg(), obj.Name(), inferred) + str := types.ObjectString(obj2, qf) + // Try to avoid overly long lines. + if len(str) > 60 { + str += "\n" + } else { + str += " " + } + str += "// " + types.TypeString(sig, qf) + return str + } + str := types.ObjectString(obj, qf) + switch obj := obj.(type) { + case *types.Const: + str = fmt.Sprintf("%s = %s", str, obj.Val()) + + // Try to add a formatted duration as an inline comment + typ, ok := obj.Type().(*types.Named) + if !ok { + break + } + pkg := typ.Obj().Pkg() + if pkg.Path() == "time" && typ.Obj().Name() == "Duration" { + if d, ok := constant.Int64Val(obj.Val()); ok { + str += " // " + time.Duration(d).String() + } + } + } + return str +} + +// HoverDocForObject returns the best doc comment for obj, referenced by srcpkg. +// +// TODO(rfindley): there appears to be zero(!) tests for this functionality. +func HoverDocForObject(ctx context.Context, snapshot Snapshot, srcpkg Package, obj types.Object) (*ast.CommentGroup, error) { + if _, isTypeName := obj.(*types.TypeName); isTypeName { + if _, isTypeParam := obj.Type().(*typeparams.TypeParam); isTypeParam { + return nil, nil + } + } + + pgf, pos, err := parseFull(ctx, snapshot, srcpkg, obj.Pos()) + if err != nil { + return nil, fmt.Errorf("re-parsing: %v", err) + } + + decl, spec, field := FindDeclInfo([]*ast.File{pgf.File}, pos) + if field != nil && field.Doc != nil { + return field.Doc, nil + } + switch decl := decl.(type) { + case *ast.FuncDecl: + return decl.Doc, nil + case *ast.GenDecl: + switch spec := spec.(type) { + case *ast.ValueSpec: + if spec.Doc != nil { + return spec.Doc, nil + } + if decl.Doc != nil { + return decl.Doc, nil + } + return spec.Comment, nil + case *ast.TypeSpec: + if spec.Doc != nil { + return spec.Doc, nil + } + if decl.Doc != nil { + return decl.Doc, nil + } + return spec.Comment, nil + } + } + return nil, nil +} + +// parseFull fully parses the file corresponding to position pos, referenced +// from the given srcpkg. +// +// It returns the resulting ParsedGoFile as well as new pos contained in the +// parsed file. +func parseFull(ctx context.Context, snapshot Snapshot, srcpkg Package, pos token.Pos) (*ParsedGoFile, token.Pos, error) { + f := srcpkg.FileSet().File(pos) + if f == nil { + return nil, 0, bug.Errorf("internal error: no file for position %d in %s", pos, srcpkg.Metadata().ID) + } + + uri := span.URIFromPath(f.Name()) + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, 0, err + } + + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, 0, err + } + + offset, err := safetoken.Offset(f, pos) + if err != nil { + return nil, 0, bug.Errorf("offset out of bounds in %q", uri) + } + + fullPos, err := safetoken.Pos(pgf.Tok, offset) + if err != nil { + return nil, 0, err + } + + return pgf, fullPos, nil +} + +// FindHoverContext returns a HoverContext struct for an AST node and its +// declaration object. node should be the actual node used in type checking, +// while fullNode could be a separate node with more complete syntactic +// information. +func FindHoverContext(ctx context.Context, s Snapshot, pkg Package, obj types.Object, pkgNode ast.Node, fullDecl ast.Decl) (*HoverContext, error) { + var info *HoverContext + + // Type parameters get their signature from their declaration object. + if _, isTypeName := obj.(*types.TypeName); isTypeName { + if _, isTypeParam := obj.Type().(*typeparams.TypeParam); isTypeParam { + return &HoverContext{signatureSource: obj}, nil + } + } + + // This is problematic for a number of reasons. We really need to have a more + // general mechanism to validate the coherency of AST with type information, + // but absent that we must do our best to ensure that we don't use fullNode + // when we actually need the node that was type checked. + // + // pkgNode may be nil, if it was eliminated from the type-checked syntax. In + // that case, use fullDecl if available. + node := pkgNode + if node == nil && fullDecl != nil { + node = fullDecl + } + + switch node := node.(type) { + case *ast.Ident: + // The package declaration. + for _, f := range pkg.GetSyntax() { + if f.Name == pkgNode { + info = &HoverContext{Comment: f.Doc} + } + } + case *ast.ImportSpec: + // Try to find the package documentation for an imported package. + importPath := UnquoteImportPath(node) + impID := pkg.Metadata().DepsByImpPath[importPath] + if impID == "" { + return nil, fmt.Errorf("failed to resolve import %q", importPath) + } + impMetadata := s.Metadata(impID) + if impMetadata == nil { + return nil, fmt.Errorf("failed to resolve import ID %q", impID) + } + for _, f := range impMetadata.CompiledGoFiles { + fh, err := s.GetFile(ctx, f) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + pgf, err := s.ParseGo(ctx, fh, ParseHeader) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + if pgf.File.Doc != nil { + return &HoverContext{ + Comment: pgf.File.Doc, + signatureSource: "package " + impMetadata.Name, + }, nil + } + } + + case *ast.GenDecl: + switch obj := obj.(type) { + case *types.TypeName, *types.Var, *types.Const, *types.Func: + // Always use the full declaration here if we have it, because the + // dependent code doesn't rely on pointer identity. This is fragile. + if d, _ := fullDecl.(*ast.GenDecl); d != nil { + node = d + } + // obj may not have been produced by type checking the AST containing + // node, so we need to be careful about using token.Pos. + tok := pkg.FileSet().File(obj.Pos()) + offset, err := safetoken.Offset(tok, obj.Pos()) + if err != nil { + return nil, err + } + + // fullTok and fullPos are the *token.File and object position in for the + // full AST. + fullTok := pkg.FileSet().File(node.Pos()) + fullPos, err := safetoken.Pos(fullTok, offset) + if err != nil { + return nil, err + } + + var spec ast.Spec + for _, s := range node.Specs { + // Avoid panics by guarding the calls to token.Offset (golang/go#48249). + start, end, err := safetoken.Offsets(fullTok, s.Pos(), s.End()) + if err != nil { + return nil, err + } + if start <= offset && offset <= end { + spec = s + break + } + } + + info, err = hoverGenDecl(node, spec, fullPos, obj) + if err != nil { + return nil, err + } + } + case *ast.TypeSpec: + if obj.Parent() == types.Universe { + if genDecl, ok := fullDecl.(*ast.GenDecl); ok { + info = hoverTypeSpec(node, genDecl) + } + } + case *ast.FuncDecl: + switch obj.(type) { + case *types.Func: + info = &HoverContext{signatureSource: obj, Comment: node.Doc} + case *types.Builtin: + info = &HoverContext{Comment: node.Doc} + if sig, err := NewBuiltinSignature(ctx, s, obj.Name()); err == nil { + info.signatureSource = "func " + sig.name + sig.Format() + } else { + // Fall back on the object as a signature source. + bug.Report("invalid builtin hover", bug.Data{ + "err": err.Error(), + }) + info.signatureSource = obj + } + case *types.Var: + // Object is a function param or the field of an anonymous struct + // declared with ':='. Skip the first one because only fields + // can have docs. + if isFunctionParam(obj, node) { + break + } + + _, _, field := FindDeclInfo(pkg.GetSyntax(), obj.Pos()) + if field != nil { + comment := field.Doc + if comment.Text() == "" { + comment = field.Comment + } + info = &HoverContext{signatureSource: obj, Comment: comment} + } + } + } + + if info == nil { + info = &HoverContext{signatureSource: obj} + } + + return info, nil +} + +// isFunctionParam returns true if the passed object is either an incoming +// or an outgoing function param +func isFunctionParam(obj types.Object, node *ast.FuncDecl) bool { + for _, f := range node.Type.Params.List { + if f.Pos() == obj.Pos() { + return true + } + } + if node.Type.Results != nil { + for _, f := range node.Type.Results.List { + if f.Pos() == obj.Pos() { + return true + } + } + } + return false +} + +// hoverGenDecl returns hover information an object declared via spec inside +// of the GenDecl node. obj is the type-checked object corresponding to the +// declaration, but may have been type-checked using a different AST than the +// given nodes; fullPos is the position of obj in node's AST. +func hoverGenDecl(node *ast.GenDecl, spec ast.Spec, fullPos token.Pos, obj types.Object) (*HoverContext, error) { + if spec == nil { + return nil, fmt.Errorf("no spec for node %v at position %v", node, fullPos) + } + + // If we have a field or method. + switch obj.(type) { + case *types.Var, *types.Const, *types.Func: + return hoverVar(spec, fullPos, obj, node), nil + } + // Handle types. + switch spec := spec.(type) { + case *ast.TypeSpec: + return hoverTypeSpec(spec, node), nil + case *ast.ValueSpec: + return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil + case *ast.ImportSpec: + return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil + } + return nil, fmt.Errorf("unable to format spec %v (%T)", spec, spec) +} + +// TODO(rfindley): rename this function. +func hoverTypeSpec(spec *ast.TypeSpec, decl *ast.GenDecl) *HoverContext { + comment := spec.Doc + if comment == nil && decl != nil { + comment = decl.Doc + } + if comment == nil { + comment = spec.Comment + } + return &HoverContext{ + signatureSource: spec, + Comment: comment, + } +} + +func hoverVar(node ast.Spec, fullPos token.Pos, obj types.Object, decl *ast.GenDecl) *HoverContext { + var fieldList *ast.FieldList + switch spec := node.(type) { + case *ast.TypeSpec: + switch t := spec.Type.(type) { + case *ast.StructType: + fieldList = t.Fields + case *ast.InterfaceType: + fieldList = t.Methods + } + case *ast.ValueSpec: + // Try to extract the field list of an anonymous struct + if fieldList = extractFieldList(spec.Type); fieldList != nil { + break + } + + comment := spec.Doc + if comment == nil { + comment = decl.Doc + } + if comment == nil { + comment = spec.Comment + } + + // We need the AST nodes for variable declarations of basic literals with + // associated values so that we can augment their hover with more information. + if _, ok := obj.(*types.Var); ok && spec.Type == nil && len(spec.Values) > 0 { + if _, ok := spec.Values[0].(*ast.BasicLit); ok { + return &HoverContext{signatureSource: spec, Comment: comment} + } + } + + return &HoverContext{signatureSource: obj, Comment: comment} + } + + if fieldList != nil { + comment := findFieldComment(fullPos, fieldList) + return &HoverContext{signatureSource: obj, Comment: comment} + } + return &HoverContext{signatureSource: obj, Comment: decl.Doc} +} + +// extractFieldList recursively tries to extract a field list. +// If it is not found, nil is returned. +func extractFieldList(specType ast.Expr) *ast.FieldList { + switch t := specType.(type) { + case *ast.StructType: + return t.Fields + case *ast.InterfaceType: + return t.Methods + case *ast.ArrayType: + return extractFieldList(t.Elt) + case *ast.MapType: + // Map value has a greater chance to be a struct + if fields := extractFieldList(t.Value); fields != nil { + return fields + } + return extractFieldList(t.Key) + case *ast.ChanType: + return extractFieldList(t.Value) + } + return nil +} + +// findFieldComment visits all fields in depth-first order and returns +// the comment of a field with passed position. If no comment is found, +// nil is returned. +func findFieldComment(pos token.Pos, fieldList *ast.FieldList) *ast.CommentGroup { + for _, field := range fieldList.List { + if field.Pos() == pos { + if field.Doc.Text() != "" { + return field.Doc + } + return field.Comment + } + + if nestedFieldList := extractFieldList(field.Type); nestedFieldList != nil { + if c := findFieldComment(pos, nestedFieldList); c != nil { + return c + } + } + } + return nil +} + +func formatHover(h *HoverJSON, options *Options) (string, error) { + signature := formatSignature(h, options) + + switch options.HoverKind { + case SingleLine: + return h.SingleLine, nil + case NoDocumentation: + return signature, nil + case Structured: + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return string(b), nil + } + + link := formatLink(h, options) + doc := formatDoc(h, options) + + var b strings.Builder + parts := []string{signature, doc, link} + for i, el := range parts { + if el != "" { + b.WriteString(el) + + // If any elements of the remainder of the list are non-empty, + // write an extra newline. + if anyNonEmpty(parts[i+1:]) { + if options.PreferredContentFormat == protocol.Markdown { + b.WriteString("\n\n") + } else { + b.WriteRune('\n') + } + } + } + } + return b.String(), nil +} + +func formatSignature(h *HoverJSON, options *Options) string { + signature := h.Signature + if signature != "" && options.PreferredContentFormat == protocol.Markdown { + signature = fmt.Sprintf("```go\n%s\n```", signature) + } + return signature +} + +func formatLink(h *HoverJSON, options *Options) string { + if !options.LinksInHover || options.LinkTarget == "" || h.LinkPath == "" { + return "" + } + plainLink := BuildLink(options.LinkTarget, h.LinkPath, h.LinkAnchor) + switch options.PreferredContentFormat { + case protocol.Markdown: + return fmt.Sprintf("[`%s` on %s](%s)", h.SymbolName, options.LinkTarget, plainLink) + case protocol.PlainText: + return "" + default: + return plainLink + } +} + +// BuildLink constructs a URL with the given target, path, and anchor. +func BuildLink(target, path, anchor string) string { + link := fmt.Sprintf("https://%s/%s", target, path) + if anchor == "" { + return link + } + return link + "#" + anchor +} + +func formatDoc(h *HoverJSON, options *Options) string { + var doc string + switch options.HoverKind { + case SynopsisDocumentation: + doc = h.Synopsis + case FullDocumentation: + doc = h.FullDocumentation + } + if options.PreferredContentFormat == protocol.Markdown { + return CommentToMarkdown(doc) + } + return doc +} + +func anyNonEmpty(x []string) bool { + for _, el := range x { + if el != "" { + return true + } + } + return false +} + +// FindDeclInfo returns the syntax nodes involved in the declaration of the +// types.Object with position pos, searching the given list of file syntax +// trees. +// +// Pos may be the position of the name-defining identifier in a FuncDecl, +// ValueSpec, TypeSpec, Field, or as a special case the position of +// Ellipsis.Elt in an ellipsis field. +// +// If found, the resulting decl, spec, and field will be the inner-most +// instance of each node type surrounding pos. +// +// It returns a nil decl if no object-defining node is found at pos. +func FindDeclInfo(files []*ast.File, pos token.Pos) (decl ast.Decl, spec ast.Spec, field *ast.Field) { + // panic(found{}) breaks off the traversal and + // causes the function to return normally. + type found struct{} + defer func() { + switch x := recover().(type) { + case nil: + case found: + default: + panic(x) + } + }() + + // Visit the files in search of the node at pos. + stack := make([]ast.Node, 0, 20) + // Allocate the closure once, outside the loop. + f := func(n ast.Node) bool { + if n != nil { + stack = append(stack, n) // push + } else { + stack = stack[:len(stack)-1] // pop + return false + } + + // Skip subtrees (incl. files) that don't contain the search point. + if !(n.Pos() <= pos && pos < n.End()) { + return false + } + + switch n := n.(type) { + case *ast.Field: + findEnclosingDeclAndSpec := func() { + for i := len(stack) - 1; i >= 0; i-- { + switch n := stack[i].(type) { + case ast.Spec: + spec = n + case ast.Decl: + decl = n + return + } + } + } + + // Check each field name since you can have + // multiple names for the same type expression. + for _, id := range n.Names { + if id.Pos() == pos { + field = n + findEnclosingDeclAndSpec() + panic(found{}) + } + } + + // Check *ast.Field itself. This handles embedded + // fields which have no associated *ast.Ident name. + if n.Pos() == pos { + field = n + findEnclosingDeclAndSpec() + panic(found{}) + } + + // Also check "X" in "...X". This makes it easy to format variadic + // signature params properly. + // + // TODO(rfindley): I don't understand this comment. How does finding the + // field in this case make it easier to format variadic signature params? + if ell, ok := n.Type.(*ast.Ellipsis); ok && ell.Elt != nil && ell.Elt.Pos() == pos { + field = n + findEnclosingDeclAndSpec() + panic(found{}) + } + + case *ast.FuncDecl: + if n.Name.Pos() == pos { + decl = n + panic(found{}) + } + + case *ast.GenDecl: + for _, s := range n.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if s.Name.Pos() == pos { + decl = n + spec = s + panic(found{}) + } + case *ast.ValueSpec: + for _, id := range s.Names { + if id.Pos() == pos { + decl = n + spec = s + panic(found{}) + } + } + } + } + } + return true + } + for _, file := range files { + ast.Inspect(file, f) + } + + return nil, nil, nil +} diff --git a/internal/lsp/source/identifier.go b/gopls/internal/lsp/source/identifier.go similarity index 76% rename from internal/lsp/source/identifier.go rename to gopls/internal/lsp/source/identifier.go index 40655e20779..1e1bc527fa5 100644 --- a/internal/lsp/source/identifier.go +++ b/gopls/internal/lsp/source/identifier.go @@ -12,27 +12,25 @@ import ( "go/parser" "go/token" "go/types" - "sort" "strconv" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/typeparams" ) // IdentifierInfo holds information about an identifier in Go source. type IdentifierInfo struct { - Name string - Snapshot Snapshot - MappedRange + Name string + Snapshot Snapshot // only needed for .View(); TODO(adonovan): reduce. + MappedRange protocol.MappedRange Type struct { - MappedRange - Object types.Object + MappedRange protocol.MappedRange + Object *types.TypeName } Inferred *types.Signature @@ -50,16 +48,12 @@ type IdentifierInfo struct { qf types.Qualifier } -func (i *IdentifierInfo) IsImport() bool { - _, ok := i.Declaration.node.(*ast.ImportSpec) - return ok -} - type Declaration struct { - MappedRange []MappedRange + MappedRange []protocol.MappedRange - // The typechecked node. - node ast.Node + // The typechecked node + node ast.Node + nodeFile *ParsedGoFile // provides token.File and Mapper for node // Optional: the fully parsed node, to be used for formatting in cases where // node has missing information. This could be the case when node was parsed @@ -81,40 +75,15 @@ func Identifier(ctx context.Context, snapshot Snapshot, fh FileHandle, position ctx, done := event.Start(ctx, "source.Identifier") defer done() - pkgs, err := snapshot.PackagesForFile(ctx, fh.URI(), TypecheckAll, false) + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) if err != nil { return nil, err } - if len(pkgs) == 0 { - return nil, fmt.Errorf("no packages for file %v", fh.URI()) - } - sort.Slice(pkgs, func(i, j int) bool { - // Prefer packages with a more complete parse mode. - if pkgs[i].ParseMode() != pkgs[j].ParseMode() { - return pkgs[i].ParseMode() > pkgs[j].ParseMode() - } - return len(pkgs[i].CompiledGoFiles()) < len(pkgs[j].CompiledGoFiles()) - }) - var findErr error - for _, pkg := range pkgs { - pgf, err := pkg.File(fh.URI()) - if err != nil { - // We shouldn't get a package from PackagesForFile that doesn't actually - // contain the file. - bug.Report("missing package file", bug.Data{"pkg": pkg.ID(), "file": fh.URI()}) - return nil, err - } - pos, err := pgf.Mapper.Pos(position) - if err != nil { - return nil, err - } - var ident *IdentifierInfo - ident, findErr = findIdentifier(ctx, snapshot, pkg, pgf, pos) - if findErr == nil { - return ident, nil - } + pos, err := pgf.PositionPos(position) + if err != nil { + return nil, err } - return nil, findErr + return findIdentifier(ctx, snapshot, pkg, pgf, pos) } // ErrNoIdentFound is error returned when no identifier is found at a particular position @@ -124,7 +93,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa file := pgf.File // Handle import specs separately, as there is no formal position for a // package declaration. - if result, err := importSpec(snapshot, pkg, file, pos); result != nil || err != nil { + if result, err := importSpec(ctx, snapshot, pkg, pgf, pos); result != nil || err != nil { return result, err } path := pathEnclosingObjNode(file, pos) @@ -141,34 +110,32 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa // Special case for package declarations, since they have no // corresponding types.Object. if ident == file.Name { - rng, err := posToMappedRange(snapshot, pkg, file.Name.Pos(), file.Name.End()) - if err != nil { - return nil, err - } - var declAST *ast.File + // If there's no package documentation, just use current file. + decl := pgf for _, pgf := range pkg.CompiledGoFiles() { if pgf.File.Doc != nil { - declAST = pgf.File + decl = pgf } } - // If there's no package documentation, just use current file. - if declAST == nil { - declAST = file + pkgRng, err := pgf.NodeMappedRange(file.Name) + if err != nil { + return nil, err } - declRng, err := posToMappedRange(snapshot, pkg, declAST.Name.Pos(), declAST.Name.End()) + declRng, err := decl.NodeMappedRange(decl.File.Name) if err != nil { return nil, err } return &IdentifierInfo{ Name: file.Name.Name, ident: file.Name, - MappedRange: rng, + MappedRange: pkgRng, pkg: pkg, qf: qf, Snapshot: snapshot, Declaration: Declaration{ - node: declAST.Name, - MappedRange: []MappedRange{declRng}, + node: decl.File.Name, + nodeFile: decl, + MappedRange: []protocol.MappedRange{declRng}, }, }, nil } @@ -183,7 +150,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa result.Name = result.ident.Name var err error - if result.MappedRange, err = posToMappedRange(snapshot, pkg, result.ident.Pos(), result.ident.End()); err != nil { + if result.MappedRange, err = posToMappedRange(ctx, snapshot, pkg, result.ident.Pos(), result.ident.End()); err != nil { return nil, err } @@ -191,7 +158,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa if result.Declaration.obj == nil { // If there was no types.Object for the declaration, there might be an // implicit local variable declaration in a type switch. - if objs, typ := typeSwitchImplicits(pkg, path); len(objs) > 0 { + if objs, typ := typeSwitchImplicits(pkg.GetTypesInfo(), path); len(objs) > 0 { // There is no types.Object for the declaration of an implicit local variable, // but all of the types.Objects associated with the usages of this variable can be // used to connect it back to the declaration. @@ -219,6 +186,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa return nil, fmt.Errorf("no declaration for %s", result.Name) } result.Declaration.node = decl + result.Declaration.nodeFile = builtin if typeSpec, ok := decl.(*ast.TypeSpec); ok { // Find the GenDecl (which has the doc comments) for the TypeSpec. result.Declaration.fullDecl = findGenDecl(builtin.File, typeSpec) @@ -226,7 +194,10 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa // The builtin package isn't in the dependency graph, so the usual // utilities won't work here. - rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name))) + rng, err := builtin.PosMappedRange(decl.Pos(), decl.Pos()+token.Pos(len(result.Name))) + if err != nil { + return nil, err + } result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng) return result, nil } @@ -240,7 +211,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa return nil, err } // Look up "error" and then navigate to its only method. - // The Error method does not appear in the builtin package's scope.log.Pri + // The Error method does not appear in the builtin package's scope. const errorName = "error" builtinObj := builtin.File.Scope.Lookup(errorName) if builtinObj == nil { @@ -267,7 +238,11 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa } name := method.Names[0].Name result.Declaration.node = method - rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name))) + result.Declaration.nodeFile = builtin + rng, err := builtin.PosMappedRange(method.Pos(), method.Pos()+token.Pos(len(name))) + if err != nil { + return nil, err + } result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng) return result, nil } @@ -282,22 +257,29 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa } } - rng, err := objToMappedRange(snapshot, pkg, result.Declaration.obj) + // TODO(adonovan): this step calls the somewhat expensive + // findFileInDeps, which is also called below. Refactor + // objToMappedRange to separate the find-file from the + // lookup-position steps to avoid the redundancy. + obj := result.Declaration.obj + rng, err := posToMappedRange(ctx, snapshot, pkg, obj.Pos(), adjustedObjEnd(obj)) if err != nil { return nil, err } result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng) - declPkg, err := FindPackageFromPos(ctx, snapshot, result.Declaration.obj.Pos()) + declPos := result.Declaration.obj.Pos() + objURI := span.URIFromPath(pkg.FileSet().File(declPos).Name()) + declFile, declPkg, err := findFileInDeps(ctx, snapshot, pkg, objURI) if err != nil { return nil, err } - if result.Declaration.node, err = snapshot.PosToDecl(ctx, declPkg, result.Declaration.obj.Pos()); err != nil { - return nil, err - } + result.Declaration.node, _, _ = FindDeclInfo([]*ast.File{declFile.File}, declPos) // may be nil + result.Declaration.nodeFile = declFile + // Ensure that we have the full declaration, in case the declaration was // parsed in ParseExported and therefore could be missing information. - if result.Declaration.fullDecl, err = fullNode(snapshot, result.Declaration.obj, declPkg); err != nil { + if result.Declaration.fullDecl, err = fullNode(pkg.FileSet(), result.Declaration.obj, declPkg); err != nil { return nil, err } typ := pkg.GetTypesInfo().TypeOf(result.ident) @@ -313,7 +295,9 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa if hasErrorType(result.Type.Object) { return result, nil } - if result.Type.MappedRange, err = objToMappedRange(snapshot, pkg, result.Type.Object); err != nil { + obj := result.Type.Object + // TODO(rfindley): no need to use an adjusted end here. + if result.Type.MappedRange, err = posToMappedRange(ctx, snapshot, pkg, obj.Pos(), adjustedObjEnd(obj)); err != nil { return nil, err } } @@ -335,9 +319,9 @@ func findGenDecl(f *ast.File, spec ast.Spec) *ast.GenDecl { // fullNode tries to extract the full spec corresponding to obj's declaration. // If the package was not parsed in full, the declaration file will be // re-parsed to ensure it has complete syntax. -func fullNode(snapshot Snapshot, obj types.Object, pkg Package) (ast.Decl, error) { +func fullNode(fset *token.FileSet, obj types.Object, pkg Package) (ast.Decl, error) { // declaration in a different package... make sure we have full AST information. - tok := snapshot.FileSet().File(obj.Pos()) + tok := fset.File(obj.Pos()) uri := span.URIFromPath(tok.Name()) pgf, err := pkg.File(uri) if err != nil { @@ -346,7 +330,6 @@ func fullNode(snapshot Snapshot, obj types.Object, pkg Package) (ast.Decl, error file := pgf.File pos := obj.Pos() if pgf.Mode != ParseFull { - fset := snapshot.FileSet() file2, _ := parser.ParseFile(fset, tok.Name(), pgf.Src, parser.AllErrors|parser.ParseComments) if file2 != nil { offset, err := safetoken.Offset(tok, obj.Pos()) @@ -420,7 +403,10 @@ func searchForEnclosing(info *types.Info, path []ast.Node) *types.TypeName { return nil } -func typeToObject(typ types.Type) types.Object { +// typeToObject returns the relevant type name for the given type, after +// unwrapping pointers, arrays, slices, channels, and function signatures with +// a single non-error result. +func typeToObject(typ types.Type) *types.TypeName { switch typ := typ.(type) { case *types.Named: return typ.Obj() @@ -435,7 +421,7 @@ func typeToObject(typ types.Type) types.Object { case *types.Signature: // Try to find a return value of a named type. If there's only one // such value, jump to its type definition. - var res types.Object + var res *types.TypeName results := typ.Results() for i := 0; i < results.Len(); i++ { @@ -462,9 +448,9 @@ func hasErrorType(obj types.Object) bool { } // importSpec handles positions inside of an *ast.ImportSpec. -func importSpec(snapshot Snapshot, pkg Package, file *ast.File, pos token.Pos) (*IdentifierInfo, error) { +func importSpec(ctx context.Context, snapshot Snapshot, pkg Package, pgf *ParsedGoFile, pos token.Pos) (*IdentifierInfo, error) { var imp *ast.ImportSpec - for _, spec := range file.Imports { + for _, spec := range pgf.File.Imports { if spec.Path.Pos() <= pos && pos < spec.Path.End() { imp = spec } @@ -476,22 +462,40 @@ func importSpec(snapshot Snapshot, pkg Package, file *ast.File, pos token.Pos) ( if err != nil { return nil, fmt.Errorf("import path not quoted: %s (%v)", imp.Path.Value, err) } + result := &IdentifierInfo{ Snapshot: snapshot, - Name: importPath, + Name: importPath, // should this perhaps be imported.PkgPath()? pkg: pkg, } - if result.MappedRange, err = posToMappedRange(snapshot, pkg, imp.Path.Pos(), imp.Path.End()); err != nil { + if result.MappedRange, err = posToMappedRange(ctx, snapshot, pkg, imp.Path.Pos(), imp.Path.End()); err != nil { return nil, err } - // Consider the "declaration" of an import spec to be the imported package. - importedPkg, err := pkg.GetImport(importPath) - if err != nil { - return nil, err + + impID := pkg.Metadata().DepsByImpPath[ImportPath(importPath)] + if impID == "" { + return nil, fmt.Errorf("failed to resolve import %q", importPath) } - // Return all of the files in the package as the definition of the import spec. - for _, dst := range importedPkg.GetSyntax() { - rng, err := posToMappedRange(snapshot, pkg, dst.Pos(), dst.End()) + impMetadata := snapshot.Metadata(impID) + if impMetadata == nil { + return nil, fmt.Errorf("failed to resolve import ID %q", impID) + } + for _, f := range impMetadata.CompiledGoFiles { + fh, err := snapshot.GetFile(ctx, f) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + rng, err := pgf.NodeMappedRange(pgf.File) if err != nil { return nil, err } @@ -499,13 +503,14 @@ func importSpec(snapshot Snapshot, pkg Package, file *ast.File, pos token.Pos) ( } result.Declaration.node = imp + result.Declaration.nodeFile = pgf return result, nil } // typeSwitchImplicits returns all the implicit type switch objects that // correspond to the leaf *ast.Ident. It also returns the original type // associated with the identifier (outside of a case clause). -func typeSwitchImplicits(pkg Package, path []ast.Node) ([]types.Object, types.Type) { +func typeSwitchImplicits(info *types.Info, path []ast.Node) ([]types.Object, types.Type) { ident, _ := path[0].(*ast.Ident) if ident == nil { return nil, nil @@ -515,7 +520,7 @@ func typeSwitchImplicits(pkg Package, path []ast.Node) ([]types.Object, types.Ty ts *ast.TypeSwitchStmt assign *ast.AssignStmt cc *ast.CaseClause - obj = pkg.GetTypesInfo().ObjectOf(ident) + obj = info.ObjectOf(ident) ) // Walk our ancestors to determine if our leaf ident refers to a @@ -534,7 +539,7 @@ Outer: // case clause implicitly maps "a" to a different types.Object, // so check if ident's object is the case clause's implicit // object. - if obj != nil && pkg.GetTypesInfo().Implicits[n] == obj { + if obj != nil && info.Implicits[n] == obj { cc = n } case *ast.TypeSwitchStmt: @@ -560,7 +565,7 @@ Outer: // type switch's implicit case clause objects. var objs []types.Object for _, cc := range ts.Body.List { - if ccObj := pkg.GetTypesInfo().Implicits[cc]; ccObj != nil { + if ccObj := info.Implicits[cc]; ccObj != nil { objs = append(objs, ccObj) } } @@ -570,7 +575,7 @@ Outer: var typ types.Type if assign, ok := ts.Assign.(*ast.AssignStmt); ok && len(assign.Rhs) == 1 { if rhs := assign.Rhs[0].(*ast.TypeAssertExpr); ok { - typ = pkg.GetTypesInfo().TypeOf(rhs.X) + typ = info.TypeOf(rhs.X) } } return objs, typ diff --git a/internal/lsp/source/identifier_test.go b/gopls/internal/lsp/source/identifier_test.go similarity index 100% rename from internal/lsp/source/identifier_test.go rename to gopls/internal/lsp/source/identifier_test.go diff --git a/gopls/internal/lsp/source/implementation.go b/gopls/internal/lsp/source/implementation.go new file mode 100644 index 00000000000..6d165df3f94 --- /dev/null +++ b/gopls/internal/lsp/source/implementation.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "errors" + "go/ast" + "go/token" + "go/types" +) + +// TODO(adonovan): move these declarations elsewhere. + +// concreteImplementsIntf returns true if a is an interface type implemented by +// concrete type b, or vice versa. +func concreteImplementsIntf(a, b types.Type) bool { + aIsIntf, bIsIntf := types.IsInterface(a), types.IsInterface(b) + + // Make sure exactly one is an interface type. + if aIsIntf == bIsIntf { + return false + } + + // Rearrange if needed so "a" is the concrete type. + if aIsIntf { + a, b = b, a + } + + // TODO(adonovan): this should really use GenericAssignableTo + // to report (e.g.) "ArrayList[T] implements List[T]", but + // GenericAssignableTo doesn't work correctly on pointers to + // generic named types. Thus the legacy implementation and the + // "local" part of implementation2 fail to report generics. + // The global algorithm based on subsets does the right thing. + return types.AssignableTo(a, b) +} + +var ( + // TODO(adonovan): why do various RPC handlers related to + // IncomingCalls return (nil, nil) on the protocol in response + // to this error? That seems like a violation of the protocol. + // Is it perhaps a workaround for VSCode behavior? + errNoObjectFound = errors.New("no object found") +) + +// pathEnclosingObjNode returns the AST path to the object-defining +// node associated with pos. "Object-defining" means either an +// *ast.Ident mapped directly to a types.Object or an ast.Node mapped +// implicitly to a types.Object. +func pathEnclosingObjNode(f *ast.File, pos token.Pos) []ast.Node { + var ( + path []ast.Node + found bool + ) + + ast.Inspect(f, func(n ast.Node) bool { + if found { + return false + } + + if n == nil { + path = path[:len(path)-1] + return false + } + + path = append(path, n) + + switch n := n.(type) { + case *ast.Ident: + // Include the position directly after identifier. This handles + // the common case where the cursor is right after the + // identifier the user is currently typing. Previously we + // handled this by calling astutil.PathEnclosingInterval twice, + // once for "pos" and once for "pos-1". + found = n.Pos() <= pos && pos <= n.End() + case *ast.ImportSpec: + if n.Path.Pos() <= pos && pos < n.Path.End() { + found = true + // If import spec has a name, add name to path even though + // position isn't in the name. + if n.Name != nil { + path = append(path, n.Name) + } + } + case *ast.StarExpr: + // Follow star expressions to the inner identifier. + if pos == n.Star { + pos = n.X.Pos() + } + } + + return !found + }) + + if len(path) == 0 { + return nil + } + + // Reverse path so leaf is first element. + for i := 0; i < len(path)/2; i++ { + path[i], path[len(path)-1-i] = path[len(path)-1-i], path[i] + } + + return path +} diff --git a/gopls/internal/lsp/source/implementation2.go b/gopls/internal/lsp/source/implementation2.go new file mode 100644 index 00000000000..c7ff5bfb6cf --- /dev/null +++ b/gopls/internal/lsp/source/implementation2.go @@ -0,0 +1,388 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +// This file defines the new implementation of the 'implementation' +// operator that does not require type-checker data structures for an +// unbounded number of packages. +// +// TODO(adonovan): +// - Audit to ensure robustness in face of type errors. +// - Support 'error' and 'error.Error', which were also lacking from the old implementation. +// - Eliminate false positives due to 'tricky' cases of the global algorithm. +// - Ensure we have test coverage of: +// type aliases +// nil, PkgName, Builtin (all errors) +// any (empty result) +// method of unnamed interface type (e.g. var x interface { f() }) +// (the global algorithm may find implementations of this type +// but will not include it in the index.) + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + "sort" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" +) + +// Implementation returns a new sorted array of locations of +// declarations of types that implement (or are implemented by) the +// type referred to at the given position. +// +// If the position denotes a method, the computation is applied to its +// receiver type and then its corresponding methods are returned. +func Implementation(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) ([]protocol.Location, error) { + ctx, done := event.Start(ctx, "source.Implementation") + defer done() + + locs, err := implementations2(ctx, snapshot, f, pp) + if err != nil { + return nil, err + } + + // Sort and de-duplicate locations. + sort.Slice(locs, func(i, j int) bool { + return protocol.CompareLocation(locs[i], locs[j]) < 0 + }) + out := locs[:0] + for _, loc := range locs { + if len(out) == 0 || out[len(out)-1] != loc { + out = append(out, loc) + } + } + locs = out + + return locs, nil +} + +func implementations2(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.Location, error) { + + // Type-check the query package, find the query identifier, + // and locate the type or method declaration it refers to. + declPosn, err := typeDeclPosition(ctx, snapshot, fh.URI(), pp) + if err != nil { + return nil, err + } + + // Type-check the declaring package (incl. variants) for use + // by the "local" search, which uses type information to + // enumerate all types within the package that satisfy the + // query type, even those defined local to a function. + declURI := span.URIFromPath(declPosn.Filename) + declMetas, err := snapshot.MetadataForFile(ctx, declURI) + if err != nil { + return nil, err + } + if len(declMetas) == 0 { + return nil, fmt.Errorf("no packages for file %s", declURI) + } + ids := make([]PackageID, len(declMetas)) + for i, m := range declMetas { + ids[i] = m.ID + } + localPkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, ids...) + if err != nil { + return nil, err + } + // The narrowest package will do, since the local search is based + // on position and the global search is based on fingerprint. + // (Neither is based on object identity.) + declPkg := localPkgs[0] + declFile, err := declPkg.File(declURI) + if err != nil { + return nil, err // "can't happen" + } + + // Find declaration of corresponding object + // in this package based on (URI, offset). + pos, err := safetoken.Pos(declFile.Tok, declPosn.Offset) + if err != nil { + return nil, err + } + // TODO(adonovan): simplify: use objectsAt? + path := pathEnclosingObjNode(declFile.File, pos) + if path == nil { + return nil, ErrNoIdentFound // checked earlier + } + id, ok := path[0].(*ast.Ident) + if !ok { + return nil, ErrNoIdentFound // checked earlier + } + obj := declPkg.GetTypesInfo().ObjectOf(id) // may be nil + + // Is the selected identifier a type name or method? + // (For methods, report the corresponding method names.) + var queryType types.Type + var queryMethodID string + switch obj := obj.(type) { + case *types.TypeName: + queryType = obj.Type() + case *types.Func: + // For methods, use the receiver type, which may be anonymous. + if recv := obj.Type().(*types.Signature).Recv(); recv != nil { + queryType = recv.Type() + queryMethodID = obj.Id() + } + } + if queryType == nil { + return nil, fmt.Errorf("%s is not a type or method", id.Name) + } + + // Compute the method-set fingerprint used as a key to the global search. + key, hasMethods := methodsets.KeyOf(queryType) + if !hasMethods { + // A type with no methods yields an empty result. + // (No point reporting that every type satisfies 'any'.) + return nil, nil + } + + // The global search needs to look at every package in the workspace; + // see package ./methodsets. + // + // For now we do all the type checking before beginning the search. + // TODO(adonovan): opt: search in parallel topological order + // so that we can overlap index lookup with typechecking. + // I suspect a number of algorithms on the result of TypeCheck could + // be optimized by being applied as soon as each package is available. + globalMetas, err := snapshot.AllMetadata(ctx) + if err != nil { + return nil, err + } + globalIDs := make([]PackageID, 0, len(globalMetas)) + for _, m := range globalMetas { + if m.PkgPath == declPkg.Metadata().PkgPath { + continue // declaring package is handled by local implementation + } + globalIDs = append(globalIDs, m.ID) + } + globalPkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, globalIDs...) + if err != nil { + return nil, err + } + + // Search local and global packages in parallel. + var ( + group errgroup.Group + locsMu sync.Mutex + locs []protocol.Location + ) + // local search + for _, localPkg := range localPkgs { + localPkg := localPkg + group.Go(func() error { + localLocs, err := localImplementations(ctx, snapshot, localPkg, queryType, queryMethodID) + if err != nil { + return err + } + locsMu.Lock() + locs = append(locs, localLocs...) + locsMu.Unlock() + return nil + }) + } + // global search + for _, globalPkg := range globalPkgs { + globalPkg := globalPkg + group.Go(func() error { + for _, res := range globalPkg.MethodSetsIndex().Search(key, queryMethodID) { + loc := res.Location + // Map offsets to protocol.Locations in parallel (may involve I/O). + group.Go(func() error { + ploc, err := offsetToLocation(ctx, snapshot, loc.Filename, loc.Start, loc.End) + if err != nil { + return err + } + locsMu.Lock() + locs = append(locs, ploc) + locsMu.Unlock() + return nil + }) + } + return nil + }) + } + if err := group.Wait(); err != nil { + return nil, err + } + + return locs, nil +} + +// offsetToLocation converts an offset-based position to a protocol.Location, +// which requires reading the file. +func offsetToLocation(ctx context.Context, snapshot Snapshot, filename string, start, end int) (protocol.Location, error) { + uri := span.URIFromPath(filename) + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return protocol.Location{}, err // cancelled, perhaps + } + content, err := fh.Read() + if err != nil { + return protocol.Location{}, err // nonexistent or deleted ("can't happen") + } + m := protocol.NewMapper(uri, content) + return m.OffsetLocation(start, end) +} + +// typeDeclPosition returns the position of the declaration of the +// type (or one of its methods) referred to at (uri, ppos). +func typeDeclPosition(ctx context.Context, snapshot Snapshot, uri span.URI, ppos protocol.Position) (token.Position, error) { + var noPosn token.Position + + pkg, pgf, err := PackageForFile(ctx, snapshot, uri, TypecheckFull, WidestPackage) + if err != nil { + return noPosn, err + } + pos, err := pgf.PositionPos(ppos) + if err != nil { + return noPosn, err + } + + // This function inherits the limitation of its predecessor in + // requiring the selection to be an identifier (of a type or + // method). But there's no fundamental reason why one could + // not pose this query about any selected piece of syntax that + // has a type and thus a method set. + // (If LSP was more thorough about passing text selections as + // intervals to queries, you could ask about the method set of a + // subexpression such as x.f().) + + // TODO(adonovan): simplify: use objectsAt? + path := pathEnclosingObjNode(pgf.File, pos) + if path == nil { + return noPosn, ErrNoIdentFound + } + id, ok := path[0].(*ast.Ident) + if !ok { + return noPosn, ErrNoIdentFound + } + + // Is the object a type or method? Reject other kinds. + obj := pkg.GetTypesInfo().Uses[id] + if obj == nil { + // Check uses first (unlike ObjectOf) so that T in + // struct{T} is treated as a reference to a type, + // not a declaration of a field. + obj = pkg.GetTypesInfo().Defs[id] + } + switch obj := obj.(type) { + case *types.TypeName: + // ok + case *types.Func: + if obj.Type().(*types.Signature).Recv() == nil { + return noPosn, fmt.Errorf("%s is a function, not a method", id.Name) + } + case nil: + return noPosn, fmt.Errorf("%s denotes unknown object", id.Name) + default: + // e.g. *types.Var -> "var". + kind := strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types.")) + return noPosn, fmt.Errorf("%s is a %s, not a type", id.Name, kind) + } + + declPosn := safetoken.StartPosition(pkg.FileSet(), obj.Pos()) + return declPosn, nil +} + +// localImplementations searches within pkg for declarations of all +// types that are assignable to/from the query type, and returns a new +// unordered array of their locations. +// +// If methodID is non-empty, the function instead returns the location +// of each type's method (if any) of that ID. +// +// ("Local" refers to the search within the same package, but this +// function's results may include type declarations that are local to +// a function body. The global search index excludes such types +// because reliably naming such types is hard.) +func localImplementations(ctx context.Context, snapshot Snapshot, pkg Package, queryType types.Type, methodID string) ([]protocol.Location, error) { + queryType = methodsets.EnsurePointer(queryType) + + // Scan through all type declarations in the syntax. + var locs []protocol.Location + var methodLocs []methodsets.Location + for _, pgf := range pkg.CompiledGoFiles() { + ast.Inspect(pgf.File, func(n ast.Node) bool { + spec, ok := n.(*ast.TypeSpec) + if !ok { + return true // not a type declaration + } + def := pkg.GetTypesInfo().Defs[spec.Name] + if def == nil { + return true // "can't happen" for types + } + if def.(*types.TypeName).IsAlias() { + return true // skip type aliases to avoid duplicate reporting + } + candidateType := methodsets.EnsurePointer(def.Type()) + + // The historical behavior enshrined by this + // function rejects cases where both are + // (nontrivial) interface types? + // That seems like useful information. + // TODO(adonovan): UX: report I/I pairs too? + // The same question appears in the global algorithm (methodsets). + if !concreteImplementsIntf(candidateType, queryType) { + return true // not assignable + } + + // Ignore types with empty method sets. + // (No point reporting that every type satisfies 'any'.) + mset := types.NewMethodSet(candidateType) + if mset.Len() == 0 { + return true + } + + if methodID == "" { + // Found matching type. + locs = append(locs, mustLocation(pgf, spec.Name)) + return true + } + + // Find corresponding method. + // + // We can't use LookupFieldOrMethod because it requires + // the methodID's types.Package, which we don't know. + // We could recursively search pkg.Imports for it, + // but it's easier to walk the method set. + for i := 0; i < mset.Len(); i++ { + method := mset.At(i).Obj() + if method.Id() == methodID { + posn := safetoken.StartPosition(pkg.FileSet(), method.Pos()) + methodLocs = append(methodLocs, methodsets.Location{ + Filename: posn.Filename, + Start: posn.Offset, + End: posn.Offset + len(method.Name()), + }) + break + } + } + return true + }) + } + + // Finally convert method positions to protocol form by reading the files. + for _, mloc := range methodLocs { + loc, err := offsetToLocation(ctx, snapshot, mloc.Filename, mloc.Start, mloc.End) + if err != nil { + return nil, err + } + locs = append(locs, loc) + } + + return locs, nil +} diff --git a/gopls/internal/lsp/source/inlay_hint.go b/gopls/internal/lsp/source/inlay_hint.go new file mode 100644 index 00000000000..c5520d9f9dd --- /dev/null +++ b/gopls/internal/lsp/source/inlay_hint.go @@ -0,0 +1,394 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/typeparams" +) + +const ( + maxLabelLength = 28 +) + +type InlayHintFunc func(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint + +type Hint struct { + Name string + Doc string + Run InlayHintFunc +} + +const ( + ParameterNames = "parameterNames" + AssignVariableTypes = "assignVariableTypes" + ConstantValues = "constantValues" + RangeVariableTypes = "rangeVariableTypes" + CompositeLiteralTypes = "compositeLiteralTypes" + CompositeLiteralFieldNames = "compositeLiteralFields" + FunctionTypeParameters = "functionTypeParameters" +) + +var AllInlayHints = map[string]*Hint{ + AssignVariableTypes: { + Name: AssignVariableTypes, + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", + Run: assignVariableTypes, + }, + ParameterNames: { + Name: ParameterNames, + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", + Run: parameterNames, + }, + ConstantValues: { + Name: ConstantValues, + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", + Run: constantValues, + }, + RangeVariableTypes: { + Name: RangeVariableTypes, + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", + Run: rangeVariableTypes, + }, + CompositeLiteralTypes: { + Name: CompositeLiteralTypes, + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", + Run: compositeLiteralTypes, + }, + CompositeLiteralFieldNames: { + Name: CompositeLiteralFieldNames, + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", + Run: compositeLiteralFields, + }, + FunctionTypeParameters: { + Name: FunctionTypeParameters, + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", + Run: funcTypeParams, + }, +} + +func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) ([]protocol.InlayHint, error) { + ctx, done := event.Start(ctx, "source.InlayHint") + defer done() + + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, fmt.Errorf("getting file for InlayHint: %w", err) + } + + // Collect a list of the inlay hints that are enabled. + inlayHintOptions := snapshot.View().Options().InlayHintOptions + var enabledHints []InlayHintFunc + for hint, enabled := range inlayHintOptions.Hints { + if !enabled { + continue + } + if h, ok := AllInlayHints[hint]; ok { + enabledHints = append(enabledHints, h.Run) + } + } + if len(enabledHints) == 0 { + return nil, nil + } + + info := pkg.GetTypesInfo() + q := Qualifier(pgf.File, pkg.GetTypes(), info) + + // Set the range to the full file if the range is not valid. + start, end := pgf.File.Pos(), pgf.File.End() + if pRng.Start.Line < pRng.End.Line || pRng.Start.Character < pRng.End.Character { + // Adjust start and end for the specified range. + var err error + start, end, err = pgf.RangePos(pRng) + if err != nil { + return nil, err + } + } + + var hints []protocol.InlayHint + ast.Inspect(pgf.File, func(node ast.Node) bool { + // If not in range, we can stop looking. + if node == nil || node.End() < start || node.Pos() > end { + return false + } + for _, fn := range enabledHints { + hints = append(hints, fn(node, pgf.Mapper, pgf.Tok, info, &q)...) + } + return true + }) + return hints, nil +} + +func parameterNames(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + callExpr, ok := node.(*ast.CallExpr) + if !ok { + return nil + } + signature, ok := info.TypeOf(callExpr.Fun).(*types.Signature) + if !ok { + return nil + } + + var hints []protocol.InlayHint + for i, v := range callExpr.Args { + start, err := m.PosPosition(tf, v.Pos()) + if err != nil { + continue + } + params := signature.Params() + // When a function has variadic params, we skip args after + // params.Len(). + if i > params.Len()-1 { + break + } + param := params.At(i) + // param.Name is empty for built-ins like append + if param.Name() == "" { + continue + } + // Skip the parameter name hint if the arg matches the + // the parameter name. + if i, ok := v.(*ast.Ident); ok && i.Name == param.Name() { + continue + } + + label := param.Name() + if signature.Variadic() && i == params.Len()-1 { + label = label + "..." + } + hints = append(hints, protocol.InlayHint{ + Position: &start, + Label: buildLabel(label + ":"), + Kind: protocol.Parameter, + PaddingRight: true, + }) + } + return hints +} + +func funcTypeParams(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + ce, ok := node.(*ast.CallExpr) + if !ok { + return nil + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + return nil + } + inst := typeparams.GetInstances(info)[id] + if inst.TypeArgs == nil { + return nil + } + start, err := m.PosPosition(tf, id.End()) + if err != nil { + return nil + } + var args []string + for i := 0; i < inst.TypeArgs.Len(); i++ { + args = append(args, inst.TypeArgs.At(i).String()) + } + if len(args) == 0 { + return nil + } + return []protocol.InlayHint{{ + Position: &start, + Label: buildLabel("[" + strings.Join(args, ", ") + "]"), + Kind: protocol.Type, + }} +} + +func assignVariableTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + stmt, ok := node.(*ast.AssignStmt) + if !ok || stmt.Tok != token.DEFINE { + return nil + } + + var hints []protocol.InlayHint + for _, v := range stmt.Lhs { + if h := variableType(v, m, tf, info, q); h != nil { + hints = append(hints, *h) + } + } + return hints +} + +func rangeVariableTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + rStmt, ok := node.(*ast.RangeStmt) + if !ok { + return nil + } + var hints []protocol.InlayHint + if h := variableType(rStmt.Key, m, tf, info, q); h != nil { + hints = append(hints, *h) + } + if h := variableType(rStmt.Value, m, tf, info, q); h != nil { + hints = append(hints, *h) + } + return hints +} + +func variableType(e ast.Expr, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) *protocol.InlayHint { + typ := info.TypeOf(e) + if typ == nil { + return nil + } + end, err := m.PosPosition(tf, e.End()) + if err != nil { + return nil + } + return &protocol.InlayHint{ + Position: &end, + Label: buildLabel(types.TypeString(typ, *q)), + Kind: protocol.Type, + PaddingLeft: true, + } +} + +func constantValues(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + genDecl, ok := node.(*ast.GenDecl) + if !ok || genDecl.Tok != token.CONST { + return nil + } + + var hints []protocol.InlayHint + for _, v := range genDecl.Specs { + spec, ok := v.(*ast.ValueSpec) + if !ok { + continue + } + end, err := m.PosPosition(tf, v.End()) + if err != nil { + continue + } + // Show hints when values are missing or at least one value is not + // a basic literal. + showHints := len(spec.Values) == 0 + checkValues := len(spec.Names) == len(spec.Values) + var values []string + for i, w := range spec.Names { + obj, ok := info.ObjectOf(w).(*types.Const) + if !ok || obj.Val().Kind() == constant.Unknown { + return nil + } + if checkValues { + switch spec.Values[i].(type) { + case *ast.BadExpr: + return nil + case *ast.BasicLit: + default: + if obj.Val().Kind() != constant.Bool { + showHints = true + } + } + } + values = append(values, fmt.Sprintf("%v", obj.Val())) + } + if !showHints || len(values) == 0 { + continue + } + hints = append(hints, protocol.InlayHint{ + Position: &end, + Label: buildLabel("= " + strings.Join(values, ", ")), + PaddingLeft: true, + }) + } + return hints +} + +func compositeLiteralFields(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + compLit, ok := node.(*ast.CompositeLit) + if !ok { + return nil + } + typ := info.TypeOf(compLit) + if typ == nil { + return nil + } + if t, ok := typ.(*types.Pointer); ok { + typ = t.Elem() + } + strct, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil + } + + var hints []protocol.InlayHint + var allEdits []protocol.TextEdit + for i, v := range compLit.Elts { + if _, ok := v.(*ast.KeyValueExpr); !ok { + start, err := m.PosPosition(tf, v.Pos()) + if err != nil { + continue + } + if i > strct.NumFields()-1 { + break + } + hints = append(hints, protocol.InlayHint{ + Position: &start, + Label: buildLabel(strct.Field(i).Name() + ":"), + Kind: protocol.Parameter, + PaddingRight: true, + }) + allEdits = append(allEdits, protocol.TextEdit{ + Range: protocol.Range{Start: start, End: start}, + NewText: strct.Field(i).Name() + ": ", + }) + } + } + // It is not allowed to have a mix of keyed and unkeyed fields, so + // have the text edits add keys to all fields. + for i := range hints { + hints[i].TextEdits = allEdits + } + return hints +} + +func compositeLiteralTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + compLit, ok := node.(*ast.CompositeLit) + if !ok { + return nil + } + typ := info.TypeOf(compLit) + if typ == nil { + return nil + } + if compLit.Type != nil { + return nil + } + prefix := "" + if t, ok := typ.(*types.Pointer); ok { + typ = t.Elem() + prefix = "&" + } + // The type for this composite literal is implicit, add an inlay hint. + start, err := m.PosPosition(tf, compLit.Lbrace) + if err != nil { + return nil + } + return []protocol.InlayHint{{ + Position: &start, + Label: buildLabel(fmt.Sprintf("%s%s", prefix, types.TypeString(typ, *q))), + Kind: protocol.Type, + }} +} + +func buildLabel(s string) []protocol.InlayHintLabelPart { + label := protocol.InlayHintLabelPart{ + Value: s, + } + if len(s) > maxLabelLength+len("...") { + label.Value = s[:maxLabelLength] + "..." + } + return []protocol.InlayHintLabelPart{label} +} diff --git a/gopls/internal/lsp/source/known_packages.go b/gopls/internal/lsp/source/known_packages.go new file mode 100644 index 00000000000..07b4c30a818 --- /dev/null +++ b/gopls/internal/lsp/source/known_packages.go @@ -0,0 +1,140 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/parser" + "go/token" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/imports" +) + +// KnownPackagePaths returns a new list of package paths of all known +// packages in the package graph that could potentially be imported by +// the given file. The list is ordered lexicographically, except that +// all dot-free paths (standard packages) appear before dotful ones. +// +// It is part of the gopls.list_known_packages command. +func KnownPackagePaths(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]PackagePath, error) { + // This algorithm is expressed in terms of Metadata, not Packages, + // so it doesn't cause or wait for type checking. + + // Find a Metadata containing the file. + metas, err := snapshot.MetadataForFile(ctx, fh.URI()) + if err != nil { + return nil, err // e.g. context cancelled + } + if len(metas) == 0 { + return nil, fmt.Errorf("no loaded package contain file %s", fh.URI()) + } + current := metas[0] // pick one arbitrarily (they should all have the same package path) + + // Parse the file's imports so we can compute which + // PackagePaths are imported by this specific file. + src, err := fh.Read() + if err != nil { + return nil, err + } + file, err := parser.ParseFile(token.NewFileSet(), fh.URI().Filename(), src, parser.ImportsOnly) + if err != nil { + return nil, err + } + imported := make(map[PackagePath]bool) + for _, imp := range file.Imports { + if id := current.DepsByImpPath[UnquoteImportPath(imp)]; id != "" { + if m := snapshot.Metadata(id); m != nil { + imported[m.PkgPath] = true + } + } + } + + // Now find candidates among known packages. + knownPkgs, err := snapshot.AllMetadata(ctx) + if err != nil { + return nil, err + } + seen := make(map[PackagePath]bool) + for _, knownPkg := range knownPkgs { + // package main cannot be imported + if knownPkg.Name == "main" { + continue + } + // test packages cannot be imported + if knownPkg.ForTest != "" { + continue + } + // No need to import what the file already imports. + // This check is based on PackagePath, not PackageID, + // so that all test variants are filtered out too. + if imported[knownPkg.PkgPath] { + continue + } + // make sure internal packages are importable by the file + if !IsValidImport(current.PkgPath, knownPkg.PkgPath) { + continue + } + // naive check on cyclical imports + if isDirectlyCyclical(current, knownPkg) { + continue + } + // AllMetadata may have multiple variants of a pkg. + seen[knownPkg.PkgPath] = true + } + + // Augment the set by invoking the goimports algorithm. + if err := snapshot.RunProcessEnvFunc(ctx, func(o *imports.Options) error { + ctx, cancel := context.WithTimeout(ctx, time.Millisecond*80) + defer cancel() + var seenMu sync.Mutex + wrapped := func(ifix imports.ImportFix) { + seenMu.Lock() + defer seenMu.Unlock() + // TODO(adonovan): what if the actual package path has a vendor/ prefix? + seen[PackagePath(ifix.StmtInfo.ImportPath)] = true + } + return imports.GetAllCandidates(ctx, wrapped, "", fh.URI().Filename(), string(current.Name), o.Env) + }); err != nil { + // If goimports failed, proceed with just the candidates from the metadata. + event.Error(ctx, "imports.GetAllCandidates", err) + } + + // Sort lexicographically, but with std before non-std packages. + paths := make([]PackagePath, 0, len(seen)) + for path := range seen { + paths = append(paths, path) + } + sort.Slice(paths, func(i, j int) bool { + importI, importJ := paths[i], paths[j] + iHasDot := strings.Contains(string(importI), ".") + jHasDot := strings.Contains(string(importJ), ".") + if iHasDot != jHasDot { + return jHasDot // dot-free paths (standard packages) compare less + } + return importI < importJ + }) + + return paths, nil +} + +// isDirectlyCyclical checks if imported directly imports pkg. +// It does not (yet) offer a full cyclical check because showing a user +// a list of importable packages already generates a very large list +// and having a few false positives in there could be worth the +// performance snappiness. +// +// TODO(adonovan): ensure that metadata graph is always cyclic! +// Many algorithms will get confused or even stuck in the +// presence of cycles. Then replace this function by 'false'. +func isDirectlyCyclical(pkg, imported *Metadata) bool { + _, ok := imported.DepsByPkgPath[pkg.PkgPath] + return ok +} diff --git a/gopls/internal/lsp/source/linkname.go b/gopls/internal/lsp/source/linkname.go new file mode 100644 index 00000000000..4fb667e6860 --- /dev/null +++ b/gopls/internal/lsp/source/linkname.go @@ -0,0 +1,165 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "errors" + "fmt" + "go/token" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" +) + +// ErrNoLinkname is returned by LinknameDefinition when no linkname +// directive is found at a particular position. +// As such it indicates that other definitions could be worth checking. +var ErrNoLinkname = errors.New("no linkname directive found") + +// LinknameDefinition finds the definition of the linkname directive in fh at pos. +// If there is no linkname directive at pos, returns ErrNoLinkname. +func LinknameDefinition(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.Location, error) { + pkgPath, name := parseLinkname(ctx, snapshot, fh, pos) + if pkgPath == "" { + return nil, ErrNoLinkname + } + return findLinkname(ctx, snapshot, fh, pos, PackagePath(pkgPath), name) +} + +// parseLinkname attempts to parse a go:linkname declaration at the given pos. +// If successful, it returns the package path and object name referenced by the second +// argument of the linkname directive. +// +// If the position is not in the second argument of a go:linkname directive, or parsing fails, it returns "", "". +func parseLinkname(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) (pkgPath, name string) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return "", "" + } + + span, err := pgf.Mapper.PositionPoint(pos) + if err != nil { + return "", "" + } + atLine := span.Line() + atColumn := span.Column() + + // Looking for pkgpath in '//go:linkname f pkgpath.g'. + // (We ignore 1-arg linkname directives.) + directive, column := findLinknameOnLine(pgf, atLine) + parts := strings.Fields(directive) + if len(parts) != 3 { + return "", "" + } + + // Inside 2nd arg [start, end]? + end := column + len(directive) + start := end - len(parts[2]) + if !(start <= atColumn && atColumn <= end) { + return "", "" + } + linkname := parts[2] + + // Split the pkg path from the name. + dot := strings.LastIndexByte(linkname, '.') + if dot < 0 { + return "", "" + } + return linkname[:dot], linkname[dot+1:] +} + +// findLinknameOnLine returns the first linkname directive on line and the column it starts at. +// Returns "", 0 if no linkname directive is found on the line. +func findLinknameOnLine(pgf *ParsedGoFile, line int) (string, int) { + for _, grp := range pgf.File.Comments { + for _, com := range grp.List { + if strings.HasPrefix(com.Text, "//go:linkname") { + p := safetoken.Position(pgf.Tok, com.Pos()) + if p.Line == line { + return com.Text, p.Column + } + } + } + } + return "", 0 +} + +// findLinkname searches dependencies of packages containing fh for an object +// with linker name matching the given package path and name. +func findLinkname(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position, pkgPath PackagePath, name string) ([]protocol.Location, error) { + metas, err := snapshot.MetadataForFile(ctx, fh.URI()) + if err != nil { + return nil, err + } + if len(metas) == 0 { + return nil, fmt.Errorf("no package found for file %q", fh.URI()) + } + + // Find package starting from narrowest package metadata. + pkgMeta := findPackageInDeps(snapshot, metas[0], pkgPath) + if pkgMeta == nil { + // Fall back to searching reverse dependencies. + reverse, err := snapshot.ReverseDependencies(ctx, metas[0].ID, true /* transitive */) + if err != nil { + return nil, err + } + for _, dep := range reverse { + if dep.PkgPath == pkgPath { + pkgMeta = dep + break + } + } + if pkgMeta == nil { + return nil, fmt.Errorf("cannot find package %q", pkgPath) + } + } + + // When found, type check the desired package (snapshot.TypeCheck in TypecheckFull mode), + pkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, pkgMeta.ID) + if err != nil { + return nil, err + } + pkg := pkgs[0] + + obj := pkg.GetTypes().Scope().Lookup(name) + if obj == nil { + return nil, fmt.Errorf("package %q does not define %s", pkgPath, name) + } + + objURI := safetoken.StartPosition(pkg.FileSet(), obj.Pos()) + pgf, err := pkg.File(span.URIFromPath(objURI.Filename)) + if err != nil { + return nil, err + } + loc, err := pgf.PosLocation(obj.Pos(), obj.Pos()+token.Pos(len(name))) + if err != nil { + return nil, err + } + return []protocol.Location{loc}, nil +} + +// findPackageInDeps returns the dependency of meta of the specified package path, if any. +func findPackageInDeps(snapshot Snapshot, meta *Metadata, pkgPath PackagePath) *Metadata { + seen := make(map[*Metadata]bool) + var visit func(*Metadata) *Metadata + visit = func(meta *Metadata) *Metadata { + if !seen[meta] { + seen[meta] = true + if meta.PkgPath == pkgPath { + return meta + } + for _, id := range meta.DepsByPkgPath { + if m := visit(snapshot.Metadata(id)); m != nil { + return m + } + } + } + return nil + } + return visit(meta) +} diff --git a/gopls/internal/lsp/source/methodsets/methodsets.go b/gopls/internal/lsp/source/methodsets/methodsets.go new file mode 100644 index 00000000000..2c025557821 --- /dev/null +++ b/gopls/internal/lsp/source/methodsets/methodsets.go @@ -0,0 +1,476 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package methodsets defines an incremental, serializable index of +// method-set information that allows efficient 'implements' queries +// across packages of the workspace without using the type checker. +// +// This package provides only the "global" (all workspace) search; the +// "local" search within a given package uses a different +// implementation based on type-checker data structures for a single +// package plus variants; see ../implementation2.go. +// The local algorithm is more precise as it tests function-local types too. +// +// A global index of function-local types is challenging since they +// may reference other local types, for which we would need to invent +// stable names, an unsolved problem described in passing in Go issue +// 57497. The global algorithm also does not index anonymous interface +// types, even outside function bodies. +// +// Consequently, global results are not symmetric: applying the +// operation twice may not get you back where you started. +package methodsets + +// DESIGN +// +// See https://go.dev/cl/452060 for a minimal exposition of the algorithm. +// +// For each method, we compute a fingerprint: a string representing +// the method name and type such that equal fingerprint strings mean +// identical method types. +// +// For efficiency, the fingerprint is reduced to a single bit +// of a uint64, so that the method set can be represented as +// the union of those method bits (a uint64 bitmask). +// Assignability thus reduces to a subset check on bitmasks +// followed by equality checks on fingerprints. +// +// In earlier experiments, using 128-bit masks instead of 64 reduced +// the number of candidates by about 2x. Using (like a Bloom filter) a +// different hash function to compute a second 64-bit mask and +// performing a second mask test reduced it by about 4x. +// Neither had much effect on the running time, presumably because a +// single 64-bit mask is quite effective. See CL 452060 for details. + +import ( + "fmt" + "go/token" + "go/types" + "hash/crc32" + "strconv" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/internal/typeparams" +) + +// An Index records the non-empty method sets of all package-level +// types in a package in a form that permits assignability queries +// without the type checker. +type Index struct { + pkg gobPackage +} + +// NewIndex returns a new index of method-set information for all +// package-level types in the specified package. +func NewIndex(fset *token.FileSet, pkg *types.Package) *Index { + return new(indexBuilder).build(fset, pkg) +} + +// A Location records the extent of an identifier in byte-offset form. +// +// Conversion to protocol (UTF-16) form is done by the caller after a +// search, not during index construction. +type Location struct { + Filename string + Start, End int // byte offsets +} + +// A Key represents the method set of a given type in a form suitable +// to pass to the (*Index).Search method of many different Indexes. +type Key struct { + mset gobMethodSet // note: lacks position information +} + +// KeyOf returns the search key for the method sets of a given type. +// It returns false if the type has no methods. +func KeyOf(t types.Type) (Key, bool) { + mset := methodSetInfo(t, nil) + if mset.Mask == 0 { + return Key{}, false // no methods + } + return Key{mset}, true +} + +// A Result reports a matching type or method in a method-set search. +type Result struct { + Location Location // location of the type or method + + // methods only: + PkgPath string // path of declaring package (may differ due to embedding) + ObjectPath objectpath.Path // path of method within declaring package +} + +// Search reports each type that implements (or is implemented by) the +// type that produced the search key. If methodID is nonempty, only +// that method of each type is reported. +// +// The result does not include the error.Error method. +// TODO(adonovan): give this special case a more systematic treatment. +func (index *Index) Search(key Key, methodID string) []Result { + var results []Result + for _, candidate := range index.pkg.MethodSets { + // Traditionally this feature doesn't report + // interface/interface elements of the relation. + // I think that's a mistake. + // TODO(adonovan): UX: change it, here and in the local implementation. + if candidate.IsInterface && key.mset.IsInterface { + continue + } + if !satisfies(candidate, key.mset) && !satisfies(key.mset, candidate) { + continue + } + + if candidate.Tricky { + // If any interface method is tricky then extra + // checking may be needed to eliminate a false positive. + // TODO(adonovan): implement it. + } + + if methodID == "" { + results = append(results, Result{Location: index.location(candidate.Posn)}) + } else { + for _, m := range candidate.Methods { + // Here we exploit knowledge of the shape of the fingerprint string. + if strings.HasPrefix(m.Fingerprint, methodID) && + m.Fingerprint[len(methodID)] == '(' { + + // Don't report error.Error among the results: + // it has no true source location, no package, + // and is excluded from the xrefs index. + if m.PkgPath == 0 || m.ObjectPath == 0 { + if methodID != "Error" { + panic("missing info for" + methodID) + } + continue + } + + results = append(results, Result{ + Location: index.location(m.Posn), + PkgPath: index.pkg.Strings[m.PkgPath], + ObjectPath: objectpath.Path(index.pkg.Strings[m.ObjectPath]), + }) + break + } + } + } + } + return results +} + +// satisfies does a fast check for whether x satisfies y. +func satisfies(x, y gobMethodSet) bool { + return y.IsInterface && x.Mask&y.Mask == y.Mask && subset(y, x) +} + +// subset reports whether method set x is a subset of y. +func subset(x, y gobMethodSet) bool { +outer: + for _, mx := range x.Methods { + for _, my := range y.Methods { + if mx.Sum == my.Sum && mx.Fingerprint == my.Fingerprint { + continue outer // found; try next x method + } + } + return false // method of x not found in y + } + return true // all methods of x found in y +} + +func (index *Index) location(posn gobPosition) Location { + return Location{ + Filename: index.pkg.Strings[posn.File], + Start: posn.Offset, + End: posn.Offset + posn.Len, + } +} + +// An indexBuilder builds an index for a single package. +type indexBuilder struct { + gobPackage + stringIndex map[string]int +} + +// build adds to the index all package-level named types of the specified package. +func (b *indexBuilder) build(fset *token.FileSet, pkg *types.Package) *Index { + _ = b.string("") // 0 => "" + + objectPos := func(obj types.Object) gobPosition { + posn := safetoken.StartPosition(fset, obj.Pos()) + return gobPosition{b.string(posn.Filename), posn.Offset, len(obj.Name())} + } + + // setindexInfo sets the (Posn, PkgPath, ObjectPath) fields for each method declaration. + setIndexInfo := func(m *gobMethod, method *types.Func) { + // error.Error has empty Position, PkgPath, and ObjectPath. + if method.Pkg() == nil { + return + } + + m.Posn = objectPos(method) + m.PkgPath = b.string(method.Pkg().Path()) + + // Instantiations of generic methods don't have an + // object path, so we use the generic. + if p, err := objectpath.For(typeparams.OriginMethod(method)); err != nil { + panic(err) // can't happen for a method of a package-level type + } else { + m.ObjectPath = b.string(string(p)) + } + } + + // We ignore aliases, though in principle they could define a + // struct{...} or interface{...} type, or an instantiation of + // a generic, that has a novel method set. + scope := pkg.Scope() + for _, name := range scope.Names() { + if tname, ok := scope.Lookup(name).(*types.TypeName); ok && !tname.IsAlias() { + if mset := methodSetInfo(tname.Type(), setIndexInfo); mset.Mask != 0 { + mset.Posn = objectPos(tname) + // Only record types with non-trivial method sets. + b.MethodSets = append(b.MethodSets, mset) + } + } + } + + return &Index{pkg: b.gobPackage} +} + +// string returns a small integer that encodes the string. +func (b *indexBuilder) string(s string) int { + i, ok := b.stringIndex[s] + if !ok { + i = len(b.Strings) + if b.stringIndex == nil { + b.stringIndex = make(map[string]int) + } + b.stringIndex[s] = i + b.Strings = append(b.Strings, s) + } + return i +} + +// methodSetInfo returns the method-set fingerprint of a type. +// It calls the optional setIndexInfo function for each gobMethod. +// This is used during index construction, but not search (KeyOf), +// to store extra information. +func methodSetInfo(t types.Type, setIndexInfo func(*gobMethod, *types.Func)) gobMethodSet { + // For non-interface types, use *T + // (if T is not already a pointer) + // since it may have more methods. + mset := types.NewMethodSet(EnsurePointer(t)) + + // Convert the method set into a compact summary. + var mask uint64 + tricky := false + methods := make([]gobMethod, mset.Len()) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i).Obj().(*types.Func) + fp, isTricky := fingerprint(m) + if isTricky { + tricky = true + } + sum := crc32.ChecksumIEEE([]byte(fp)) + methods[i] = gobMethod{Fingerprint: fp, Sum: sum} + if setIndexInfo != nil { + setIndexInfo(&methods[i], m) // set Position, PkgPath, ObjectPath + } + mask |= 1 << uint64(((sum>>24)^(sum>>16)^(sum>>8)^sum)&0x3f) + } + return gobMethodSet{ + IsInterface: types.IsInterface(t), + Tricky: tricky, + Mask: mask, + Methods: methods, + } +} + +// EnsurePointer wraps T in a types.Pointer if T is a named, non-interface type. +// This is useful to make sure you consider a named type's full method set. +func EnsurePointer(T types.Type) types.Type { + if _, ok := T.(*types.Named); ok && !types.IsInterface(T) { + return types.NewPointer(T) + } + + return T +} + +// fingerprint returns an encoding of a method signature such that two +// methods with equal encodings have identical types, except for a few +// tricky types whose encodings may spuriously match and whose exact +// identity computation requires the type checker to eliminate false +// positives (which are rare). The boolean result indicates whether +// the result was one of these tricky types. +// +// In the standard library, 99.8% of package-level types have a +// non-tricky method-set. The most common exceptions are due to type +// parameters. +// +// The fingerprint string starts with method.Id() + "(". +func fingerprint(method *types.Func) (string, bool) { + var buf strings.Builder + tricky := false + var fprint func(t types.Type) + fprint = func(t types.Type) { + switch t := t.(type) { + case *types.Named: + tname := t.Obj() + if tname.Pkg() != nil { + buf.WriteString(strconv.Quote(tname.Pkg().Path())) + buf.WriteByte('.') + } else if tname.Name() != "error" { + panic(tname) // error is the only named type with no package + } + buf.WriteString(tname.Name()) + + case *types.Array: + fmt.Fprintf(&buf, "[%d]", t.Len()) + fprint(t.Elem()) + + case *types.Slice: + buf.WriteString("[]") + fprint(t.Elem()) + + case *types.Pointer: + buf.WriteByte('*') + fprint(t.Elem()) + + case *types.Map: + buf.WriteString("map[") + fprint(t.Key()) + buf.WriteByte(']') + fprint(t.Elem()) + + case *types.Chan: + switch t.Dir() { + case types.SendRecv: + buf.WriteString("chan ") + case types.SendOnly: + buf.WriteString("<-chan ") + case types.RecvOnly: + buf.WriteString("chan<- ") + } + fprint(t.Elem()) + + case *types.Tuple: + buf.WriteByte('(') + for i := 0; i < t.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + fprint(t.At(i).Type()) + } + buf.WriteByte(')') + + case *types.Basic: + // Use canonical names for uint8 and int32 aliases. + switch t.Kind() { + case types.Byte: + buf.WriteString("byte") + case types.Rune: + buf.WriteString("rune") + default: + buf.WriteString(t.String()) + } + + case *types.Signature: + buf.WriteString("func") + fprint(t.Params()) + if t.Variadic() { + buf.WriteString("...") // not quite Go syntax + } + fprint(t.Results()) + + case *types.Struct: + // Non-empty unnamed struct types in method + // signatures are vanishingly rare. + buf.WriteString("struct{") + for i := 0; i < t.NumFields(); i++ { + if i > 0 { + buf.WriteByte(';') + } + f := t.Field(i) + // This isn't quite right for embedded type aliases. + // (See types.TypeString(StructType) and #44410 for context.) + // But this is vanishingly rare. + if !f.Embedded() { + buf.WriteString(f.Id()) + buf.WriteByte(' ') + } + fprint(f.Type()) + if tag := t.Tag(i); tag != "" { + buf.WriteByte(' ') + buf.WriteString(strconv.Quote(tag)) + } + } + buf.WriteString("}") + + case *types.Interface: + if t.NumMethods() == 0 { + buf.WriteString("any") // common case + } else { + // Interface assignability is particularly + // tricky due to the possibility of recursion. + tricky = true + // We could still give more disambiguating precision + // than "..." if we wanted to. + buf.WriteString("interface{...}") + } + + case *typeparams.TypeParam: + tricky = true + // TODO(adonovan): refine this by adding a numeric suffix + // indicating the index among the receiver type's parameters. + buf.WriteByte('?') + + default: // incl. *types.Union + panic(t) + } + } + + buf.WriteString(method.Id()) // e.g. "pkg.Type" + sig := method.Type().(*types.Signature) + fprint(sig.Params()) + fprint(sig.Results()) + return buf.String(), tricky +} + +// -- serial format of index -- + +// The cost of gob encoding and decoding for most packages in x/tools +// is under 50us, with occasional peaks of around 1-3ms. +// The encoded indexes are around 1KB-50KB. + +// A gobPackage records the method set of each package-level type for a single package. +type gobPackage struct { + Strings []string // index of strings used by gobPosition.File, gobMethod.{Pkg,Object}Path + MethodSets []gobMethodSet +} + +// A gobMethodSet records the method set of a single type. +type gobMethodSet struct { + Posn gobPosition + IsInterface bool + Tricky bool // at least one method is tricky; assignability requires go/types + Mask uint64 // mask with 1 bit from each of methods[*].sum + Methods []gobMethod +} + +// A gobMethod records the name, type, and position of a single method. +type gobMethod struct { + Fingerprint string // string of form "methodID(params...)(results)" + Sum uint32 // checksum of fingerprint + + // index records only (zero in KeyOf; also for index of error.Error). + Posn gobPosition // location of method declaration + PkgPath int // path of package containing method declaration + ObjectPath int // object path of method relative to PkgPath +} + +// A gobPosition records the file, offset, and length of an identifier. +type gobPosition struct { + File int // index into gopPackage.Strings + Offset, Len int // in bytes +} diff --git a/gopls/internal/lsp/source/options.go b/gopls/internal/lsp/source/options.go new file mode 100644 index 00000000000..aaacc59b8e2 --- /dev/null +++ b/gopls/internal/lsp/source/options.go @@ -0,0 +1,1629 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "io" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/asmdecl" + "golang.org/x/tools/go/analysis/passes/assign" + "golang.org/x/tools/go/analysis/passes/atomic" + "golang.org/x/tools/go/analysis/passes/atomicalign" + "golang.org/x/tools/go/analysis/passes/bools" + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/cgocall" + "golang.org/x/tools/go/analysis/passes/composite" + "golang.org/x/tools/go/analysis/passes/copylock" + "golang.org/x/tools/go/analysis/passes/deepequalerrors" + "golang.org/x/tools/go/analysis/passes/directive" + "golang.org/x/tools/go/analysis/passes/errorsas" + "golang.org/x/tools/go/analysis/passes/fieldalignment" + "golang.org/x/tools/go/analysis/passes/httpresponse" + "golang.org/x/tools/go/analysis/passes/ifaceassert" + "golang.org/x/tools/go/analysis/passes/loopclosure" + "golang.org/x/tools/go/analysis/passes/lostcancel" + "golang.org/x/tools/go/analysis/passes/nilfunc" + "golang.org/x/tools/go/analysis/passes/nilness" + "golang.org/x/tools/go/analysis/passes/printf" + "golang.org/x/tools/go/analysis/passes/shadow" + "golang.org/x/tools/go/analysis/passes/shift" + "golang.org/x/tools/go/analysis/passes/sortslice" + "golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/stringintconv" + "golang.org/x/tools/go/analysis/passes/structtag" + "golang.org/x/tools/go/analysis/passes/testinggoroutine" + "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/timeformat" + "golang.org/x/tools/go/analysis/passes/unmarshal" + "golang.org/x/tools/go/analysis/passes/unreachable" + "golang.org/x/tools/go/analysis/passes/unsafeptr" + "golang.org/x/tools/go/analysis/passes/unusedresult" + "golang.org/x/tools/go/analysis/passes/unusedwrite" + "golang.org/x/tools/gopls/internal/lsp/analysis/embeddirective" + "golang.org/x/tools/gopls/internal/lsp/analysis/fillreturns" + "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct" + "golang.org/x/tools/gopls/internal/lsp/analysis/infertypeargs" + "golang.org/x/tools/gopls/internal/lsp/analysis/nonewvars" + "golang.org/x/tools/gopls/internal/lsp/analysis/noresultvalues" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifycompositelit" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyrange" + "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyslice" + "golang.org/x/tools/gopls/internal/lsp/analysis/stubmethods" + "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname" + "golang.org/x/tools/gopls/internal/lsp/analysis/unusedparams" + "golang.org/x/tools/gopls/internal/lsp/analysis/unusedvariable" + "golang.org/x/tools/gopls/internal/lsp/analysis/useany" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/diff/myers" +) + +var ( + optionsOnce sync.Once + defaultOptions *Options +) + +// DefaultOptions is the options that are used for Gopls execution independent +// of any externally provided configuration (LSP initialization, command +// invocation, etc.). +func DefaultOptions() *Options { + optionsOnce.Do(func() { + var commands []string + for _, c := range command.Commands { + commands = append(commands, c.ID()) + } + defaultOptions = &Options{ + ClientOptions: ClientOptions{ + InsertTextFormat: protocol.PlainTextTextFormat, + PreferredContentFormat: protocol.Markdown, + ConfigurationSupported: true, + DynamicConfigurationSupported: true, + DynamicRegistrationSemanticTokensSupported: true, + DynamicWatchedFilesSupported: true, + LineFoldingOnly: false, + HierarchicalDocumentSymbolSupport: true, + }, + ServerOptions: ServerOptions{ + SupportedCodeActions: map[FileKind]map[protocol.CodeActionKind]bool{ + Go: { + protocol.SourceFixAll: true, + protocol.SourceOrganizeImports: true, + protocol.QuickFix: true, + protocol.RefactorRewrite: true, + protocol.RefactorExtract: true, + }, + Mod: { + protocol.SourceOrganizeImports: true, + protocol.QuickFix: true, + }, + Work: {}, + Sum: {}, + Tmpl: {}, + }, + SupportedCommands: commands, + }, + UserOptions: UserOptions{ + BuildOptions: BuildOptions{ + ExpandWorkspaceToModule: true, + MemoryMode: ModeNormal, + DirectoryFilters: []string{"-**/node_modules"}, + TemplateExtensions: []string{}, + StandaloneTags: []string{"ignore"}, + }, + UIOptions: UIOptions{ + DiagnosticOptions: DiagnosticOptions{ + DiagnosticsDelay: 250 * time.Millisecond, + Annotations: map[Annotation]bool{ + Bounds: true, + Escape: true, + Inline: true, + Nil: true, + }, + Vulncheck: ModeVulncheckOff, + }, + InlayHintOptions: InlayHintOptions{}, + DocumentationOptions: DocumentationOptions{ + HoverKind: FullDocumentation, + LinkTarget: "pkg.go.dev", + LinksInHover: true, + }, + NavigationOptions: NavigationOptions{ + ImportShortcut: BothShortcuts, + SymbolMatcher: SymbolFastFuzzy, + SymbolStyle: DynamicSymbols, + }, + CompletionOptions: CompletionOptions{ + Matcher: Fuzzy, + CompletionBudget: 100 * time.Millisecond, + ExperimentalPostfixCompletions: true, + }, + Codelenses: map[string]bool{ + string(command.Generate): true, + string(command.RegenerateCgo): true, + string(command.Tidy): true, + string(command.GCDetails): false, + string(command.UpgradeDependency): true, + string(command.Vendor): true, + // TODO(hyangah): enable command.RunGovulncheck. + }, + }, + }, + InternalOptions: InternalOptions{ + LiteralCompletions: true, + TempModfile: true, + CompleteUnimported: true, + CompletionDocumentation: true, + DeepCompletion: true, + ChattyDiagnostics: true, + NewDiff: "both", + }, + Hooks: Hooks{ + // TODO(adonovan): switch to new diff.Strings implementation. + ComputeEdits: myers.ComputeEdits, + URLRegexp: urlRegexp(), + DefaultAnalyzers: defaultAnalyzers(), + TypeErrorAnalyzers: typeErrorAnalyzers(), + ConvenienceAnalyzers: convenienceAnalyzers(), + StaticcheckAnalyzers: map[string]*Analyzer{}, + GoDiff: true, + }, + } + }) + return defaultOptions +} + +// Options holds various configuration that affects Gopls execution, organized +// by the nature or origin of the settings. +type Options struct { + ClientOptions + ServerOptions + UserOptions + InternalOptions + Hooks +} + +// ClientOptions holds LSP-specific configuration that is provided by the +// client. +type ClientOptions struct { + InsertTextFormat protocol.InsertTextFormat + ConfigurationSupported bool + DynamicConfigurationSupported bool + DynamicRegistrationSemanticTokensSupported bool + DynamicWatchedFilesSupported bool + PreferredContentFormat protocol.MarkupKind + LineFoldingOnly bool + HierarchicalDocumentSymbolSupport bool + SemanticTypes []string + SemanticMods []string + RelatedInformationSupported bool + CompletionTags bool + CompletionDeprecated bool + SupportedResourceOperations []protocol.ResourceOperationKind +} + +// ServerOptions holds LSP-specific configuration that is provided by the +// server. +type ServerOptions struct { + SupportedCodeActions map[FileKind]map[protocol.CodeActionKind]bool + SupportedCommands []string +} + +type BuildOptions struct { + // BuildFlags is the set of flags passed on to the build system when invoked. + // It is applied to queries like `go list`, which is used when discovering files. + // The most common use is to set `-tags`. + BuildFlags []string + + // Env adds environment variables to external commands run by `gopls`, most notably `go list`. + Env map[string]string + + // DirectoryFilters can be used to exclude unwanted directories from the + // workspace. By default, all directories are included. Filters are an + // operator, `+` to include and `-` to exclude, followed by a path prefix + // relative to the workspace folder. They are evaluated in order, and + // the last filter that applies to a path controls whether it is included. + // The path prefix can be empty, so an initial `-` excludes everything. + // + // DirectoryFilters also supports the `**` operator to match 0 or more directories. + // + // Examples: + // + // Exclude node_modules at current depth: `-node_modules` + // + // Exclude node_modules at any depth: `-**/node_modules` + // + // Include only project_a: `-` (exclude everything), `+project_a` + // + // Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules` + DirectoryFilters []string + + // TemplateExtensions gives the extensions of file names that are treateed + // as template files. (The extension + // is the part of the file name after the final dot.) + TemplateExtensions []string + + // MemoryMode controls the tradeoff `gopls` makes between memory usage and + // correctness. + // + // Values other than `Normal` are untested and may break in surprising ways. + MemoryMode MemoryMode `status:"experimental"` + + // ExpandWorkspaceToModule instructs `gopls` to adjust the scope of the + // workspace to find the best available module root. `gopls` first looks for + // a go.mod file in any parent directory of the workspace folder, expanding + // the scope to that directory if it exists. If no viable parent directory is + // found, gopls will check if there is exactly one child directory containing + // a go.mod file, narrowing the scope to that directory if it exists. + ExpandWorkspaceToModule bool `status:"experimental"` + + // AllowModfileModifications disables -mod=readonly, allowing imports from + // out-of-scope modules. This option will eventually be removed. + AllowModfileModifications bool `status:"experimental"` + + // AllowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module + // downloads rather than requiring user action. This option will eventually + // be removed. + AllowImplicitNetworkAccess bool `status:"experimental"` + + // StandaloneTags specifies a set of build constraints that identify + // individual Go source files that make up the entire main package of an + // executable. + // + // A common example of standalone main files is the convention of using the + // directive `//go:build ignore` to denote files that are not intended to be + // included in any package, for example because they are invoked directly by + // the developer using `go run`. + // + // Gopls considers a file to be a standalone main file if and only if it has + // package name "main" and has a build directive of the exact form + // "//go:build tag" or "// +build tag", where tag is among the list of tags + // configured by this setting. Notably, if the build constraint is more + // complicated than a simple tag (such as the composite constraint + // `//go:build tag && go1.18`), the file is not considered to be a standalone + // main file. + // + // This setting is only supported when gopls is built with Go 1.16 or later. + StandaloneTags []string +} + +type UIOptions struct { + DocumentationOptions + CompletionOptions + NavigationOptions + DiagnosticOptions + InlayHintOptions + + // Codelenses overrides the enabled/disabled state of code lenses. See the + // "Code Lenses" section of the + // [Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses) + // for the list of supported lenses. + // + // Example Usage: + // + // ```json5 + // "gopls": { + // ... + // "codelenses": { + // "generate": false, // Don't show the `go generate` lens. + // "gc_details": true // Show a code lens toggling the display of gc's choices. + // } + // ... + // } + // ``` + Codelenses map[string]bool + + // SemanticTokens controls whether the LSP server will send + // semantic tokens to the client. + SemanticTokens bool `status:"experimental"` + + // NoSemanticString turns off the sending of the semantic token 'string' + NoSemanticString bool `status:"experimental"` + + // NoSemanticNumber turns off the sending of the semantic token 'number' + NoSemanticNumber bool `status:"experimental"` +} + +type CompletionOptions struct { + // Placeholders enables placeholders for function parameters or struct + // fields in completion responses. + UsePlaceholders bool + + // CompletionBudget is the soft latency goal for completion requests. Most + // requests finish in a couple milliseconds, but in some cases deep + // completions can take much longer. As we use up our budget we + // dynamically reduce the search scope to ensure we return timely + // results. Zero means unlimited. + CompletionBudget time.Duration `status:"debug"` + + // Matcher sets the algorithm that is used when calculating completion + // candidates. + Matcher Matcher `status:"advanced"` + + // ExperimentalPostfixCompletions enables artificial method snippets + // such as "someSlice.sort!". + ExperimentalPostfixCompletions bool `status:"experimental"` +} + +type DocumentationOptions struct { + // HoverKind controls the information that appears in the hover text. + // SingleLine and Structured are intended for use only by authors of editor plugins. + HoverKind HoverKind + + // LinkTarget controls where documentation links go. + // It might be one of: + // + // * `"godoc.org"` + // * `"pkg.go.dev"` + // + // If company chooses to use its own `godoc.org`, its address can be used as well. + // + // Modules matching the GOPRIVATE environment variable will not have + // documentation links in hover. + LinkTarget string + + // LinksInHover toggles the presence of links to documentation in hover. + LinksInHover bool +} + +type FormattingOptions struct { + // Local is the equivalent of the `goimports -local` flag, which puts + // imports beginning with this string after third-party packages. It should + // be the prefix of the import path whose imports should be grouped + // separately. + Local string + + // Gofumpt indicates if we should run gofumpt formatting. + Gofumpt bool +} + +type DiagnosticOptions struct { + // Analyses specify analyses that the user would like to enable or disable. + // A map of the names of analysis passes that should be enabled/disabled. + // A full list of analyzers that gopls uses can be found in + // [analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md). + // + // Example Usage: + // + // ```json5 + // ... + // "analyses": { + // "unreachable": false, // Disable the unreachable analyzer. + // "unusedparams": true // Enable the unusedparams analyzer. + // } + // ... + // ``` + Analyses map[string]bool + + // Staticcheck enables additional analyses from staticcheck.io. + // These analyses are documented on + // [Staticcheck's website](https://staticcheck.io/docs/checks/). + Staticcheck bool `status:"experimental"` + + // Annotations specifies the various kinds of optimization diagnostics + // that should be reported by the gc_details command. + Annotations map[Annotation]bool `status:"experimental"` + + // Vulncheck enables vulnerability scanning. + Vulncheck VulncheckMode `status:"experimental"` + + // DiagnosticsDelay controls the amount of time that gopls waits + // after the most recent file modification before computing deep diagnostics. + // Simple diagnostics (parsing and type-checking) are always run immediately + // on recently modified packages. + // + // This option must be set to a valid duration string, for example `"250ms"`. + DiagnosticsDelay time.Duration `status:"advanced"` +} + +type InlayHintOptions struct { + // Hints specify inlay hints that users want to see. A full list of hints + // that gopls uses can be found in + // [inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md). + Hints map[string]bool `status:"experimental"` +} + +type NavigationOptions struct { + // ImportShortcut specifies whether import statements should link to + // documentation or go to definitions. + ImportShortcut ImportShortcut + + // SymbolMatcher sets the algorithm that is used when finding workspace symbols. + SymbolMatcher SymbolMatcher `status:"advanced"` + + // SymbolStyle controls how symbols are qualified in symbol responses. + // + // Example Usage: + // + // ```json5 + // "gopls": { + // ... + // "symbolStyle": "Dynamic", + // ... + // } + // ``` + SymbolStyle SymbolStyle `status:"advanced"` +} + +// UserOptions holds custom Gopls configuration (not part of the LSP) that is +// modified by the client. +type UserOptions struct { + BuildOptions + UIOptions + FormattingOptions + + // VerboseOutput enables additional debug logging. + VerboseOutput bool `status:"debug"` +} + +// EnvSlice returns Env as a slice of k=v strings. +func (u *UserOptions) EnvSlice() []string { + var result []string + for k, v := range u.Env { + result = append(result, fmt.Sprintf("%v=%v", k, v)) + } + return result +} + +// SetEnvSlice sets Env from a slice of k=v strings. +func (u *UserOptions) SetEnvSlice(env []string) { + u.Env = map[string]string{} + for _, kv := range env { + split := strings.SplitN(kv, "=", 2) + if len(split) != 2 { + continue + } + u.Env[split[0]] = split[1] + } +} + +// DiffFunction is the type for a function that produces a set of edits that +// convert from the before content to the after content. +type DiffFunction func(before, after string) []diff.Edit + +// Hooks contains configuration that is provided to the Gopls command by the +// main package. +type Hooks struct { + // LicensesText holds third party licenses for software used by gopls. + LicensesText string + + // GoDiff is used in gopls/hooks to get Myers' diff + GoDiff bool + + // Whether staticcheck is supported. + StaticcheckSupported bool + + // ComputeEdits is used to compute edits between file versions. + ComputeEdits DiffFunction + + // URLRegexp is used to find potential URLs in comments/strings. + // + // Not all matches are shown to the user: if the matched URL is not detected + // as valid, it will be skipped. + URLRegexp *regexp.Regexp + + // GofumptFormat allows the gopls module to wire-in a call to + // gofumpt/format.Source. langVersion and modulePath are used for some + // Gofumpt formatting rules -- see the Gofumpt documentation for details. + GofumptFormat func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) + + DefaultAnalyzers map[string]*Analyzer + TypeErrorAnalyzers map[string]*Analyzer + ConvenienceAnalyzers map[string]*Analyzer + StaticcheckAnalyzers map[string]*Analyzer +} + +// InternalOptions contains settings that are not intended for use by the +// average user. These may be settings used by tests or outdated settings that +// will soon be deprecated. Some of these settings may not even be configurable +// by the user. +type InternalOptions struct { + // LiteralCompletions controls whether literal candidates such as + // "&someStruct{}" are offered. Tests disable this flag to simplify + // their expected values. + LiteralCompletions bool + + // VerboseWorkDoneProgress controls whether the LSP server should send + // progress reports for all work done outside the scope of an RPC. + // Used by the regression tests. + VerboseWorkDoneProgress bool + + // The following options were previously available to users, but they + // really shouldn't be configured by anyone other than "power users". + + // CompletionDocumentation enables documentation with completion results. + CompletionDocumentation bool + + // CompleteUnimported enables completion for packages that you do not + // currently import. + CompleteUnimported bool + + // DeepCompletion enables the ability to return completions from deep + // inside relevant entities, rather than just the locally accessible ones. + // + // Consider this example: + // + // ```go + // package main + // + // import "fmt" + // + // type wrapString struct { + // str string + // } + // + // func main() { + // x := wrapString{"hello world"} + // fmt.Printf(<>) + // } + // ``` + // + // At the location of the `<>` in this program, deep completion would suggest + // the result `x.str`. + DeepCompletion bool + + // TempModfile controls the use of the -modfile flag in Go 1.14. + TempModfile bool + + // ShowBugReports causes a message to be shown when the first bug is reported + // on the server. + // This option applies only during initialization. + ShowBugReports bool + + // NewDiff controls the choice of the new diff implementation. It can be + // 'new', 'old', or 'both', which is the default. 'both' computes diffs with + // both algorithms, checks that the new algorithm has worked, and write some + // summary statistics to a file in os.TmpDir(). + NewDiff string + + // ChattyDiagnostics controls whether to report file diagnostics for each + // file change. If unset, gopls only reports diagnostics when they change, or + // when a file is opened or closed. + ChattyDiagnostics bool +} + +type ImportShortcut string + +const ( + BothShortcuts ImportShortcut = "Both" + LinkShortcut ImportShortcut = "Link" + DefinitionShortcut ImportShortcut = "Definition" +) + +func (s ImportShortcut) ShowLinks() bool { + return s == BothShortcuts || s == LinkShortcut +} + +func (s ImportShortcut) ShowDefinition() bool { + return s == BothShortcuts || s == DefinitionShortcut +} + +type Matcher string + +const ( + Fuzzy Matcher = "Fuzzy" + CaseInsensitive Matcher = "CaseInsensitive" + CaseSensitive Matcher = "CaseSensitive" +) + +type SymbolMatcher string + +const ( + SymbolFuzzy SymbolMatcher = "Fuzzy" + SymbolFastFuzzy SymbolMatcher = "FastFuzzy" + SymbolCaseInsensitive SymbolMatcher = "CaseInsensitive" + SymbolCaseSensitive SymbolMatcher = "CaseSensitive" +) + +type SymbolStyle string + +const ( + // PackageQualifiedSymbols is package qualified symbols i.e. + // "pkg.Foo.Field". + PackageQualifiedSymbols SymbolStyle = "Package" + // FullyQualifiedSymbols is fully qualified symbols, i.e. + // "path/to/pkg.Foo.Field". + FullyQualifiedSymbols SymbolStyle = "Full" + // DynamicSymbols uses whichever qualifier results in the highest scoring + // match for the given symbol query. Here a "qualifier" is any "/" or "." + // delimited suffix of the fully qualified symbol. i.e. "to/pkg.Foo.Field" or + // just "Foo.Field". + DynamicSymbols SymbolStyle = "Dynamic" +) + +type HoverKind string + +const ( + SingleLine HoverKind = "SingleLine" + NoDocumentation HoverKind = "NoDocumentation" + SynopsisDocumentation HoverKind = "SynopsisDocumentation" + FullDocumentation HoverKind = "FullDocumentation" + + // Structured is an experimental setting that returns a structured hover format. + // This format separates the signature from the documentation, so that the client + // can do more manipulation of these fields. + // + // This should only be used by clients that support this behavior. + Structured HoverKind = "Structured" +) + +type MemoryMode string + +const ( + ModeNormal MemoryMode = "Normal" + // In DegradeClosed mode, `gopls` will collect less information about + // packages without open files. As a result, features like Find + // References and Rename will miss results in such packages. + ModeDegradeClosed MemoryMode = "DegradeClosed" +) + +type VulncheckMode string + +const ( + // Disable vulnerability analysis. + ModeVulncheckOff VulncheckMode = "Off" + // In Imports mode, `gopls` will report vulnerabilities that affect packages + // directly and indirectly used by the analyzed main module. + ModeVulncheckImports VulncheckMode = "Imports" + + // TODO: VulncheckRequire, VulncheckCallgraph +) + +type OptionResults []OptionResult + +type OptionResult struct { + Name string + Value interface{} + Error error +} + +func SetOptions(options *Options, opts interface{}) OptionResults { + var results OptionResults + switch opts := opts.(type) { + case nil: + case map[string]interface{}: + // If the user's settings contains "allExperiments", set that first, + // and then let them override individual settings independently. + var enableExperiments bool + for name, value := range opts { + if b, ok := value.(bool); name == "allExperiments" && ok && b { + enableExperiments = true + options.EnableAllExperiments() + } + } + seen := map[string]struct{}{} + for name, value := range opts { + results = append(results, options.set(name, value, seen)) + } + // Finally, enable any experimental features that are specified in + // maps, which allows users to individually toggle them on or off. + if enableExperiments { + options.enableAllExperimentMaps() + } + default: + results = append(results, OptionResult{ + Value: opts, + Error: fmt.Errorf("Invalid options type %T", opts), + }) + } + return results +} + +func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) { + // Check if the client supports snippets in completion items. + if caps.Workspace.WorkspaceEdit != nil { + o.SupportedResourceOperations = caps.Workspace.WorkspaceEdit.ResourceOperations + } + if c := caps.TextDocument.Completion; c.CompletionItem.SnippetSupport { + o.InsertTextFormat = protocol.SnippetTextFormat + } + // Check if the client supports configuration messages. + o.ConfigurationSupported = caps.Workspace.Configuration + o.DynamicConfigurationSupported = caps.Workspace.DidChangeConfiguration.DynamicRegistration + o.DynamicRegistrationSemanticTokensSupported = caps.TextDocument.SemanticTokens.DynamicRegistration + o.DynamicWatchedFilesSupported = caps.Workspace.DidChangeWatchedFiles.DynamicRegistration + + // Check which types of content format are supported by this client. + if hover := caps.TextDocument.Hover; len(hover.ContentFormat) > 0 { + o.PreferredContentFormat = hover.ContentFormat[0] + } + // Check if the client supports only line folding. + fr := caps.TextDocument.FoldingRange + o.LineFoldingOnly = fr.LineFoldingOnly + // Check if the client supports hierarchical document symbols. + o.HierarchicalDocumentSymbolSupport = caps.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport + + // Client's semantic tokens + o.SemanticTypes = caps.TextDocument.SemanticTokens.TokenTypes + o.SemanticMods = caps.TextDocument.SemanticTokens.TokenModifiers + // we don't need Requests, as we support full functionality + // we don't need Formats, as there is only one, for now + + // Check if the client supports diagnostic related information. + o.RelatedInformationSupported = caps.TextDocument.PublishDiagnostics.RelatedInformation + // Check if the client completion support includes tags (preferred) or deprecation + if caps.TextDocument.Completion.CompletionItem.TagSupport.ValueSet != nil { + o.CompletionTags = true + } else if caps.TextDocument.Completion.CompletionItem.DeprecatedSupport { + o.CompletionDeprecated = true + } +} + +func (o *Options) Clone() *Options { + // TODO(rfindley): has this function gone stale? It appears that there are + // settings that are incorrectly cloned here (such as TemplateExtensions). + result := &Options{ + ClientOptions: o.ClientOptions, + InternalOptions: o.InternalOptions, + Hooks: Hooks{ + GoDiff: o.GoDiff, + StaticcheckSupported: o.StaticcheckSupported, + ComputeEdits: o.ComputeEdits, + GofumptFormat: o.GofumptFormat, + URLRegexp: o.URLRegexp, + }, + ServerOptions: o.ServerOptions, + UserOptions: o.UserOptions, + } + // Fully clone any slice or map fields. Only Hooks, ExperimentalOptions, + // and UserOptions can be modified. + copyStringMap := func(src map[string]bool) map[string]bool { + dst := make(map[string]bool) + for k, v := range src { + dst[k] = v + } + return dst + } + result.Analyses = copyStringMap(o.Analyses) + result.Codelenses = copyStringMap(o.Codelenses) + + copySlice := func(src []string) []string { + dst := make([]string, len(src)) + copy(dst, src) + return dst + } + result.SetEnvSlice(o.EnvSlice()) + result.BuildFlags = copySlice(o.BuildFlags) + result.DirectoryFilters = copySlice(o.DirectoryFilters) + result.StandaloneTags = copySlice(o.StandaloneTags) + + copyAnalyzerMap := func(src map[string]*Analyzer) map[string]*Analyzer { + dst := make(map[string]*Analyzer) + for k, v := range src { + dst[k] = v + } + return dst + } + result.DefaultAnalyzers = copyAnalyzerMap(o.DefaultAnalyzers) + result.TypeErrorAnalyzers = copyAnalyzerMap(o.TypeErrorAnalyzers) + result.ConvenienceAnalyzers = copyAnalyzerMap(o.ConvenienceAnalyzers) + result.StaticcheckAnalyzers = copyAnalyzerMap(o.StaticcheckAnalyzers) + return result +} + +func (o *Options) AddStaticcheckAnalyzer(a *analysis.Analyzer, enabled bool, severity protocol.DiagnosticSeverity) { + o.StaticcheckAnalyzers[a.Name] = &Analyzer{ + Analyzer: a, + Enabled: enabled, + Severity: severity, + } +} + +// EnableAllExperiments turns on all of the experimental "off-by-default" +// features offered by gopls. Any experimental features specified in maps +// should be enabled in enableAllExperimentMaps. +func (o *Options) EnableAllExperiments() { + o.SemanticTokens = true +} + +func (o *Options) enableAllExperimentMaps() { + if _, ok := o.Codelenses[string(command.GCDetails)]; !ok { + o.Codelenses[string(command.GCDetails)] = true + } + if _, ok := o.Codelenses[string(command.RunGovulncheck)]; !ok { + o.Codelenses[string(command.RunGovulncheck)] = true + } + if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok { + o.Analyses[unusedparams.Analyzer.Name] = true + } + if _, ok := o.Analyses[unusedvariable.Analyzer.Name]; !ok { + o.Analyses[unusedvariable.Analyzer.Name] = true + } +} + +// validateDirectoryFilter validates if the filter string +// - is not empty +// - start with either + or - +// - doesn't contain currently unsupported glob operators: *, ? +func validateDirectoryFilter(ifilter string) (string, error) { + filter := fmt.Sprint(ifilter) + if filter == "" || (filter[0] != '+' && filter[0] != '-') { + return "", fmt.Errorf("invalid filter %v, must start with + or -", filter) + } + segs := strings.Split(filter[1:], "/") + unsupportedOps := [...]string{"?", "*"} + for _, seg := range segs { + if seg != "**" { + for _, op := range unsupportedOps { + if strings.Contains(seg, op) { + return "", fmt.Errorf("invalid filter %v, operator %v not supported. If you want to have this operator supported, consider filing an issue.", filter, op) + } + } + } + } + + return strings.TrimRight(filepath.FromSlash(filter), "/"), nil +} + +func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult { + // Flatten the name in case we get options with a hierarchy. + split := strings.Split(name, ".") + name = split[len(split)-1] + + result := OptionResult{Name: name, Value: value} + if _, ok := seen[name]; ok { + result.parseErrorf("duplicate configuration for %s", name) + } + seen[name] = struct{}{} + + switch name { + case "env": + menv, ok := value.(map[string]interface{}) + if !ok { + result.parseErrorf("invalid type %T, expect map", value) + break + } + if o.Env == nil { + o.Env = make(map[string]string) + } + for k, v := range menv { + o.Env[k] = fmt.Sprint(v) + } + + case "buildFlags": + // TODO(rfindley): use asStringSlice. + iflags, ok := value.([]interface{}) + if !ok { + result.parseErrorf("invalid type %T, expect list", value) + break + } + flags := make([]string, 0, len(iflags)) + for _, flag := range iflags { + flags = append(flags, fmt.Sprintf("%s", flag)) + } + o.BuildFlags = flags + + case "directoryFilters": + // TODO(rfindley): use asStringSlice. + ifilters, ok := value.([]interface{}) + if !ok { + result.parseErrorf("invalid type %T, expect list", value) + break + } + var filters []string + for _, ifilter := range ifilters { + filter, err := validateDirectoryFilter(fmt.Sprintf("%v", ifilter)) + if err != nil { + result.parseErrorf("%v", err) + return result + } + filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/")) + } + o.DirectoryFilters = filters + + case "memoryMode": + if s, ok := result.asOneOf( + string(ModeNormal), + string(ModeDegradeClosed), + ); ok { + o.MemoryMode = MemoryMode(s) + } + case "completionDocumentation": + result.setBool(&o.CompletionDocumentation) + case "usePlaceholders": + result.setBool(&o.UsePlaceholders) + case "deepCompletion": + result.setBool(&o.DeepCompletion) + case "completeUnimported": + result.setBool(&o.CompleteUnimported) + case "completionBudget": + result.setDuration(&o.CompletionBudget) + case "matcher": + if s, ok := result.asOneOf( + string(Fuzzy), + string(CaseSensitive), + string(CaseInsensitive), + ); ok { + o.Matcher = Matcher(s) + } + + case "symbolMatcher": + if s, ok := result.asOneOf( + string(SymbolFuzzy), + string(SymbolFastFuzzy), + string(SymbolCaseInsensitive), + string(SymbolCaseSensitive), + ); ok { + o.SymbolMatcher = SymbolMatcher(s) + } + + case "symbolStyle": + if s, ok := result.asOneOf( + string(FullyQualifiedSymbols), + string(PackageQualifiedSymbols), + string(DynamicSymbols), + ); ok { + o.SymbolStyle = SymbolStyle(s) + } + + case "hoverKind": + if s, ok := result.asOneOf( + string(NoDocumentation), + string(SingleLine), + string(SynopsisDocumentation), + string(FullDocumentation), + string(Structured), + ); ok { + o.HoverKind = HoverKind(s) + } + + case "linkTarget": + result.setString(&o.LinkTarget) + + case "linksInHover": + result.setBool(&o.LinksInHover) + + case "importShortcut": + if s, ok := result.asOneOf(string(BothShortcuts), string(LinkShortcut), string(DefinitionShortcut)); ok { + o.ImportShortcut = ImportShortcut(s) + } + + case "analyses": + result.setBoolMap(&o.Analyses) + + case "hints": + result.setBoolMap(&o.Hints) + + case "annotations": + result.setAnnotationMap(&o.Annotations) + + case "vulncheck": + if s, ok := result.asOneOf( + string(ModeVulncheckOff), + string(ModeVulncheckImports), + ); ok { + o.Vulncheck = VulncheckMode(s) + } + + case "codelenses", "codelens": + var lensOverrides map[string]bool + result.setBoolMap(&lensOverrides) + if result.Error == nil { + if o.Codelenses == nil { + o.Codelenses = make(map[string]bool) + } + for lens, enabled := range lensOverrides { + o.Codelenses[lens] = enabled + } + } + + // codelens is deprecated, but still works for now. + // TODO(rstambler): Remove this for the gopls/v0.7.0 release. + if name == "codelens" { + result.deprecated("codelenses") + } + + case "staticcheck": + if v, ok := result.asBool(); ok { + o.Staticcheck = v + if v && !o.StaticcheckSupported { + result.Error = fmt.Errorf("applying setting %q: staticcheck is not supported at %s;"+ + " rebuild gopls with a more recent version of Go", result.Name, runtime.Version()) + } + } + + case "local": + result.setString(&o.Local) + + case "verboseOutput": + result.setBool(&o.VerboseOutput) + + case "verboseWorkDoneProgress": + result.setBool(&o.VerboseWorkDoneProgress) + + case "tempModfile": + result.setBool(&o.TempModfile) + + case "showBugReports": + result.setBool(&o.ShowBugReports) + + case "gofumpt": + if v, ok := result.asBool(); ok { + o.Gofumpt = v + if v && o.GofumptFormat == nil { + result.Error = fmt.Errorf("applying setting %q: gofumpt is not supported at %s;"+ + " rebuild gopls with a more recent version of Go", result.Name, runtime.Version()) + } + } + + case "semanticTokens": + result.setBool(&o.SemanticTokens) + + case "noSemanticString": + result.setBool(&o.NoSemanticString) + + case "noSemanticNumber": + result.setBool(&o.NoSemanticNumber) + + case "expandWorkspaceToModule": + result.setBool(&o.ExpandWorkspaceToModule) + + case "experimentalPostfixCompletions": + result.setBool(&o.ExperimentalPostfixCompletions) + + case "experimentalWorkspaceModule": + result.deprecated("") + + case "experimentalTemplateSupport": // TODO(pjw): remove after June 2022 + result.deprecated("") + + case "templateExtensions": + if iexts, ok := value.([]interface{}); ok { + ans := []string{} + for _, x := range iexts { + ans = append(ans, fmt.Sprint(x)) + } + o.TemplateExtensions = ans + break + } + if value == nil { + o.TemplateExtensions = nil + break + } + result.parseErrorf("unexpected type %T not []string", value) + + case "experimentalDiagnosticsDelay": + result.deprecated("diagnosticsDelay") + + case "diagnosticsDelay": + result.setDuration(&o.DiagnosticsDelay) + + case "experimentalWatchedFileDelay": + result.deprecated("") + + case "experimentalPackageCacheKey": + result.deprecated("") + + case "allowModfileModifications": + result.setBool(&o.AllowModfileModifications) + + case "allowImplicitNetworkAccess": + result.setBool(&o.AllowImplicitNetworkAccess) + + case "experimentalUseInvalidMetadata": + result.deprecated("") + + case "standaloneTags": + result.setStringSlice(&o.StandaloneTags) + + case "allExperiments": + // This setting should be handled before all of the other options are + // processed, so do nothing here. + + case "newDiff": + result.setString(&o.NewDiff) + + case "chattyDiagnostics": + result.setBool(&o.ChattyDiagnostics) + + // Replaced settings. + case "experimentalDisabledAnalyses": + result.deprecated("analyses") + + case "disableDeepCompletion": + result.deprecated("deepCompletion") + + case "disableFuzzyMatching": + result.deprecated("fuzzyMatching") + + case "wantCompletionDocumentation": + result.deprecated("completionDocumentation") + + case "wantUnimportedCompletions": + result.deprecated("completeUnimported") + + case "fuzzyMatching": + result.deprecated("matcher") + + case "caseSensitiveCompletion": + result.deprecated("matcher") + + // Deprecated settings. + case "wantSuggestedFixes": + result.deprecated("") + + case "noIncrementalSync": + result.deprecated("") + + case "watchFileChanges": + result.deprecated("") + + case "go-diff": + result.deprecated("") + + default: + result.unexpected() + } + return result +} + +// parseErrorf reports an error parsing the current configuration value. +func (r *OptionResult) parseErrorf(msg string, values ...interface{}) { + if false { + _ = fmt.Sprintf(msg, values...) // this causes vet to check this like printf + } + prefix := fmt.Sprintf("parsing setting %q: ", r.Name) + r.Error = fmt.Errorf(prefix+msg, values...) +} + +// A SoftError is an error that does not affect the functionality of gopls. +type SoftError struct { + msg string +} + +func (e *SoftError) Error() string { + return e.msg +} + +// softErrorf reports an error that does not affect the functionality of gopls +// (a warning in the UI). +// The formatted message will be shown to the user unmodified. +func (r *OptionResult) softErrorf(format string, values ...interface{}) { + msg := fmt.Sprintf(format, values...) + r.Error = &SoftError{msg} +} + +// deprecated reports the current setting as deprecated. If 'replacement' is +// non-nil, it is suggested to the user. +func (r *OptionResult) deprecated(replacement string) { + msg := fmt.Sprintf("gopls setting %q is deprecated", r.Name) + if replacement != "" { + msg = fmt.Sprintf("%s, use %q instead", msg, replacement) + } + r.Error = &SoftError{msg} +} + +// unexpected reports that the current setting is not known to gopls. +func (r *OptionResult) unexpected() { + r.Error = fmt.Errorf("unexpected gopls setting %q", r.Name) +} + +func (r *OptionResult) asBool() (bool, bool) { + b, ok := r.Value.(bool) + if !ok { + r.parseErrorf("invalid type %T, expect bool", r.Value) + return false, false + } + return b, true +} + +func (r *OptionResult) setBool(b *bool) { + if v, ok := r.asBool(); ok { + *b = v + } +} + +func (r *OptionResult) setDuration(d *time.Duration) { + if v, ok := r.asString(); ok { + parsed, err := time.ParseDuration(v) + if err != nil { + r.parseErrorf("failed to parse duration %q: %v", v, err) + return + } + *d = parsed + } +} + +func (r *OptionResult) setBoolMap(bm *map[string]bool) { + m := r.asBoolMap() + *bm = m +} + +func (r *OptionResult) setAnnotationMap(bm *map[Annotation]bool) { + all := r.asBoolMap() + if all == nil { + return + } + // Default to everything enabled by default. + m := make(map[Annotation]bool) + for k, enabled := range all { + a, err := asOneOf( + k, + string(Nil), + string(Escape), + string(Inline), + string(Bounds), + ) + if err != nil { + // In case of an error, process any legacy values. + switch k { + case "noEscape": + m[Escape] = false + r.parseErrorf(`"noEscape" is deprecated, set "Escape: false" instead`) + case "noNilcheck": + m[Nil] = false + r.parseErrorf(`"noNilcheck" is deprecated, set "Nil: false" instead`) + case "noInline": + m[Inline] = false + r.parseErrorf(`"noInline" is deprecated, set "Inline: false" instead`) + case "noBounds": + m[Bounds] = false + r.parseErrorf(`"noBounds" is deprecated, set "Bounds: false" instead`) + default: + r.parseErrorf("%v", err) + } + continue + } + m[Annotation(a)] = enabled + } + *bm = m +} + +func (r *OptionResult) asBoolMap() map[string]bool { + all, ok := r.Value.(map[string]interface{}) + if !ok { + r.parseErrorf("invalid type %T for map[string]bool option", r.Value) + return nil + } + m := make(map[string]bool) + for a, enabled := range all { + if e, ok := enabled.(bool); ok { + m[a] = e + } else { + r.parseErrorf("invalid type %T for map key %q", enabled, a) + return m + } + } + return m +} + +func (r *OptionResult) asString() (string, bool) { + b, ok := r.Value.(string) + if !ok { + r.parseErrorf("invalid type %T, expect string", r.Value) + return "", false + } + return b, true +} + +func (r *OptionResult) asStringSlice() ([]string, bool) { + iList, ok := r.Value.([]interface{}) + if !ok { + r.parseErrorf("invalid type %T, expect list", r.Value) + return nil, false + } + var list []string + for _, elem := range iList { + s, ok := elem.(string) + if !ok { + r.parseErrorf("invalid element type %T, expect string", elem) + return nil, false + } + list = append(list, s) + } + return list, true +} + +func (r *OptionResult) asOneOf(options ...string) (string, bool) { + s, ok := r.asString() + if !ok { + return "", false + } + s, err := asOneOf(s, options...) + if err != nil { + r.parseErrorf("%v", err) + } + return s, err == nil +} + +func asOneOf(str string, options ...string) (string, error) { + lower := strings.ToLower(str) + for _, opt := range options { + if strings.ToLower(opt) == lower { + return opt, nil + } + } + return "", fmt.Errorf("invalid option %q for enum", str) +} + +func (r *OptionResult) setString(s *string) { + if v, ok := r.asString(); ok { + *s = v + } +} + +func (r *OptionResult) setStringSlice(s *[]string) { + if v, ok := r.asStringSlice(); ok { + *s = v + } +} + +func typeErrorAnalyzers() map[string]*Analyzer { + return map[string]*Analyzer{ + fillreturns.Analyzer.Name: { + Analyzer: fillreturns.Analyzer, + ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, + Enabled: true, + }, + nonewvars.Analyzer.Name: { + Analyzer: nonewvars.Analyzer, + Enabled: true, + }, + noresultvalues.Analyzer.Name: { + Analyzer: noresultvalues.Analyzer, + Enabled: true, + }, + undeclaredname.Analyzer.Name: { + Analyzer: undeclaredname.Analyzer, + Fix: UndeclaredName, + Enabled: true, + }, + unusedvariable.Analyzer.Name: { + Analyzer: unusedvariable.Analyzer, + Enabled: false, + }, + } +} + +func convenienceAnalyzers() map[string]*Analyzer { + return map[string]*Analyzer{ + fillstruct.Analyzer.Name: { + Analyzer: fillstruct.Analyzer, + Fix: FillStruct, + Enabled: true, + ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite}, + }, + stubmethods.Analyzer.Name: { + Analyzer: stubmethods.Analyzer, + ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite}, + Fix: StubMethods, + Enabled: true, + }, + } +} + +func defaultAnalyzers() map[string]*Analyzer { + return map[string]*Analyzer{ + // The traditional vet suite: + asmdecl.Analyzer.Name: {Analyzer: asmdecl.Analyzer, Enabled: true}, + assign.Analyzer.Name: {Analyzer: assign.Analyzer, Enabled: true}, + atomic.Analyzer.Name: {Analyzer: atomic.Analyzer, Enabled: true}, + bools.Analyzer.Name: {Analyzer: bools.Analyzer, Enabled: true}, + buildtag.Analyzer.Name: {Analyzer: buildtag.Analyzer, Enabled: true}, + cgocall.Analyzer.Name: {Analyzer: cgocall.Analyzer, Enabled: true}, + composite.Analyzer.Name: {Analyzer: composite.Analyzer, Enabled: true}, + copylock.Analyzer.Name: {Analyzer: copylock.Analyzer, Enabled: true}, + directive.Analyzer.Name: {Analyzer: directive.Analyzer, Enabled: true}, + errorsas.Analyzer.Name: {Analyzer: errorsas.Analyzer, Enabled: true}, + httpresponse.Analyzer.Name: {Analyzer: httpresponse.Analyzer, Enabled: true}, + ifaceassert.Analyzer.Name: {Analyzer: ifaceassert.Analyzer, Enabled: true}, + loopclosure.Analyzer.Name: {Analyzer: loopclosure.Analyzer, Enabled: true}, + lostcancel.Analyzer.Name: {Analyzer: lostcancel.Analyzer, Enabled: true}, + nilfunc.Analyzer.Name: {Analyzer: nilfunc.Analyzer, Enabled: true}, + printf.Analyzer.Name: {Analyzer: printf.Analyzer, Enabled: true}, + shift.Analyzer.Name: {Analyzer: shift.Analyzer, Enabled: true}, + stdmethods.Analyzer.Name: {Analyzer: stdmethods.Analyzer, Enabled: true}, + stringintconv.Analyzer.Name: {Analyzer: stringintconv.Analyzer, Enabled: true}, + structtag.Analyzer.Name: {Analyzer: structtag.Analyzer, Enabled: true}, + tests.Analyzer.Name: {Analyzer: tests.Analyzer, Enabled: true}, + unmarshal.Analyzer.Name: {Analyzer: unmarshal.Analyzer, Enabled: true}, + unreachable.Analyzer.Name: {Analyzer: unreachable.Analyzer, Enabled: true}, + unsafeptr.Analyzer.Name: {Analyzer: unsafeptr.Analyzer, Enabled: true}, + unusedresult.Analyzer.Name: {Analyzer: unusedresult.Analyzer, Enabled: true}, + + // Non-vet analyzers: + atomicalign.Analyzer.Name: {Analyzer: atomicalign.Analyzer, Enabled: true}, + deepequalerrors.Analyzer.Name: {Analyzer: deepequalerrors.Analyzer, Enabled: true}, + fieldalignment.Analyzer.Name: {Analyzer: fieldalignment.Analyzer, Enabled: false}, + nilness.Analyzer.Name: {Analyzer: nilness.Analyzer, Enabled: false}, + shadow.Analyzer.Name: {Analyzer: shadow.Analyzer, Enabled: false}, + sortslice.Analyzer.Name: {Analyzer: sortslice.Analyzer, Enabled: true}, + testinggoroutine.Analyzer.Name: {Analyzer: testinggoroutine.Analyzer, Enabled: true}, + unusedparams.Analyzer.Name: {Analyzer: unusedparams.Analyzer, Enabled: false}, + unusedwrite.Analyzer.Name: {Analyzer: unusedwrite.Analyzer, Enabled: false}, + useany.Analyzer.Name: {Analyzer: useany.Analyzer, Enabled: false}, + infertypeargs.Analyzer.Name: {Analyzer: infertypeargs.Analyzer, Enabled: true}, + embeddirective.Analyzer.Name: {Analyzer: embeddirective.Analyzer, Enabled: true}, + timeformat.Analyzer.Name: {Analyzer: timeformat.Analyzer, Enabled: true}, + + // gofmt -s suite: + simplifycompositelit.Analyzer.Name: { + Analyzer: simplifycompositelit.Analyzer, + Enabled: true, + ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, + }, + simplifyrange.Analyzer.Name: { + Analyzer: simplifyrange.Analyzer, + Enabled: true, + ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, + }, + simplifyslice.Analyzer.Name: { + Analyzer: simplifyslice.Analyzer, + Enabled: true, + ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, + }, + } +} + +func urlRegexp() *regexp.Regexp { + // Ensure links are matched as full words, not anywhere. + re := regexp.MustCompile(`\b(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?\b`) + re.Longest() + return re +} + +type APIJSON struct { + Options map[string][]*OptionJSON + Commands []*CommandJSON + Lenses []*LensJSON + Analyzers []*AnalyzerJSON + Hints []*HintJSON +} + +type OptionJSON struct { + Name string + Type string + Doc string + EnumKeys EnumKeys + EnumValues []EnumValue + Default string + Status string + Hierarchy string +} + +func (o *OptionJSON) String() string { + return o.Name +} + +func (o *OptionJSON) Write(w io.Writer) { + fmt.Fprintf(w, "**%v** *%v*\n\n", o.Name, o.Type) + writeStatus(w, o.Status) + enumValues := collectEnums(o) + fmt.Fprintf(w, "%v%v\nDefault: `%v`.\n\n", o.Doc, enumValues, o.Default) +} + +func writeStatus(section io.Writer, status string) { + switch status { + case "": + case "advanced": + fmt.Fprint(section, "**This is an advanced setting and should not be configured by most `gopls` users.**\n\n") + case "debug": + fmt.Fprint(section, "**This setting is for debugging purposes only.**\n\n") + case "experimental": + fmt.Fprint(section, "**This setting is experimental and may be deleted.**\n\n") + default: + fmt.Fprintf(section, "**Status: %s.**\n\n", status) + } +} + +var parBreakRE = regexp.MustCompile("\n{2,}") + +func collectEnums(opt *OptionJSON) string { + var b strings.Builder + write := func(name, doc string, index, len int) { + if doc != "" { + unbroken := parBreakRE.ReplaceAllString(doc, "\\\n") + fmt.Fprintf(&b, "* %s\n", strings.TrimSpace(unbroken)) + } else { + fmt.Fprintf(&b, "* `%s`\n", name) + } + } + if len(opt.EnumValues) > 0 && opt.Type == "enum" { + b.WriteString("\nMust be one of:\n\n") + for i, val := range opt.EnumValues { + write(val.Value, val.Doc, i, len(opt.EnumValues)) + } + } else if len(opt.EnumKeys.Keys) > 0 && shouldShowEnumKeysInSettings(opt.Name) { + b.WriteString("\nCan contain any of:\n\n") + for i, val := range opt.EnumKeys.Keys { + write(val.Name, val.Doc, i, len(opt.EnumKeys.Keys)) + } + } + return b.String() +} + +func shouldShowEnumKeysInSettings(name string) bool { + // These fields have too many possible options to print. + return !(name == "analyses" || name == "codelenses" || name == "hints") +} + +type EnumKeys struct { + ValueType string + Keys []EnumKey +} + +type EnumKey struct { + Name string + Doc string + Default string +} + +type EnumValue struct { + Value string + Doc string +} + +type CommandJSON struct { + Command string + Title string + Doc string + ArgDoc string + ResultDoc string +} + +func (c *CommandJSON) String() string { + return c.Command +} + +func (c *CommandJSON) Write(w io.Writer) { + fmt.Fprintf(w, "### **%v**\nIdentifier: `%v`\n\n%v\n\n", c.Title, c.Command, c.Doc) + if c.ArgDoc != "" { + fmt.Fprintf(w, "Args:\n\n```\n%s\n```\n\n", c.ArgDoc) + } + if c.ResultDoc != "" { + fmt.Fprintf(w, "Result:\n\n```\n%s\n```\n\n", c.ResultDoc) + } +} + +type LensJSON struct { + Lens string + Title string + Doc string +} + +func (l *LensJSON) String() string { + return l.Title +} + +func (l *LensJSON) Write(w io.Writer) { + fmt.Fprintf(w, "%s (%s): %s", l.Title, l.Lens, l.Doc) +} + +type AnalyzerJSON struct { + Name string + Doc string + Default bool +} + +func (a *AnalyzerJSON) String() string { + return a.Name +} + +func (a *AnalyzerJSON) Write(w io.Writer) { + fmt.Fprintf(w, "%s (%s): %v", a.Name, a.Doc, a.Default) +} + +type HintJSON struct { + Name string + Doc string + Default bool +} + +func (h *HintJSON) String() string { + return h.Name +} + +func (h *HintJSON) Write(w io.Writer) { + fmt.Fprintf(w, "%s (%s): %v", h.Name, h.Doc, h.Default) +} diff --git a/internal/lsp/source/options_test.go b/gopls/internal/lsp/source/options_test.go similarity index 89% rename from internal/lsp/source/options_test.go rename to gopls/internal/lsp/source/options_test.go index dfc464e8c31..4fa6ecf15df 100644 --- a/internal/lsp/source/options_test.go +++ b/gopls/internal/lsp/source/options_test.go @@ -167,6 +167,28 @@ func TestSetOption(t *testing.T) { return !o.Annotations[Nil] && !o.Annotations[Bounds] }, }, + { + name: "vulncheck", + value: []interface{}{"invalid"}, + wantError: true, + check: func(o Options) bool { + return o.Vulncheck == "" // For invalid value, default to 'off'. + }, + }, + { + name: "vulncheck", + value: "Imports", + check: func(o Options) bool { + return o.Vulncheck == ModeVulncheckImports // For invalid value, default to 'off'. + }, + }, + { + name: "vulncheck", + value: "imports", + check: func(o Options) bool { + return o.Vulncheck == ModeVulncheckImports + }, + }, } for _, test := range tests { diff --git a/gopls/internal/lsp/source/references.go b/gopls/internal/lsp/source/references.go new file mode 100644 index 00000000000..fcbbc336d90 --- /dev/null +++ b/gopls/internal/lsp/source/references.go @@ -0,0 +1,5 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source diff --git a/gopls/internal/lsp/source/references2.go b/gopls/internal/lsp/source/references2.go new file mode 100644 index 00000000000..e579ab056a6 --- /dev/null +++ b/gopls/internal/lsp/source/references2.go @@ -0,0 +1,592 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +// This file defines a new implementation of the 'references' query +// based on a serializable (and eventually file-based) index +// constructed during type checking, thus avoiding the need to +// type-check packages at search time. In due course it will replace +// the old implementation, which is also used by renaming. +// +// See the ./xrefs/ subpackage for the index construction and lookup. +// +// This implementation does not intermingle objects from distinct +// calls to TypeCheck. + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + "sort" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" +) + +// A ReferenceInfoV2 describes an identifier that refers to the same +// object as the subject of a References query. +type ReferenceInfoV2 struct { + IsDeclaration bool + Location protocol.Location + + // TODO(adonovan): these are the same for all elements; factor out of the slice. + // TODO(adonovan): Name is currently unused. If it's still unused when we + // eliminate 'references' (v1), delete it. Or replace both fields by a *Metadata. + PkgPath PackagePath + Name string +} + +// References returns a list of all references (sorted with +// definitions before uses) to the object denoted by the identifier at +// the given file/position, searching the entire workspace. +func References(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position, includeDeclaration bool) ([]protocol.Location, error) { + references, err := referencesV2(ctx, snapshot, fh, pp, includeDeclaration) + if err != nil { + return nil, err + } + locations := make([]protocol.Location, len(references)) + for i, ref := range references { + locations[i] = ref.Location + } + return locations, nil +} + +// referencesV2 returns a list of all references (sorted with +// definitions before uses) to the object denoted by the identifier at +// the given file/position, searching the entire workspace. +func referencesV2(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]*ReferenceInfoV2, error) { + ctx, done := event.Start(ctx, "source.References2") + defer done() + + // Is the cursor within the package name declaration? + _, inPackageName, err := parsePackageNameDecl(ctx, snapshot, f, pp) + if err != nil { + return nil, err + } + + var refs []*ReferenceInfoV2 + if inPackageName { + refs, err = packageReferences(ctx, snapshot, f.URI()) + } else { + refs, err = ordinaryReferences(ctx, snapshot, f.URI(), pp) + } + if err != nil { + return nil, err + } + + sort.Slice(refs, func(i, j int) bool { + x, y := refs[i], refs[j] + if x.IsDeclaration != y.IsDeclaration { + return x.IsDeclaration // decls < refs + } + return protocol.CompareLocation(x.Location, y.Location) < 0 + }) + + // De-duplicate by location, and optionally remove declarations. + out := refs[:0] + for _, ref := range refs { + if !includeDeclaration && ref.IsDeclaration { + continue + } + if len(out) == 0 || out[len(out)-1].Location != ref.Location { + out = append(out, ref) + } + } + refs = out + + return refs, nil +} + +// packageReferences returns a list of references to the package +// declaration of the specified name and uri by searching among the +// import declarations of all packages that directly import the target +// package. +func packageReferences(ctx context.Context, snapshot Snapshot, uri span.URI) ([]*ReferenceInfoV2, error) { + metas, err := snapshot.MetadataForFile(ctx, uri) + if err != nil { + return nil, err + } + if len(metas) == 0 { + return nil, fmt.Errorf("found no package containing %s", uri) + } + + var refs []*ReferenceInfoV2 + + // Find external references to the package declaration + // from each direct import of the package. + // + // The narrowest package is the most broadly imported, + // so we choose it for the external references. + // + // But if the file ends with _test.go then we need to + // find the package it is testing; there's no direct way + // to do that, so pick a file from the same package that + // doesn't end in _test.go and start over. + narrowest := metas[0] + if narrowest.ForTest != "" && strings.HasSuffix(string(uri), "_test.go") { + for _, f := range narrowest.CompiledGoFiles { + if !strings.HasSuffix(string(f), "_test.go") { + return packageReferences(ctx, snapshot, f) + } + } + // This package has no non-test files. + // Skip the search for external references. + // (Conceivably one could blank-import an empty package, but why?) + } else { + rdeps, err := snapshot.ReverseDependencies(ctx, narrowest.ID, false) // direct + if err != nil { + return nil, err + } + for _, rdep := range rdeps { + for _, uri := range rdep.CompiledGoFiles { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + f, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return nil, err + } + for _, imp := range f.File.Imports { + if rdep.DepsByImpPath[UnquoteImportPath(imp)] == narrowest.ID { + refs = append(refs, &ReferenceInfoV2{ + IsDeclaration: false, + Location: mustLocation(f, imp), + PkgPath: narrowest.PkgPath, + Name: string(narrowest.Name), + }) + } + } + } + } + } + + // Find internal "references" to the package from + // of each package declaration in the target package itself. + // + // The widest package (possibly a test variant) has the + // greatest number of files and thus we choose it for the + // "internal" references. + widest := metas[len(metas)-1] + for _, uri := range widest.CompiledGoFiles { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + f, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return nil, err + } + refs = append(refs, &ReferenceInfoV2{ + IsDeclaration: true, // (one of many) + Location: mustLocation(f, f.File.Name), + PkgPath: widest.PkgPath, + Name: string(widest.Name), + }) + } + + return refs, nil +} + +// ordinaryReferences computes references for all ordinary objects (not package declarations). +func ordinaryReferences(ctx context.Context, snapshot Snapshot, uri span.URI, pp protocol.Position) ([]*ReferenceInfoV2, error) { + // Strategy: use the reference information computed by the + // type checker to find the declaration. First type-check this + // package to find the declaration, then type check the + // declaring package (which may be different), plus variants, + // to find local (in-package) references. + // Global references are satisfied by the index. + + // Strictly speaking, a wider package could provide a different + // declaration (e.g. because the _test.go files can change the + // meaning of a field or method selection), but the narrower + // package reports the more broadly referenced object. + pkg, pgf, err := PackageForFile(ctx, snapshot, uri, TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + + // Find the selected object (declaration or reference). + pos, err := pgf.PositionPos(pp) + if err != nil { + return nil, err + } + candidates, _, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos) + if err != nil { + return nil, err + } + + // Pick first object arbitrarily. + // The case variables of a type switch have different + // types but that difference is immaterial here. + var obj types.Object + for obj = range candidates { + break + } + if obj == nil { + return nil, ErrNoIdentFound // can't happen + } + + // nil, error, error.Error, iota, or other built-in? + if obj.Pkg() == nil { + // For some reason, existing tests require that iota has no references, + // nor an error. TODO(adonovan): do something more principled. + if obj.Name() == "iota" { + return nil, nil + } + + return nil, fmt.Errorf("references to builtin %q are not supported", obj.Name()) + } + + // Find metadata of all packages containing the object's defining file. + // This may include the query pkg, and possibly other variants. + declPosn := safetoken.StartPosition(pkg.FileSet(), obj.Pos()) + declURI := span.URIFromPath(declPosn.Filename) + variants, err := snapshot.MetadataForFile(ctx, declURI) + if err != nil { + return nil, err + } + if len(variants) == 0 { + return nil, fmt.Errorf("no packages for file %q", declURI) // can't happen + } + + // Is object exported? + // If so, compute scope and targets of the global search. + var ( + globalScope = make(map[PackageID]*Metadata) + globalTargets map[PackagePath]map[objectpath.Path]unit + ) + // TODO(adonovan): what about generic functions. Need to consider both + // uninstantiated and instantiated. The latter have no objectpath. Use Origin? + if path, err := objectpath.For(obj); err == nil && obj.Exported() { + pkgPath := variants[0].PkgPath // (all variants have same package path) + globalTargets = map[PackagePath]map[objectpath.Path]unit{ + pkgPath: {path: {}}, // primary target + } + + // How far need we search? + // For package-level objects, we need only search the direct importers. + // For fields and methods, we must search transitively. + transitive := obj.Pkg().Scope().Lookup(obj.Name()) != obj + + // The scope is the union of rdeps of each variant. + // (Each set is disjoint so there's no benefit to + // to combining the metadata graph traversals.) + for _, m := range variants { + rdeps, err := snapshot.ReverseDependencies(ctx, m.ID, transitive) + if err != nil { + return nil, err + } + for id, rdep := range rdeps { + globalScope[id] = rdep + } + } + + // Is object a method? + // + // If so, expand the search so that the targets include + // all methods that correspond to it through interface + // satisfaction, and the scope includes the rdeps of + // the package that declares each corresponding type. + if recv := effectiveReceiver(obj); recv != nil { + if err := expandMethodSearch(ctx, snapshot, obj.(*types.Func), recv, globalScope, globalTargets); err != nil { + return nil, err + } + } + } + + // The search functions will call report(loc) for each hit. + var ( + refsMu sync.Mutex + refs []*ReferenceInfoV2 + ) + report := func(loc protocol.Location, isDecl bool) { + ref := &ReferenceInfoV2{ + IsDeclaration: isDecl, + Location: loc, + PkgPath: pkg.Metadata().PkgPath, + Name: obj.Name(), + } + refsMu.Lock() + refs = append(refs, ref) + refsMu.Unlock() + } + + // Loop over the variants of the declaring package, + // and perform both the local (in-package) and global + // (cross-package) searches, in parallel. + // + // TODO(adonovan): opt: support LSP reference streaming. See: + // - https://github.com/microsoft/vscode-languageserver-node/pull/164 + // - https://github.com/microsoft/language-server-protocol/pull/182 + // + // Careful: this goroutine must not return before group.Wait. + var group errgroup.Group + + // Compute local references for each variant. + for _, m := range variants { + // We want the ordinary importable package, + // plus any test-augmented variants, since + // declarations in _test.go files may change + // the reference of a selection, or even a + // field into a method or vice versa. + // + // But we don't need intermediate test variants, + // as their local references will be covered + // already by other variants. + if m.IsIntermediateTestVariant() { + continue + } + m := m + group.Go(func() error { + return localReferences(ctx, snapshot, declURI, declPosn.Offset, m, report) + }) + } + + // Compute global references for selected reverse dependencies. + for _, m := range globalScope { + m := m + group.Go(func() error { + return globalReferences(ctx, snapshot, m, globalTargets, report) + }) + } + + if err := group.Wait(); err != nil { + return nil, err + } + return refs, nil +} + +// expandMethodSearch expands the scope and targets of a global search +// for an exported method to include all methods that correspond to +// it through interface satisfaction. +// +// recv is the method's effective receiver type, for method-set computations. +func expandMethodSearch(ctx context.Context, snapshot Snapshot, method *types.Func, recv types.Type, scope map[PackageID]*Metadata, targets map[PackagePath]map[objectpath.Path]unit) error { + // Compute the method-set fingerprint used as a key to the global search. + key, hasMethods := methodsets.KeyOf(recv) + if !hasMethods { + return bug.Errorf("KeyOf(%s)={} yet %s is a method", recv, method) + } + metas, err := snapshot.AllMetadata(ctx) + if err != nil { + return err + } + allIDs := make([]PackageID, 0, len(metas)) + for _, m := range metas { + allIDs = append(allIDs, m.ID) + } + // Search the methodset index of each package in the workspace. + allPkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, allIDs...) + if err != nil { + return err + } + var group errgroup.Group + for _, pkg := range allPkgs { + pkg := pkg + group.Go(func() error { + // Consult index for matching methods. + results := pkg.MethodSetsIndex().Search(key, method.Name()) + + // Expand global search scope to include rdeps of this pkg. + if len(results) > 0 { + rdeps, err := snapshot.ReverseDependencies(ctx, pkg.Metadata().ID, true) + if err != nil { + return err + } + for _, rdep := range rdeps { + scope[rdep.ID] = rdep + } + } + + // Add each corresponding method the to set of global search targets. + for _, res := range results { + methodPkg := PackagePath(res.PkgPath) + opaths, ok := targets[methodPkg] + if !ok { + opaths = make(map[objectpath.Path]unit) + targets[methodPkg] = opaths + } + opaths[res.ObjectPath] = unit{} + } + return nil + }) + } + return group.Wait() +} + +// localReferences reports each reference to the object +// declared at the specified URI/offset within its enclosing package m. +func localReferences(ctx context.Context, snapshot Snapshot, declURI span.URI, declOffset int, m *Metadata, report func(loc protocol.Location, isDecl bool)) error { + pkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, m.ID) + if err != nil { + return err + } + pkg := pkgs[0] // narrowest + + // Find declaration of corresponding object + // in this package based on (URI, offset). + pgf, err := pkg.File(declURI) + if err != nil { + return err + } + pos, err := safetoken.Pos(pgf.Tok, declOffset) + if err != nil { + return err + } + targets, _, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos) + if err != nil { + return err // unreachable? (probably caught earlier) + } + + // Report the locations of the declaration(s). + // TODO(adonovan): what about for corresponding methods? Add tests. + for _, node := range targets { + report(mustLocation(pgf, node), true) + } + + // If we're searching for references to a method, broaden the + // search to include references to corresponding methods of + // mutually assignable receiver types. + // (We use a slice, but objectsAt never returns >1 methods.) + var methodRecvs []types.Type + var methodName string // name of an arbitrary target, iff a method + for obj := range targets { + if t := effectiveReceiver(obj); t != nil { + methodRecvs = append(methodRecvs, t) + methodName = obj.Name() + } + } + + // matches reports whether obj either is or corresponds to a target. + // (Correspondence is defined as usual for interface methods.) + matches := func(obj types.Object) bool { + if targets[obj] != nil { + return true + } else if methodRecvs != nil && obj.Name() == methodName { + if orecv := effectiveReceiver(obj); orecv != nil { + for _, mrecv := range methodRecvs { + if concreteImplementsIntf(orecv, mrecv) { + return true + } + } + } + } + return false + } + + // Scan through syntax looking for uses of one of the target objects. + for _, pgf := range pkg.CompiledGoFiles() { + ast.Inspect(pgf.File, func(n ast.Node) bool { + if id, ok := n.(*ast.Ident); ok { + if obj, ok := pkg.GetTypesInfo().Uses[id]; ok && matches(obj) { + report(mustLocation(pgf, id), false) + } + } + return true + }) + } + return nil +} + +// effectiveReceiver returns the effective receiver type for method-set +// comparisons for obj, if it is a method, or nil otherwise. +func effectiveReceiver(obj types.Object) types.Type { + if fn, ok := obj.(*types.Func); ok { + if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + return methodsets.EnsurePointer(recv.Type()) + } + } + return nil +} + +// objectsAt returns the non-empty set of objects denoted (def or use) +// by the specified position within a file syntax tree, or an error if +// none were found. +// +// The result may contain more than one element because all case +// variables of a type switch appear to be declared at the same +// position. +// +// Each object is mapped to the syntax node that was treated as an +// identifier, which is not always an ast.Ident. The second component +// of the result is the innermost node enclosing pos. +func objectsAt(info *types.Info, file *ast.File, pos token.Pos) (map[types.Object]ast.Node, ast.Node, error) { + path := pathEnclosingObjNode(file, pos) + if path == nil { + return nil, nil, ErrNoIdentFound + } + + targets := make(map[types.Object]ast.Node) + + switch leaf := path[0].(type) { + case *ast.Ident: + // If leaf represents an implicit type switch object or the type + // switch "assign" variable, expand to all of the type switch's + // implicit objects. + if implicits, _ := typeSwitchImplicits(info, path); len(implicits) > 0 { + for _, obj := range implicits { + targets[obj] = leaf + } + } else { + obj := info.ObjectOf(leaf) + if obj == nil { + return nil, nil, fmt.Errorf("%w for %q", errNoObjectFound, leaf.Name) + } + targets[obj] = leaf + } + case *ast.ImportSpec: + // Look up the implicit *types.PkgName. + obj := info.Implicits[leaf] + if obj == nil { + return nil, nil, fmt.Errorf("%w for import %s", errNoObjectFound, UnquoteImportPath(leaf)) + } + targets[obj] = leaf + } + + if len(targets) == 0 { + return nil, nil, fmt.Errorf("objectAt: internal error: no targets") // can't happen + } + return targets, path[0], nil +} + +// globalReferences reports each cross-package reference to one of the +// target objects denoted by (package path, object path). +func globalReferences(ctx context.Context, snapshot Snapshot, m *Metadata, targets map[PackagePath]map[objectpath.Path]unit, report func(loc protocol.Location, isDecl bool)) error { + // TODO(adonovan): opt: don't actually type-check here, + // since we quite intentionally don't look at type information. + // Instead, access the reference index computed during + // type checking that will in due course be a file-based cache. + pkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, m.ID) + if err != nil { + return err + } + for _, loc := range pkgs[0].ReferencesTo(targets) { + report(loc, false) + } + return nil +} + +// mustLocation reports the location interval a syntax node, +// which must belong to m.File. +// +// Safe for use only by references2 and implementations2. +func mustLocation(pgf *ParsedGoFile, n ast.Node) protocol.Location { + loc, err := pgf.NodeLocation(n) + if err != nil { + panic(err) // can't happen in references2 or implementations2 + } + return loc +} diff --git a/gopls/internal/lsp/source/rename.go b/gopls/internal/lsp/source/rename.go new file mode 100644 index 00000000000..da8aa272870 --- /dev/null +++ b/gopls/internal/lsp/source/rename.go @@ -0,0 +1,1206 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/refactor/satisfy" +) + +type renamer struct { + ctx context.Context + snapshot Snapshot + refs []*ReferenceInfo + objsToUpdate map[types.Object]bool + hadConflicts bool + errors string + from, to string + satisfyConstraints map[satisfy.Constraint]bool + packages map[*types.Package]Package // may include additional packages that are a dep of pkg. + msets typeutil.MethodSetCache + changeMethods bool +} + +type PrepareItem struct { + Range protocol.Range + Text string +} + +// PrepareRename searches for a valid renaming at position pp. +// +// The returned usererr is intended to be displayed to the user to explain why +// the prepare fails. Probably we could eliminate the redundancy in returning +// two errors, but for now this is done defensively. +func PrepareRename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) (_ *PrepareItem, usererr, err error) { + // Find position of the package name declaration. + ctx, done := event.Start(ctx, "source.PrepareRename") + defer done() + + // Is the cursor within the package name declaration? + if pgf, inPackageName, err := parsePackageNameDecl(ctx, snapshot, f, pp); err != nil { + return nil, err, err + } else if inPackageName { + item, err := prepareRenamePackageName(ctx, snapshot, pgf) + return item, err, err + } + + // Ordinary (non-package) renaming. + // + // Type-check the current package, locate the reference at the position, + // validate the object, and report its name and range. + // + // TODO(adonovan): in all cases below, we return usererr=nil, + // which means we return (nil, nil) at the protocol + // layer. This seems like a bug, or at best an exploitation of + // knowledge of VSCode-specific behavior. Can we avoid that? + pkg, pgf, err := PackageForFile(ctx, snapshot, f.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, nil, err + } + pos, err := pgf.PositionPos(pp) + if err != nil { + return nil, nil, err + } + targets, node, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos) + if err != nil { + return nil, nil, err + } + var obj types.Object + for obj = range targets { + break // pick one arbitrarily + } + if err := checkRenamable(obj); err != nil { + return nil, nil, err + } + rng, err := pgf.NodeRange(node) + if err != nil { + return nil, nil, err + } + if _, isImport := node.(*ast.ImportSpec); isImport { + // We're not really renaming the import path. + rng.End = rng.Start + } + return &PrepareItem{ + Range: rng, + Text: obj.Name(), + }, nil, nil +} + +func prepareRenamePackageName(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile) (*PrepareItem, error) { + // Does the client support file renaming? + fileRenameSupported := false + for _, op := range snapshot.View().Options().SupportedResourceOperations { + if op == protocol.Rename { + fileRenameSupported = true + break + } + } + if !fileRenameSupported { + return nil, errors.New("can't rename package: LSP client does not support file renaming") + } + + // Check validity of the metadata for the file's containing package. + fileMeta, err := snapshot.MetadataForFile(ctx, pgf.URI) + if err != nil { + return nil, err + } + if len(fileMeta) == 0 { + return nil, fmt.Errorf("no packages found for file %q", pgf.URI) + } + meta := fileMeta[0] + if meta.Name == "main" { + return nil, fmt.Errorf("can't rename package \"main\"") + } + if strings.HasSuffix(string(meta.Name), "_test") { + return nil, fmt.Errorf("can't rename x_test packages") + } + if meta.Module == nil { + return nil, fmt.Errorf("can't rename package: missing module information for package %q", meta.PkgPath) + } + if meta.Module.Path == string(meta.PkgPath) { + return nil, fmt.Errorf("can't rename package: package path %q is the same as module path %q", meta.PkgPath, meta.Module.Path) + } + + // Return the location of the package declaration. + rng, err := pgf.NodeRange(pgf.File.Name) + if err != nil { + return nil, err + } + return &PrepareItem{ + Range: rng, + Text: string(meta.Name), + }, nil +} + +func checkRenamable(obj types.Object) error { + if v, ok := obj.(*types.Var); ok && v.Embedded() { + return errors.New("can't rename embedded fields: rename the type directly or name the field") + } + if obj.Name() == "_" { + return errors.New("can't rename \"_\"") + } + return nil +} + +// Rename returns a map of TextEdits for each file modified when renaming a +// given identifier within a package and a boolean value of true for renaming +// package and false otherwise. +func Rename(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]protocol.TextEdit, bool, error) { + ctx, done := event.Start(ctx, "source.Rename") + defer done() + + // Cursor within package name declaration? + if _, inPackageName, err := parsePackageNameDecl(ctx, s, f, pp); err != nil { + return nil, false, err + } else if inPackageName { + return renamePackageName(ctx, s, f, pp, newName) + } + + // ordinary (non-package) rename + qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp) + if err != nil { + return nil, false, err + } + if err := checkRenamable(qos[0].obj); err != nil { + return nil, false, err + } + if qos[0].obj.Name() == newName { + return nil, false, fmt.Errorf("old and new names are the same: %s", newName) + } + if !isValidIdentifier(newName) { + return nil, false, fmt.Errorf("invalid identifier to rename: %q", newName) + } + result, err := renameObj(ctx, s, newName, qos) + if err != nil { + return nil, false, err + } + + return result, false, nil +} + +func renamePackageName(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]protocol.TextEdit, bool, error) { + if !isValidIdentifier(newName) { + return nil, true, fmt.Errorf("%q is not a valid identifier", newName) + } + + fileMeta, err := s.MetadataForFile(ctx, f.URI()) + if err != nil { + return nil, true, err + } + + if len(fileMeta) == 0 { + return nil, true, fmt.Errorf("no packages found for file %q", f.URI()) + } + + // We need metadata for the relevant package and module paths. These should + // be the same for all packages containing the file. + // + // TODO(rfindley): we mix package path and import path here haphazardly. + // Fix this. + meta := fileMeta[0] + oldPath := meta.PkgPath + var modulePath PackagePath + if mi := meta.Module; mi == nil { + return nil, true, fmt.Errorf("cannot rename package: missing module information for package %q", meta.PkgPath) + } else { + modulePath = PackagePath(mi.Path) + } + + if strings.HasSuffix(newName, "_test") { + return nil, true, fmt.Errorf("cannot rename to _test package") + } + + metadata, err := s.AllMetadata(ctx) + if err != nil { + return nil, true, err + } + + renamingEdits, err := renamePackage(ctx, s, modulePath, oldPath, PackageName(newName), metadata) + if err != nil { + return nil, true, err + } + + oldBase := filepath.Dir(span.URI.Filename(f.URI())) + newPkgDir := filepath.Join(filepath.Dir(oldBase), newName) + + // TODO: should this operate on all go.mod files, irrespective of whether they are included in the workspace? + // Get all active mod files in the workspace + modFiles := s.ModFiles() + for _, m := range modFiles { + fh, err := s.GetFile(ctx, m) + if err != nil { + return nil, true, err + } + pm, err := s.ParseMod(ctx, fh) + if err != nil { + return nil, true, err + } + + modFileDir := filepath.Dir(pm.URI.Filename()) + affectedReplaces := []*modfile.Replace{} + + // Check if any replace directives need to be fixed + for _, r := range pm.File.Replace { + if !strings.HasPrefix(r.New.Path, "/") && !strings.HasPrefix(r.New.Path, "./") && !strings.HasPrefix(r.New.Path, "../") { + continue + } + + replacedPath := r.New.Path + if strings.HasPrefix(r.New.Path, "./") || strings.HasPrefix(r.New.Path, "../") { + replacedPath = filepath.Join(modFileDir, r.New.Path) + } + + // TODO: Is there a risk of converting a '\' delimited replacement to a '/' delimited replacement? + if !strings.HasPrefix(filepath.ToSlash(replacedPath)+"/", filepath.ToSlash(oldBase)+"/") { + continue // not affected by the package renaming + } + + affectedReplaces = append(affectedReplaces, r) + } + + if len(affectedReplaces) == 0 { + continue + } + copied, err := modfile.Parse("", pm.Mapper.Content, nil) + if err != nil { + return nil, true, err + } + + for _, r := range affectedReplaces { + replacedPath := r.New.Path + if strings.HasPrefix(r.New.Path, "./") || strings.HasPrefix(r.New.Path, "../") { + replacedPath = filepath.Join(modFileDir, r.New.Path) + } + + suffix := strings.TrimPrefix(replacedPath, string(oldBase)) + + newReplacedPath, err := filepath.Rel(modFileDir, newPkgDir+suffix) + if err != nil { + return nil, true, err + } + + newReplacedPath = filepath.ToSlash(newReplacedPath) + + if !strings.HasPrefix(newReplacedPath, "/") && !strings.HasPrefix(newReplacedPath, "../") { + newReplacedPath = "./" + newReplacedPath + } + + if err := copied.AddReplace(r.Old.Path, "", newReplacedPath, ""); err != nil { + return nil, true, err + } + } + + copied.Cleanup() + newContent, err := copied.Format() + if err != nil { + return nil, true, err + } + + // Calculate the edits to be made due to the change. + diff := s.View().Options().ComputeEdits(string(pm.Mapper.Content), string(newContent)) + modFileEdits, err := ToProtocolEdits(pm.Mapper, diff) + if err != nil { + return nil, true, err + } + + renamingEdits[pm.URI] = append(renamingEdits[pm.URI], modFileEdits...) + } + + return renamingEdits, true, nil +} + +// renamePackage computes all workspace edits required to rename the package +// described by the given metadata, to newName, by renaming its package +// directory. +// +// It updates package clauses and import paths for the renamed package as well +// as any other packages affected by the directory renaming among packages +// described by allMetadata. +func renamePackage(ctx context.Context, s Snapshot, modulePath, oldPath PackagePath, newName PackageName, allMetadata []*Metadata) (map[span.URI][]protocol.TextEdit, error) { + if modulePath == oldPath { + return nil, fmt.Errorf("cannot rename package: module path %q is the same as the package path, so renaming the package directory would have no effect", modulePath) + } + + newPathPrefix := path.Join(path.Dir(string(oldPath)), string(newName)) + + edits := make(map[span.URI][]protocol.TextEdit) + seen := make(seenPackageRename) // track per-file import renaming we've already processed + + // Rename imports to the renamed package from other packages. + for _, m := range allMetadata { + // Special case: x_test packages for the renamed package will not have the + // package path as as a dir prefix, but still need their package clauses + // renamed. + if m.PkgPath == oldPath+"_test" { + newTestName := newName + "_test" + + if err := renamePackageClause(ctx, m, s, newTestName, seen, edits); err != nil { + return nil, err + } + continue + } + + // Subtle: check this condition before checking for valid module info + // below, because we should not fail this operation if unrelated packages + // lack module info. + if !strings.HasPrefix(string(m.PkgPath)+"/", string(oldPath)+"/") { + continue // not affected by the package renaming + } + + if m.Module == nil { + // This check will always fail under Bazel. + return nil, fmt.Errorf("cannot rename package: missing module information for package %q", m.PkgPath) + } + + if modulePath != PackagePath(m.Module.Path) { + continue // don't edit imports if nested package and renaming package have different module paths + } + + // Renaming a package consists of changing its import path and package name. + suffix := strings.TrimPrefix(string(m.PkgPath), string(oldPath)) + newPath := newPathPrefix + suffix + + pkgName := m.Name + if m.PkgPath == PackagePath(oldPath) { + pkgName = newName + + if err := renamePackageClause(ctx, m, s, newName, seen, edits); err != nil { + return nil, err + } + } + + imp := ImportPath(newPath) // TODO(adonovan): what if newPath has vendor/ prefix? + if err := renameImports(ctx, s, m, imp, pkgName, seen, edits); err != nil { + return nil, err + } + } + + return edits, nil +} + +// seenPackageRename tracks import path renamings that have already been +// processed. +// +// Due to test variants, files may appear multiple times in the reverse +// transitive closure of a renamed package, or in the reverse transitive +// closure of different variants of a renamed package (both are possible). +// However, in all cases the resulting edits will be the same. +type seenPackageRename map[seenPackageKey]bool +type seenPackageKey struct { + uri span.URI + path PackagePath +} + +// add reports whether uri and importPath have been seen, and records them as +// seen if not. +func (s seenPackageRename) add(uri span.URI, path PackagePath) bool { + key := seenPackageKey{uri, path} + seen := s[key] + if !seen { + s[key] = true + } + return seen +} + +// renamePackageClause computes edits renaming the package clause of files in +// the package described by the given metadata, to newName. +// +// As files may belong to multiple packages, the seen map tracks files whose +// package clause has already been updated, to prevent duplicate edits. +// +// Edits are written into the edits map. +func renamePackageClause(ctx context.Context, m *Metadata, snapshot Snapshot, newName PackageName, seen seenPackageRename, edits map[span.URI][]protocol.TextEdit) error { + // Rename internal references to the package in the renaming package. + for _, uri := range m.CompiledGoFiles { + if seen.add(uri, m.PkgPath) { + continue + } + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return err + } + f, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return err + } + if f.File.Name == nil { + continue // no package declaration + } + rng, err := f.NodeRange(f.File.Name) + if err != nil { + return err + } + edits[f.URI] = append(edits[f.URI], protocol.TextEdit{ + Range: rng, + NewText: string(newName), + }) + } + + return nil +} + +// renameImports computes the set of edits to imports resulting from renaming +// the package described by the given metadata, to a package with import path +// newPath and name newName. +// +// Edits are written into the edits map. +func renameImports(ctx context.Context, snapshot Snapshot, m *Metadata, newPath ImportPath, newName PackageName, seen seenPackageRename, edits map[span.URI][]protocol.TextEdit) error { + rdeps, err := snapshot.ReverseDependencies(ctx, m.ID, false) // find direct importers + if err != nil { + return err + } + + // Pass 1: rename import paths in import declarations. + needsTypeCheck := make(map[PackageID][]span.URI) + for _, rdep := range rdeps { + if rdep.IsIntermediateTestVariant() { + continue // for renaming, these variants are redundant + } + + for _, uri := range rdep.CompiledGoFiles { + if seen.add(uri, m.PkgPath) { + continue + } + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return err + } + f, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return err + } + if f.File.Name == nil { + continue // no package declaration + } + for _, imp := range f.File.Imports { + if rdep.DepsByImpPath[UnquoteImportPath(imp)] != m.ID { + continue // not the import we're looking for + } + + // If the import does not explicitly specify + // a local name, then we need to invoke the + // type checker to locate references to update. + // + // TODO(adonovan): is this actually true? + // Renaming an import with a local name can still + // cause conflicts: shadowing of built-ins, or of + // package-level decls in the same or another file. + if imp.Name == nil { + needsTypeCheck[rdep.ID] = append(needsTypeCheck[rdep.ID], uri) + } + + // Create text edit for the import path (string literal). + rng, err := f.NodeRange(imp.Path) + if err != nil { + return err + } + edits[uri] = append(edits[uri], protocol.TextEdit{ + Range: rng, + NewText: strconv.Quote(string(newPath)), + }) + } + } + } + + // If the imported package's name hasn't changed, + // we don't need to rename references within each file. + if newName == m.Name { + return nil + } + + // Pass 2: rename local name (types.PkgName) of imported + // package throughout one or more files of the package. + ids := make([]PackageID, 0, len(needsTypeCheck)) + for id := range needsTypeCheck { + ids = append(ids, id) + } + pkgs, err := snapshot.TypeCheck(ctx, TypecheckFull, ids...) + if err != nil { + return err + } + for i, id := range ids { + pkg := pkgs[i] + for _, uri := range needsTypeCheck[id] { + f, err := pkg.File(uri) + if err != nil { + return err + } + for _, imp := range f.File.Imports { + if imp.Name != nil { + continue // has explicit local name + } + if rdeps[id].DepsByImpPath[UnquoteImportPath(imp)] != m.ID { + continue // not the import we're looking for + } + + pkgname := pkg.GetTypesInfo().Implicits[imp].(*types.PkgName) + qos := []qualifiedObject{{obj: pkgname, pkg: pkg}} + + pkgScope := pkg.GetTypes().Scope() + fileScope := pkg.GetTypesInfo().Scopes[f.File] + + localName := string(newName) + try := 0 + + // Keep trying with fresh names until one succeeds. + for fileScope.Lookup(localName) != nil || pkgScope.Lookup(localName) != nil { + try++ + localName = fmt.Sprintf("%s%d", newName, try) + } + + // renameObj detects various conflicts, including: + // - new name conflicts with a package-level decl in this file; + // - new name hides a package-level decl in another file that + // is actually referenced in this file; + // - new name hides a built-in that is actually referenced + // in this file; + // - a reference in this file to the old package name would + // become shadowed by an intervening declaration that + // uses the new name. + // It returns the edits if no conflict was detected. + // + // TODO(adonovan): reduce the strength of this operation + // since, for imports specifically, it should require only + // the current file and the current package, which we + // already have. Finding references is trivial (Info.Uses). + changes, err := renameObj(ctx, snapshot, localName, qos) + if err != nil { + return err + } + + // If the chosen local package name matches the package's + // new name, delete the change that would have inserted + // an explicit local name, which is always the lexically + // first change. + if localName == string(newName) { + v := changes[uri] + sort.Slice(v, func(i, j int) bool { + return protocol.CompareRange(v[i].Range, v[j].Range) < 0 + }) + changes[uri] = v[1:] + } + for uri, changeEdits := range changes { + edits[uri] = append(edits[uri], changeEdits...) + } + } + } + } + return nil +} + +// renameObj returns a map of TextEdits for renaming an identifier within a file +// and boolean value of true if there is no renaming conflicts and false otherwise. +func renameObj(ctx context.Context, s Snapshot, newName string, qos []qualifiedObject) (map[span.URI][]protocol.TextEdit, error) { + refs, err := references(ctx, s, qos) + if err != nil { + return nil, err + } + r := renamer{ + ctx: ctx, + snapshot: s, + refs: refs, + objsToUpdate: make(map[types.Object]bool), + from: qos[0].obj.Name(), + to: newName, + packages: make(map[*types.Package]Package), + } + + // A renaming initiated at an interface method indicates the + // intention to rename abstract and concrete methods as needed + // to preserve assignability. + for _, ref := range refs { + if obj, ok := ref.obj.(*types.Func); ok { + recv := obj.Type().(*types.Signature).Recv() + if recv != nil && types.IsInterface(recv.Type().Underlying()) { + r.changeMethods = true + break + } + } + } + for _, from := range refs { + r.packages[from.pkg.GetTypes()] = from.pkg + } + + // Check that the renaming of the identifier is ok. + for _, ref := range refs { + r.check(ref.obj) + if r.hadConflicts { // one error is enough. + break + } + } + if r.hadConflicts { + return nil, fmt.Errorf("%s", r.errors) + } + + changes, err := r.update() + if err != nil { + return nil, err + } + + result := make(map[span.URI][]protocol.TextEdit) + for uri, edits := range changes { + // These edits should really be associated with FileHandles for maximal correctness. + // For now, this is good enough. + fh, err := s.GetFile(ctx, uri) + if err != nil { + return nil, err + } + data, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewMapper(uri, data) + protocolEdits, err := ToProtocolEdits(m, edits) + if err != nil { + return nil, err + } + result[uri] = protocolEdits + } + return result, nil +} + +// Rename all references to the identifier. +func (r *renamer) update() (map[span.URI][]diff.Edit, error) { + result := make(map[span.URI][]diff.Edit) + seen := make(map[span.Span]bool) + + docRegexp, err := regexp.Compile(`\b` + r.from + `\b`) + if err != nil { + return nil, err + } + for _, ref := range r.refs { + refSpan := ref.MappedRange.Span() + if seen[refSpan] { + continue + } + seen[refSpan] = true + + // Renaming a types.PkgName may result in the addition or removal of an identifier, + // so we deal with this separately. + if pkgName, ok := ref.obj.(*types.PkgName); ok && ref.isDeclaration { + edit, err := r.updatePkgName(pkgName) + if err != nil { + return nil, err + } + result[refSpan.URI()] = append(result[refSpan.URI()], *edit) + continue + } + + // Replace the identifier with r.to. + edit := diff.Edit{ + Start: refSpan.Start().Offset(), + End: refSpan.End().Offset(), + New: r.to, + } + + result[refSpan.URI()] = append(result[refSpan.URI()], edit) + + if !ref.isDeclaration || ref.ident == nil { // uses do not have doc comments to update. + continue + } + + doc := r.docComment(ref.pkg, ref.ident) + if doc == nil { + continue + } + + // Perform the rename in doc comments declared in the original package. + // go/parser strips out \r\n returns from the comment text, so go + // line-by-line through the comment text to get the correct positions. + for _, comment := range doc.List { + if isDirective(comment.Text) { + continue + } + // TODO(adonovan): why are we looping over lines? + // Just run the loop body once over the entire multiline comment. + lines := strings.Split(comment.Text, "\n") + tokFile := ref.pkg.FileSet().File(comment.Pos()) + commentLine := tokFile.Line(comment.Pos()) + uri := span.URIFromPath(tokFile.Name()) + for i, line := range lines { + lineStart := comment.Pos() + if i > 0 { + lineStart = tokFile.LineStart(commentLine + i) + } + for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) { + // The File.Offset static check complains + // even though these uses are manifestly safe. + start, end, _ := safetoken.Offsets(tokFile, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1])) + result[uri] = append(result[uri], diff.Edit{ + Start: start, + End: end, + New: r.to, + }) + } + } + } + } + + return result, nil +} + +// docComment returns the doc for an identifier. +func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup { + _, tokFile, nodes, _ := pathEnclosingInterval(r.ctx, r.snapshot, pkg, id.Pos(), id.End()) + for _, node := range nodes { + switch decl := node.(type) { + case *ast.FuncDecl: + return decl.Doc + case *ast.Field: + return decl.Doc + case *ast.GenDecl: + return decl.Doc + // For {Type,Value}Spec, if the doc on the spec is absent, + // search for the enclosing GenDecl + case *ast.TypeSpec: + if decl.Doc != nil { + return decl.Doc + } + case *ast.ValueSpec: + if decl.Doc != nil { + return decl.Doc + } + case *ast.Ident: + case *ast.AssignStmt: + // *ast.AssignStmt doesn't have an associated comment group. + // So, we try to find a comment just before the identifier. + + // Try to find a comment group only for short variable declarations (:=). + if decl.Tok != token.DEFINE { + return nil + } + + identLine := tokFile.Line(id.Pos()) + for _, comment := range nodes[len(nodes)-1].(*ast.File).Comments { + if comment.Pos() > id.Pos() { + // Comment is after the identifier. + continue + } + + lastCommentLine := tokFile.Line(comment.End()) + if lastCommentLine+1 == identLine { + return comment + } + } + default: + return nil + } + } + return nil +} + +// updatePkgName returns the updates to rename a pkgName in the import spec by +// only modifying the package name portion of the import declaration. +func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.Edit, error) { + // Modify ImportSpec syntax to add or remove the Name as needed. + pkg := r.packages[pkgName.Pkg()] + _, tokFile, path, _ := pathEnclosingInterval(r.ctx, r.snapshot, pkg, pkgName.Pos(), pkgName.Pos()) + if len(path) < 2 { + return nil, fmt.Errorf("no path enclosing interval for %s", pkgName.Name()) + } + spec, ok := path[1].(*ast.ImportSpec) + if !ok { + return nil, fmt.Errorf("failed to update PkgName for %s", pkgName.Name()) + } + + newText := "" + if pkgName.Imported().Name() != r.to { + newText = r.to + " " + } + + // Replace the portion (possibly empty) of the spec before the path: + // local "path" or "path" + // -> <- -><- + start, end, err := safetoken.Offsets(tokFile, spec.Pos(), spec.Path.Pos()) + if err != nil { + return nil, err + } + + return &diff.Edit{ + Start: start, + End: end, + New: newText, + }, nil +} + +// qualifiedObjsAtProtocolPos returns info for all the types.Objects referenced +// at the given position, for the following selection of packages: +// +// 1. all packages (including all test variants), in their workspace parse mode +// 2. if not included above, at least one package containing uri in full parse mode +// +// Finding objects in (1) ensures that we locate references within all +// workspace packages, including in x_test packages. Including (2) ensures that +// we find local references in the current package, for non-workspace packages +// that may be open. +func qualifiedObjsAtProtocolPos(ctx context.Context, s Snapshot, uri span.URI, pp protocol.Position) ([]qualifiedObject, error) { + fh, err := s.GetFile(ctx, uri) + if err != nil { + return nil, err + } + content, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewMapper(uri, content) + offset, err := m.PositionOffset(pp) + if err != nil { + return nil, err + } + return qualifiedObjsAtLocation(ctx, s, positionKey{uri, offset}, map[positionKey]bool{}) +} + +// A qualifiedObject is the result of resolving a reference from an +// identifier to an object. +type qualifiedObject struct { + obj types.Object // the referenced object + pkg Package // the Package that defines the object (nil => universe) +} + +// A positionKey identifies a byte offset within a file (URI). +// +// When a file has been parsed multiple times in the same FileSet, +// there may be multiple token.Pos values denoting the same logical +// position. In such situations, a positionKey may be used for +// de-duplication. +type positionKey struct { + uri span.URI + offset int +} + +// qualifiedObjsAtLocation finds all objects referenced at offset in uri, +// across all packages in the snapshot. +func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key positionKey, seen map[positionKey]bool) ([]qualifiedObject, error) { + if seen[key] { + return nil, nil + } + seen[key] = true + + // We search for referenced objects starting with all packages containing the + // current location, and then repeating the search for every distinct object + // location discovered. + // + // In the common case, there should be at most one additional location to + // consider: the definition of the object referenced by the location. But we + // try to be comprehensive in case we ever support variations on build + // constraints. + metas, err := s.MetadataForFile(ctx, key.uri) + if err != nil { + return nil, err + } + ids := make([]PackageID, len(metas)) + for i, m := range metas { + ids[i] = m.ID + } + pkgs, err := s.TypeCheck(ctx, TypecheckWorkspace, ids...) + if err != nil { + return nil, err + } + + // In order to allow basic references/rename/implementations to function when + // non-workspace packages are open, ensure that we have at least one fully + // parsed package for the current file. This allows us to find references + // inside the open package. Use WidestPackage to capture references in test + // files. + hasFullPackage := false + for _, pkg := range pkgs { + if pkg.ParseMode() == ParseFull { + hasFullPackage = true + break + } + } + if !hasFullPackage { + pkg, _, err := PackageForFile(ctx, s, key.uri, TypecheckFull, WidestPackage) + if err != nil { + return nil, err + } + pkgs = append(pkgs, pkg) + } + + // report objects in the order we encounter them. This ensures that the first + // result is at the cursor... + var qualifiedObjs []qualifiedObject + // ...but avoid duplicates. + seenObjs := map[types.Object]bool{} + + for _, searchpkg := range pkgs { + pgf, err := searchpkg.File(key.uri) + if err != nil { + return nil, err + } + pos := pgf.Tok.Pos(key.offset) + + // TODO(adonovan): replace this section with a call to objectsAt(). + path := pathEnclosingObjNode(pgf.File, pos) + if path == nil { + continue + } + var objs []types.Object + switch leaf := path[0].(type) { + case *ast.Ident: + // If leaf represents an implicit type switch object or the type + // switch "assign" variable, expand to all of the type switch's + // implicit objects. + if implicits, _ := typeSwitchImplicits(searchpkg.GetTypesInfo(), path); len(implicits) > 0 { + objs = append(objs, implicits...) + } else { + obj := searchpkg.GetTypesInfo().ObjectOf(leaf) + if obj == nil { + return nil, fmt.Errorf("no object found for %q", leaf.Name) + } + objs = append(objs, obj) + } + case *ast.ImportSpec: + // Look up the implicit *types.PkgName. + obj := searchpkg.GetTypesInfo().Implicits[leaf] + if obj == nil { + return nil, fmt.Errorf("no object found for import %s", UnquoteImportPath(leaf)) + } + objs = append(objs, obj) + } + + // Get all of the transitive dependencies of the search package. + pkgSet := map[*types.Package]Package{ + searchpkg.GetTypes(): searchpkg, + } + deps := recursiveDeps(s, searchpkg.Metadata())[1:] + // Ignore the error from type checking, but check if the context was + // canceled (which would have caused TypeCheck to exit early). + depPkgs, _ := s.TypeCheck(ctx, TypecheckWorkspace, deps...) + if ctx.Err() != nil { + return nil, ctx.Err() + } + for _, dep := range depPkgs { + // Since we ignored the error from type checking, pkg may be nil. + if dep != nil { + pkgSet[dep.GetTypes()] = dep + } + } + + for _, obj := range objs { + if obj.Parent() == types.Universe { + return nil, fmt.Errorf("%q: builtin object", obj.Name()) + } + pkg, ok := pkgSet[obj.Pkg()] + if !ok { + event.Error(ctx, fmt.Sprintf("no package for obj %s: %v", obj, obj.Pkg()), err) + continue + } + qualifiedObjs = append(qualifiedObjs, qualifiedObject{obj: obj, pkg: pkg}) + seenObjs[obj] = true + + // If the qualified object is in another file (or more likely, another + // package), it's possible that there is another copy of it in a package + // that we haven't searched, e.g. a test variant. See golang/go#47564. + // + // In order to be sure we've considered all packages, call + // qualifiedObjsAtLocation recursively for all locations we encounter. We + // could probably be more precise here, only continuing the search if obj + // is in another package, but this should be good enough to find all + // uses. + + if key, found := packagePositionKey(pkg, obj.Pos()); found { + otherObjs, err := qualifiedObjsAtLocation(ctx, s, key, seen) + if err != nil { + return nil, err + } + for _, other := range otherObjs { + if !seenObjs[other.obj] { + qualifiedObjs = append(qualifiedObjs, other) + seenObjs[other.obj] = true + } + } + } else { + return nil, fmt.Errorf("missing file for position of %q in %q", obj.Name(), obj.Pkg().Name()) + } + } + } + // Return an error if no objects were found since callers will assume that + // the slice has at least 1 element. + if len(qualifiedObjs) == 0 { + return nil, errNoObjectFound + } + return qualifiedObjs, nil +} + +// packagePositionKey finds the positionKey for the given pos. +// +// The second result reports whether the position was found. +func packagePositionKey(pkg Package, pos token.Pos) (positionKey, bool) { + for _, pgf := range pkg.CompiledGoFiles() { + offset, err := safetoken.Offset(pgf.Tok, pos) + if err == nil { + return positionKey{pgf.URI, offset}, true + } + } + return positionKey{}, false +} + +// ReferenceInfo holds information about reference to an identifier in Go source. +type ReferenceInfo struct { + MappedRange protocol.MappedRange + ident *ast.Ident + obj types.Object + pkg Package + isDeclaration bool +} + +// references is a helper function to avoid recomputing qualifiedObjsAtProtocolPos. +// The first element of qos is considered to be the declaration; +// if isDeclaration, the first result is an extra item for it. +// Only the definition-related fields of qualifiedObject are used. +// (Arguably it should accept a smaller data type.) +// +// This implementation serves Server.rename. TODO(adonovan): obviate it. +func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject) ([]*ReferenceInfo, error) { + var ( + references []*ReferenceInfo + seen = make(map[positionKey]bool) + ) + + pos := qos[0].obj.Pos() + if pos == token.NoPos { + return nil, fmt.Errorf("no position for %s", qos[0].obj) // e.g. error.Error + } + // Inv: qos[0].pkg != nil, since Pos is valid. + // Inv: qos[*].pkg != nil, since all qos are logically the same declaration. + filename := safetoken.StartPosition(qos[0].pkg.FileSet(), pos).Filename + pgf, err := qos[0].pkg.File(span.URIFromPath(filename)) + if err != nil { + return nil, err + } + declIdent, err := findIdentifier(ctx, snapshot, qos[0].pkg, pgf, qos[0].obj.Pos()) + if err != nil { + return nil, err + } + // Make sure declaration is the first item in the response. + references = append(references, &ReferenceInfo{ + MappedRange: declIdent.MappedRange, + ident: declIdent.ident, + obj: qos[0].obj, + pkg: declIdent.pkg, + isDeclaration: true, + }) + + for _, qo := range qos { + var searchPkgs []Package + + // Only search dependents if the object is exported. + if qo.obj.Exported() { + // If obj is a package-level object, we need only search + // among direct reverse dependencies. + // TODO(adonovan): opt: this will still spuriously search + // transitively for (e.g.) capitalized local variables. + // We could do better by checking for an objectpath. + transitive := qo.obj.Pkg().Scope().Lookup(qo.obj.Name()) != qo.obj + rdeps, err := snapshot.ReverseDependencies(ctx, qo.pkg.Metadata().ID, transitive) + if err != nil { + return nil, err + } + ids := make([]PackageID, 0, len(rdeps)) + for _, rdep := range rdeps { + ids = append(ids, rdep.ID) + } + // TODO(adonovan): opt: build a search index + // that doesn't require type checking. + reverseDeps, err := snapshot.TypeCheck(ctx, TypecheckFull, ids...) + if err != nil { + return nil, err + } + searchPkgs = append(searchPkgs, reverseDeps...) + } + // Add the package in which the identifier is declared. + searchPkgs = append(searchPkgs, qo.pkg) + for _, pkg := range searchPkgs { + for ident, obj := range pkg.GetTypesInfo().Uses { + // For instantiated objects (as in methods or fields on instantiated + // types), we may not have pointer-identical objects but still want to + // consider them references. + if !equalOrigin(obj, qo.obj) { + // If ident is not a use of qo.obj, skip it, with one exception: + // uses of an embedded field can be considered references of the + // embedded type name + v, ok := obj.(*types.Var) + if !ok || !v.Embedded() { + continue + } + named, ok := v.Type().(*types.Named) + if !ok || named.Obj() != qo.obj { + continue + } + } + key, found := packagePositionKey(pkg, ident.Pos()) + if !found { + bug.Reportf("ident %v (pos: %v) not found in package %v", ident.Name, ident.Pos(), pkg.Metadata().ID) + continue + } + if seen[key] { + continue + } + seen[key] = true + filename := pkg.FileSet().File(ident.Pos()).Name() + pgf, err := pkg.File(span.URIFromPath(filename)) + if err != nil { + return nil, err + } + rng, err := pgf.NodeMappedRange(ident) + if err != nil { + return nil, err + } + references = append(references, &ReferenceInfo{ + ident: ident, + pkg: pkg, + obj: obj, + MappedRange: rng, + }) + } + } + } + + return references, nil +} + +// equalOrigin reports whether obj1 and obj2 have equivalent origin object. +// This may be the case even if obj1 != obj2, if one or both of them is +// instantiated. +func equalOrigin(obj1, obj2 types.Object) bool { + return obj1.Pkg() == obj2.Pkg() && obj1.Pos() == obj2.Pos() && obj1.Name() == obj2.Name() +} + +// parsePackageNameDecl is a convenience function that parses and +// returns the package name declaration of file fh, and reports +// whether the position ppos lies within it. +// +// Note: also used by references2. +func parsePackageNameDecl(ctx context.Context, snapshot Snapshot, fh FileHandle, ppos protocol.Position) (*ParsedGoFile, bool, error) { + pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return nil, false, err + } + // Careful: because we used ParseHeader, + // pgf.Pos(ppos) may be beyond EOF => (0, err). + pos, _ := pgf.PositionPos(ppos) + return pgf, pgf.File.Name.Pos() <= pos && pos <= pgf.File.Name.End(), nil +} diff --git a/internal/lsp/source/rename_check.go b/gopls/internal/lsp/source/rename_check.go similarity index 95% rename from internal/lsp/source/rename_check.go rename to gopls/internal/lsp/source/rename_check.go index b17f9b87067..d01d2289296 100644 --- a/internal/lsp/source/rename_check.go +++ b/gopls/internal/lsp/source/rename_check.go @@ -7,12 +7,12 @@ package source import ( + "context" "fmt" "go/ast" "go/token" "go/types" "reflect" - "strconv" "strings" "unicode" @@ -372,7 +372,7 @@ func (r *renamer) checkStructField(from *types.Var) { if !ok { return } - pkg, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos()) + pkg, _, path, _ := pathEnclosingInterval(r.ctx, r.snapshot, fromPkg, from.Pos(), from.Pos()) if pkg == nil || path == nil { return } @@ -441,7 +441,7 @@ func (r *renamer) checkStructField(from *types.Var) { r.checkSelections(from) } -// checkSelection checks that all uses and selections that resolve to +// checkSelections checks that all uses and selections that resolve to // the specified object would continue to do so after the renaming. func (r *renamer) checkSelections(from types.Object) { for typ, pkg := range r.packages { @@ -553,7 +553,7 @@ func (r *renamer) checkMethod(from *types.Func) { // Check for conflict at point of declaration. // Check to ensure preservation of assignability requirements. R := recv(from).Type() - if IsInterface(R) { + if types.IsInterface(R) { // Abstract method // declaration @@ -570,7 +570,7 @@ func (r *renamer) checkMethod(from *types.Func) { for _, pkg := range r.packages { // Start with named interface types (better errors) for _, obj := range pkg.GetTypesInfo().Defs { - if obj, ok := obj.(*types.TypeName); ok && IsInterface(obj.Type()) { + if obj, ok := obj.(*types.TypeName); ok && types.IsInterface(obj.Type()) { f, _, _ := types.LookupFieldOrMethod( obj.Type(), false, from.Pkg(), from.Name()) if f == nil { @@ -642,7 +642,7 @@ func (r *renamer) checkMethod(from *types.Func) { // yields abstract method I.f. This can make error // messages less than obvious. // - if !IsInterface(key.RHS) { + if !types.IsInterface(key.RHS) { // The logic below was derived from checkSelections. rtosel := rmethods.Lookup(from.Pkg(), r.to) @@ -717,7 +717,7 @@ func (r *renamer) checkMethod(from *types.Func) { // for key := range r.satisfy() { // key = (lhs, rhs) where lhs is always an interface. - if IsInterface(key.RHS) { + if types.IsInterface(key.RHS) { continue } rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name()) @@ -791,10 +791,10 @@ func (r *renamer) satisfy() map[satisfy.Constraint]bool { // type-checker. // // Only proceed if all packages have no errors. - if pkg.HasListOrParseErrors() || pkg.HasTypeErrors() { + if pkg.HasParseErrors() || pkg.HasTypeErrors() { r.errorf(token.NoPos, // we don't have a position for this error. "renaming %q to %q not possible because %q has errors", - r.from, r.to, pkg.PkgPath()) + r.from, r.to, pkg.Metadata().PkgPath) return nil } f.Find(pkg.GetTypesInfo(), pkg.GetSyntax()) @@ -821,54 +821,61 @@ func someUse(info *types.Info, obj types.Object) *ast.Ident { return nil } -// pathEnclosingInterval returns the Package and ast.Node that +// pathEnclosingInterval returns the Package, token.File, and ast.Node that // contain source interval [start, end), and all the node's ancestors // up to the AST root. It searches all ast.Files of all packages. // exact is defined as for astutil.PathEnclosingInterval. // // The zero value is returned if not found. -func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) { +// +// TODO(rfindley): this has some redundancy with FindPackageFromPos, etc. Refactor. +func pathEnclosingInterval(ctx context.Context, s Snapshot, pkg Package, start, end token.Pos) (resPkg Package, tokFile *token.File, path []ast.Node, exact bool) { pkgs := []Package{pkg} for _, f := range pkg.GetSyntax() { for _, imp := range f.Imports { if imp == nil { continue } - importPath, err := strconv.Unquote(imp.Path.Value) - if err != nil { + importPath := UnquoteImportPath(imp) + if importPath == "" { continue } - importPkg, err := pkg.GetImport(importPath) + depID, ok := pkg.Metadata().DepsByImpPath[importPath] + if !ok { + return nil, nil, nil, false + } + depPkgs, err := s.TypeCheck(ctx, TypecheckWorkspace, depID) if err != nil { - return nil, nil, false + return nil, nil, nil, false } - pkgs = append(pkgs, importPkg) + pkgs = append(pkgs, depPkgs[0]) } } for _, p := range pkgs { for _, f := range p.GetSyntax() { - if f.Pos() == token.NoPos { + if !f.Pos().IsValid() { // This can happen if the parser saw // too many errors and bailed out. // (Use parser.AllErrors to prevent that.) continue } - if !tokenFileContainsPos(fset.File(f.Pos()), start) { + tokFile := p.FileSet().File(f.Pos()) + if !tokenFileContainsPos(tokFile, start) { continue } if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { - return pkg, path, exact + return pkg, tokFile, path, exact } } } - return nil, nil, false + return nil, nil, nil, false } // TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) func tokenFileContainsPos(tf *token.File, pos token.Pos) bool { p := int(pos) base := tf.Base() - return base <= p && p < base+tf.Size() + return base <= p && p <= base+tf.Size() } func objectKind(obj types.Object) string { diff --git a/gopls/internal/lsp/source/signature_help.go b/gopls/internal/lsp/source/signature_help.go new file mode 100644 index 00000000000..0f81b857ba0 --- /dev/null +++ b/gopls/internal/lsp/source/signature_help.go @@ -0,0 +1,185 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/event" +) + +func SignatureHelp(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.SignatureInformation, int, error) { + ctx, done := event.Start(ctx, "source.SignatureHelp") + defer done() + + // We need full type-checking here, as we must type-check function bodies in + // order to provide signature help at the requested position. + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, 0, fmt.Errorf("getting file for SignatureHelp: %w", err) + } + pos, err := pgf.PositionPos(position) + if err != nil { + return nil, 0, err + } + // Find a call expression surrounding the query position. + var callExpr *ast.CallExpr + path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) + if path == nil { + return nil, 0, fmt.Errorf("cannot find node enclosing position") + } +FindCall: + for _, node := range path { + switch node := node.(type) { + case *ast.CallExpr: + if pos >= node.Lparen && pos <= node.Rparen { + callExpr = node + break FindCall + } + case *ast.FuncLit, *ast.FuncType: + // The user is within an anonymous function, + // which may be the parameter to the *ast.CallExpr. + // Don't show signature help in this case. + return nil, 0, fmt.Errorf("no signature help within a function declaration") + case *ast.BasicLit: + if node.Kind == token.STRING { + return nil, 0, fmt.Errorf("no signature help within a string literal") + } + } + + } + if callExpr == nil || callExpr.Fun == nil { + return nil, 0, fmt.Errorf("cannot find an enclosing function") + } + + qf := Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()) + + // Get the object representing the function, if available. + // There is no object in certain cases such as calling a function returned by + // a function (e.g. "foo()()"). + var obj types.Object + switch t := callExpr.Fun.(type) { + case *ast.Ident: + obj = pkg.GetTypesInfo().ObjectOf(t) + case *ast.SelectorExpr: + obj = pkg.GetTypesInfo().ObjectOf(t.Sel) + } + + // Handle builtin functions separately. + if obj, ok := obj.(*types.Builtin); ok { + return builtinSignature(ctx, snapshot, callExpr, obj.Name(), pos) + } + + // Get the type information for the function being called. + sigType := pkg.GetTypesInfo().TypeOf(callExpr.Fun) + if sigType == nil { + return nil, 0, fmt.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun) + } + + sig, _ := sigType.Underlying().(*types.Signature) + if sig == nil { + return nil, 0, fmt.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun) + } + + activeParam := activeParameter(callExpr, sig.Params().Len(), sig.Variadic(), pos) + + var ( + name string + comment *ast.CommentGroup + ) + if obj != nil { + d, err := HoverDocForObject(ctx, snapshot, pkg, obj) + if err != nil { + return nil, 0, err + } + name = obj.Name() + comment = d + } else { + name = "func" + } + mq := MetadataQualifierForFile(snapshot, pgf.File, pkg.Metadata()) + s, err := NewSignature(ctx, snapshot, pkg, pgf.File, sig, comment, qf, mq) + if err != nil { + return nil, 0, err + } + paramInfo := make([]protocol.ParameterInformation, 0, len(s.params)) + for _, p := range s.params { + paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p}) + } + return &protocol.SignatureInformation{ + Label: name + s.Format(), + Documentation: stringToSigInfoDocumentation(s.doc, snapshot.View().Options()), + Parameters: paramInfo, + }, activeParam, nil +} + +func builtinSignature(ctx context.Context, snapshot Snapshot, callExpr *ast.CallExpr, name string, pos token.Pos) (*protocol.SignatureInformation, int, error) { + sig, err := NewBuiltinSignature(ctx, snapshot, name) + if err != nil { + return nil, 0, err + } + paramInfo := make([]protocol.ParameterInformation, 0, len(sig.params)) + for _, p := range sig.params { + paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p}) + } + activeParam := activeParameter(callExpr, len(sig.params), sig.variadic, pos) + return &protocol.SignatureInformation{ + Label: sig.name + sig.Format(), + Documentation: stringToSigInfoDocumentation(sig.doc, snapshot.View().Options()), + Parameters: paramInfo, + }, activeParam, nil + +} + +func activeParameter(callExpr *ast.CallExpr, numParams int, variadic bool, pos token.Pos) (activeParam int) { + if len(callExpr.Args) == 0 { + return 0 + } + // First, check if the position is even in the range of the arguments. + start, end := callExpr.Lparen, callExpr.Rparen + if !(start <= pos && pos <= end) { + return 0 + } + for _, expr := range callExpr.Args { + if start == token.NoPos { + start = expr.Pos() + } + end = expr.End() + if start <= pos && pos <= end { + break + } + // Don't advance the active parameter for the last parameter of a variadic function. + if !variadic || activeParam < numParams-1 { + activeParam++ + } + start = expr.Pos() + 1 // to account for commas + } + return activeParam +} + +func stringToSigInfoDocumentation(s string, options *Options) *protocol.Or_SignatureInformation_documentation { + v := s + k := protocol.PlainText + if options.PreferredContentFormat == protocol.Markdown { + v = CommentToMarkdown(s) + // whether or not content is newline terminated may not matter for LSP clients, + // but our tests expect trailing newlines to be stripped. + v = strings.TrimSuffix(v, "\n") // TODO(pjw): change the golden files + k = protocol.Markdown + } + return &protocol.Or_SignatureInformation_documentation{ + Value: protocol.MarkupContent{ + Kind: k, + Value: v, + }, + } +} diff --git a/gopls/internal/lsp/source/stub.go b/gopls/internal/lsp/source/stub.go new file mode 100644 index 00000000000..5f6fe14a852 --- /dev/null +++ b/gopls/internal/lsp/source/stub.go @@ -0,0 +1,350 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/lsp/analysis/stubmethods" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/typeparams" +) + +func stubSuggestedFixFunc(ctx context.Context, snapshot Snapshot, fh FileHandle, rng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error) { + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, nil, fmt.Errorf("GetTypedFile: %w", err) + } + nodes, pos, err := getStubNodes(pgf, rng) + if err != nil { + return nil, nil, fmt.Errorf("getNodes: %w", err) + } + si := stubmethods.GetStubInfo(pkg.GetTypesInfo(), nodes, pos) + if si == nil { + return nil, nil, fmt.Errorf("nil interface request") + } + + // A function-local type cannot be stubbed + // since there's nowhere to put the methods. + conc := si.Concrete.Obj() + if conc != conc.Pkg().Scope().Lookup(conc.Name()) { + return nil, nil, fmt.Errorf("local type %q cannot be stubbed", conc.Name()) + } + + // Parse the file defining the concrete type. + concreteFilename := safetoken.StartPosition(snapshot.FileSet(), si.Concrete.Obj().Pos()).Filename + concreteFH, err := snapshot.GetFile(ctx, span.URIFromPath(concreteFilename)) + if err != nil { + return nil, nil, err + } + parsedConcreteFile, err := snapshot.ParseGo(ctx, concreteFH, ParseFull) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse file declaring implementation type: %w", err) + } + var ( + methodsSrc []byte + stubImports []*stubImport // additional imports needed for method stubs + ) + if si.Interface.Pkg() == nil && si.Interface.Name() == "error" && si.Interface.Parent() == types.Universe { + methodsSrc = stubErr(ctx, parsedConcreteFile.File, si, snapshot) + } else { + methodsSrc, stubImports, err = stubMethods(ctx, parsedConcreteFile.File, si, snapshot) + if err != nil { + return nil, nil, fmt.Errorf("stubMethods: %w", err) + } + } + + // Splice the methods into the file. + // The insertion point is after the top-level declaration + // enclosing the (package-level) type object. + insertPos := parsedConcreteFile.File.End() + for _, decl := range parsedConcreteFile.File.Decls { + if decl.End() > conc.Pos() { + insertPos = decl.End() + break + } + } + concreteSrc, err := concreteFH.Read() + if err != nil { + return nil, nil, fmt.Errorf("error reading concrete file source: %w", err) + } + insertOffset, err := safetoken.Offset(parsedConcreteFile.Tok, insertPos) + if err != nil || insertOffset >= len(concreteSrc) { + return nil, nil, fmt.Errorf("insertion position is past the end of the file") + } + var buf bytes.Buffer + buf.Write(concreteSrc[:insertOffset]) + buf.WriteByte('\n') + buf.Write(methodsSrc) + buf.Write(concreteSrc[insertOffset:]) + + // Re-parse it, splice in imports, pretty-print it. + fset := token.NewFileSet() + newF, err := parser.ParseFile(fset, parsedConcreteFile.File.Name.Name, buf.Bytes(), parser.ParseComments) + if err != nil { + return nil, nil, fmt.Errorf("could not reparse file: %w", err) + } + for _, imp := range stubImports { + astutil.AddNamedImport(fset, newF, imp.Name, imp.Path) + } + var source strings.Builder + if err := format.Node(&source, fset, newF); err != nil { + return nil, nil, fmt.Errorf("format.Node: %w", err) + } + + // Return the diff. + diffs := snapshot.View().Options().ComputeEdits(string(parsedConcreteFile.Src), source.String()) + var edits []analysis.TextEdit + for _, edit := range diffs { + edits = append(edits, analysis.TextEdit{ + Pos: parsedConcreteFile.Tok.Pos(edit.Start), + End: parsedConcreteFile.Tok.Pos(edit.End), + NewText: []byte(edit.New), + }) + } + return snapshot.FileSet(), // to match snapshot.ParseGo above + &analysis.SuggestedFix{TextEdits: edits}, + nil +} + +// stubMethods returns the Go code of all methods +// that implement the given interface +func stubMethods(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) ([]byte, []*stubImport, error) { + concMS := types.NewMethodSet(types.NewPointer(si.Concrete.Obj().Type())) + missing, err := missingMethods(ctx, snapshot, concMS, si.Concrete.Obj().Pkg(), si.Interface, map[string]struct{}{}) + if err != nil { + return nil, nil, fmt.Errorf("missingMethods: %w", err) + } + if len(missing) == 0 { + return nil, nil, fmt.Errorf("no missing methods found") + } + var ( + stubImports []*stubImport + methodsBuffer bytes.Buffer + ) + for _, mi := range missing { + for _, m := range mi.missing { + // TODO(marwan-at-work): this should share the same logic with source.FormatVarType + // as it also accounts for type aliases. + sig := types.TypeString(m.Type(), stubmethods.RelativeToFiles(si.Concrete.Obj().Pkg(), concreteFile, mi.imports, func(name, path string) { + for _, imp := range stubImports { + if imp.Name == name && imp.Path == path { + return + } + } + stubImports = append(stubImports, &stubImport{name, path}) + })) + _, err = methodsBuffer.Write(printStubMethod(methodData{ + Method: m.Name(), + Concrete: getStubReceiver(si), + Interface: deduceIfaceName(si.Concrete.Obj().Pkg(), si.Interface.Pkg(), si.Interface), + Signature: strings.TrimPrefix(sig, "func"), + })) + if err != nil { + return nil, nil, fmt.Errorf("error printing method: %w", err) + } + methodsBuffer.WriteRune('\n') + } + } + return methodsBuffer.Bytes(), stubImports, nil +} + +// stubErr reurns the Go code implementation +// of an error interface relevant to the +// concrete type +func stubErr(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) []byte { + return printStubMethod(methodData{ + Method: "Error", + Interface: "error", + Concrete: getStubReceiver(si), + Signature: "() string", + }) +} + +// getStubReceiver returns the concrete type's name as a method receiver. +// It accounts for type parameters if they exist. +func getStubReceiver(si *stubmethods.StubInfo) string { + var concrete string + if si.Pointer { + concrete += "*" + } + concrete += si.Concrete.Obj().Name() + concrete += FormatTypeParams(typeparams.ForNamed(si.Concrete)) + return concrete +} + +type methodData struct { + Method string + Interface string + Concrete string + Signature string +} + +// printStubMethod takes methodData and returns Go code that represents the given method such as: +// +// // {{ .Method }} implements {{ .Interface }} +// func ({{ .Concrete }}) {{ .Method }}{{ .Signature }} { +// panic("unimplemented") +// } +func printStubMethod(md methodData) []byte { + var b bytes.Buffer + fmt.Fprintf(&b, "// %s implements %s\n", md.Method, md.Interface) + fmt.Fprintf(&b, "func (%s) %s%s {\n\t", md.Concrete, md.Method, md.Signature) + fmt.Fprintln(&b, `panic("unimplemented")`) + fmt.Fprintln(&b, "}") + return b.Bytes() +} + +func deduceIfaceName(concretePkg, ifacePkg *types.Package, ifaceObj types.Object) string { + if concretePkg.Path() == ifacePkg.Path() { + return ifaceObj.Name() + } + return fmt.Sprintf("%s.%s", ifacePkg.Name(), ifaceObj.Name()) +} + +func getStubNodes(pgf *ParsedGoFile, pRng protocol.Range) ([]ast.Node, token.Pos, error) { + start, end, err := pgf.RangePos(pRng) + if err != nil { + return nil, 0, err + } + nodes, _ := astutil.PathEnclosingInterval(pgf.File, start, end) + return nodes, start, nil +} + +/* +missingMethods takes a concrete type and returns any missing methods for the given interface as well as +any missing interface that might have been embedded to its parent. For example: + + type I interface { + io.Writer + Hello() + } + +returns + + []*missingInterface{ + { + iface: *types.Interface (io.Writer), + file: *ast.File: io.go, + missing []*types.Func{Write}, + }, + { + iface: *types.Interface (I), + file: *ast.File: myfile.go, + missing: []*types.Func{Hello} + }, + } +*/ +func missingMethods(ctx context.Context, snapshot Snapshot, concMS *types.MethodSet, concPkg *types.Package, ifaceObj *types.TypeName, visited map[string]struct{}) ([]*missingInterface, error) { + iface, ok := ifaceObj.Type().Underlying().(*types.Interface) + if !ok { + return nil, fmt.Errorf("expected %v to be an interface but got %T", iface, ifaceObj.Type().Underlying()) + } + + // The built-in error interface is special. + if ifaceObj.Pkg() == nil && ifaceObj.Name() == "error" { + var missingInterfaces []*missingInterface + if concMS.Lookup(nil, "Error") == nil { + errorMethod, _, _ := types.LookupFieldOrMethod(iface, false, nil, "Error") + missingInterfaces = append(missingInterfaces, &missingInterface{ + iface: ifaceObj, + missing: []*types.Func{errorMethod.(*types.Func)}, + }) + } + return missingInterfaces, nil + } + + // Parse the imports from the file that declares the interface. + ifaceFilename := safetoken.StartPosition(snapshot.FileSet(), ifaceObj.Pos()).Filename + ifaceFH, err := snapshot.GetFile(ctx, span.URIFromPath(ifaceFilename)) + if err != nil { + return nil, err + } + ifaceFile, err := snapshot.ParseGo(ctx, ifaceFH, ParseHeader) + if err != nil { + return nil, fmt.Errorf("error parsing imports from interface file: %w", err) + } + + var missing []*types.Func + + // Add all the interface methods not defined by the concrete type to missing. + for i := 0; i < iface.NumExplicitMethods(); i++ { + method := iface.ExplicitMethod(i) + if sel := concMS.Lookup(concPkg, method.Name()); sel == nil { + // Concrete type does not have the interface method. + if _, ok := visited[method.Name()]; !ok { + missing = append(missing, method) + visited[method.Name()] = struct{}{} + } + } else { + // Concrete type does have the interface method. + implSig := sel.Type().(*types.Signature) + ifaceSig := method.Type().(*types.Signature) + if !types.Identical(ifaceSig, implSig) { + return nil, fmt.Errorf("mimsatched %q function signatures:\nhave: %s\nwant: %s", method.Name(), implSig, ifaceSig) + } + } + } + + // Process embedded interfaces, recursively. + // + // TODO(adonovan): this whole computation could be expressed + // more simply without recursion, driven by the method + // sets of the interface and concrete types. Once the set + // difference (missing methods) is computed, the imports + // from the declaring file(s) could be loaded as needed. + var missingInterfaces []*missingInterface + for i := 0; i < iface.NumEmbeddeds(); i++ { + eiface := iface.Embedded(i).Obj() + em, err := missingMethods(ctx, snapshot, concMS, concPkg, eiface, visited) + if err != nil { + return nil, err + } + missingInterfaces = append(missingInterfaces, em...) + } + // The type checker is deterministic, but its choice of + // ordering of embedded interfaces varies with Go version + // (e.g. go1.17 was sorted, go1.18 was lexical order). + // Sort to ensure test portability. + sort.Slice(missingInterfaces, func(i, j int) bool { + return missingInterfaces[i].iface.Id() < missingInterfaces[j].iface.Id() + }) + + if len(missing) > 0 { + missingInterfaces = append(missingInterfaces, &missingInterface{ + iface: ifaceObj, + imports: ifaceFile.File.Imports, + missing: missing, + }) + } + return missingInterfaces, nil +} + +// missingInterface represents an interface +// that has all or some of its methods missing +// from the destination concrete type +type missingInterface struct { + iface *types.TypeName + imports []*ast.ImportSpec // the interface's import environment + missing []*types.Func +} + +// stubImport represents a newly added import +// statement to the concrete type. If name is not +// empty, then that import is required to have that name. +type stubImport struct{ Name, Path string } diff --git a/gopls/internal/lsp/source/symbols.go b/gopls/internal/lsp/source/symbols.go new file mode 100644 index 00000000000..a5c015e0aa0 --- /dev/null +++ b/gopls/internal/lsp/source/symbols.go @@ -0,0 +1,227 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/event" +) + +func DocumentSymbols(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.DocumentSymbol, error) { + ctx, done := event.Start(ctx, "source.DocumentSymbols") + defer done() + + pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) + if err != nil { + return nil, fmt.Errorf("getting file for DocumentSymbols: %w", err) + } + + // Build symbols for file declarations. When encountering a declaration with + // errors (typically because positions are invalid), we skip the declaration + // entirely. VS Code fails to show any symbols if one of the top-level + // symbols is missing position information. + var symbols []protocol.DocumentSymbol + for _, decl := range pgf.File.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + if decl.Name.Name == "_" { + continue + } + fs, err := funcSymbol(pgf.Mapper, pgf.Tok, decl) + if err == nil { + // If function is a method, prepend the type of the method. + if decl.Recv != nil && len(decl.Recv.List) > 0 { + fs.Name = fmt.Sprintf("(%s).%s", types.ExprString(decl.Recv.List[0].Type), fs.Name) + } + symbols = append(symbols, fs) + } + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + if spec.Name.Name == "_" { + continue + } + ts, err := typeSymbol(pgf.Mapper, pgf.Tok, spec) + if err == nil { + symbols = append(symbols, ts) + } + case *ast.ValueSpec: + for _, name := range spec.Names { + if name.Name == "_" { + continue + } + vs, err := varSymbol(pgf.Mapper, pgf.Tok, spec, name, decl.Tok == token.CONST) + if err == nil { + symbols = append(symbols, vs) + } + } + } + } + } + } + return symbols, nil +} + +func funcSymbol(m *protocol.Mapper, tf *token.File, decl *ast.FuncDecl) (protocol.DocumentSymbol, error) { + s := protocol.DocumentSymbol{ + Name: decl.Name.Name, + Kind: protocol.Function, + } + if decl.Recv != nil { + s.Kind = protocol.Method + } + var err error + s.Range, err = m.NodeRange(tf, decl) + if err != nil { + return protocol.DocumentSymbol{}, err + } + s.SelectionRange, err = m.NodeRange(tf, decl.Name) + if err != nil { + return protocol.DocumentSymbol{}, err + } + s.Detail = types.ExprString(decl.Type) + return s, nil +} + +func typeSymbol(m *protocol.Mapper, tf *token.File, spec *ast.TypeSpec) (protocol.DocumentSymbol, error) { + s := protocol.DocumentSymbol{ + Name: spec.Name.Name, + } + var err error + s.Range, err = m.NodeRange(tf, spec) + if err != nil { + return protocol.DocumentSymbol{}, err + } + s.SelectionRange, err = m.NodeRange(tf, spec.Name) + if err != nil { + return protocol.DocumentSymbol{}, err + } + s.Kind, s.Detail, s.Children = typeDetails(m, tf, spec.Type) + return s, nil +} + +func typeDetails(m *protocol.Mapper, tf *token.File, typExpr ast.Expr) (kind protocol.SymbolKind, detail string, children []protocol.DocumentSymbol) { + switch typExpr := typExpr.(type) { + case *ast.StructType: + kind = protocol.Struct + children = fieldListSymbols(m, tf, typExpr.Fields, protocol.Field) + if len(children) > 0 { + detail = "struct{...}" + } else { + detail = "struct{}" + } + + // Find interface methods and embedded types. + case *ast.InterfaceType: + kind = protocol.Interface + children = fieldListSymbols(m, tf, typExpr.Methods, protocol.Method) + if len(children) > 0 { + detail = "interface{...}" + } else { + detail = "interface{}" + } + + case *ast.FuncType: + kind = protocol.Function + detail = types.ExprString(typExpr) + + default: + kind = protocol.Class // catch-all, for cases where we don't know the kind syntactically + detail = types.ExprString(typExpr) + } + return +} + +func fieldListSymbols(m *protocol.Mapper, tf *token.File, fields *ast.FieldList, fieldKind protocol.SymbolKind) []protocol.DocumentSymbol { + if fields == nil { + return nil + } + + var symbols []protocol.DocumentSymbol + for _, field := range fields.List { + detail, children := "", []protocol.DocumentSymbol(nil) + if field.Type != nil { + _, detail, children = typeDetails(m, tf, field.Type) + } + if len(field.Names) == 0 { // embedded interface or struct field + // By default, use the formatted type details as the name of this field. + // This handles potentially invalid syntax, as well as type embeddings in + // interfaces. + child := protocol.DocumentSymbol{ + Name: detail, + Kind: protocol.Field, // consider all embeddings to be fields + Children: children, + } + + // If the field is a valid embedding, promote the type name to field + // name. + selection := field.Type + if id := embeddedIdent(field.Type); id != nil { + child.Name = id.Name + child.Detail = detail + selection = id + } + + if rng, err := m.NodeRange(tf, field.Type); err == nil { + child.Range = rng + } + if rng, err := m.NodeRange(tf, selection); err == nil { + child.SelectionRange = rng + } + + symbols = append(symbols, child) + } else { + for _, name := range field.Names { + child := protocol.DocumentSymbol{ + Name: name.Name, + Kind: fieldKind, + Detail: detail, + Children: children, + } + + if rng, err := m.NodeRange(tf, field); err == nil { + child.Range = rng + } + if rng, err := m.NodeRange(tf, name); err == nil { + child.SelectionRange = rng + } + + symbols = append(symbols, child) + } + } + + } + return symbols +} + +func varSymbol(m *protocol.Mapper, tf *token.File, spec *ast.ValueSpec, name *ast.Ident, isConst bool) (protocol.DocumentSymbol, error) { + s := protocol.DocumentSymbol{ + Name: name.Name, + Kind: protocol.Variable, + } + if isConst { + s.Kind = protocol.Constant + } + var err error + s.Range, err = m.NodeRange(tf, spec) + if err != nil { + return protocol.DocumentSymbol{}, err + } + s.SelectionRange, err = m.NodeRange(tf, name) + if err != nil { + return protocol.DocumentSymbol{}, err + } + if spec.Type != nil { // type may be missing from the syntax + _, s.Detail, s.Children = typeDetails(m, tf, spec.Type) + } + return s, nil +} diff --git a/gopls/internal/lsp/source/type_definition.go b/gopls/internal/lsp/source/type_definition.go new file mode 100644 index 00000000000..2a54fdfdbcb --- /dev/null +++ b/gopls/internal/lsp/source/type_definition.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/token" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/event" +) + +// TypeDefinition handles the textDocument/typeDefinition request for Go files. +func TypeDefinition(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Location, error) { + ctx, done := event.Start(ctx, "source.TypeDefinition") + defer done() + + pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), TypecheckFull, NarrowestPackage) + if err != nil { + return nil, err + } + pos, err := pgf.PositionPos(position) + if err != nil { + return nil, err + } + + obj := referencedObject(pkg, pgf, pos) + if obj == nil { + return nil, nil + } + + typObj := typeToObject(obj.Type()) + if typObj == nil { + return nil, fmt.Errorf("no type definition for %s", obj.Name()) + } + + // Identifiers with the type "error" are a special case with no position. + if hasErrorType(typObj) { + // TODO(rfindley): we can do better here, returning a link to the builtin + // file. + return nil, nil + } + + loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, typObj.Pos(), typObj.Pos()+token.Pos(len(typObj.Name()))) + if err != nil { + return nil, err + } + return []protocol.Location{loc}, nil +} diff --git a/gopls/internal/lsp/source/types_format.go b/gopls/internal/lsp/source/types_format.go new file mode 100644 index 00000000000..379d11aa449 --- /dev/null +++ b/gopls/internal/lsp/source/types_format.go @@ -0,0 +1,515 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "bytes" + "context" + "fmt" + "go/ast" + "go/doc" + "go/printer" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/typeparams" +) + +// FormatType returns the detail and kind for a types.Type. +func FormatType(typ types.Type, qf types.Qualifier) (detail string, kind protocol.CompletionItemKind) { + if types.IsInterface(typ) { + detail = "interface{...}" + kind = protocol.InterfaceCompletion + } else if _, ok := typ.(*types.Struct); ok { + detail = "struct{...}" + kind = protocol.StructCompletion + } else if typ != typ.Underlying() { + detail, kind = FormatType(typ.Underlying(), qf) + } else { + detail = types.TypeString(typ, qf) + kind = protocol.ClassCompletion + } + return detail, kind +} + +type signature struct { + name, doc string + typeParams, params, results []string + variadic bool + needResultParens bool +} + +func (s *signature) Format() string { + var b strings.Builder + b.WriteByte('(') + for i, p := range s.params { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(p) + } + b.WriteByte(')') + + // Add space between parameters and results. + if len(s.results) > 0 { + b.WriteByte(' ') + } + if s.needResultParens { + b.WriteByte('(') + } + for i, r := range s.results { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(r) + } + if s.needResultParens { + b.WriteByte(')') + } + return b.String() +} + +func (s *signature) TypeParams() []string { + return s.typeParams +} + +func (s *signature) Params() []string { + return s.params +} + +// NewBuiltinSignature returns signature for the builtin object with a given +// name, if a builtin object with the name exists. +func NewBuiltinSignature(ctx context.Context, s Snapshot, name string) (*signature, error) { + fset := s.FileSet() + builtin, err := s.BuiltinFile(ctx) + if err != nil { + return nil, err + } + obj := builtin.File.Scope.Lookup(name) + if obj == nil { + return nil, fmt.Errorf("no builtin object for %s", name) + } + decl, ok := obj.Decl.(*ast.FuncDecl) + if !ok { + return nil, fmt.Errorf("no function declaration for builtin: %s", name) + } + if decl.Type == nil { + return nil, fmt.Errorf("no type for builtin decl %s", decl.Name) + } + var variadic bool + if decl.Type.Params.List != nil { + numParams := len(decl.Type.Params.List) + lastParam := decl.Type.Params.List[numParams-1] + if _, ok := lastParam.Type.(*ast.Ellipsis); ok { + variadic = true + } + } + params, _ := formatFieldList(ctx, fset, decl.Type.Params, variadic) + results, needResultParens := formatFieldList(ctx, fset, decl.Type.Results, false) + d := decl.Doc.Text() + switch s.View().Options().HoverKind { + case SynopsisDocumentation: + d = doc.Synopsis(d) + case NoDocumentation: + d = "" + } + return &signature{ + doc: d, + name: name, + needResultParens: needResultParens, + params: params, + results: results, + variadic: variadic, + }, nil +} + +var replacer = strings.NewReplacer( + `ComplexType`, `complex128`, + `FloatType`, `float64`, + `IntegerType`, `int`, +) + +func formatFieldList(ctx context.Context, fset *token.FileSet, list *ast.FieldList, variadic bool) ([]string, bool) { + if list == nil { + return nil, false + } + var writeResultParens bool + var result []string + for i := 0; i < len(list.List); i++ { + if i >= 1 { + writeResultParens = true + } + p := list.List[i] + cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4} + b := &bytes.Buffer{} + if err := cfg.Fprint(b, fset, p.Type); err != nil { + event.Error(ctx, "unable to print type", nil, tag.Type.Of(p.Type)) + continue + } + typ := replacer.Replace(b.String()) + if len(p.Names) == 0 { + result = append(result, typ) + } + for _, name := range p.Names { + if name.Name != "" { + if i == 0 { + writeResultParens = true + } + result = append(result, fmt.Sprintf("%s %s", name.Name, typ)) + } else { + result = append(result, typ) + } + } + } + if variadic { + result[len(result)-1] = strings.Replace(result[len(result)-1], "[]", "...", 1) + } + return result, writeResultParens +} + +// FormatTypeParams turns TypeParamList into its Go representation, such as: +// [T, Y]. Note that it does not print constraints as this is mainly used for +// formatting type params in method receivers. +func FormatTypeParams(tparams *typeparams.TypeParamList) string { + if tparams == nil || tparams.Len() == 0 { + return "" + } + var buf bytes.Buffer + buf.WriteByte('[') + for i := 0; i < tparams.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(tparams.At(i).Obj().Name()) + } + buf.WriteByte(']') + return buf.String() +} + +// NewSignature returns formatted signature for a types.Signature struct. +func NewSignature(ctx context.Context, s Snapshot, pkg Package, srcFile *ast.File, sig *types.Signature, comment *ast.CommentGroup, qf types.Qualifier, mq MetadataQualifier) (*signature, error) { + var tparams []string + tpList := typeparams.ForSignature(sig) + for i := 0; i < tpList.Len(); i++ { + tparam := tpList.At(i) + // TODO: is it possible to reuse the logic from FormatVarType here? + s := tparam.Obj().Name() + " " + tparam.Constraint().String() + tparams = append(tparams, s) + } + + params := make([]string, 0, sig.Params().Len()) + for i := 0; i < sig.Params().Len(); i++ { + el := sig.Params().At(i) + typ, err := FormatVarType(ctx, s, pkg, srcFile, el, qf, mq) + if err != nil { + return nil, err + } + p := typ + if el.Name() != "" { + p = el.Name() + " " + typ + } + params = append(params, p) + } + + var needResultParens bool + results := make([]string, 0, sig.Results().Len()) + for i := 0; i < sig.Results().Len(); i++ { + if i >= 1 { + needResultParens = true + } + el := sig.Results().At(i) + typ, err := FormatVarType(ctx, s, pkg, srcFile, el, qf, mq) + if err != nil { + return nil, err + } + if el.Name() == "" { + results = append(results, typ) + } else { + if i == 0 { + needResultParens = true + } + results = append(results, el.Name()+" "+typ) + } + } + var d string + if comment != nil { + d = comment.Text() + } + switch s.View().Options().HoverKind { + case SynopsisDocumentation: + d = doc.Synopsis(d) + case NoDocumentation: + d = "" + } + return &signature{ + doc: d, + typeParams: tparams, + params: params, + results: results, + variadic: sig.Variadic(), + needResultParens: needResultParens, + }, nil +} + +// FormatVarType formats a *types.Var, accounting for type aliases. +// To do this, it looks in the AST of the file in which the object is declared. +// On any errors, it always falls back to types.TypeString. +// +// TODO(rfindley): this function could return the actual name used in syntax, +// for better parameter names. +func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, srcFile *ast.File, obj *types.Var, qf types.Qualifier, mq MetadataQualifier) (string, error) { + // TODO(rfindley): This looks wrong. The previous comment said: + // "If the given expr refers to a type parameter, then use the + // object's Type instead of the type parameter declaration. This helps + // format the instantiated type as opposed to the original undeclared + // generic type". + // + // But of course, if obj is a type param, we are formatting a generic type + // and not an instantiated type. Handling for instantiated types must be done + // at a higher level. + // + // Left this during refactoring in order to preserve pre-existing logic. + if typeparams.IsTypeParam(obj.Type()) { + return types.TypeString(obj.Type(), qf), nil + } + + if obj.Pkg() == nil || !obj.Pos().IsValid() { + // This is defensive, though it is extremely unlikely we'll ever have a + // builtin var. + return types.TypeString(obj.Type(), qf), nil + } + + targetpgf, pos, err := parseFull(ctx, snapshot, srcpkg, obj.Pos()) + if err != nil { + return "", err // e.g. ctx cancelled + } + + targetMeta := findFileInDepsMetadata(snapshot, srcpkg.Metadata(), targetpgf.URI) + if targetMeta == nil { + // If we have an object from type-checking, it should exist in a file in + // the forward transitive closure. + return "", bug.Errorf("failed to find file %q in deps of %q", targetpgf.URI, srcpkg.Metadata().ID) + } + + decl, spec, field := FindDeclInfo([]*ast.File{targetpgf.File}, pos) + + // We can't handle type parameters correctly, so we fall back on TypeString + // for parameterized decls. + if decl, _ := decl.(*ast.FuncDecl); decl != nil { + if typeparams.ForFuncType(decl.Type).NumFields() > 0 { + return types.TypeString(obj.Type(), qf), nil // in generic function + } + if decl.Recv != nil && len(decl.Recv.List) > 0 { + if x, _, _, _ := typeparams.UnpackIndexExpr(decl.Recv.List[0].Type); x != nil { + return types.TypeString(obj.Type(), qf), nil // in method of generic type + } + } + } + if spec, _ := spec.(*ast.TypeSpec); spec != nil && typeparams.ForTypeSpec(spec).NumFields() > 0 { + return types.TypeString(obj.Type(), qf), nil // in generic type decl + } + + if field == nil { + // TODO(rfindley): we should never reach here from an ordinary var, so + // should probably return an error here. + return types.TypeString(obj.Type(), qf), nil + } + expr := field.Type + + rq := requalifier(snapshot, targetpgf.File, targetMeta, mq) + + // The type names in the AST may not be correctly qualified. + // Determine the package name to use based on the package that originated + // the query and the package in which the type is declared. + // We then qualify the value by cloning the AST node and editing it. + expr = qualifyTypeExpr(expr, rq) + + // If the request came from a different package than the one in which the + // types are defined, we may need to modify the qualifiers. + return FormatNodeFile(targetpgf.Tok, expr), nil +} + +// qualifyTypeExpr clones the type expression expr after re-qualifying type +// names using the given function, which accepts the current syntactic +// qualifier (possibly "" for unqualified idents), and returns a new qualifier +// (again, possibly "" if the identifier should be unqualified). +// +// The resulting expression may be inaccurate: without type-checking we don't +// properly account for "." imported identifiers or builtins. +// +// TODO(rfindley): add many more tests for this function. +func qualifyTypeExpr(expr ast.Expr, qf func(string) string) ast.Expr { + switch expr := expr.(type) { + case *ast.ArrayType: + return &ast.ArrayType{ + Lbrack: expr.Lbrack, + Elt: qualifyTypeExpr(expr.Elt, qf), + Len: expr.Len, + } + + case *ast.BinaryExpr: + if expr.Op != token.OR { + return expr + } + return &ast.BinaryExpr{ + X: qualifyTypeExpr(expr.X, qf), + OpPos: expr.OpPos, + Op: expr.Op, + Y: qualifyTypeExpr(expr.Y, qf), + } + + case *ast.ChanType: + return &ast.ChanType{ + Arrow: expr.Arrow, + Begin: expr.Begin, + Dir: expr.Dir, + Value: qualifyTypeExpr(expr.Value, qf), + } + + case *ast.Ellipsis: + return &ast.Ellipsis{ + Ellipsis: expr.Ellipsis, + Elt: qualifyTypeExpr(expr.Elt, qf), + } + + case *ast.FuncType: + return &ast.FuncType{ + Func: expr.Func, + Params: qualifyFieldList(expr.Params, qf), + Results: qualifyFieldList(expr.Results, qf), + } + + case *ast.Ident: + // Unqualified type (builtin, package local, or dot-imported). + + // Don't qualify names that look like builtins. + // + // Without type-checking this may be inaccurate. It could be made accurate + // by doing syntactic object resolution for the entire package, but that + // does not seem worthwhile and we generally want to avoid using + // ast.Object, which may be inaccurate. + if obj := types.Universe.Lookup(expr.Name); obj != nil { + return expr + } + + newName := qf("") + if newName != "" { + return &ast.SelectorExpr{ + X: &ast.Ident{ + NamePos: expr.Pos(), + Name: newName, + }, + Sel: expr, + } + } + return expr + + case *ast.IndexExpr: + return &ast.IndexExpr{ + X: qualifyTypeExpr(expr.X, qf), + Lbrack: expr.Lbrack, + Index: qualifyTypeExpr(expr.Index, qf), + Rbrack: expr.Rbrack, + } + + case *typeparams.IndexListExpr: + indices := make([]ast.Expr, len(expr.Indices)) + for i, idx := range expr.Indices { + indices[i] = qualifyTypeExpr(idx, qf) + } + return &typeparams.IndexListExpr{ + X: qualifyTypeExpr(expr.X, qf), + Lbrack: expr.Lbrack, + Indices: indices, + Rbrack: expr.Rbrack, + } + + case *ast.InterfaceType: + return &ast.InterfaceType{ + Interface: expr.Interface, + Methods: qualifyFieldList(expr.Methods, qf), + Incomplete: expr.Incomplete, + } + + case *ast.MapType: + return &ast.MapType{ + Map: expr.Map, + Key: qualifyTypeExpr(expr.Key, qf), + Value: qualifyTypeExpr(expr.Value, qf), + } + + case *ast.ParenExpr: + return &ast.ParenExpr{ + Lparen: expr.Lparen, + Rparen: expr.Rparen, + X: qualifyTypeExpr(expr.X, qf), + } + + case *ast.SelectorExpr: + if id, ok := expr.X.(*ast.Ident); ok { + // qualified type + newName := qf(id.Name) + if newName == "" { + return expr.Sel + } + return &ast.SelectorExpr{ + X: &ast.Ident{ + NamePos: id.NamePos, + Name: newName, + }, + Sel: expr.Sel, + } + } + return expr + + case *ast.StarExpr: + return &ast.StarExpr{ + Star: expr.Star, + X: qualifyTypeExpr(expr.X, qf), + } + + case *ast.StructType: + return &ast.StructType{ + Struct: expr.Struct, + Fields: qualifyFieldList(expr.Fields, qf), + Incomplete: expr.Incomplete, + } + + default: + return expr + } +} + +func qualifyFieldList(fl *ast.FieldList, qf func(string) string) *ast.FieldList { + if fl == nil { + return nil + } + if fl.List == nil { + return &ast.FieldList{ + Closing: fl.Closing, + Opening: fl.Opening, + } + } + list := make([]*ast.Field, 0, len(fl.List)) + for _, f := range fl.List { + list = append(list, &ast.Field{ + Comment: f.Comment, + Doc: f.Doc, + Names: f.Names, + Tag: f.Tag, + Type: qualifyTypeExpr(f.Type, qf), + }) + } + return &ast.FieldList{ + Closing: fl.Closing, + Opening: fl.Opening, + List: list, + } +} diff --git a/gopls/internal/lsp/source/util.go b/gopls/internal/lsp/source/util.go new file mode 100644 index 00000000000..8340c5ec9d4 --- /dev/null +++ b/gopls/internal/lsp/source/util.go @@ -0,0 +1,651 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/tokeninternal" + "golang.org/x/tools/internal/typeparams" +) + +// IsGenerated gets and reads the file denoted by uri and reports +// whether it contains a "go:generated" directive as described at +// https://golang.org/s/generatedcode. +// +// TODO(adonovan): opt: this function does too much. +// Move snapshot.GetFile into the caller (most of which have already done it). +func IsGenerated(ctx context.Context, snapshot Snapshot, uri span.URI) bool { + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return false + } + pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader) + if err != nil { + return false + } + for _, commentGroup := range pgf.File.Comments { + for _, comment := range commentGroup.List { + if matched := generatedRx.MatchString(comment.Text); matched { + // Check if comment is at the beginning of the line in source. + if safetoken.Position(pgf.Tok, comment.Slash).Column == 1 { + return true + } + } + } + } + return false +} + +// adjustedObjEnd returns the end position of obj, possibly modified for +// package names. +// +// TODO(rfindley): eliminate this function, by inlining it at callsites where +// it makes sense. +func adjustedObjEnd(obj types.Object) token.Pos { + nameLen := len(obj.Name()) + if pkgName, ok := obj.(*types.PkgName); ok { + // An imported Go package has a package-local, unqualified name. + // When the name matches the imported package name, there is no + // identifier in the import spec with the local package name. + // + // For example: + // import "go/ast" // name "ast" matches package name + // import a "go/ast" // name "a" does not match package name + // + // When the identifier does not appear in the source, have the range + // of the object be the import path, including quotes. + if pkgName.Imported().Name() == pkgName.Name() { + nameLen = len(pkgName.Imported().Path()) + len(`""`) + } + } + return obj.Pos() + token.Pos(nameLen) +} + +// posToMappedRange returns the MappedRange for the given [start, end) span, +// which must be among the transitive dependencies of pkg. +// +// TODO(adonovan): many of the callers need only the ParsedGoFile so +// that they can call pgf.PosRange(pos, end) to get a Range; they +// don't actually need a MappedRange. +func posToMappedRange(ctx context.Context, snapshot Snapshot, pkg Package, pos, end token.Pos) (protocol.MappedRange, error) { + if !pos.IsValid() { + return protocol.MappedRange{}, fmt.Errorf("invalid start position") + } + if !end.IsValid() { + return protocol.MappedRange{}, fmt.Errorf("invalid end position") + } + + logicalFilename := pkg.FileSet().File(pos).Name() // ignore line directives + pgf, _, err := findFileInDeps(ctx, snapshot, pkg, span.URIFromPath(logicalFilename)) + if err != nil { + return protocol.MappedRange{}, err + } + return pgf.PosMappedRange(pos, end) +} + +// Matches cgo generated comment as well as the proposed standard: +// +// https://golang.org/s/generatedcode +var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) + +// FileKindForLang returns the file kind associated with the given language ID, +// or UnknownKind if the language ID is not recognized. +func FileKindForLang(langID string) FileKind { + switch langID { + case "go": + return Go + case "go.mod": + return Mod + case "go.sum": + return Sum + case "tmpl", "gotmpl": + return Tmpl + case "go.work": + return Work + default: + return UnknownKind + } +} + +// nodeAtPos returns the index and the node whose position is contained inside +// the node list. +func nodeAtPos(nodes []ast.Node, pos token.Pos) (ast.Node, int) { + if nodes == nil { + return nil, -1 + } + for i, node := range nodes { + if node.Pos() <= pos && pos <= node.End() { + return node, i + } + } + return nil, -1 +} + +// FormatNode returns the "pretty-print" output for an ast node. +func FormatNode(fset *token.FileSet, n ast.Node) string { + var buf strings.Builder + if err := printer.Fprint(&buf, fset, n); err != nil { + return "" + } + return buf.String() +} + +// FormatNodeFile is like FormatNode, but requires only the token.File for the +// syntax containing the given ast node. +func FormatNodeFile(file *token.File, n ast.Node) string { + fset := SingletonFileSet(file) + return FormatNode(fset, n) +} + +// SingletonFileSet creates a new token.FileSet containing a file that is +// identical to f (same base, size, and line), for use in APIs that require a +// FileSet. +func SingletonFileSet(f *token.File) *token.FileSet { + fset := token.NewFileSet() + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := tokeninternal.GetLines(f) + f2.SetLines(lines) + return fset +} + +// Deref returns a pointer's element type, traversing as many levels as needed. +// Otherwise it returns typ. +// +// It can return a pointer type for cyclic types (see golang/go#45510). +func Deref(typ types.Type) types.Type { + var seen map[types.Type]struct{} + for { + p, ok := typ.Underlying().(*types.Pointer) + if !ok { + return typ + } + if _, ok := seen[p.Elem()]; ok { + return typ + } + + typ = p.Elem() + + if seen == nil { + seen = make(map[types.Type]struct{}) + } + seen[typ] = struct{}{} + } +} + +func SortDiagnostics(d []*Diagnostic) { + sort.Slice(d, func(i int, j int) bool { + return CompareDiagnostic(d[i], d[j]) < 0 + }) +} + +func CompareDiagnostic(a, b *Diagnostic) int { + if r := protocol.CompareRange(a.Range, b.Range); r != 0 { + return r + } + if a.Source < b.Source { + return -1 + } + if a.Source > b.Source { + return +1 + } + if a.Message < b.Message { + return -1 + } + if a.Message > b.Message { + return +1 + } + return 0 +} + +// findFileInDeps finds uri in pkg or its dependencies. +// +// TODO(rfindley): eliminate this function. +func findFileInDeps(ctx context.Context, snapshot Snapshot, pkg Package, uri span.URI) (*ParsedGoFile, Package, error) { + pkgs := []Package{pkg} + deps := recursiveDeps(snapshot, pkg.Metadata())[1:] + // Ignore the error from type checking, but check if the context was + // canceled (which would have caused TypeCheck to exit early). + depPkgs, _ := snapshot.TypeCheck(ctx, TypecheckWorkspace, deps...) + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } + for _, dep := range depPkgs { + // Since we ignored the error from type checking, pkg may be nil. + if dep != nil { + pkgs = append(pkgs, dep) + } + } + for _, pkg := range pkgs { + if pgf, err := pkg.File(uri); err == nil { + return pgf, pkg, nil + } + } + return nil, nil, fmt.Errorf("no file for %s in deps of package %s", uri, pkg.Metadata().ID) +} + +// findFileInDepsMetadata finds package metadata containing URI in the +// transitive dependencies of m. When using the Go command, the answer is +// unique. +// +// TODO(rfindley): refactor to share logic with findPackageInDeps? +func findFileInDepsMetadata(s MetadataSource, m *Metadata, uri span.URI) *Metadata { + seen := make(map[PackageID]bool) + var search func(*Metadata) *Metadata + search = func(m *Metadata) *Metadata { + if seen[m.ID] { + return nil + } + seen[m.ID] = true + for _, cgf := range m.CompiledGoFiles { + if cgf == uri { + return m + } + } + for _, dep := range m.DepsByPkgPath { + m := s.Metadata(dep) + if m == nil { + bug.Reportf("nil metadata for %q", dep) + continue + } + if found := search(m); found != nil { + return found + } + } + return nil + } + return search(m) +} + +// recursiveDeps finds unique transitive dependencies of m, including m itself. +// +// Invariant: for the resulting slice res, res[0] == m.ID. +// +// TODO(rfindley): consider replacing this with a snapshot.ForwardDependencies +// method, or exposing the metadata graph itself. +func recursiveDeps(s MetadataSource, m *Metadata) []PackageID { + seen := make(map[PackageID]bool) + var ids []PackageID + var add func(*Metadata) + add = func(m *Metadata) { + if seen[m.ID] { + return + } + seen[m.ID] = true + ids = append(ids, m.ID) + for _, dep := range m.DepsByPkgPath { + m := s.Metadata(dep) + if m == nil { + bug.Reportf("nil metadata for %q", dep) + continue + } + add(m) + } + } + add(m) + return ids +} + +// UnquoteImportPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func UnquoteImportPath(s *ast.ImportSpec) ImportPath { + path, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return ImportPath(path) +} + +// NodeContains returns true if a node encloses a given position pos. +func NodeContains(n ast.Node, pos token.Pos) bool { + return n != nil && n.Pos() <= pos && pos <= n.End() +} + +// CollectScopes returns all scopes in an ast path, ordered as innermost scope +// first. +func CollectScopes(info *types.Info, path []ast.Node, pos token.Pos) []*types.Scope { + // scopes[i], where i import path mapping. + inverseDeps := make(map[PackageID]PackagePath) + for path, id := range m.DepsByPkgPath { + inverseDeps[id] = path + } + importsByPkgPath := make(map[PackagePath]ImportPath) // best import paths by pkgPath + for impPath, id := range m.DepsByImpPath { + if id == "" { + continue + } + pkgPath := inverseDeps[id] + _, hasPath := importsByPkgPath[pkgPath] + _, hasImp := localNames[impPath] + // In rare cases, there may be multiple import paths with the same package + // path. In such scenarios, prefer an import path that already exists in + // the file. + if !hasPath || hasImp { + importsByPkgPath[pkgPath] = impPath + } + } + + return func(pkgName PackageName, impPath ImportPath, pkgPath PackagePath) string { + // If supplied, translate the package path to an import path in the source + // package. + if pkgPath != "" { + if srcImp := importsByPkgPath[pkgPath]; srcImp != "" { + impPath = srcImp + } + if pkgPath == m.PkgPath { + return "" + } + } + if localName, ok := localNames[impPath]; ok && impPath != "" { + return string(localName) + } + if pkgName != "" { + return string(pkgName) + } + idx := strings.LastIndexByte(string(impPath), '/') + return string(impPath[idx+1:]) + } +} + +// importInfo collects information about the import specified by imp, +// extracting its file-local name, package name, import path, and package path. +// +// If metadata is missing for the import, the resulting package name and +// package path may be empty, and the file local name may be guessed based on +// the import path. +// +// Note: previous versions of this helper used a PackageID->PackagePath map +// extracted from m, for extracting package path even in the case where +// metadata for a dep was missing. This should not be necessary, as we should +// always have metadata for IDs contained in DepsByPkgPath. +func importInfo(s MetadataSource, imp *ast.ImportSpec, m *Metadata) (string, PackageName, ImportPath, PackagePath) { + var ( + name string // local name + pkgName PackageName + impPath = UnquoteImportPath(imp) + pkgPath PackagePath + ) + + // If the import has a local name, use it. + if imp.Name != nil { + name = imp.Name.Name + } + + // Try to find metadata for the import. If successful and there is no local + // name, the package name is the local name. + if depID := m.DepsByImpPath[impPath]; depID != "" { + if depm := s.Metadata(depID); depm != nil { + if name == "" { + name = string(depm.Name) + } + pkgName = depm.Name + pkgPath = depm.PkgPath + } + } + + // If the local name is still unknown, guess it based on the import path. + if name == "" { + idx := strings.LastIndexByte(string(impPath), '/') + name = string(impPath[idx+1:]) + } + return name, pkgName, impPath, pkgPath +} + +// isDirective reports whether c is a comment directive. +// +// Copied and adapted from go/src/go/ast/ast.go. +func isDirective(c string) bool { + if len(c) < 3 { + return false + } + if c[1] != '/' { + return false + } + //-style comment (no newline at the end) + c = c[2:] + if len(c) == 0 { + // empty line + return false + } + // "//line " is a line directive. + // (The // has been removed.) + if strings.HasPrefix(c, "line ") { + return true + } + + // "//[a-z0-9]+:[a-z0-9]" + // (The // has been removed.) + colon := strings.Index(c, ":") + if colon <= 0 || colon+1 >= len(c) { + return false + } + for i := 0; i <= colon+1; i++ { + if i == colon { + continue + } + b := c[i] + if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { + return false + } + } + return true +} + +// InDir checks whether path is in the file tree rooted at dir. +// It checks only the lexical form of the file names. +// It does not consider symbolic links. +// +// Copied from go/src/cmd/go/internal/search/search.go. +func InDir(dir, path string) bool { + pv := strings.ToUpper(filepath.VolumeName(path)) + dv := strings.ToUpper(filepath.VolumeName(dir)) + path = path[len(pv):] + dir = dir[len(dv):] + switch { + default: + return false + case pv != dv: + return false + case len(path) == len(dir): + if path == dir { + return true + } + return false + case dir == "": + return path != "" + case len(path) > len(dir): + if dir[len(dir)-1] == filepath.Separator { + if path[:len(dir)] == dir { + return path[len(dir):] != "" + } + return false + } + if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir { + if len(path) == len(dir)+1 { + return true + } + return path[len(dir)+1:] != "" + } + return false + } +} + +// IsValidImport returns whether importPkgPath is importable +// by pkgPath +func IsValidImport(pkgPath, importPkgPath PackagePath) bool { + i := strings.LastIndex(string(importPkgPath), "/internal/") + if i == -1 { + return true + } + // TODO(rfindley): this looks wrong: IsCommandLineArguments is meant to + // operate on package IDs, not package paths. + if IsCommandLineArguments(PackageID(pkgPath)) { + return true + } + // TODO(rfindley): this is wrong. mod.testx/p should not be able to + // import mod.test/internal: https://go.dev/play/p/-Ca6P-E4V4q + return strings.HasPrefix(string(pkgPath), string(importPkgPath[:i])) +} + +// IsCommandLineArguments reports whether a given value denotes +// "command-line-arguments" package, which is a package with an unknown ID +// created by the go command. It can have a test variant, which is why callers +// should not check that a value equals "command-line-arguments" directly. +func IsCommandLineArguments(id PackageID) bool { + return strings.Contains(string(id), "command-line-arguments") +} + +// RecvIdent returns the type identifier of a method receiver. +// e.g. A for all of A, *A, A[T], *A[T], etc. +func RecvIdent(recv *ast.FieldList) *ast.Ident { + if recv == nil || len(recv.List) == 0 { + return nil + } + x := recv.List[0].Type + if star, ok := x.(*ast.StarExpr); ok { + x = star.X + } + switch ix := x.(type) { // check for instantiated receivers + case *ast.IndexExpr: + x = ix.X + case *typeparams.IndexListExpr: + x = ix.X + } + if ident, ok := x.(*ast.Ident); ok { + return ident + } + return nil +} + +// embeddedIdent returns the type name identifier for an embedding x, if x in a +// valid embedding. Otherwise, it returns nil. +// +// Spec: An embedded field must be specified as a type name T or as a pointer +// to a non-interface type name *T +func embeddedIdent(x ast.Expr) *ast.Ident { + if star, ok := x.(*ast.StarExpr); ok { + x = star.X + } + switch ix := x.(type) { // check for instantiated receivers + case *ast.IndexExpr: + x = ix.X + case *typeparams.IndexListExpr: + x = ix.X + } + switch x := x.(type) { + case *ast.Ident: + return x + case *ast.SelectorExpr: + if _, ok := x.X.(*ast.Ident); ok { + return x.Sel + } + } + return nil +} diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go new file mode 100644 index 00000000000..58385f7ae97 --- /dev/null +++ b/gopls/internal/lsp/source/view.go @@ -0,0 +1,859 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "bytes" + "context" + "crypto/sha256" + "errors" + "fmt" + "go/ast" + "go/scanner" + "go/token" + "go/types" + "io" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" + "golang.org/x/tools/internal/packagesinternal" +) + +// A GlobalSnapshotID uniquely identifies a snapshot within this process and +// increases monotonically with snapshot creation time. +// +// We use a distinct integral type for global IDs to help enforce correct +// usage. +type GlobalSnapshotID uint64 + +// Snapshot represents the current state for the given view. +type Snapshot interface { + // SequenceID is the sequence id of this snapshot within its containing + // view. + // + // Relative to their view sequence ids are monotonically increasing, but this + // does not hold globally: when new views are created their initial snapshot + // has sequence ID 0. For operations that span multiple views, use global + // IDs. + SequenceID() uint64 + + // GlobalID is a globally unique identifier for this snapshot. Global IDs are + // monotonic: subsequent snapshots will have higher global ID, though + // subsequent snapshots in a view may not have adjacent global IDs. + GlobalID() GlobalSnapshotID + + // View returns the View associated with this snapshot. + View() View + + // BackgroundContext returns a context used for all background processing + // on behalf of this snapshot. + BackgroundContext() context.Context + + // Fileset returns the Fileset used to parse all the Go files in this snapshot. + // + // If the files are known to belong to a specific Package, use + // Package.FileSet instead. (We plan to eliminate the + // Snapshot's cache of parsed files, and thus the need for a + // snapshot-wide FileSet.) + FileSet() *token.FileSet + + // ValidBuildConfiguration returns true if there is some error in the + // user's workspace. In particular, if they are both outside of a module + // and their GOPATH. + ValidBuildConfiguration() bool + + // FindFile returns the FileHandle for the given URI, if it is already + // in the given snapshot. + FindFile(uri span.URI) FileHandle + + // GetFile returns the FileHandle for a given URI, initializing it if it is + // not already part of the snapshot. + GetFile(ctx context.Context, uri span.URI) (FileHandle, error) + + // AwaitInitialized waits until the snapshot's view is initialized. + AwaitInitialized(ctx context.Context) + + // IsOpen returns whether the editor currently has a file open. + IsOpen(uri span.URI) bool + + // IgnoredFile reports if a file would be ignored by a `go list` of the whole + // workspace. + IgnoredFile(uri span.URI) bool + + // Templates returns the .tmpl files + Templates() map[span.URI]FileHandle + + // ParseGo returns the parsed AST for the file. + // If the file is not available, returns nil and an error. + // Position information is added to FileSet(). + ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error) + + // Analyze runs the specified analyzers on the given package at this snapshot. + Analyze(ctx context.Context, id PackageID, analyzers []*Analyzer) ([]*Diagnostic, error) + + // RunGoCommandPiped runs the given `go` command, writing its output + // to stdout and stderr. Verb, Args, and WorkingDir must be specified. + // + // RunGoCommandPiped runs the command serially using gocommand.RunPiped, + // enforcing that this command executes exclusively to other commands on the + // server. + RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error + + // RunGoCommandDirect runs the given `go` command. Verb, Args, and + // WorkingDir must be specified. + RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) + + // RunGoCommands runs a series of `go` commands that updates the go.mod + // and go.sum file for wd, and returns their updated contents. + RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) + + // RunProcessEnvFunc runs fn with the process env for this snapshot's view. + // Note: the process env contains cached module and filesystem state. + RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error + + // ModFiles are the go.mod files enclosed in the snapshot's view and known + // to the snapshot. + ModFiles() []span.URI + + // ParseMod is used to parse go.mod files. + ParseMod(ctx context.Context, fh FileHandle) (*ParsedModule, error) + + // ModWhy returns the results of `go mod why` for the module specified by + // the given go.mod file. + ModWhy(ctx context.Context, fh FileHandle) (map[string]string, error) + + // ModTidy returns the results of `go mod tidy` for the module specified by + // the given go.mod file. + ModTidy(ctx context.Context, pm *ParsedModule) (*TidiedModule, error) + + // ModVuln returns import vulnerability analysis for the given go.mod URI. + // Concurrent requests are combined into a single command. + ModVuln(ctx context.Context, modURI span.URI) (*govulncheck.Result, error) + + // GoModForFile returns the URI of the go.mod file for the given URI. + GoModForFile(uri span.URI) span.URI + + // WorkFile, if non-empty, is the go.work file for the workspace. + WorkFile() span.URI + + // ParseWork is used to parse go.work files. + ParseWork(ctx context.Context, fh FileHandle) (*ParsedWorkFile, error) + + // BuiltinFile returns information about the special builtin package. + BuiltinFile(ctx context.Context) (*ParsedGoFile, error) + + // IsBuiltin reports whether uri is part of the builtin package. + IsBuiltin(ctx context.Context, uri span.URI) bool + + // ReverseDependencies returns a new mapping whose entries are + // the ID and Metadata of each package in the workspace that + // directly or transitively depend on the package denoted by id, + // excluding id itself. + ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*Metadata, error) + + // CachedImportPaths returns all the imported packages loaded in this + // snapshot, indexed by their package path (not import path, despite the name) + // and checked in TypecheckWorkspace mode. + // + // To reduce latency, it does not wait for type-checking to complete. + // It is intended for use only in completions. + CachedImportPaths(ctx context.Context) (map[PackagePath]*types.Package, error) + + // ActiveMetadata returns a new, unordered slice containing + // metadata for all packages considered 'active' in the workspace. + // + // In normal memory mode, this is all workspace packages. In degraded memory + // mode, this is just the reverse transitive closure of open packages. + ActiveMetadata(ctx context.Context) ([]*Metadata, error) + + // AllMetadata returns a new unordered array of metadata for all packages in the workspace. + AllMetadata(ctx context.Context) ([]*Metadata, error) + + // Symbols returns all symbols in the snapshot. + Symbols(ctx context.Context) map[span.URI][]Symbol + + // Metadata returns the metadata for the specified package, + // or nil if it was not found. + Metadata(id PackageID) *Metadata + + // MetadataForFile returns a new slice containing metadata for each + // package containing the Go file identified by uri, ordered by the + // number of CompiledGoFiles (i.e. "narrowest" to "widest" package). + // The result may include tests and intermediate test variants of + // importable packages. + // It returns an error if the context was cancelled. + MetadataForFile(ctx context.Context, uri span.URI) ([]*Metadata, error) + + // TypeCheck parses and type-checks the specified packages, + // and returns them in the same order as the ids. + TypeCheck(ctx context.Context, mode TypecheckMode, ids ...PackageID) ([]Package, error) + + // GetCriticalError returns any critical errors in the workspace. + // + // A nil result may mean success, or context cancellation. + GetCriticalError(ctx context.Context) *CriticalError +} + +// SnapshotLabels returns a new slice of labels that should be used for events +// related to a snapshot. +func SnapshotLabels(snapshot Snapshot) []label.Label { + return []label.Label{tag.Snapshot.Of(snapshot.SequenceID()), tag.Directory.Of(snapshot.View().Folder())} +} + +// PackageForFile is a convenience function that selects a package to +// which this file belongs (narrowest or widest), type-checks it in +// the requested mode (full or workspace), and returns it, along with +// the parse tree of that file. +// +// Type-checking is expensive. Call snapshot.ParseGo if all you need +// is a parse tree, or snapshot.MetadataForFile if you only need metadata. +func PackageForFile(ctx context.Context, snapshot Snapshot, uri span.URI, mode TypecheckMode, pkgSel PackageSelector) (Package, *ParsedGoFile, error) { + metas, err := snapshot.MetadataForFile(ctx, uri) + if err != nil { + return nil, nil, err + } + if len(metas) == 0 { + return nil, nil, fmt.Errorf("no package metadata for file %s", uri) + } + switch pkgSel { + case NarrowestPackage: + metas = metas[:1] + case WidestPackage: + metas = metas[len(metas)-1:] + } + pkgs, err := snapshot.TypeCheck(ctx, mode, metas[0].ID) + if err != nil { + return nil, nil, err + } + pkg := pkgs[0] + pgf, err := pkg.File(uri) + if err != nil { + return nil, nil, err // "can't happen" + } + return pkg, pgf, err +} + +// PackageSelector sets how a package is selected out from a set of packages +// containing a given file. +type PackageSelector int + +const ( + // NarrowestPackage picks the "narrowest" package for a given file. + // By "narrowest" package, we mean the package with the fewest number of + // files that includes the given file. This solves the problem of test + // variants, as the test will have more files than the non-test package. + NarrowestPackage PackageSelector = iota + + // WidestPackage returns the Package containing the most files. + // This is useful for something like diagnostics, where we'd prefer to + // offer diagnostics for as many files as possible. + WidestPackage +) + +// InvocationFlags represents the settings of a particular go command invocation. +// It is a mode, plus a set of flag bits. +type InvocationFlags int + +const ( + // Normal is appropriate for commands that might be run by a user and don't + // deliberately modify go.mod files, e.g. `go test`. + Normal InvocationFlags = iota + // WriteTemporaryModFile is for commands that need information from a + // modified version of the user's go.mod file, e.g. `go mod tidy` used to + // generate diagnostics. + WriteTemporaryModFile + // LoadWorkspace is for packages.Load, and other operations that should + // consider the whole workspace at once. + LoadWorkspace + + // AllowNetwork is a flag bit that indicates the invocation should be + // allowed to access the network. + AllowNetwork InvocationFlags = 1 << 10 +) + +func (m InvocationFlags) Mode() InvocationFlags { + return m & (AllowNetwork - 1) +} + +func (m InvocationFlags) AllowNetwork() bool { + return m&AllowNetwork != 0 +} + +// View represents a single workspace. +// This is the level at which we maintain configuration like working directory +// and build tags. +type View interface { + // Name returns the name this view was constructed with. + Name() string + + // Folder returns the folder with which this view was created. + Folder() span.URI + + // Options returns a copy of the Options for this view. + Options() *Options + + // Snapshot returns the current snapshot for the view, and a + // release function that must be called when the Snapshot is + // no longer needed. + // + // If the view is shut down, the resulting error will be non-nil, and the + // release function need not be called. + Snapshot() (Snapshot, func(), error) + + // IsGoPrivatePath reports whether target is a private import path, as identified + // by the GOPRIVATE environment variable. + IsGoPrivatePath(path string) bool + + // ModuleUpgrades returns known module upgrades for the dependencies of + // modfile. + ModuleUpgrades(modfile span.URI) map[string]string + + // RegisterModuleUpgrades registers that upgrades exist for the given modules + // required by modfile. + RegisterModuleUpgrades(modfile span.URI, upgrades map[string]string) + + // ClearModuleUpgrades clears all upgrades for the modules in modfile. + ClearModuleUpgrades(modfile span.URI) + + // Vulnerabilities returns known vulnerabilities for the given modfile. + // TODO(suzmue): replace command.Vuln with a different type, maybe + // https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck/govulnchecklib#Summary? + Vulnerabilities(modfile ...span.URI) map[span.URI]*govulncheck.Result + + // SetVulnerabilities resets the list of vulnerabilities that exists for the given modules + // required by modfile. + SetVulnerabilities(modfile span.URI, vulncheckResult *govulncheck.Result) + + // FileKind returns the type of a file. + // + // We can't reliably deduce the kind from the file name alone, + // as some editors can be told to interpret a buffer as + // language different from the file name heuristic, e.g. that + // an .html file actually contains Go "html/template" syntax, + // or even that a .go file contains Python. + FileKind(FileHandle) FileKind + + // GoVersion returns the configured Go version for this view. + GoVersion() int + + // GoVersionString returns the go version string configured for this view. + // Unlike [GoVersion], this encodes the minor version and commit hash information. + GoVersionString() string +} + +// A FileSource maps uris to FileHandles. +type FileSource interface { + // GetFile returns the FileHandle for a given URI. + GetFile(ctx context.Context, uri span.URI) (FileHandle, error) +} + +// A MetadataSource maps package IDs to metadata. +// +// TODO(rfindley): replace this with a concrete metadata graph, once it is +// exposed from the snapshot. +type MetadataSource interface { + // Metadata returns Metadata for the given package ID, or nil if it does not + // exist. + Metadata(PackageID) *Metadata +} + +// A ParsedGoFile contains the results of parsing a Go file. +type ParsedGoFile struct { + URI span.URI + Mode ParseMode + File *ast.File + Tok *token.File + // Source code used to build the AST. It may be different from the + // actual content of the file if we have fixed the AST. + Src []byte + Fixed bool + Mapper *protocol.Mapper // may map fixed Src, not file content + ParseErr scanner.ErrorList +} + +// -- go/token domain convenience helpers -- + +// PositionPos returns the token.Pos of protocol position p within the file. +func (pgf *ParsedGoFile) PositionPos(p protocol.Position) (token.Pos, error) { + offset, err := pgf.Mapper.PositionOffset(p) + if err != nil { + return token.NoPos, err + } + return safetoken.Pos(pgf.Tok, offset) +} + +// PosRange returns a protocol Range for the token.Pos interval in this file. +func (pgf *ParsedGoFile) PosRange(start, end token.Pos) (protocol.Range, error) { + return pgf.Mapper.PosRange(pgf.Tok, start, end) +} + +// PosMappedRange returns a MappedRange for the token.Pos interval in this file. +// A MappedRange can be converted to any other form. +func (pgf *ParsedGoFile) PosMappedRange(start, end token.Pos) (protocol.MappedRange, error) { + return pgf.Mapper.PosMappedRange(pgf.Tok, start, end) +} + +// PosLocation returns a protocol Location for the token.Pos interval in this file. +func (pgf *ParsedGoFile) PosLocation(start, end token.Pos) (protocol.Location, error) { + return pgf.Mapper.PosLocation(pgf.Tok, start, end) +} + +// NodeRange returns a protocol Range for the ast.Node interval in this file. +func (pgf *ParsedGoFile) NodeRange(node ast.Node) (protocol.Range, error) { + return pgf.Mapper.NodeRange(pgf.Tok, node) +} + +// NodeMappedRange returns a MappedRange for the ast.Node interval in this file. +// A MappedRange can be converted to any other form. +func (pgf *ParsedGoFile) NodeMappedRange(node ast.Node) (protocol.MappedRange, error) { + return pgf.Mapper.NodeMappedRange(pgf.Tok, node) +} + +// NodeLocation returns a protocol Location for the ast.Node interval in this file. +func (pgf *ParsedGoFile) NodeLocation(node ast.Node) (protocol.Location, error) { + return pgf.Mapper.PosLocation(pgf.Tok, node.Pos(), node.End()) +} + +// RangePos parses a protocol Range back into the go/token domain. +func (pgf *ParsedGoFile) RangePos(r protocol.Range) (token.Pos, token.Pos, error) { + start, end, err := pgf.Mapper.RangeOffsets(r) + if err != nil { + return token.NoPos, token.NoPos, err + } + return pgf.Tok.Pos(start), pgf.Tok.Pos(end), nil +} + +// A ParsedModule contains the results of parsing a go.mod file. +type ParsedModule struct { + URI span.URI + File *modfile.File + Mapper *protocol.Mapper + ParseErrors []*Diagnostic +} + +// A ParsedWorkFile contains the results of parsing a go.work file. +type ParsedWorkFile struct { + URI span.URI + File *modfile.WorkFile + Mapper *protocol.Mapper + ParseErrors []*Diagnostic +} + +// A TidiedModule contains the results of running `go mod tidy` on a module. +type TidiedModule struct { + // Diagnostics representing changes made by `go mod tidy`. + Diagnostics []*Diagnostic + // The bytes of the go.mod file after it was tidied. + TidiedContent []byte +} + +// Metadata represents package metadata retrieved from go/packages. +type Metadata struct { + ID PackageID + PkgPath PackagePath + Name PackageName + GoFiles []span.URI + CompiledGoFiles []span.URI + ForTest PackagePath // package path under test, or "" + TypesSizes types.Sizes + Errors []packages.Error + DepsByImpPath map[ImportPath]PackageID // may contain dups; empty ID => missing + DepsByPkgPath map[PackagePath]PackageID // values are unique and non-empty + Module *packages.Module + DepsErrors []*packagesinternal.PackageError + LoadDir string // directory from which go/packages was run +} + +// IsIntermediateTestVariant reports whether the given package is an +// intermediate test variant, e.g. "net/http [net/url.test]". +// +// Such test variants arise when an x_test package (in this case net/url_test) +// imports a package (in this case net/http) that itself imports the the +// non-x_test package (in this case net/url). +// +// This is done so that the forward transitive closure of net/url_test has +// only one package for the "net/url" import. +// The intermediate test variant exists to hold the test variant import: +// +// net/url_test [net/url.test] +// +// | "net/http" -> net/http [net/url.test] +// | "net/url" -> net/url [net/url.test] +// | ... +// +// net/http [net/url.test] +// +// | "net/url" -> net/url [net/url.test] +// | ... +// +// This restriction propagates throughout the import graph of net/http: for +// every package imported by net/http that imports net/url, there must be an +// intermediate test variant that instead imports "net/url [net/url.test]". +// +// As one can see from the example of net/url and net/http, intermediate test +// variants can result in many additional packages that are essentially (but +// not quite) identical. For this reason, we filter these variants wherever +// possible. +func (m *Metadata) IsIntermediateTestVariant() bool { + return m.ForTest != "" && m.ForTest != m.PkgPath && m.ForTest+"_test" != m.PkgPath +} + +// RemoveIntermediateTestVariants removes intermediate test variants, modifying the array. +func RemoveIntermediateTestVariants(metas []*Metadata) []*Metadata { + res := metas[:0] + for _, m := range metas { + if !m.IsIntermediateTestVariant() { + res = append(res, m) + } + } + return res +} + +var ErrViewExists = errors.New("view already exists for session") + +// FileModification represents a modification to a file. +type FileModification struct { + URI span.URI + Action FileAction + + // OnDisk is true if a watched file is changed on disk. + // If true, Version will be -1 and Text will be nil. + OnDisk bool + + // Version will be -1 and Text will be nil when they are not supplied, + // specifically on textDocument/didClose and for on-disk changes. + Version int32 + Text []byte + + // LanguageID is only sent from the language client on textDocument/didOpen. + LanguageID string +} + +type FileAction int + +const ( + UnknownFileAction = FileAction(iota) + Open + Change + Close + Save + Create + Delete + InvalidateMetadata +) + +func (a FileAction) String() string { + switch a { + case Open: + return "Open" + case Change: + return "Change" + case Close: + return "Close" + case Save: + return "Save" + case Create: + return "Create" + case Delete: + return "Delete" + case InvalidateMetadata: + return "InvalidateMetadata" + default: + return "Unknown" + } +} + +var ErrTmpModfileUnsupported = errors.New("-modfile is unsupported for this Go version") +var ErrNoModOnDisk = errors.New("go.mod file is not on disk") + +func IsNonFatalGoModError(err error) bool { + return err == ErrTmpModfileUnsupported || err == ErrNoModOnDisk +} + +// ParseMode controls the content of the AST produced when parsing a source file. +type ParseMode int + +const ( + // ParseHeader specifies that the main package declaration and imports are needed. + // This is the mode used when attempting to examine the package graph structure. + ParseHeader ParseMode = iota + + // ParseExported specifies that the package is used only as a dependency, + // and only its exported declarations are needed. More may be included if + // necessary to avoid type errors. + ParseExported + + // ParseFull specifies the full AST is needed. + // This is used for files of direct interest where the entire contents must + // be considered. + ParseFull +) + +// AllParseModes contains all possible values of ParseMode. +// It is used for cache invalidation on a file content change. +var AllParseModes = []ParseMode{ParseHeader, ParseExported, ParseFull} + +// TypecheckMode controls what kind of parsing should be done (see ParseMode) +// while type checking a package. +type TypecheckMode int + +const ( + // TypecheckFull means to use ParseFull. + TypecheckFull TypecheckMode = iota + // TypecheckWorkspace means to use ParseFull for workspace packages, and + // ParseExported for others. + TypecheckWorkspace +) + +// A FileHandle is an interface to files tracked by the LSP session, which may +// be either files read from disk, or open in the editor session (overlays). +type FileHandle interface { + // URI is the URI for this file handle. + // TODO(rfindley): this is not actually well-defined. In some cases, there + // may be more than one URI that resolve to the same FileHandle. Which one is + // this? + URI() span.URI + // FileIdentity returns a FileIdentity for the file, even if there was an + // error reading it. + FileIdentity() FileIdentity + // Saved reports whether the file has the same content on disk. + // For on-disk files, this is trivially true. + Saved() bool + // Version returns the file version, as defined by the LSP client. + // For on-disk file handles, Version returns 0. + Version() int32 + // Read reads the contents of a file. + // If the file is not available, returns a nil slice and an error. + Read() ([]byte, error) +} + +// A Hash is a cryptographic digest of the contents of a file. +// (Although at 32B it is larger than a 16B string header, it is smaller +// and has better locality than the string header + 64B of hex digits.) +type Hash [sha256.Size]byte + +// HashOf returns the hash of some data. +func HashOf(data []byte) Hash { + return Hash(sha256.Sum256(data)) +} + +// Hashf returns the hash of a printf-formatted string. +func Hashf(format string, args ...interface{}) Hash { + // Although this looks alloc-heavy, it is faster than using + // Fprintf on sha256.New() because the allocations don't escape. + return HashOf([]byte(fmt.Sprintf(format, args...))) +} + +// String returns the digest as a string of hex digits. +func (h Hash) String() string { + return fmt.Sprintf("%64x", [sha256.Size]byte(h)) +} + +// Less returns true if the given hash is less than the other. +func (h Hash) Less(other Hash) bool { + return bytes.Compare(h[:], other[:]) < 0 +} + +// XORWith updates *h to *h XOR h2. +func (h *Hash) XORWith(h2 Hash) { + // Small enough that we don't need crypto/subtle.XORBytes. + for i := range h { + h[i] ^= h2[i] + } +} + +// FileIdentity uniquely identifies a file at a version from a FileSystem. +type FileIdentity struct { + URI span.URI + Hash Hash // digest of file contents +} + +func (id FileIdentity) String() string { + return fmt.Sprintf("%s%s", id.URI, id.Hash) +} + +// FileKind describes the kind of the file in question. +// It can be one of Go,mod, Sum, or Tmpl. +type FileKind int + +const ( + // UnknownKind is a file type we don't know about. + UnknownKind = FileKind(iota) + + // Go is a normal go source file. + Go + // Mod is a go.mod file. + Mod + // Sum is a go.sum file. + Sum + // Tmpl is a template file. + Tmpl + // Work is a go.work file. + Work +) + +func (k FileKind) String() string { + switch k { + case Go: + return "go" + case Mod: + return "go.mod" + case Sum: + return "go.sum" + case Tmpl: + return "tmpl" + case Work: + return "go.work" + default: + return fmt.Sprintf("internal error: unknown file kind %d", k) + } +} + +// Analyzer represents a go/analysis analyzer with some boolean properties +// that let the user know how to use the analyzer. +type Analyzer struct { + Analyzer *analysis.Analyzer + + // Enabled reports whether the analyzer is enabled. This value can be + // configured per-analysis in user settings. For staticcheck analyzers, + // the value of the Staticcheck setting overrides this field. + // + // Most clients should use the IsEnabled method. + Enabled bool + + // Fix is the name of the suggested fix name used to invoke the suggested + // fixes for the analyzer. It is non-empty if we expect this analyzer to + // provide its fix separately from its diagnostics. That is, we should apply + // the analyzer's suggested fixes through a Command, not a TextEdit. + Fix string + + // ActionKind is the kind of code action this analyzer produces. If + // unspecified the type defaults to quickfix. + ActionKind []protocol.CodeActionKind + + // Severity is the severity set for diagnostics reported by this + // analyzer. If left unset it defaults to Warning. + Severity protocol.DiagnosticSeverity +} + +func (a *Analyzer) String() string { return a.Analyzer.String() } + +// IsEnabled reports whether this analyzer is enabled by the given options. +func (a Analyzer) IsEnabled(options *Options) bool { + // Staticcheck analyzers can only be enabled when staticcheck is on. + if _, ok := options.StaticcheckAnalyzers[a.Analyzer.Name]; ok { + if !options.Staticcheck { + return false + } + } + if enabled, ok := options.Analyses[a.Analyzer.Name]; ok { + return enabled + } + return a.Enabled +} + +// Declare explicit types for package paths, names, and IDs to ensure that we +// never use an ID where a path belongs, and vice versa. If we confused these, +// it would result in confusing errors because package IDs often look like +// package paths. +type ( + PackageID string // go list's unique identifier for a package (e.g. "vendor/example.com/foo [vendor/example.com/bar.test]") + PackagePath string // name used to prefix linker symbols (e.g. "vendor/example.com/foo") + PackageName string // identifier in 'package' declaration (e.g. "foo") + ImportPath string // path that appears in an import declaration (e.g. "example.com/foo") +) + +// Package represents a Go package that has been parsed and type-checked. +// It maintains only the relevant fields of a *go/packages.Package. +type Package interface { + Metadata() *Metadata + + // Results of parsing: + FileSet() *token.FileSet + ParseMode() ParseMode + CompiledGoFiles() []*ParsedGoFile // (borrowed) + File(uri span.URI) (*ParsedGoFile, error) + GetSyntax() []*ast.File // (borrowed) + HasParseErrors() bool + + // Results of type checking: + GetTypes() *types.Package + GetTypesInfo() *types.Info + HasTypeErrors() bool + DiagnosticsForFile(ctx context.Context, s Snapshot, uri span.URI) ([]*Diagnostic, error) + ReferencesTo(map[PackagePath]map[objectpath.Path]unit) []protocol.Location + MethodSetsIndex() *methodsets.Index +} + +type unit = struct{} + +// A CriticalError is a workspace-wide error that generally prevents gopls from +// functioning correctly. In the presence of critical errors, other diagnostics +// in the workspace may not make sense. +type CriticalError struct { + // MainError is the primary error. Must be non-nil. + MainError error + + // Diagnostics contains any supplemental (structured) diagnostics. + Diagnostics []*Diagnostic +} + +// An Diagnostic corresponds to an LSP Diagnostic. +// https://microsoft.github.io/language-server-protocol/specification#diagnostic +type Diagnostic struct { + URI span.URI + Range protocol.Range + Severity protocol.DiagnosticSeverity + Code string + CodeHref string + + // Source is a human-readable description of the source of the error. + // Diagnostics generated by an analysis.Analyzer set it to Analyzer.Name. + Source DiagnosticSource + + Message string + + Tags []protocol.DiagnosticTag + Related []RelatedInformation + + // Fields below are used internally to generate quick fixes. They aren't + // part of the LSP spec and don't leave the server. + SuggestedFixes []SuggestedFix +} + +func (d *Diagnostic) String() string { + return fmt.Sprintf("%v: %s", d.Range, d.Message) +} + +type DiagnosticSource string + +const ( + UnknownError DiagnosticSource = "" + ListError DiagnosticSource = "go list" + ParseError DiagnosticSource = "syntax" + TypeError DiagnosticSource = "compiler" + ModTidyError DiagnosticSource = "go mod tidy" + OptimizationDetailsError DiagnosticSource = "optimizer details" + UpgradeNotification DiagnosticSource = "upgrade available" + Vulncheck DiagnosticSource = "vulncheck imports" + Govulncheck DiagnosticSource = "govulncheck" + TemplateError DiagnosticSource = "template" + WorkFileError DiagnosticSource = "go.work file" +) + +func AnalyzerErrorKind(name string) DiagnosticSource { + return DiagnosticSource(name) +} diff --git a/gopls/internal/lsp/source/workspace_symbol.go b/gopls/internal/lsp/source/workspace_symbol.go new file mode 100644 index 00000000000..1b157c6a51e --- /dev/null +++ b/gopls/internal/lsp/source/workspace_symbol.go @@ -0,0 +1,629 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/types" + "path" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + "unicode" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/fuzzy" +) + +// Symbol holds a precomputed symbol value. Note: we avoid using the +// protocol.SymbolInformation struct here in order to reduce the size of each +// symbol. +type Symbol struct { + Name string + Kind protocol.SymbolKind + Range protocol.Range +} + +// maxSymbols defines the maximum number of symbol results that should ever be +// sent in response to a client. +const maxSymbols = 100 + +// WorkspaceSymbols matches symbols across all views using the given query, +// according to the match semantics parameterized by matcherType and style. +// +// The workspace symbol method is defined in the spec as follows: +// +// The workspace symbol request is sent from the client to the server to +// list project-wide symbols matching the query string. +// +// It is unclear what "project-wide" means here, but given the parameters of +// workspace/symbol do not include any workspace identifier, then it has to be +// assumed that "project-wide" means "across all workspaces". Hence why +// WorkspaceSymbols receives the views []View. +// +// However, it then becomes unclear what it would mean to call WorkspaceSymbols +// with a different configured SymbolMatcher per View. Therefore we assume that +// Session level configuration will define the SymbolMatcher to be used for the +// WorkspaceSymbols method. +func WorkspaceSymbols(ctx context.Context, matcher SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) { + ctx, done := event.Start(ctx, "source.WorkspaceSymbols") + defer done() + if query == "" { + return nil, nil + } + + var s symbolizer + switch style { + case DynamicSymbols: + s = dynamicSymbolMatch + case FullyQualifiedSymbols: + s = fullyQualifiedSymbolMatch + case PackageQualifiedSymbols: + s = packageSymbolMatch + default: + panic(fmt.Errorf("unknown symbol style: %v", style)) + } + + return collectSymbols(ctx, views, matcher, s, query) +} + +// A matcherFunc returns the index and score of a symbol match. +// +// See the comment for symbolCollector for more information. +type matcherFunc func(chunks []string) (int, float64) + +// A symbolizer returns the best symbol match for a name with pkg, according to +// some heuristic. The symbol name is passed as the slice nameParts of logical +// name pieces. For example, for myType.field the caller can pass either +// []string{"myType.field"} or []string{"myType.", "field"}. +// +// See the comment for symbolCollector for more information. +// +// The space argument is an empty slice with spare capacity that may be used +// to allocate the result. +type symbolizer func(space []string, name string, pkg *Metadata, m matcherFunc) ([]string, float64) + +func fullyQualifiedSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) { + if _, score := dynamicSymbolMatch(space, name, pkg, matcher); score > 0 { + return append(space, string(pkg.PkgPath), ".", name), score + } + return nil, 0 +} + +func dynamicSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) { + if IsCommandLineArguments(pkg.ID) { + // command-line-arguments packages have a non-sensical package path, so + // just use their package name. + return packageSymbolMatch(space, name, pkg, matcher) + } + + var score float64 + + endsInPkgName := strings.HasSuffix(string(pkg.PkgPath), string(pkg.Name)) + + // If the package path does not end in the package name, we need to check the + // package-qualified symbol as an extra pass first. + if !endsInPkgName { + pkgQualified := append(space, string(pkg.Name), ".", name) + idx, score := matcher(pkgQualified) + nameStart := len(pkg.Name) + 1 + if score > 0 { + // If our match is contained entirely within the unqualified portion, + // just return that. + if idx >= nameStart { + return append(space, name), score + } + // Lower the score for matches that include the package name. + return pkgQualified, score * 0.8 + } + } + + // Now try matching the fully qualified symbol. + fullyQualified := append(space, string(pkg.PkgPath), ".", name) + idx, score := matcher(fullyQualified) + + // As above, check if we matched just the unqualified symbol name. + nameStart := len(pkg.PkgPath) + 1 + if idx >= nameStart { + return append(space, name), score + } + + // If our package path ends in the package name, we'll have skipped the + // initial pass above, so check if we matched just the package-qualified + // name. + if endsInPkgName && idx >= 0 { + pkgStart := len(pkg.PkgPath) - len(pkg.Name) + if idx >= pkgStart { + return append(space, string(pkg.Name), ".", name), score + } + } + + // Our match was not contained within the unqualified or package qualified + // symbol. Return the fully qualified symbol but discount the score. + return fullyQualified, score * 0.6 +} + +func packageSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) { + qualified := append(space, string(pkg.Name), ".", name) + if _, s := matcher(qualified); s > 0 { + return qualified, s + } + return nil, 0 +} + +func buildMatcher(matcher SymbolMatcher, query string) matcherFunc { + switch matcher { + case SymbolFuzzy: + return parseQuery(query, newFuzzyMatcher) + case SymbolFastFuzzy: + return parseQuery(query, func(query string) matcherFunc { + return fuzzy.NewSymbolMatcher(query).Match + }) + case SymbolCaseSensitive: + return matchExact(query) + case SymbolCaseInsensitive: + q := strings.ToLower(query) + exact := matchExact(q) + wrapper := []string{""} + return func(chunks []string) (int, float64) { + s := strings.Join(chunks, "") + wrapper[0] = strings.ToLower(s) + return exact(wrapper) + } + } + panic(fmt.Errorf("unknown symbol matcher: %v", matcher)) +} + +func newFuzzyMatcher(query string) matcherFunc { + fm := fuzzy.NewMatcher(query) + return func(chunks []string) (int, float64) { + score := float64(fm.ScoreChunks(chunks)) + ranges := fm.MatchedRanges() + if len(ranges) > 0 { + return ranges[0], score + } + return -1, score + } +} + +// parseQuery parses a field-separated symbol query, extracting the special +// characters listed below, and returns a matcherFunc corresponding to the AND +// of all field queries. +// +// Special characters: +// +// ^ match exact prefix +// $ match exact suffix +// ' match exact +// +// In all three of these special queries, matches are 'smart-cased', meaning +// they are case sensitive if the symbol query contains any upper-case +// characters, and case insensitive otherwise. +func parseQuery(q string, newMatcher func(string) matcherFunc) matcherFunc { + fields := strings.Fields(q) + if len(fields) == 0 { + return func([]string) (int, float64) { return -1, 0 } + } + var funcs []matcherFunc + for _, field := range fields { + var f matcherFunc + switch { + case strings.HasPrefix(field, "^"): + prefix := field[1:] + f = smartCase(prefix, func(chunks []string) (int, float64) { + s := strings.Join(chunks, "") + if strings.HasPrefix(s, prefix) { + return 0, 1 + } + return -1, 0 + }) + case strings.HasPrefix(field, "'"): + exact := field[1:] + f = smartCase(exact, matchExact(exact)) + case strings.HasSuffix(field, "$"): + suffix := field[0 : len(field)-1] + f = smartCase(suffix, func(chunks []string) (int, float64) { + s := strings.Join(chunks, "") + if strings.HasSuffix(s, suffix) { + return len(s) - len(suffix), 1 + } + return -1, 0 + }) + default: + f = newMatcher(field) + } + funcs = append(funcs, f) + } + if len(funcs) == 1 { + return funcs[0] + } + return comboMatcher(funcs).match +} + +func matchExact(exact string) matcherFunc { + return func(chunks []string) (int, float64) { + s := strings.Join(chunks, "") + if idx := strings.LastIndex(s, exact); idx >= 0 { + return idx, 1 + } + return -1, 0 + } +} + +// smartCase returns a matcherFunc that is case-sensitive if q contains any +// upper-case characters, and case-insensitive otherwise. +func smartCase(q string, m matcherFunc) matcherFunc { + insensitive := strings.ToLower(q) == q + wrapper := []string{""} + return func(chunks []string) (int, float64) { + s := strings.Join(chunks, "") + if insensitive { + s = strings.ToLower(s) + } + wrapper[0] = s + return m(wrapper) + } +} + +type comboMatcher []matcherFunc + +func (c comboMatcher) match(chunks []string) (int, float64) { + score := 1.0 + first := 0 + for _, f := range c { + idx, s := f(chunks) + if idx < first { + first = idx + } + score *= s + } + return first, score +} + +// collectSymbols calls snapshot.Symbols to walk the syntax trees of +// all files in the views' current snapshots, and returns a sorted, +// scored list of symbols that best match the parameters. +// +// How it matches symbols is parameterized by two interfaces: +// - A matcherFunc determines how well a string symbol matches a query. It +// returns a non-negative score indicating the quality of the match. A score +// of zero indicates no match. +// - A symbolizer determines how we extract the symbol for an object. This +// enables the 'symbolStyle' configuration option. +func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher, symbolizer symbolizer, query string) ([]protocol.SymbolInformation, error) { + + // Extract symbols from all files. + var work []symbolFile + var roots []string + seen := make(map[span.URI]bool) + // TODO(adonovan): opt: parallelize this loop? How often is len > 1? + for _, v := range views { + snapshot, release, err := v.Snapshot() + if err != nil { + continue // view is shut down; continue with others + } + defer release() + + // Use the root view URIs for determining (lexically) + // whether a URI is in any open workspace. + roots = append(roots, strings.TrimRight(string(v.Folder()), "/")) + + filters := v.Options().DirectoryFilters + filterer := NewFilterer(filters) + folder := filepath.ToSlash(v.Folder().Filename()) + for uri, syms := range snapshot.Symbols(ctx) { + norm := filepath.ToSlash(uri.Filename()) + nm := strings.TrimPrefix(norm, folder) + if filterer.Disallow(nm) { + continue + } + // Only scan each file once. + if seen[uri] { + continue + } + mds, err := snapshot.MetadataForFile(ctx, uri) + if err != nil { + event.Error(ctx, fmt.Sprintf("missing metadata for %q", uri), err) + continue + } + if len(mds) == 0 { + // TODO: should use the bug reporting API + continue + } + seen[uri] = true + work = append(work, symbolFile{uri, mds[0], syms}) + } + } + + // Match symbols in parallel. + // Each worker has its own symbolStore, + // which we merge at the end. + nmatchers := runtime.GOMAXPROCS(-1) // matching is CPU bound + results := make(chan *symbolStore) + for i := 0; i < nmatchers; i++ { + go func(i int) { + matcher := buildMatcher(matcherType, query) + store := new(symbolStore) + // Assign files to workers in round-robin fashion. + for j := i; j < len(work); j += nmatchers { + matchFile(store, symbolizer, matcher, roots, work[j]) + } + results <- store + }(i) + } + + // Gather and merge results as they arrive. + var unified symbolStore + for i := 0; i < nmatchers; i++ { + store := <-results + for _, syms := range store.res { + unified.store(syms) + } + } + return unified.results(), nil +} + +type Filterer struct { + // Whether a filter is excluded depends on the operator (first char of the raw filter). + // Slices filters and excluded then should have the same length. + filters []*regexp.Regexp + excluded []bool +} + +// NewFilterer computes regular expression form of all raw filters +func NewFilterer(rawFilters []string) *Filterer { + var f Filterer + for _, filter := range rawFilters { + filter = path.Clean(filepath.ToSlash(filter)) + // TODO(dungtuanle): fix: validate [+-] prefix. + op, prefix := filter[0], filter[1:] + // convertFilterToRegexp adds "/" at the end of prefix to handle cases where a filter is a prefix of another filter. + // For example, it prevents [+foobar, -foo] from excluding "foobar". + f.filters = append(f.filters, convertFilterToRegexp(filepath.ToSlash(prefix))) + f.excluded = append(f.excluded, op == '-') + } + + return &f +} + +// Disallow return true if the path is excluded from the filterer's filters. +func (f *Filterer) Disallow(path string) bool { + // Ensure trailing but not leading slash. + path = strings.TrimPrefix(path, "/") + if !strings.HasSuffix(path, "/") { + path += "/" + } + + // TODO(adonovan): opt: iterate in reverse and break at first match. + excluded := false + for i, filter := range f.filters { + if filter.MatchString(path) { + excluded = f.excluded[i] // last match wins + } + } + return excluded +} + +// convertFilterToRegexp replaces glob-like operator substrings in a string file path to their equivalent regex forms. +// Supporting glob-like operators: +// - **: match zero or more complete path segments +func convertFilterToRegexp(filter string) *regexp.Regexp { + if filter == "" { + return regexp.MustCompile(".*") + } + var ret strings.Builder + ret.WriteString("^") + segs := strings.Split(filter, "/") + for _, seg := range segs { + // Inv: seg != "" since path is clean. + if seg == "**" { + ret.WriteString(".*") + } else { + ret.WriteString(regexp.QuoteMeta(seg)) + } + ret.WriteString("/") + } + pattern := ret.String() + + // Remove unnecessary "^.*" prefix, which increased + // BenchmarkWorkspaceSymbols time by ~20% (even though + // filter CPU time increased by only by ~2.5%) when the + // default filter was changed to "**/node_modules". + pattern = strings.TrimPrefix(pattern, "^.*") + + return regexp.MustCompile(pattern) +} + +// symbolFile holds symbol information for a single file. +type symbolFile struct { + uri span.URI + md *Metadata + syms []Symbol +} + +// matchFile scans a symbol file and adds matching symbols to the store. +func matchFile(store *symbolStore, symbolizer symbolizer, matcher matcherFunc, roots []string, i symbolFile) { + space := make([]string, 0, 3) + for _, sym := range i.syms { + symbolParts, score := symbolizer(space, sym.Name, i.md, matcher) + + // Check if the score is too low before applying any downranking. + if store.tooLow(score) { + continue + } + + // Factors to apply to the match score for the purpose of downranking + // results. + // + // These numbers were crudely calibrated based on trial-and-error using a + // small number of sample queries. Adjust as necessary. + // + // All factors are multiplicative, meaning if more than one applies they are + // multiplied together. + const ( + // nonWorkspaceFactor is applied to symbols outside of any active + // workspace. Developers are less likely to want to jump to code that they + // are not actively working on. + nonWorkspaceFactor = 0.5 + // nonWorkspaceUnexportedFactor is applied to unexported symbols outside of + // any active workspace. Since one wouldn't usually jump to unexported + // symbols to understand a package API, they are particularly irrelevant. + nonWorkspaceUnexportedFactor = 0.5 + // every field or method nesting level to access the field decreases + // the score by a factor of 1.0 - depth*depthFactor, up to a depth of + // 3. + depthFactor = 0.2 + ) + + startWord := true + exported := true + depth := 0.0 + for _, r := range sym.Name { + if startWord && !unicode.IsUpper(r) { + exported = false + } + if r == '.' { + startWord = true + depth++ + } else { + startWord = false + } + } + + inWorkspace := false + for _, root := range roots { + if strings.HasPrefix(string(i.uri), root) { + inWorkspace = true + break + } + } + + // Apply downranking based on workspace position. + if !inWorkspace { + score *= nonWorkspaceFactor + if !exported { + score *= nonWorkspaceUnexportedFactor + } + } + + // Apply downranking based on symbol depth. + if depth > 3 { + depth = 3 + } + score *= 1.0 - depth*depthFactor + + if store.tooLow(score) { + continue + } + + si := symbolInformation{ + score: score, + symbol: strings.Join(symbolParts, ""), + kind: sym.Kind, + uri: i.uri, + rng: sym.Range, + container: string(i.md.PkgPath), + } + store.store(si) + } +} + +type symbolStore struct { + res [maxSymbols]symbolInformation +} + +// store inserts si into the sorted results, if si has a high enough score. +func (sc *symbolStore) store(si symbolInformation) { + if sc.tooLow(si.score) { + return + } + insertAt := sort.Search(len(sc.res), func(i int) bool { + // Sort by score, then symbol length, and finally lexically. + if sc.res[i].score != si.score { + return sc.res[i].score < si.score + } + if len(sc.res[i].symbol) != len(si.symbol) { + return len(sc.res[i].symbol) > len(si.symbol) + } + return sc.res[i].symbol > si.symbol + }) + if insertAt < len(sc.res)-1 { + copy(sc.res[insertAt+1:], sc.res[insertAt:len(sc.res)-1]) + } + sc.res[insertAt] = si +} + +func (sc *symbolStore) tooLow(score float64) bool { + return score <= sc.res[len(sc.res)-1].score +} + +func (sc *symbolStore) results() []protocol.SymbolInformation { + var res []protocol.SymbolInformation + for _, si := range sc.res { + if si.score <= 0 { + return res + } + res = append(res, si.asProtocolSymbolInformation()) + } + return res +} + +func typeToKind(typ types.Type) protocol.SymbolKind { + switch typ := typ.Underlying().(type) { + case *types.Interface: + return protocol.Interface + case *types.Struct: + return protocol.Struct + case *types.Signature: + if typ.Recv() != nil { + return protocol.Method + } + return protocol.Function + case *types.Named: + return typeToKind(typ.Underlying()) + case *types.Basic: + i := typ.Info() + switch { + case i&types.IsNumeric != 0: + return protocol.Number + case i&types.IsBoolean != 0: + return protocol.Boolean + case i&types.IsString != 0: + return protocol.String + } + } + return protocol.Variable +} + +// symbolInformation is a cut-down version of protocol.SymbolInformation that +// allows struct values of this type to be used as map keys. +type symbolInformation struct { + score float64 + symbol string + container string + kind protocol.SymbolKind + uri span.URI + rng protocol.Range +} + +// asProtocolSymbolInformation converts s to a protocol.SymbolInformation value. +// +// TODO: work out how to handle tags if/when they are needed. +func (s symbolInformation) asProtocolSymbolInformation() protocol.SymbolInformation { + return protocol.SymbolInformation{ + Name: s.symbol, + Kind: s.kind, + Location: protocol.Location{ + URI: protocol.URIFromSpanURI(s.uri), + Range: s.rng, + }, + ContainerName: s.container, + } +} diff --git a/gopls/internal/lsp/source/workspace_symbol_test.go b/gopls/internal/lsp/source/workspace_symbol_test.go new file mode 100644 index 00000000000..24fb8b45210 --- /dev/null +++ b/gopls/internal/lsp/source/workspace_symbol_test.go @@ -0,0 +1,136 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "testing" +) + +func TestParseQuery(t *testing.T) { + tests := []struct { + query, s string + wantMatch bool + }{ + {"", "anything", false}, + {"any", "anything", true}, + {"any$", "anything", false}, + {"ing$", "anything", true}, + {"ing$", "anythinG", true}, + {"inG$", "anything", false}, + {"^any", "anything", true}, + {"^any", "Anything", true}, + {"^Any", "anything", false}, + {"at", "anything", true}, + // TODO: this appears to be a bug in the fuzzy matching algorithm. 'At' + // should cause a case-sensitive match. + // {"At", "anything", false}, + {"At", "Anything", true}, + {"'yth", "Anything", true}, + {"'yti", "Anything", false}, + {"'any 'thing", "Anything", true}, + {"anythn nythg", "Anything", true}, + {"ntx", "Anything", false}, + {"anythn", "anything", true}, + {"ing", "anything", true}, + {"anythn nythgx", "anything", false}, + } + + for _, test := range tests { + matcher := parseQuery(test.query, newFuzzyMatcher) + if _, score := matcher([]string{test.s}); score > 0 != test.wantMatch { + t.Errorf("parseQuery(%q) match for %q: %.2g, want match: %t", test.query, test.s, score, test.wantMatch) + } + } +} + +func TestFiltererDisallow(t *testing.T) { + tests := []struct { + filters []string + included []string + excluded []string + }{ + { + []string{"+**/c.go"}, + []string{"a/c.go", "a/b/c.go"}, + []string{}, + }, + { + []string{"+a/**/c.go"}, + []string{"a/b/c.go", "a/b/d/c.go", "a/c.go"}, + []string{}, + }, + { + []string{"-a/c.go", "+a/**"}, + []string{"a/c.go"}, + []string{}, + }, + { + []string{"+a/**/c.go", "-**/c.go"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+a/**/c.go", "-a/**"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+**/c.go", "-a/**/c.go"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+foobar", "-foo"}, + []string{"foobar", "foobar/a"}, + []string{"foo", "foo/a"}, + }, + { + []string{"+", "-"}, + []string{}, + []string{"foobar", "foobar/a", "foo", "foo/a"}, + }, + { + []string{"-", "+"}, + []string{"foobar", "foobar/a", "foo", "foo/a"}, + []string{}, + }, + { + []string{"-a/**/b/**/c.go"}, + []string{}, + []string{"a/x/y/z/b/f/g/h/c.go"}, + }, + // tests for unsupported glob operators + { + []string{"+**/c.go", "-a/*/c.go"}, + []string{"a/b/c.go"}, + []string{}, + }, + { + []string{"+**/c.go", "-a/?/c.go"}, + []string{"a/b/c.go"}, + []string{}, + }, + { + []string{"-b"}, // should only filter paths prefixed with the "b" directory + []string{"a/b/c.go", "bb"}, + []string{"b/c/d.go", "b"}, + }, + } + + for _, test := range tests { + filterer := NewFilterer(test.filters) + for _, inc := range test.included { + if filterer.Disallow(inc) { + t.Errorf("Filters %v excluded %v, wanted included", test.filters, inc) + } + } + + for _, exc := range test.excluded { + if !filterer.Disallow(exc) { + t.Errorf("Filters %v included %v, wanted excluded", test.filters, exc) + } + } + } +} diff --git a/gopls/internal/lsp/source/xrefs/xrefs.go b/gopls/internal/lsp/source/xrefs/xrefs.go new file mode 100644 index 00000000000..6a8b391e911 --- /dev/null +++ b/gopls/internal/lsp/source/xrefs/xrefs.go @@ -0,0 +1,213 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xrefs defines the serializable index of cross-package +// references that is computed during type checking. +// +// See ../references2.go for the 'references' query. +package xrefs + +import ( + "bytes" + "encoding/gob" + "go/ast" + "go/types" + "log" + "sort" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +// Index constructs a serializable index of outbound cross-references +// for the specified type-checked package. +func Index(files []*source.ParsedGoFile, pkg *types.Package, info *types.Info) []byte { + // pkgObjects maps each referenced package Q to a mapping: + // from each referenced symbol in Q to the ordered list + // of references to that symbol from this package. + // A nil types.Object indicates a reference + // to the package as a whole: an import. + pkgObjects := make(map[*types.Package]map[types.Object]*gobObject) + + // getObjects returns the object-to-references mapping for a package. + getObjects := func(pkg *types.Package) map[types.Object]*gobObject { + objects, ok := pkgObjects[pkg] + if !ok { + objects = make(map[types.Object]*gobObject) + pkgObjects[pkg] = objects + } + return objects + } + + for fileIndex, pgf := range files { + + nodeRange := func(n ast.Node) protocol.Range { + rng, err := pgf.PosRange(n.Pos(), n.End()) + if err != nil { + panic(err) // can't fail + } + return rng + } + + ast.Inspect(pgf.File, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.Ident: + // Report a reference for each identifier that + // uses a symbol exported from another package. + // (The built-in error.Error method has no package.) + if n.IsExported() { + if obj, ok := info.Uses[n]; ok && + obj.Pkg() != nil && + obj.Pkg() != pkg { + + objects := getObjects(obj.Pkg()) + gobObj, ok := objects[obj] + if !ok { + path, err := objectpath.For(obj) + if err != nil { + // Capitalized but not exported + // (e.g. local const/var/type). + return true + } + gobObj = &gobObject{Path: path} + objects[obj] = gobObj + } + + gobObj.Refs = append(gobObj.Refs, gobRef{ + FileIndex: fileIndex, + Range: nodeRange(n), + }) + } + } + + case *ast.ImportSpec: + // Report a reference from each import path + // string to the imported package. + var obj types.Object + if n.Name != nil { + obj = info.Defs[n.Name] + } else { + obj = info.Implicits[n] + } + if obj == nil { + return true // missing import + } + objects := getObjects(obj.(*types.PkgName).Imported()) + gobObj, ok := objects[nil] + if !ok { + gobObj = &gobObject{Path: ""} + objects[nil] = gobObj + } + gobObj.Refs = append(gobObj.Refs, gobRef{ + FileIndex: fileIndex, + Range: nodeRange(n.Path), + }) + } + return true + }) + } + + // Flatten the maps into slices, and sort for determinism. + var packages []*gobPackage + for p := range pkgObjects { + objects := pkgObjects[p] + gp := &gobPackage{ + PkgPath: source.PackagePath(p.Path()), + Objects: make([]*gobObject, 0, len(objects)), + } + for _, gobObj := range objects { + gp.Objects = append(gp.Objects, gobObj) + } + sort.Slice(gp.Objects, func(i, j int) bool { + return gp.Objects[i].Path < gp.Objects[j].Path + }) + packages = append(packages, gp) + } + sort.Slice(packages, func(i, j int) bool { + return packages[i].PkgPath < packages[j].PkgPath + }) + + return mustEncode(packages) +} + +// Lookup searches a serialized index produced by an indexPackage +// operation on m, and returns the locations of all references from m +// to any object in the target set. Each object is denoted by a pair +// of (package path, object path). +func Lookup(m *source.Metadata, data []byte, targets map[source.PackagePath]map[objectpath.Path]struct{}) (locs []protocol.Location) { + + // TODO(adonovan): opt: evaluate whether it would be faster to decode + // in two passes, first with struct { PkgPath string; Objects BLOB } + // to find the relevant record without decoding the Objects slice, + // then decode just the desired BLOB into a slice. BLOB would be a + // type whose Unmarshal method just retains (a copy of) the bytes. + var packages []gobPackage + mustDecode(data, &packages) + + for _, gp := range packages { + if objectSet, ok := targets[gp.PkgPath]; ok { + for _, gobObj := range gp.Objects { + if _, ok := objectSet[gobObj.Path]; ok { + for _, ref := range gobObj.Refs { + uri := m.CompiledGoFiles[ref.FileIndex] + locs = append(locs, protocol.Location{ + URI: protocol.URIFromSpanURI(uri), + Range: ref.Range, + }) + } + } + } + } + } + + return locs +} + +// -- serialized representation -- + +// The cross-reference index records the location of all references +// from one package to symbols defined in other packages +// (dependencies). It does not record within-package references. +// The index for package P consists of a list of gopPackage records, +// each enumerating references to symbols defined a single dependency, Q. + +// TODO(adonovan): opt: choose a more compact encoding. Gzip reduces +// the gob output to about one third its size, so clearly there's room +// to improve. The gobRef.Range field is the obvious place to begin. +// Even a zero-length slice gob-encodes to ~285 bytes. + +// A gobPackage records the set of outgoing references from the index +// package to symbols defined in a dependency package. +type gobPackage struct { + PkgPath source.PackagePath // defining package (Q) + Objects []*gobObject // set of Q objects referenced by P +} + +// A gobObject records all references to a particular symbol. +type gobObject struct { + Path objectpath.Path // symbol name within package; "" => import of package itself + Refs []gobRef // locations of references within P, in lexical order +} + +type gobRef struct { + FileIndex int // index of enclosing file within P's CompiledGoFiles + Range protocol.Range // source range of reference +} + +// -- duplicated from ../../cache/analysis.go -- + +func mustEncode(x interface{}) []byte { + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(x); err != nil { + log.Fatalf("internal error encoding %T: %v", x, err) + } + return buf.Bytes() +} + +func mustDecode(data []byte, ptr interface{}) { + if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil { + log.Fatalf("internal error decoding %T: %v", ptr, err) + } +} diff --git a/gopls/internal/lsp/symbols.go b/gopls/internal/lsp/symbols.go new file mode 100644 index 00000000000..40fa0d05e11 --- /dev/null +++ b/gopls/internal/lsp/symbols.go @@ -0,0 +1,60 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/template" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" +) + +func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) { + ctx, done := event.Start(ctx, "lsp.Server.documentSymbol") + defer done() + + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) + defer release() + if !ok { + return []interface{}{}, err + } + var docSymbols []protocol.DocumentSymbol + switch snapshot.View().FileKind(fh) { + case source.Tmpl: + docSymbols, err = template.DocumentSymbols(snapshot, fh) + case source.Go: + docSymbols, err = source.DocumentSymbols(ctx, snapshot, fh) + default: + return []interface{}{}, nil + } + if err != nil { + event.Error(ctx, "DocumentSymbols failed", err, tag.URI.Of(fh.URI())) + return []interface{}{}, nil + } + // Convert the symbols to an interface array. + // TODO: Remove this once the lsp deprecates SymbolInformation. + symbols := make([]interface{}, len(docSymbols)) + for i, s := range docSymbols { + if snapshot.View().Options().HierarchicalDocumentSymbolSupport { + symbols[i] = s + continue + } + // If the client does not support hierarchical document symbols, then + // we need to be backwards compatible for now and return SymbolInformation. + symbols[i] = protocol.SymbolInformation{ + Name: s.Name, + Kind: s.Kind, + Deprecated: s.Deprecated, + Location: protocol.Location{ + URI: params.TextDocument.URI, + Range: s.Range, + }, + } + } + return symbols, nil +} diff --git a/gopls/internal/lsp/template/completion.go b/gopls/internal/lsp/template/completion.go new file mode 100644 index 00000000000..292563a88cd --- /dev/null +++ b/gopls/internal/lsp/template/completion.go @@ -0,0 +1,287 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "context" + "fmt" + "go/scanner" + "go/token" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +// information needed for completion +type completer struct { + p *Parsed + pos protocol.Position + offset int // offset of the start of the Token + ctx protocol.CompletionContext + syms map[string]symbol +} + +func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, pos protocol.Position, context protocol.CompletionContext) (*protocol.CompletionList, error) { + all := New(snapshot.Templates()) + var start int // the beginning of the Token (completed or not) + syms := make(map[string]symbol) + var p *Parsed + for fn, fc := range all.files { + // collect symbols from all template files + filterSyms(syms, fc.symbols) + if fn.Filename() != fh.URI().Filename() { + continue + } + if start = inTemplate(fc, pos); start == -1 { + return nil, nil + } + p = fc + } + if p == nil { + // this cannot happen unless the search missed a template file + return nil, fmt.Errorf("%s not found", fh.FileIdentity().URI.Filename()) + } + c := completer{ + p: p, + pos: pos, + offset: start + len(Left), + ctx: context, + syms: syms, + } + return c.complete() +} + +func filterSyms(syms map[string]symbol, ns []symbol) { + for _, xsym := range ns { + switch xsym.kind { + case protocol.Method, protocol.Package, protocol.Boolean, protocol.Namespace, + protocol.Function: + syms[xsym.name] = xsym // we don't care which symbol we get + case protocol.Variable: + if xsym.name != "dot" { + syms[xsym.name] = xsym + } + case protocol.Constant: + if xsym.name == "nil" { + syms[xsym.name] = xsym + } + } + } +} + +// return the starting position of the enclosing token, or -1 if none +func inTemplate(fc *Parsed, pos protocol.Position) int { + // pos is the pos-th character. if the cursor is at the beginning + // of the file, pos is 0. That is, we've only seen characters before pos + // 1. pos might be in a Token, return tk.Start + // 2. pos might be after an elided but before a Token, return elided + // 3. return -1 for false + offset := fc.FromPosition(pos) + // this could be a binary search, as the tokens are ordered + for _, tk := range fc.tokens { + if tk.Start < offset && offset <= tk.End { + return tk.Start + } + } + for _, x := range fc.elided { + if x > offset { + // fc.elided is sorted + break + } + // If the interval [x,offset] does not contain Left or Right + // then provide completions. (do we need the test for Right?) + if !bytes.Contains(fc.buf[x:offset], []byte(Left)) && !bytes.Contains(fc.buf[x:offset], []byte(Right)) { + return x + } + } + return -1 +} + +var ( + keywords = []string{"if", "with", "else", "block", "range", "template", "end}}", "end"} + globals = []string{"and", "call", "html", "index", "slice", "js", "len", "not", "or", + "urlquery", "printf", "println", "print", "eq", "ne", "le", "lt", "ge", "gt"} +) + +// find the completions. start is the offset of either the Token enclosing pos, or where +// the incomplete token starts. +// The error return is always nil. +func (c *completer) complete() (*protocol.CompletionList, error) { + ans := &protocol.CompletionList{IsIncomplete: true, Items: []protocol.CompletionItem{}} + start := c.p.FromPosition(c.pos) + sofar := c.p.buf[c.offset:start] + if len(sofar) == 0 || sofar[len(sofar)-1] == ' ' || sofar[len(sofar)-1] == '\t' { + return ans, nil + } + // sofar could be parsed by either c.analyzer() or scan(). The latter is precise + // and slower, but fast enough + words := scan(sofar) + // 1. if pattern starts $, show variables + // 2. if pattern starts ., show methods (and . by itself?) + // 3. if len(words) == 1, show firstWords (but if it were a |, show functions and globals) + // 4. ...? (parenthetical expressions, arguments, ...) (packages, namespaces, nil?) + if len(words) == 0 { + return nil, nil // if this happens, why were we called? + } + pattern := string(words[len(words)-1]) + if pattern[0] == '$' { + // should we also return a raw "$"? + for _, s := range c.syms { + if s.kind == protocol.Variable && weakMatch(s.name, pattern) > 0 { + ans.Items = append(ans.Items, protocol.CompletionItem{ + Label: s.name, + Kind: protocol.VariableCompletion, + Detail: "Variable", + }) + } + } + return ans, nil + } + if pattern[0] == '.' { + for _, s := range c.syms { + if s.kind == protocol.Method && weakMatch("."+s.name, pattern) > 0 { + ans.Items = append(ans.Items, protocol.CompletionItem{ + Label: s.name, + Kind: protocol.MethodCompletion, + Detail: "Method/member", + }) + } + } + return ans, nil + } + // could we get completion attempts in strings or numbers, and if so, do we care? + // globals + for _, kw := range globals { + if weakMatch(kw, string(pattern)) != 0 { + ans.Items = append(ans.Items, protocol.CompletionItem{ + Label: kw, + Kind: protocol.KeywordCompletion, + Detail: "Function", + }) + } + } + // and functions + for _, s := range c.syms { + if s.kind == protocol.Function && weakMatch(s.name, pattern) != 0 { + ans.Items = append(ans.Items, protocol.CompletionItem{ + Label: s.name, + Kind: protocol.FunctionCompletion, + Detail: "Function", + }) + } + } + // keywords if we're at the beginning + if len(words) <= 1 || len(words[len(words)-2]) == 1 && words[len(words)-2][0] == '|' { + for _, kw := range keywords { + if weakMatch(kw, string(pattern)) != 0 { + ans.Items = append(ans.Items, protocol.CompletionItem{ + Label: kw, + Kind: protocol.KeywordCompletion, + Detail: "keyword", + }) + } + } + } + return ans, nil +} + +// someday think about comments, strings, backslashes, etc +// this would repeat some of the template parsing, but because the user is typing +// there may be no parse tree here. +// (go/scanner will report 2 tokens for $a, as $ is not a legal go identifier character) +// (go/scanner is about 2.7 times more expensive) +func (c *completer) analyze(buf []byte) [][]byte { + // we want to split on whitespace and before dots + var working []byte + var ans [][]byte + for _, ch := range buf { + if ch == '.' && len(working) > 0 { + ans = append(ans, working) + working = []byte{'.'} + continue + } + if ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' { + if len(working) > 0 { + ans = append(ans, working) + working = []byte{} + continue + } + } + working = append(working, ch) + } + if len(working) > 0 { + ans = append(ans, working) + } + ch := buf[len(buf)-1] + if ch == ' ' || ch == '\t' { + // avoid completing on whitespace + ans = append(ans, []byte{ch}) + } + return ans +} + +// version of c.analyze that uses go/scanner. +func scan(buf []byte) []string { + fset := token.NewFileSet() + fp := fset.AddFile("", -1, len(buf)) + var sc scanner.Scanner + sc.Init(fp, buf, func(pos token.Position, msg string) {}, scanner.ScanComments) + ans := make([]string, 0, 10) // preallocating gives a measurable savings + for { + _, tok, lit := sc.Scan() // tok is an int + if tok == token.EOF { + break // done + } else if tok == token.SEMICOLON && lit == "\n" { + continue // don't care, but probably can't happen + } else if tok == token.PERIOD { + ans = append(ans, ".") // lit is empty + } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "." { + ans[len(ans)-1] = "." + lit + } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "$" { + ans[len(ans)-1] = "$" + lit + } else if lit != "" { + ans = append(ans, lit) + } + } + return ans +} + +// pattern is what the user has typed +func weakMatch(choice, pattern string) float64 { + lower := strings.ToLower(choice) + // for now, use only lower-case everywhere + pattern = strings.ToLower(pattern) + // The first char has to match + if pattern[0] != lower[0] { + return 0 + } + // If they start with ., then the second char has to match + from := 1 + if pattern[0] == '.' { + if len(pattern) < 2 { + return 1 // pattern just a ., so it matches + } + if pattern[1] != lower[1] { + return 0 + } + from = 2 + } + // check that all the characters of pattern occur as a subsequence of choice + i, j := from, from + for ; i < len(lower) && j < len(pattern); j++ { + if pattern[j] == lower[i] { + i++ + if i >= len(lower) { + return 0 + } + } + } + if j < len(pattern) { + return 0 + } + return 1 +} diff --git a/gopls/internal/lsp/template/completion_test.go b/gopls/internal/lsp/template/completion_test.go new file mode 100644 index 00000000000..0fc478842ee --- /dev/null +++ b/gopls/internal/lsp/template/completion_test.go @@ -0,0 +1,102 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "log" + "sort" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +func init() { + log.SetFlags(log.Lshortfile) +} + +type tparse struct { + marked string // ^ shows where to ask for completions. (The user just typed the following character.) + wanted []string // expected completions +} + +// Test completions in templates that parse enough (if completion needs symbols) +// Seen characters up to the ^ +func TestParsed(t *testing.T) { + var tests = []tparse{ + {"{{x}}{{12. xx^", nil}, // https://github.com/golang/go/issues/50430 + {``, nil}, + {"{{i^f}}", []string{"index", "if"}}, + {"{{if .}}{{e^ {{end}}", []string{"eq", "end}}", "else", "end"}}, + {"{{foo}}{{f^", []string{"foo"}}, + {"{{$^}}", []string{"$"}}, + {"{{$x:=4}}{{$^", []string{"$x"}}, + {"{{$x:=4}}{{$ ^ ", []string{}}, + {"{{len .Modified}}{{.^Mo", []string{"Modified"}}, + {"{{len .Modified}}{{.mf^", []string{"Modified"}}, + {"{{$^ }}", []string{"$"}}, + {"{{$a =3}}{{$^", []string{"$a"}}, + // .two is not good here: fix someday + {`{{.Modified}}{{.^{{if $.one.two}}xxx{{end}}`, []string{"Modified", "one", "two"}}, + {`{{.Modified}}{{.o^{{if $.one.two}}xxx{{end}}`, []string{"one"}}, + {"{{.Modiifed}}{{.one.t^{{if $.one.two}}xxx{{end}}", []string{"two"}}, + {`{{block "foo" .}}{{i^`, []string{"index", "if"}}, + {"{{in^{{Internal}}", []string{"index", "Internal", "if"}}, + // simple number has no completions + {"{{4^e", []string{}}, + // simple string has no completions + {"{{`e^", []string{}}, + {"{{`No i^", []string{}}, // example of why go/scanner is used + {"{{xavier}}{{12. x^", []string{"xavier"}}, + } + for _, tx := range tests { + c := testCompleter(t, tx) + var v []string + if c != nil { + ans, _ := c.complete() + for _, a := range ans.Items { + v = append(v, a.Label) + } + } + if len(v) != len(tx.wanted) { + t.Errorf("%q: got %q, wanted %q %d,%d", tx.marked, v, tx.wanted, len(v), len(tx.wanted)) + continue + } + sort.Strings(tx.wanted) + sort.Strings(v) + for i := 0; i < len(v); i++ { + if tx.wanted[i] != v[i] { + t.Errorf("%q at %d: got %v, wanted %v", tx.marked, i, v, tx.wanted) + break + } + } + } +} + +func testCompleter(t *testing.T, tx tparse) *completer { + t.Helper() + // seen chars up to ^ + col := strings.Index(tx.marked, "^") + buf := strings.Replace(tx.marked, "^", "", 1) + p := parseBuffer([]byte(buf)) + pos := protocol.Position{Line: 0, Character: uint32(col)} + if p.ParseErr != nil { + log.Printf("%q: %v", tx.marked, p.ParseErr) + } + offset := inTemplate(p, pos) + if offset == -1 { + return nil + } + syms := make(map[string]symbol) + filterSyms(syms, p.symbols) + c := &completer{ + p: p, + pos: protocol.Position{Line: 0, Character: uint32(col)}, + offset: offset + len(Left), + ctx: protocol.CompletionContext{TriggerKind: protocol.Invoked}, + syms: syms, + } + return c +} diff --git a/gopls/internal/lsp/template/highlight.go b/gopls/internal/lsp/template/highlight.go new file mode 100644 index 00000000000..1e06b92085e --- /dev/null +++ b/gopls/internal/lsp/template/highlight.go @@ -0,0 +1,96 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "context" + "fmt" + "regexp" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +func Highlight(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.DocumentHighlight, error) { + buf, err := fh.Read() + if err != nil { + return nil, err + } + p := parseBuffer(buf) + pos := p.FromPosition(loc) + var ans []protocol.DocumentHighlight + if p.ParseErr == nil { + for _, s := range p.symbols { + if s.start <= pos && pos < s.start+s.length { + return markSymbols(p, s) + } + } + } + // these tokens exist whether or not there was a parse error + // (symbols require a successful parse) + for _, tok := range p.tokens { + if tok.Start <= pos && pos < tok.End { + wordAt := findWordAt(p, pos) + if len(wordAt) > 0 { + return markWordInToken(p, wordAt) + } + } + } + // find the 'word' at pos, etc: someday + // until then we get the default action, which doesn't respect word boundaries + return ans, nil +} + +func markSymbols(p *Parsed, sym symbol) ([]protocol.DocumentHighlight, error) { + var ans []protocol.DocumentHighlight + for _, s := range p.symbols { + if s.name == sym.name { + kind := protocol.Read + if s.vardef { + kind = protocol.Write + } + ans = append(ans, protocol.DocumentHighlight{ + Range: p.Range(s.start, s.length), + Kind: kind, + }) + } + } + return ans, nil +} + +// A token is {{...}}, and this marks words in the token that equal the give word +func markWordInToken(p *Parsed, wordAt string) ([]protocol.DocumentHighlight, error) { + var ans []protocol.DocumentHighlight + pat, err := regexp.Compile(fmt.Sprintf(`\b%s\b`, wordAt)) + if err != nil { + return nil, fmt.Errorf("%q: unmatchable word (%v)", wordAt, err) + } + for _, tok := range p.tokens { + got := pat.FindAllIndex(p.buf[tok.Start:tok.End], -1) + for i := 0; i < len(got); i++ { + ans = append(ans, protocol.DocumentHighlight{ + Range: p.Range(got[i][0], got[i][1]-got[i][0]), + Kind: protocol.Text, + }) + } + } + return ans, nil +} + +var wordRe = regexp.MustCompile(`[$]?\w+$`) +var moreRe = regexp.MustCompile(`^[$]?\w+`) + +// findWordAt finds the word the cursor is in (meaning in or just before) +func findWordAt(p *Parsed, pos int) string { + if pos >= len(p.buf) { + return "" // can't happen, as we are called with pos < tok.End + } + after := moreRe.Find(p.buf[pos:]) + if len(after) == 0 { + return "" // end of the word + } + got := wordRe.Find(p.buf[:pos+len(after)]) + return string(got) +} diff --git a/internal/lsp/template/implementations.go b/gopls/internal/lsp/template/implementations.go similarity index 94% rename from internal/lsp/template/implementations.go rename to gopls/internal/lsp/template/implementations.go index cda3e7ef0a5..ed9b986a76b 100644 --- a/internal/lsp/template/implementations.go +++ b/gopls/internal/lsp/template/implementations.go @@ -11,9 +11,9 @@ import ( "strconv" "time" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" ) // line number (1-based) and message @@ -22,7 +22,7 @@ var errRe = regexp.MustCompile(`template.*:(\d+): (.*)`) // Diagnose returns parse errors. There is only one. // The errors are not always helpful. For instance { {end}} // will likely point to the end of the file. -func Diagnose(f source.VersionedFileHandle) []*source.Diagnostic { +func Diagnose(f source.FileHandle) []*source.Diagnostic { // no need for skipTemplate check, as Diagnose is called on the // snapshot's template files buf, err := f.Read() @@ -73,7 +73,7 @@ func Diagnose(f source.VersionedFileHandle) []*source.Diagnostic { // does not understand scoping (if any) in templates. This code is // for definitions, type definitions, and implementations. // Results only for variables and templates. -func Definition(snapshot source.Snapshot, fh source.VersionedFileHandle, loc protocol.Position) ([]protocol.Location, error) { +func Definition(snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.Location, error) { x, _, err := symAtPosition(fh, loc) if err != nil { return nil, err diff --git a/gopls/internal/lsp/template/parse.go b/gopls/internal/lsp/template/parse.go new file mode 100644 index 00000000000..a6befdcb928 --- /dev/null +++ b/gopls/internal/lsp/template/parse.go @@ -0,0 +1,508 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package template contains code for dealing with templates +package template + +// template files are small enough that the code reprocesses them each time +// this may be a bad choice for projects with lots of template files. + +// This file contains the parsing code, some debugging printing, and +// implementations for Diagnose, Definition, Hover, References + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "regexp" + "runtime" + "sort" + "text/template" + "text/template/parse" + "unicode/utf8" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" +) + +var ( + Left = []byte("{{") + Right = []byte("}}") +) + +type Parsed struct { + buf []byte //contents + lines [][]byte // needed?, other than for debugging? + elided []int // offsets where Left was replaced by blanks + + // tokens are matched Left-Right pairs, computed before trying to parse + tokens []Token + + // result of parsing + named []*template.Template // the template and embedded templates + ParseErr error + symbols []symbol + stack []parse.Node // used while computing symbols + + // for mapping from offsets in buf to LSP coordinates + // See FromPosition() and LineCol() + nls []int // offset of newlines before each line (nls[0]==-1) + lastnl int // last line seen + check int // used to decide whether to use lastnl or search through nls + nonASCII bool // are there any non-ascii runes in buf? +} + +// Token is a single {{...}}. More precisely, Left...Right +type Token struct { + Start, End int // offset from start of template + Multiline bool +} + +// All contains the Parse of all the template files +type All struct { + files map[span.URI]*Parsed +} + +// New returns the Parses of the snapshot's tmpl files +// (maybe cache these, but then avoiding import cycles needs code rearrangements) +func New(tmpls map[span.URI]source.FileHandle) *All { + all := make(map[span.URI]*Parsed) + for k, v := range tmpls { + buf, err := v.Read() + if err != nil { // PJW: decide what to do with these errors + log.Printf("failed to read %s (%v)", v.URI().Filename(), err) + continue + } + all[k] = parseBuffer(buf) + } + return &All{files: all} +} + +func parseBuffer(buf []byte) *Parsed { + ans := &Parsed{ + buf: buf, + check: -1, + nls: []int{-1}, + } + if len(buf) == 0 { + return ans + } + // how to compute allAscii... + for _, b := range buf { + if b >= utf8.RuneSelf { + ans.nonASCII = true + break + } + } + if buf[len(buf)-1] != '\n' { + ans.buf = append(buf, '\n') + } + for i, p := range ans.buf { + if p == '\n' { + ans.nls = append(ans.nls, i) + } + } + ans.setTokens() // ans.buf may be a new []byte + ans.lines = bytes.Split(ans.buf, []byte{'\n'}) + t, err := template.New("").Parse(string(ans.buf)) + if err != nil { + funcs := make(template.FuncMap) + for t == nil && ans.ParseErr == nil { + // in 1.17 it may be possible to avoid getting this error + // template: :2: function "foo" not defined + matches := parseErrR.FindStringSubmatch(err.Error()) + if len(matches) == 2 { + // suppress the error by giving it a function with the right name + funcs[matches[1]] = func() interface{} { return nil } + t, err = template.New("").Funcs(funcs).Parse(string(ans.buf)) + continue + } + ans.ParseErr = err // unfixed error + return ans + } + } + ans.named = t.Templates() + // set the symbols + for _, t := range ans.named { + ans.stack = append(ans.stack, t.Root) + ans.findSymbols() + if t.Name() != "" { + // defining a template. The pos is just after {{define...}} (or {{block...}}?) + at, sz := ans.FindLiteralBefore(int(t.Root.Pos)) + s := symbol{start: at, length: sz, name: t.Name(), kind: protocol.Namespace, vardef: true} + ans.symbols = append(ans.symbols, s) + } + } + + sort.Slice(ans.symbols, func(i, j int) bool { + left, right := ans.symbols[i], ans.symbols[j] + if left.start != right.start { + return left.start < right.start + } + if left.vardef != right.vardef { + return left.vardef + } + return left.kind < right.kind + }) + return ans +} + +// FindLiteralBefore locates the first preceding string literal +// returning its position and length in buf +// or returns -1 if there is none. +// Assume double-quoted string rather than backquoted string for now. +func (p *Parsed) FindLiteralBefore(pos int) (int, int) { + left, right := -1, -1 + for i := pos - 1; i >= 0; i-- { + if p.buf[i] != '"' { + continue + } + if right == -1 { + right = i + continue + } + left = i + break + } + if left == -1 { + return -1, 0 + } + return left + 1, right - left - 1 +} + +var ( + parseErrR = regexp.MustCompile(`template:.*function "([^"]+)" not defined`) +) + +func (p *Parsed) setTokens() { + const ( + // InRaw and InString only occur inside an action (SeenLeft) + Start = iota + InRaw + InString + SeenLeft + ) + state := Start + var left, oldState int + for n := 0; n < len(p.buf); n++ { + c := p.buf[n] + switch state { + case InRaw: + if c == '`' { + state = oldState + } + case InString: + if c == '"' && !isEscaped(p.buf[:n]) { + state = oldState + } + case SeenLeft: + if c == '`' { + oldState = state // it's SeenLeft, but a little clearer this way + state = InRaw + continue + } + if c == '"' { + oldState = state + state = InString + continue + } + if bytes.HasPrefix(p.buf[n:], Right) { + right := n + len(Right) + tok := Token{Start: left, + End: right, + Multiline: bytes.Contains(p.buf[left:right], []byte{'\n'}), + } + p.tokens = append(p.tokens, tok) + state = Start + } + // If we see (unquoted) Left then the original left is probably the user + // typing. Suppress the original left + if bytes.HasPrefix(p.buf[n:], Left) { + p.elideAt(left) + left = n + n += len(Left) - 1 // skip the rest + } + case Start: + if bytes.HasPrefix(p.buf[n:], Left) { + left = n + state = SeenLeft + n += len(Left) - 1 // skip the rest (avoids {{{ bug) + } + } + } + // this error occurs after typing {{ at the end of the file + if state != Start { + // Unclosed Left. remove the Left at left + p.elideAt(left) + } +} + +func (p *Parsed) elideAt(left int) { + if p.elided == nil { + // p.buf is the same buffer that v.Read() returns, so copy it. + // (otherwise the next time it's parsed, elided information is lost) + b := make([]byte, len(p.buf)) + copy(b, p.buf) + p.buf = b + } + for i := 0; i < len(Left); i++ { + p.buf[left+i] = ' ' + } + p.elided = append(p.elided, left) +} + +// isEscaped reports whether the byte after buf is escaped +func isEscaped(buf []byte) bool { + backSlashes := 0 + for j := len(buf) - 1; j >= 0 && buf[j] == '\\'; j-- { + backSlashes++ + } + return backSlashes%2 == 1 +} + +func (p *Parsed) Tokens() []Token { + return p.tokens +} + +// TODO(adonovan): the next 100 lines could perhaps replaced by use of protocol.Mapper. + +func (p *Parsed) utf16len(buf []byte) int { + cnt := 0 + if !p.nonASCII { + return len(buf) + } + // we need a utf16len(rune), but we don't have it + for _, r := range string(buf) { + cnt++ + if r >= 1<<16 { + cnt++ + } + } + return cnt +} + +func (p *Parsed) TokenSize(t Token) (int, error) { + if t.Multiline { + return -1, fmt.Errorf("TokenSize called with Multiline token %#v", t) + } + ans := p.utf16len(p.buf[t.Start:t.End]) + return ans, nil +} + +// RuneCount counts runes in line l, from col s to e +// (e==0 for end of line. called only for multiline tokens) +func (p *Parsed) RuneCount(l, s, e uint32) uint32 { + start := p.nls[l] + 1 + int(s) + end := p.nls[l] + 1 + int(e) + if e == 0 || end > p.nls[l+1] { + end = p.nls[l+1] + } + return uint32(utf8.RuneCount(p.buf[start:end])) +} + +// LineCol converts from a 0-based byte offset to 0-based line, col. col in runes +func (p *Parsed) LineCol(x int) (uint32, uint32) { + if x < p.check { + p.lastnl = 0 + } + p.check = x + for i := p.lastnl; i < len(p.nls); i++ { + if p.nls[i] <= x { + continue + } + p.lastnl = i + var count int + if i > 0 && x == p.nls[i-1] { // \n + count = 0 + } else { + count = p.utf16len(p.buf[p.nls[i-1]+1 : x]) + } + return uint32(i - 1), uint32(count) + } + if x == len(p.buf)-1 { // trailing \n + return uint32(len(p.nls) - 1), 0 + } + // shouldn't happen + for i := 1; i < 4; i++ { + _, f, l, ok := runtime.Caller(i) + if !ok { + break + } + log.Printf("%d: %s:%d", i, f, l) + } + + msg := fmt.Errorf("LineCol off the end, %d of %d, nls=%v, %q", x, len(p.buf), p.nls, p.buf[x:]) + event.Error(context.Background(), "internal error", msg) + return 0, 0 +} + +// Position produces a protocol.Position from an offset in the template +func (p *Parsed) Position(pos int) protocol.Position { + line, col := p.LineCol(pos) + return protocol.Position{Line: line, Character: col} +} + +func (p *Parsed) Range(x, length int) protocol.Range { + line, col := p.LineCol(x) + ans := protocol.Range{ + Start: protocol.Position{Line: line, Character: col}, + End: protocol.Position{Line: line, Character: col + uint32(length)}, + } + return ans +} + +// FromPosition translates a protocol.Position into an offset into the template +func (p *Parsed) FromPosition(x protocol.Position) int { + l, c := int(x.Line), int(x.Character) + if l >= len(p.nls) || p.nls[l]+1 >= len(p.buf) { + // paranoia to avoid panic. return the largest offset + return len(p.buf) + } + line := p.buf[p.nls[l]+1:] + cnt := 0 + for w := range string(line) { + if cnt >= c { + return w + p.nls[l] + 1 + } + cnt++ + } + // do we get here? NO + pos := int(x.Character) + p.nls[int(x.Line)] + 1 + event.Error(context.Background(), "internal error", fmt.Errorf("surprise %#v", x)) + return pos +} + +func symAtPosition(fh source.FileHandle, loc protocol.Position) (*symbol, *Parsed, error) { + buf, err := fh.Read() + if err != nil { + return nil, nil, err + } + p := parseBuffer(buf) + pos := p.FromPosition(loc) + syms := p.SymsAtPos(pos) + if len(syms) == 0 { + return nil, p, fmt.Errorf("no symbol found") + } + if len(syms) > 1 { + log.Printf("Hover: %d syms, not 1 %v", len(syms), syms) + } + sym := syms[0] + return &sym, p, nil +} + +func (p *Parsed) SymsAtPos(pos int) []symbol { + ans := []symbol{} + for _, s := range p.symbols { + if s.start <= pos && pos < s.start+s.length { + ans = append(ans, s) + } + } + return ans +} + +type wrNode struct { + p *Parsed + w io.Writer +} + +// WriteNode is for debugging +func (p *Parsed) WriteNode(w io.Writer, n parse.Node) { + wr := wrNode{p: p, w: w} + wr.writeNode(n, "") +} + +func (wr wrNode) writeNode(n parse.Node, indent string) { + if n == nil { + return + } + at := func(pos parse.Pos) string { + line, col := wr.p.LineCol(int(pos)) + return fmt.Sprintf("(%d)%v:%v", pos, line, col) + } + switch x := n.(type) { + case *parse.ActionNode: + fmt.Fprintf(wr.w, "%sActionNode at %s\n", indent, at(x.Pos)) + wr.writeNode(x.Pipe, indent+". ") + case *parse.BoolNode: + fmt.Fprintf(wr.w, "%sBoolNode at %s, %v\n", indent, at(x.Pos), x.True) + case *parse.BranchNode: + fmt.Fprintf(wr.w, "%sBranchNode at %s\n", indent, at(x.Pos)) + wr.writeNode(x.Pipe, indent+"Pipe. ") + wr.writeNode(x.List, indent+"List. ") + wr.writeNode(x.ElseList, indent+"Else. ") + case *parse.ChainNode: + fmt.Fprintf(wr.w, "%sChainNode at %s, %v\n", indent, at(x.Pos), x.Field) + case *parse.CommandNode: + fmt.Fprintf(wr.w, "%sCommandNode at %s, %d children\n", indent, at(x.Pos), len(x.Args)) + for _, a := range x.Args { + wr.writeNode(a, indent+". ") + } + //case *parse.CommentNode: // 1.16 + case *parse.DotNode: + fmt.Fprintf(wr.w, "%sDotNode at %s\n", indent, at(x.Pos)) + case *parse.FieldNode: + fmt.Fprintf(wr.w, "%sFieldNode at %s, %v\n", indent, at(x.Pos), x.Ident) + case *parse.IdentifierNode: + fmt.Fprintf(wr.w, "%sIdentifierNode at %s, %v\n", indent, at(x.Pos), x.Ident) + case *parse.IfNode: + fmt.Fprintf(wr.w, "%sIfNode at %s\n", indent, at(x.Pos)) + wr.writeNode(&x.BranchNode, indent+". ") + case *parse.ListNode: + if x == nil { + return // nil BranchNode.ElseList + } + fmt.Fprintf(wr.w, "%sListNode at %s, %d children\n", indent, at(x.Pos), len(x.Nodes)) + for _, n := range x.Nodes { + wr.writeNode(n, indent+". ") + } + case *parse.NilNode: + fmt.Fprintf(wr.w, "%sNilNode at %s\n", indent, at(x.Pos)) + case *parse.NumberNode: + fmt.Fprintf(wr.w, "%sNumberNode at %s, %s\n", indent, at(x.Pos), x.Text) + case *parse.PipeNode: + if x == nil { + return // {{template "xxx"}} + } + fmt.Fprintf(wr.w, "%sPipeNode at %s, %d vars, %d cmds, IsAssign:%v\n", + indent, at(x.Pos), len(x.Decl), len(x.Cmds), x.IsAssign) + for _, d := range x.Decl { + wr.writeNode(d, indent+"Decl. ") + } + for _, c := range x.Cmds { + wr.writeNode(c, indent+"Cmd. ") + } + case *parse.RangeNode: + fmt.Fprintf(wr.w, "%sRangeNode at %s\n", indent, at(x.Pos)) + wr.writeNode(&x.BranchNode, indent+". ") + case *parse.StringNode: + fmt.Fprintf(wr.w, "%sStringNode at %s, %s\n", indent, at(x.Pos), x.Quoted) + case *parse.TemplateNode: + fmt.Fprintf(wr.w, "%sTemplateNode at %s, %s\n", indent, at(x.Pos), x.Name) + wr.writeNode(x.Pipe, indent+". ") + case *parse.TextNode: + fmt.Fprintf(wr.w, "%sTextNode at %s, len %d\n", indent, at(x.Pos), len(x.Text)) + case *parse.VariableNode: + fmt.Fprintf(wr.w, "%sVariableNode at %s, %v\n", indent, at(x.Pos), x.Ident) + case *parse.WithNode: + fmt.Fprintf(wr.w, "%sWithNode at %s\n", indent, at(x.Pos)) + wr.writeNode(&x.BranchNode, indent+". ") + } +} + +var kindNames = []string{"", "File", "Module", "Namespace", "Package", "Class", "Method", "Property", + "Field", "Constructor", "Enum", "Interface", "Function", "Variable", "Constant", "String", + "Number", "Boolean", "Array", "Object", "Key", "Null", "EnumMember", "Struct", "Event", + "Operator", "TypeParameter"} + +func kindStr(k protocol.SymbolKind) string { + n := int(k) + if n < 1 || n >= len(kindNames) { + return fmt.Sprintf("?SymbolKind %d?", n) + } + return kindNames[n] +} diff --git a/internal/lsp/template/parse_test.go b/gopls/internal/lsp/template/parse_test.go similarity index 100% rename from internal/lsp/template/parse_test.go rename to gopls/internal/lsp/template/parse_test.go diff --git a/gopls/internal/lsp/template/symbols.go b/gopls/internal/lsp/template/symbols.go new file mode 100644 index 00000000000..24f9604c100 --- /dev/null +++ b/gopls/internal/lsp/template/symbols.go @@ -0,0 +1,230 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "context" + "fmt" + "text/template/parse" + "unicode/utf8" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" +) + +// in local coordinates, to be translated to protocol.DocumentSymbol +type symbol struct { + start int // for sorting + length int // in runes (unicode code points) + name string + kind protocol.SymbolKind + vardef bool // is this a variable definition? + // do we care about selection range, or children? + // no children yet, and selection range is the same as range +} + +func (s symbol) String() string { + return fmt.Sprintf("{%d,%d,%s,%s,%v}", s.start, s.length, s.name, s.kind, s.vardef) +} + +// for FieldNode or VariableNode (or ChainNode?) +func (p *Parsed) fields(flds []string, x parse.Node) []symbol { + ans := []symbol{} + // guessing that there are no embedded blanks allowed. The doc is unclear + lookfor := "" + switch x.(type) { + case *parse.FieldNode: + for _, f := range flds { + lookfor += "." + f // quadratic, but probably ok + } + case *parse.VariableNode: + lookfor = flds[0] + for i := 1; i < len(flds); i++ { + lookfor += "." + flds[i] + } + case *parse.ChainNode: // PJW, what are these? + for _, f := range flds { + lookfor += "." + f // quadratic, but probably ok + } + default: + // If these happen they will happen even if gopls is restarted + // and the users does the same thing, so it is better not to panic. + // context.Background() is used because we don't have access + // to any other context. [we could, but it would be complicated] + event.Log(context.Background(), fmt.Sprintf("%T unexpected in fields()", x)) + return nil + } + if len(lookfor) == 0 { + event.Log(context.Background(), fmt.Sprintf("no strings in fields() %#v", x)) + return nil + } + startsAt := int(x.Position()) + ix := bytes.Index(p.buf[startsAt:], []byte(lookfor)) // HasPrefix? PJW? + if ix < 0 || ix > len(lookfor) { // lookfor expected to be at start (or so) + // probably golang.go/#43388, so back up + startsAt -= len(flds[0]) + 1 + ix = bytes.Index(p.buf[startsAt:], []byte(lookfor)) // ix might be 1? PJW + if ix < 0 { + return ans + } + } + at := ix + startsAt + for _, f := range flds { + at += 1 // . + kind := protocol.Method + if f[0] == '$' { + kind = protocol.Variable + } + sym := symbol{name: f, kind: kind, start: at, length: utf8.RuneCount([]byte(f))} + if kind == protocol.Variable && len(p.stack) > 1 { + if pipe, ok := p.stack[len(p.stack)-2].(*parse.PipeNode); ok { + for _, y := range pipe.Decl { + if x == y { + sym.vardef = true + } + } + } + } + ans = append(ans, sym) + at += len(f) + } + return ans +} + +func (p *Parsed) findSymbols() { + if len(p.stack) == 0 { + return + } + n := p.stack[len(p.stack)-1] + pop := func() { + p.stack = p.stack[:len(p.stack)-1] + } + if n == nil { // allowing nil simplifies the code + pop() + return + } + nxt := func(nd parse.Node) { + p.stack = append(p.stack, nd) + p.findSymbols() + } + switch x := n.(type) { + case *parse.ActionNode: + nxt(x.Pipe) + case *parse.BoolNode: + // need to compute the length from the value + msg := fmt.Sprintf("%v", x.True) + p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(msg), kind: protocol.Boolean}) + case *parse.BranchNode: + nxt(x.Pipe) + nxt(x.List) + nxt(x.ElseList) + case *parse.ChainNode: + p.symbols = append(p.symbols, p.fields(x.Field, x)...) + nxt(x.Node) + case *parse.CommandNode: + for _, a := range x.Args { + nxt(a) + } + //case *parse.CommentNode: // go 1.16 + // log.Printf("implement %d", x.Type()) + case *parse.DotNode: + sym := symbol{name: "dot", kind: protocol.Variable, start: int(x.Pos), length: 1} + p.symbols = append(p.symbols, sym) + case *parse.FieldNode: + p.symbols = append(p.symbols, p.fields(x.Ident, x)...) + case *parse.IdentifierNode: + sym := symbol{name: x.Ident, kind: protocol.Function, start: int(x.Pos), + length: utf8.RuneCount([]byte(x.Ident))} + p.symbols = append(p.symbols, sym) + case *parse.IfNode: + nxt(&x.BranchNode) + case *parse.ListNode: + if x != nil { // wretched typed nils. Node should have an IfNil + for _, nd := range x.Nodes { + nxt(nd) + } + } + case *parse.NilNode: + sym := symbol{name: "nil", kind: protocol.Constant, start: int(x.Pos), length: 3} + p.symbols = append(p.symbols, sym) + case *parse.NumberNode: + // no name; ascii + p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(x.Text), kind: protocol.Number}) + case *parse.PipeNode: + if x == nil { // {{template "foo"}} + return + } + for _, d := range x.Decl { + nxt(d) + } + for _, c := range x.Cmds { + nxt(c) + } + case *parse.RangeNode: + nxt(&x.BranchNode) + case *parse.StringNode: + // no name + sz := utf8.RuneCount([]byte(x.Text)) + p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.String}) + case *parse.TemplateNode: // invoking a template + // x.Pos points to the quote before the name + p.symbols = append(p.symbols, symbol{name: x.Name, kind: protocol.Package, start: int(x.Pos) + 1, + length: utf8.RuneCount([]byte(x.Name))}) + nxt(x.Pipe) + case *parse.TextNode: + if len(x.Text) == 1 && x.Text[0] == '\n' { + break + } + // nothing to report, but build one for hover + sz := utf8.RuneCount([]byte(x.Text)) + p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.Constant}) + case *parse.VariableNode: + p.symbols = append(p.symbols, p.fields(x.Ident, x)...) + case *parse.WithNode: + nxt(&x.BranchNode) + + } + pop() +} + +// DocumentSymbols returns a hierarchy of the symbols defined in a template file. +// (The hierarchy is flat. SymbolInformation might be better.) +func DocumentSymbols(snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentSymbol, error) { + buf, err := fh.Read() + if err != nil { + return nil, err + } + p := parseBuffer(buf) + if p.ParseErr != nil { + return nil, p.ParseErr + } + var ans []protocol.DocumentSymbol + for _, s := range p.symbols { + if s.kind == protocol.Constant { + continue + } + d := kindStr(s.kind) + if d == "Namespace" { + d = "Template" + } + if s.vardef { + d += "(def)" + } else { + d += "(use)" + } + r := p.Range(s.start, s.length) + y := protocol.DocumentSymbol{ + Name: s.name, + Detail: d, + Kind: s.kind, + Range: r, + SelectionRange: r, // or should this be the entire {{...}}? + } + ans = append(ans, y) + } + return ans, nil +} diff --git a/internal/lsp/testdata/%percent/perc%ent.go b/gopls/internal/lsp/testdata/%percent/perc%ent.go similarity index 100% rename from internal/lsp/testdata/%percent/perc%ent.go rename to gopls/internal/lsp/testdata/%percent/perc%ent.go diff --git a/internal/lsp/testdata/addimport/addimport.go.golden b/gopls/internal/lsp/testdata/addimport/addimport.go.golden similarity index 100% rename from internal/lsp/testdata/addimport/addimport.go.golden rename to gopls/internal/lsp/testdata/addimport/addimport.go.golden diff --git a/internal/lsp/testdata/addimport/addimport.go.in b/gopls/internal/lsp/testdata/addimport/addimport.go.in similarity index 100% rename from internal/lsp/testdata/addimport/addimport.go.in rename to gopls/internal/lsp/testdata/addimport/addimport.go.in diff --git a/internal/lsp/testdata/address/address.go b/gopls/internal/lsp/testdata/address/address.go similarity index 100% rename from internal/lsp/testdata/address/address.go rename to gopls/internal/lsp/testdata/address/address.go diff --git a/gopls/internal/lsp/testdata/analyzer/bad_test.go b/gopls/internal/lsp/testdata/analyzer/bad_test.go new file mode 100644 index 00000000000..b1724c66693 --- /dev/null +++ b/gopls/internal/lsp/testdata/analyzer/bad_test.go @@ -0,0 +1,24 @@ +package analyzer + +import ( + "fmt" + "sync" + "testing" + "time" +) + +func Testbad(t *testing.T) { //@diag("", "tests", "Testbad has malformed name: first letter after 'Test' must not be lowercase", "warning") + var x sync.Mutex + _ = x //@diag("x", "copylocks", "assignment copies lock value to _: sync.Mutex", "warning") + + printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, "printf", "golang.org/lsptests/analyzer.printfWrapper format %s reads arg #1, but call has 0 args", "warning") +} + +func printfWrapper(format string, args ...interface{}) { + fmt.Printf(format, args...) +} + +func _() { + now := time.Now() + fmt.Println(now.Format("2006-02-01")) //@diag("2006-02-01", "timeformat", "2006-02-01 should be 2006-01-02", "warning") +} diff --git a/internal/lsp/testdata/anon/anon.go.in b/gopls/internal/lsp/testdata/anon/anon.go.in similarity index 100% rename from internal/lsp/testdata/anon/anon.go.in rename to gopls/internal/lsp/testdata/anon/anon.go.in diff --git a/internal/lsp/testdata/append/append.go b/gopls/internal/lsp/testdata/append/append.go similarity index 100% rename from internal/lsp/testdata/append/append.go rename to gopls/internal/lsp/testdata/append/append.go diff --git a/internal/lsp/testdata/append/append2.go.in b/gopls/internal/lsp/testdata/append/append2.go.in similarity index 100% rename from internal/lsp/testdata/append/append2.go.in rename to gopls/internal/lsp/testdata/append/append2.go.in diff --git a/internal/lsp/testdata/arraytype/array_type.go.in b/gopls/internal/lsp/testdata/arraytype/array_type.go.in similarity index 83% rename from internal/lsp/testdata/arraytype/array_type.go.in rename to gopls/internal/lsp/testdata/arraytype/array_type.go.in index 7e9a96f7b0d..ac1a3e78297 100644 --- a/internal/lsp/testdata/arraytype/array_type.go.in +++ b/gopls/internal/lsp/testdata/arraytype/array_type.go.in @@ -1,7 +1,7 @@ package arraytype import ( - "golang.org/x/tools/internal/lsp/foo" + "golang.org/lsptests/foo" ) func _() { @@ -9,7 +9,8 @@ func _() { val string //@item(atVal, "val", "string", "var") ) - [] //@complete(" //", PackageFoo) + // disabled - see issue #54822 + [] // complete(" //", PackageFoo) []val //@complete(" //") @@ -33,7 +34,8 @@ func _() { var s []myInt //@item(atS, "s", "[]myInt", "var") s = []m //@complete(" //", atMyInt) - s = [] //@complete(" //", atMyInt, PackageFoo) + // disabled - see issue #54822 + s = [] // complete(" //", atMyInt, PackageFoo) var a [1]myInt a = [1]m //@complete(" //", atMyInt) diff --git a/internal/lsp/testdata/assign/assign.go.in b/gopls/internal/lsp/testdata/assign/assign.go.in similarity index 89% rename from internal/lsp/testdata/assign/assign.go.in rename to gopls/internal/lsp/testdata/assign/assign.go.in index 8c00ae9e0e5..93a622c8326 100644 --- a/internal/lsp/testdata/assign/assign.go.in +++ b/gopls/internal/lsp/testdata/assign/assign.go.in @@ -1,6 +1,6 @@ package assign -import "golang.org/x/tools/internal/lsp/assign/internal/secret" +import "golang.org/lsptests/assign/internal/secret" func _() { secret.Hello() diff --git a/internal/lsp/testdata/assign/internal/secret/secret.go b/gopls/internal/lsp/testdata/assign/internal/secret/secret.go similarity index 100% rename from internal/lsp/testdata/assign/internal/secret/secret.go rename to gopls/internal/lsp/testdata/assign/internal/secret/secret.go diff --git a/gopls/internal/lsp/testdata/bad/bad0.go b/gopls/internal/lsp/testdata/bad/bad0.go new file mode 100644 index 00000000000..9eedf4aead0 --- /dev/null +++ b/gopls/internal/lsp/testdata/bad/bad0.go @@ -0,0 +1,24 @@ +//go:build go1.11 +// +build go1.11 + +package bad + +import _ "golang.org/lsptests/assign/internal/secret" //@diag("\"golang.org/lsptests/assign/internal/secret\"", "compiler", "could not import golang.org/lsptests/assign/internal/secret \\(invalid use of internal package golang.org/lsptests/assign/internal/secret\\)", "error") + +func stuff() { //@item(stuff, "stuff", "func()", "func") + x := "heeeeyyyy" + random2(x) //@diag("x", "compiler", "cannot use x \\(variable of type string\\) as int value in argument to random2", "error") + random2(1) //@complete("dom", random, random2, random3) + y := 3 //@diag("y", "compiler", "y declared (and|but) not used", "error") +} + +type bob struct { //@item(bob, "bob", "struct{...}", "struct") + x int +} + +func _() { + var q int + _ = &bob{ + f: q, //@diag("f: q", "compiler", "unknown field f in struct literal", "error") + } +} diff --git a/gopls/internal/lsp/testdata/bad/bad1.go b/gopls/internal/lsp/testdata/bad/bad1.go new file mode 100644 index 00000000000..13b3d0af61c --- /dev/null +++ b/gopls/internal/lsp/testdata/bad/bad1.go @@ -0,0 +1,34 @@ +//go:build go1.11 +// +build go1.11 + +package bad + +// See #36637 +type stateFunc func() stateFunc //@item(stateFunc, "stateFunc", "func() stateFunc", "type") + +var a unknown //@item(global_a, "a", "unknown", "var"),diag("unknown", "compiler", "(undeclared name|undefined): unknown", "error") + +func random() int { //@item(random, "random", "func() int", "func") + //@complete("", global_a, bob, random, random2, random3, stateFunc, stuff) + return 0 +} + +func random2(y int) int { //@item(random2, "random2", "func(y int) int", "func"),item(bad_y_param, "y", "int", "var") + x := 6 //@item(x, "x", "int", "var"),diag("x", "compiler", "x declared (and|but) not used", "error") + var q blah //@item(q, "q", "blah", "var"),diag("q", "compiler", "q declared (and|but) not used", "error"),diag("blah", "compiler", "(undeclared name|undefined): blah", "error") + var t **blob //@item(t, "t", "**blob", "var"),diag("t", "compiler", "t declared (and|but) not used", "error"),diag("blob", "compiler", "(undeclared name|undefined): blob", "error") + //@complete("", q, t, x, bad_y_param, global_a, bob, random, random2, random3, stateFunc, stuff) + + return y +} + +func random3(y ...int) { //@item(random3, "random3", "func(y ...int)", "func"),item(y_variadic_param, "y", "[]int", "var") + //@complete("", y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff) + + var ch chan (favType1) //@item(ch, "ch", "chan (favType1)", "var"),diag("ch", "compiler", "ch declared (and|but) not used", "error"),diag("favType1", "compiler", "(undeclared name|undefined): favType1", "error") + var m map[keyType]int //@item(m, "m", "map[keyType]int", "var"),diag("m", "compiler", "m declared (and|but) not used", "error"),diag("keyType", "compiler", "(undeclared name|undefined): keyType", "error") + var arr []favType2 //@item(arr, "arr", "[]favType2", "var"),diag("arr", "compiler", "arr declared (and|but) not used", "error"),diag("favType2", "compiler", "(undeclared name|undefined): favType2", "error") + var fn1 func() badResult //@item(fn1, "fn1", "func() badResult", "var"),diag("fn1", "compiler", "fn1 declared (and|but) not used", "error"),diag("badResult", "compiler", "(undeclared name|undefined): badResult", "error") + var fn2 func(badParam) //@item(fn2, "fn2", "func(badParam)", "var"),diag("fn2", "compiler", "fn2 declared (and|but) not used", "error"),diag("badParam", "compiler", "(undeclared name|undefined): badParam", "error") + //@complete("", arr, ch, fn1, fn2, m, y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff) +} diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in new file mode 100644 index 00000000000..81aee201d7f --- /dev/null +++ b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in @@ -0,0 +1,29 @@ +package badstmt + +import ( + "golang.org/lsptests/foo" +) + +// The nonewvars expectation asserts that the go/analysis framework ran. +// See comments in noparse. + +func _(x int) { + defer foo.F //@complete(" //", Foo),diag(" //", "syntax", "function must be invoked in defer statement|expression in defer must be function call", "error") + defer foo.F //@complete(" //", Foo) + x := 123 //@diag(":=", "nonewvars", "no new variables", "warning") +} + +func _() { + switch true { + case true: + go foo.F //@complete(" //", Foo) + } +} + +func _() { + defer func() { + foo.F //@complete(" //", Foo),snippet(" //", Foo, "Foo()", "Foo()") + + foo. //@rank(" //", Foo) + } +} diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in new file mode 100644 index 00000000000..6af9c35e3cf --- /dev/null +++ b/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in @@ -0,0 +1,9 @@ +package badstmt + +import ( + "golang.org/lsptests/foo" +) + +func _() { + defer func() { foo. } //@rank(" }", Foo) +} diff --git a/internal/lsp/testdata/badstmt/badstmt_3.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in similarity index 75% rename from internal/lsp/testdata/badstmt/badstmt_3.go.in rename to gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in index be774e84b05..d135e201505 100644 --- a/internal/lsp/testdata/badstmt/badstmt_3.go.in +++ b/gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in @@ -1,7 +1,7 @@ package badstmt import ( - "golang.org/x/tools/internal/lsp/foo" + "golang.org/lsptests/foo" ) func _() { diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in new file mode 100644 index 00000000000..6afd635ec2d --- /dev/null +++ b/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in @@ -0,0 +1,11 @@ +package badstmt + +import ( + "golang.org/lsptests/foo" +) + +func _() { + go func() { + defer foo. //@rank(" //", Foo, IntFoo) + } +} diff --git a/internal/lsp/testdata/bar/bar.go.in b/gopls/internal/lsp/testdata/bar/bar.go.in similarity index 90% rename from internal/lsp/testdata/bar/bar.go.in rename to gopls/internal/lsp/testdata/bar/bar.go.in index c0f4b4c45c2..502bdf74060 100644 --- a/internal/lsp/testdata/bar/bar.go.in +++ b/gopls/internal/lsp/testdata/bar/bar.go.in @@ -3,7 +3,7 @@ package bar import ( - "golang.org/x/tools/internal/lsp/foo" //@item(foo, "foo", "\"golang.org/x/tools/internal/lsp/foo\"", "package") + "golang.org/lsptests/foo" //@item(foo, "foo", "\"golang.org/lsptests/foo\"", "package") ) func helper(i foo.IntFoo) {} //@item(helper, "helper", "func(i foo.IntFoo)", "func") diff --git a/gopls/internal/lsp/testdata/basiclit/basiclit.go b/gopls/internal/lsp/testdata/basiclit/basiclit.go new file mode 100644 index 00000000000..ab895dc011c --- /dev/null +++ b/gopls/internal/lsp/testdata/basiclit/basiclit.go @@ -0,0 +1,13 @@ +package basiclit + +func _() { + var a int // something for lexical completions + + _ = "hello." //@complete(".") + + _ = 1 //@complete(" //") + + _ = 1. //@complete(".") + + _ = 'a' //@complete("' ") +} diff --git a/internal/lsp/testdata/baz/baz.go.in b/gopls/internal/lsp/testdata/baz/baz.go.in similarity index 87% rename from internal/lsp/testdata/baz/baz.go.in rename to gopls/internal/lsp/testdata/baz/baz.go.in index 3b74ee580c3..94952e1267b 100644 --- a/internal/lsp/testdata/baz/baz.go.in +++ b/gopls/internal/lsp/testdata/baz/baz.go.in @@ -3,9 +3,9 @@ package baz import ( - "golang.org/x/tools/internal/lsp/bar" + "golang.org/lsptests/bar" - f "golang.org/x/tools/internal/lsp/foo" + f "golang.org/lsptests/foo" ) var FooStruct f.StructFoo diff --git a/internal/lsp/testdata/builtins/builtin_args.go b/gopls/internal/lsp/testdata/builtins/builtin_args.go similarity index 100% rename from internal/lsp/testdata/builtins/builtin_args.go rename to gopls/internal/lsp/testdata/builtins/builtin_args.go diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go117.go b/gopls/internal/lsp/testdata/builtins/builtin_go117.go new file mode 100644 index 00000000000..57abcde1517 --- /dev/null +++ b/gopls/internal/lsp/testdata/builtins/builtin_go117.go @@ -0,0 +1,8 @@ +//go:build !go1.18 +// +build !go1.18 + +package builtins + +func _() { + //@complete("", append, bool, byte, cap, close, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) +} diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go118.go b/gopls/internal/lsp/testdata/builtins/builtin_go118.go new file mode 100644 index 00000000000..dabffcc679c --- /dev/null +++ b/gopls/internal/lsp/testdata/builtins/builtin_go118.go @@ -0,0 +1,8 @@ +//go:build go1.18 && !go1.21 +// +build go1.18,!go1.21 + +package builtins + +func _() { + //@complete("", any, append, bool, byte, cap, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) +} diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go121.go b/gopls/internal/lsp/testdata/builtins/builtin_go121.go new file mode 100644 index 00000000000..cb8e8fae3ab --- /dev/null +++ b/gopls/internal/lsp/testdata/builtins/builtin_go121.go @@ -0,0 +1,8 @@ +//go:build go1.21 +// +build go1.21 + +package builtins + +func _() { + //@complete("", any, append, bool, byte, cap, clear, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) +} diff --git a/internal/lsp/testdata/builtins/builtin_types.go b/gopls/internal/lsp/testdata/builtins/builtin_types.go similarity index 100% rename from internal/lsp/testdata/builtins/builtin_types.go rename to gopls/internal/lsp/testdata/builtins/builtin_types.go diff --git a/internal/lsp/testdata/builtins/builtins.go b/gopls/internal/lsp/testdata/builtins/builtins.go similarity index 89% rename from internal/lsp/testdata/builtins/builtins.go rename to gopls/internal/lsp/testdata/builtins/builtins.go index 25c29f21e6c..75c6e418312 100644 --- a/internal/lsp/testdata/builtins/builtins.go +++ b/gopls/internal/lsp/testdata/builtins/builtins.go @@ -1,15 +1,16 @@ package builtins -func _() { - //@complete("", append, bool, byte, cap, close, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) -} +// Definitions of builtin completion items. +/* any */ //@item(any, "any", "", "interface") /* Create markers for builtin types. Only for use by this test. /* append(slice []Type, elems ...Type) []Type */ //@item(append, "append", "func(slice []Type, elems ...Type) []Type", "func") /* bool */ //@item(bool, "bool", "", "type") /* byte */ //@item(byte, "byte", "", "type") /* cap(v Type) int */ //@item(cap, "cap", "func(v Type) int", "func") +/* clear[T interface{ ~[]Type | ~map[Type]Type1 }](t T) */ //@item(clear, "clear", "func(t T)", "func") /* close(c chan<- Type) */ //@item(close, "close", "func(c chan<- Type)", "func") +/* comparable */ //@item(comparable, "comparable", "", "interface") /* complex(r float64, i float64) */ //@item(complex, "complex", "func(r float64, i float64) complex128", "func") /* complex128 */ //@item(complex128, "complex128", "", "type") /* complex64 */ //@item(complex64, "complex64", "", "type") diff --git a/internal/lsp/testdata/builtins/constants.go b/gopls/internal/lsp/testdata/builtins/constants.go similarity index 100% rename from internal/lsp/testdata/builtins/constants.go rename to gopls/internal/lsp/testdata/builtins/constants.go diff --git a/internal/lsp/testdata/callhierarchy/callhierarchy.go b/gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go similarity index 95% rename from internal/lsp/testdata/callhierarchy/callhierarchy.go rename to gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go index 58c23bdd634..252e8054f40 100644 --- a/internal/lsp/testdata/callhierarchy/callhierarchy.go +++ b/gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go @@ -4,7 +4,7 @@ package callhierarchy -import "golang.org/x/tools/internal/lsp/callhierarchy/outgoing" +import "golang.org/lsptests/callhierarchy/outgoing" func a() { //@mark(hierarchyA, "a") D() diff --git a/internal/lsp/testdata/callhierarchy/incoming/incoming.go b/gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go similarity index 84% rename from internal/lsp/testdata/callhierarchy/incoming/incoming.go rename to gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go index 3bfb4ad998d..c629aa87929 100644 --- a/internal/lsp/testdata/callhierarchy/incoming/incoming.go +++ b/gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go @@ -4,7 +4,7 @@ package incoming -import "golang.org/x/tools/internal/lsp/callhierarchy" +import "golang.org/lsptests/callhierarchy" // A is exported to test incoming calls across packages func A() { //@mark(incomingA, "A") diff --git a/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go b/gopls/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go similarity index 100% rename from internal/lsp/testdata/callhierarchy/outgoing/outgoing.go rename to gopls/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go diff --git a/internal/lsp/testdata/casesensitive/casesensitive.go b/gopls/internal/lsp/testdata/casesensitive/casesensitive.go similarity index 100% rename from internal/lsp/testdata/casesensitive/casesensitive.go rename to gopls/internal/lsp/testdata/casesensitive/casesensitive.go diff --git a/internal/lsp/testdata/cast/cast.go.in b/gopls/internal/lsp/testdata/cast/cast.go.in similarity index 100% rename from internal/lsp/testdata/cast/cast.go.in rename to gopls/internal/lsp/testdata/cast/cast.go.in diff --git a/internal/lsp/testdata/cgo/declarecgo.go b/gopls/internal/lsp/testdata/cgo/declarecgo.go similarity index 100% rename from internal/lsp/testdata/cgo/declarecgo.go rename to gopls/internal/lsp/testdata/cgo/declarecgo.go diff --git a/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden b/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden new file mode 100644 index 00000000000..0d6fbb0fff6 --- /dev/null +++ b/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden @@ -0,0 +1,30 @@ +-- funccgoexample-definition -- +cgo/declarecgo.go:18:6-13: defined here as ```go +func Example() +``` + +[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example) +-- funccgoexample-definition-json -- +{ + "span": { + "uri": "file://cgo/declarecgo.go", + "start": { + "line": 18, + "column": 6, + "offset": 151 + }, + "end": { + "line": 18, + "column": 13, + "offset": 158 + } + }, + "description": "```go\nfunc Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)" +} + +-- funccgoexample-hoverdef -- +```go +func Example() +``` + +[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example) diff --git a/internal/lsp/testdata/cgo/declarecgo_nocgo.go b/gopls/internal/lsp/testdata/cgo/declarecgo_nocgo.go similarity index 100% rename from internal/lsp/testdata/cgo/declarecgo_nocgo.go rename to gopls/internal/lsp/testdata/cgo/declarecgo_nocgo.go diff --git a/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden new file mode 100644 index 00000000000..03fc22468ca --- /dev/null +++ b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden @@ -0,0 +1,30 @@ +-- funccgoexample-definition -- +cgo/declarecgo.go:18:6-13: defined here as ```go +func cgo.Example() +``` + +[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example) +-- funccgoexample-definition-json -- +{ + "span": { + "uri": "file://cgo/declarecgo.go", + "start": { + "line": 18, + "column": 6, + "offset": 151 + }, + "end": { + "line": 18, + "column": 13, + "offset": 158 + } + }, + "description": "```go\nfunc cgo.Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)" +} + +-- funccgoexample-hoverdef -- +```go +func cgo.Example() +``` + +[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example) diff --git a/internal/lsp/testdata/cgoimport/usecgo.go.in b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.in similarity index 76% rename from internal/lsp/testdata/cgoimport/usecgo.go.in rename to gopls/internal/lsp/testdata/cgoimport/usecgo.go.in index f258682ea13..414a739da99 100644 --- a/internal/lsp/testdata/cgoimport/usecgo.go.in +++ b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.in @@ -1,7 +1,7 @@ package cgoimport import ( - "golang.org/x/tools/internal/lsp/cgo" + "golang.org/lsptests/cgo" ) func _() { diff --git a/internal/lsp/testdata/channel/channel.go b/gopls/internal/lsp/testdata/channel/channel.go similarity index 100% rename from internal/lsp/testdata/channel/channel.go rename to gopls/internal/lsp/testdata/channel/channel.go diff --git a/internal/lsp/testdata/codelens/codelens_test.go b/gopls/internal/lsp/testdata/codelens/codelens_test.go similarity index 100% rename from internal/lsp/testdata/codelens/codelens_test.go rename to gopls/internal/lsp/testdata/codelens/codelens_test.go diff --git a/internal/lsp/testdata/comment_completion/comment_completion.go.in b/gopls/internal/lsp/testdata/comment_completion/comment_completion.go.in similarity index 100% rename from internal/lsp/testdata/comment_completion/comment_completion.go.in rename to gopls/internal/lsp/testdata/comment_completion/comment_completion.go.in diff --git a/internal/lsp/testdata/complit/complit.go.in b/gopls/internal/lsp/testdata/complit/complit.go.in similarity index 100% rename from internal/lsp/testdata/complit/complit.go.in rename to gopls/internal/lsp/testdata/complit/complit.go.in diff --git a/internal/lsp/testdata/constant/constant.go b/gopls/internal/lsp/testdata/constant/constant.go similarity index 100% rename from internal/lsp/testdata/constant/constant.go rename to gopls/internal/lsp/testdata/constant/constant.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_for.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_for.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_for.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_for_init.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_for_init.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_if.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_if.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_if.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_eof.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_eof.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_if_eof.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_if_eof.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_if_init.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_if_init.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_multiline_if.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_selector_1.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_1.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_selector_1.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_selector_1.go diff --git a/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go new file mode 100644 index 00000000000..8d4b15bff6a --- /dev/null +++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go @@ -0,0 +1,8 @@ +package danglingstmt + +import "golang.org/lsptests/foo" + +func _() { + foo. //@rank(" //", Foo) + var _ = []string{foo.} //@rank("}", Foo) +} diff --git a/internal/lsp/testdata/danglingstmt/dangling_switch_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_switch_init.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init.go diff --git a/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go similarity index 100% rename from internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go rename to gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go diff --git a/internal/lsp/testdata/deep/deep.go b/gopls/internal/lsp/testdata/deep/deep.go similarity index 92% rename from internal/lsp/testdata/deep/deep.go rename to gopls/internal/lsp/testdata/deep/deep.go index 6ed5ff83999..6908824f82f 100644 --- a/internal/lsp/testdata/deep/deep.go +++ b/gopls/internal/lsp/testdata/deep/deep.go @@ -28,6 +28,13 @@ func _() { wantsContext(c) //@rank(")", ctxBackground),rank(")", ctxTODO) } +func _() { + var cork struct{ err error } + cork.err //@item(deepCorkErr, "cork.err", "error", "field") + context //@item(deepContextPkg, "context", "\"context\"", "package") + var _ error = co //@rank(" //", deepCorkErr, deepContextPkg) +} + func _() { // deepCircle is circular. type deepCircle struct { diff --git a/gopls/internal/lsp/testdata/errors/errors.go b/gopls/internal/lsp/testdata/errors/errors.go new file mode 100644 index 00000000000..e14cde69e9e --- /dev/null +++ b/gopls/internal/lsp/testdata/errors/errors.go @@ -0,0 +1,10 @@ +package errors + +import ( + "golang.org/lsptests/types" +) + +func _() { + bob.Bob() //@complete(".") + types.b //@complete(" //", Bob_interface) +} diff --git a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_args_returns.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_basic.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_basic.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden diff --git a/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go new file mode 100644 index 00000000000..71f969e48c8 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go @@ -0,0 +1,12 @@ +package extract + +func _() { + a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a") + // Comment on its own line //@mark(exSt19, "Comment") + _ = 3 + 4 //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_") + // Comment right after 3 + 4 + + // Comment after with space //@mark(exEn20, "Comment") + + //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20) +} diff --git a/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden new file mode 100644 index 00000000000..1b2869ef7f5 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden @@ -0,0 +1,57 @@ +-- functionextraction_extract_basic_comment_4_2 -- +package extract + +func _() { + /* comment in the middle of a line */ + //@mark(exSt18, "a") + // Comment on its own line //@mark(exSt19, "Comment") + newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_") + // Comment right after 3 + 4 + + // Comment after with space //@mark(exEn20, "Comment") + + //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20) +} + +func newFunction() { + a := 1 + + _ = 3 + 4 +} + +-- functionextraction_extract_basic_comment_5_5 -- +package extract + +func _() { + a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a") + // Comment on its own line //@mark(exSt19, "Comment") + newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_") + // Comment right after 3 + 4 + + // Comment after with space //@mark(exEn20, "Comment") + + //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20) +} + +func newFunction() { + _ = 3 + 4 +} + +-- functionextraction_extract_basic_comment_6_2 -- +package extract + +func _() { + a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a") + // Comment on its own line //@mark(exSt19, "Comment") + newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_") + // Comment right after 3 + 4 + + // Comment after with space //@mark(exEn20, "Comment") + + //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20) +} + +func newFunction() { + _ = 3 + 4 +} + diff --git a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_issue_44813.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_redefine.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_redefine.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_basic.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_complex.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_init.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_scope.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_scope.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_smart_return.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden diff --git a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go rename to gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go diff --git a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden similarity index 100% rename from internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden rename to gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go similarity index 100% rename from internal/lsp/testdata/extract/extract_method/extract_basic.go rename to gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go diff --git a/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden new file mode 100644 index 00000000000..3310d973e01 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden @@ -0,0 +1,364 @@ +-- functionextraction_extract_basic_13_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func newFunction(a *A) int { + sum := a.x + a.y + return sum +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- functionextraction_extract_basic_14_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func newFunction(sum int) int { + return sum +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- functionextraction_extract_basic_18_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func newFunction(a A) bool { + return a.x < a.y +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- functionextraction_extract_basic_22_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func newFunction(a A) int { + sum := a.x + a.y + return sum +} + +-- functionextraction_extract_basic_23_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func newFunction(sum int) int { + return sum +} + +-- functionextraction_extract_basic_9_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func newFunction(a *A) bool { + return a.x < a.y +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- methodextraction_extract_basic_13_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a *A) newMethod() int { + sum := a.x + a.y + return sum +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- methodextraction_extract_basic_14_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (*A) newMethod(sum int) int { + return sum +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- methodextraction_extract_basic_18_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) newMethod() bool { + return a.x < a.y +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +-- methodextraction_extract_basic_22_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) newMethod() int { + sum := a.x + a.y + return sum +} + +-- methodextraction_extract_basic_23_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (A) newMethod(sum int) int { + return sum +} + +-- methodextraction_extract_basic_9_2 -- +package extract + +type A struct { + x int + y int +} + +func (a *A) XLessThanYP() bool { + return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a *A) newMethod() bool { + return a.x < a.y +} + +func (a *A) AddP() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + +func (a A) XLessThanY() bool { + return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") +} + +func (a A) Add() int { + sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") + return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") +} + diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go new file mode 100644 index 00000000000..cbb70a04cd1 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go @@ -0,0 +1,6 @@ +package extract + +func _() { + var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "") + var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "") +} diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden new file mode 100644 index 00000000000..3fd9b328711 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden @@ -0,0 +1,18 @@ +-- suggestedfix_extract_basic_lit_4_10 -- +package extract + +func _() { + x := 1 + var _ = x + 2 //@suggestedfix("1", "refactor.extract", "") + var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "") +} + +-- suggestedfix_extract_basic_lit_5_10 -- +package extract + +func _() { + var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "") + x := 3 + 4 + var _ = x //@suggestedfix("3 + 4", "refactor.extract", "") +} + diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go similarity index 78% rename from internal/lsp/testdata/extract/extract_variable/extract_func_call.go rename to gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go index badc010dce4..a20b45f5869 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go @@ -3,7 +3,7 @@ package extract import "strconv" func _() { - x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract") + x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") + b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") } diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden new file mode 100644 index 00000000000..d59c0ee99f2 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden @@ -0,0 +1,24 @@ +-- suggestedfix_extract_func_call_6_8 -- +package extract + +import "strconv" + +func _() { + x := append([]int{}, 1) + x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") + str := "1" + b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") +} + +-- suggestedfix_extract_func_call_8_12 -- +package extract + +import "strconv" + +func _() { + x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") + str := "1" + x, x1 := strconv.Atoi(str) + b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") +} + diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go new file mode 100644 index 00000000000..c14ad709212 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go @@ -0,0 +1,13 @@ +package extract + +import "go/ast" + +func _() { + x0 := 0 + if true { + y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") + } + if true { + x1 := !false //@suggestedfix("!false", "refactor.extract", "") + } +} diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden new file mode 100644 index 00000000000..1c2f64b7df7 --- /dev/null +++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden @@ -0,0 +1,32 @@ +-- suggestedfix_extract_scope_11_9 -- +package extract + +import "go/ast" + +func _() { + x0 := 0 + if true { + y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") + } + if true { + x := !false + x1 := x //@suggestedfix("!false", "refactor.extract", "") + } +} + +-- suggestedfix_extract_scope_8_8 -- +package extract + +import "go/ast" + +func _() { + x0 := 0 + if true { + x := ast.CompositeLit{} + y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") + } + if true { + x1 := !false //@suggestedfix("!false", "refactor.extract", "") + } +} + diff --git a/internal/lsp/testdata/fieldlist/field_list.go b/gopls/internal/lsp/testdata/fieldlist/field_list.go similarity index 100% rename from internal/lsp/testdata/fieldlist/field_list.go rename to gopls/internal/lsp/testdata/fieldlist/field_list.go diff --git a/gopls/internal/lsp/testdata/fillstruct/a.go b/gopls/internal/lsp/testdata/fillstruct/a.go new file mode 100644 index 00000000000..e1add2d4713 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a.go @@ -0,0 +1,27 @@ +package fillstruct + +import ( + "golang.org/lsptests/fillstruct/data" +) + +type basicStruct struct { + foo int +} + +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/gopls/internal/lsp/testdata/fillstruct/a.go.golden b/gopls/internal/lsp/testdata/fillstruct/a.go.golden new file mode 100644 index 00000000000..ca1db04ead8 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a.go.golden @@ -0,0 +1,126 @@ +-- suggestedfix_a_11_21 -- +package fillstruct + +import ( + "golang.org/lsptests/fillstruct/data" +) + +type basicStruct struct { + foo int +} + +var _ = basicStruct{ + foo: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a_18_22 -- +package fillstruct + +import ( + "golang.org/lsptests/fillstruct/data" +) + +type basicStruct struct { + foo int +} + +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{ + foo: 0, + bar: "", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a_25_22 -- +package fillstruct + +import ( + "golang.org/lsptests/fillstruct/data" +) + +type basicStruct struct { + foo int +} + +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{ + bar: "", + basic: basicStruct{}, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a_27_16 -- +package fillstruct + +import ( + "golang.org/lsptests/fillstruct/data" +) + +type basicStruct struct { + foo int +} + +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStruct struct { + foo int + bar string +} + +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStruct struct { + bar string + basic basicStruct +} + +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = data.B{ + ExportedInt: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + diff --git a/gopls/internal/lsp/testdata/fillstruct/a2.go b/gopls/internal/lsp/testdata/fillstruct/a2.go new file mode 100644 index 00000000000..b5e30a84f1e --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a2.go @@ -0,0 +1,29 @@ +package fillstruct + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructCompex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/gopls/internal/lsp/testdata/fillstruct/a2.go.golden b/gopls/internal/lsp/testdata/fillstruct/a2.go.golden new file mode 100644 index 00000000000..2eca3e349a1 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a2.go.golden @@ -0,0 +1,139 @@ +-- suggestedfix_a2_11_21 -- +package fillstruct + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{ + m: map[string]int{}, + s: []int{}, + c: make(chan int), + c1: make(<-chan int), + a: [2]string{}, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructCompex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a2_17_19 -- +package fillstruct + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{ + fn: func(i int) int { + }, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructCompex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a2_23_25 -- +package fillstruct + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructCompex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructCompex{ + fn: func(i int, s string) (string, int) { + }, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +-- suggestedfix_a2_29_24 -- +package fillstruct + +type typedStruct struct { + m map[string]int + s []int + c chan int + c1 <-chan int + a [2]string +} + +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStruct struct { + fn func(i int) int +} + +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructCompex struct { + fn func(i int, s string) (string, int) +} + +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type funStructEmpty struct { + fn func() +} + +var _ = funStructEmpty{ + fn: func() { + }, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + diff --git a/internal/lsp/testdata/fillstruct/a3.go b/gopls/internal/lsp/testdata/fillstruct/a3.go similarity index 79% rename from internal/lsp/testdata/fillstruct/a3.go rename to gopls/internal/lsp/testdata/fillstruct/a3.go index 730db305423..59cd9fa28b5 100644 --- a/internal/lsp/testdata/fillstruct/a3.go +++ b/gopls/internal/lsp/testdata/fillstruct/a3.go @@ -14,7 +14,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -25,7 +25,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -33,10 +33,10 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a3.go.golden b/gopls/internal/lsp/testdata/fillstruct/a3.go.golden similarity index 79% rename from internal/lsp/testdata/fillstruct/a3.go.golden rename to gopls/internal/lsp/testdata/fillstruct/a3.go.golden index 1d8672927d9..a7c7baa8d27 100644 --- a/internal/lsp/testdata/fillstruct/a3.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/a3.go.golden @@ -18,7 +18,7 @@ type Bar struct { var _ = Bar{ X: &Foo{}, Y: &Foo{}, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -29,7 +29,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -37,13 +37,13 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_28_24 -- package fillstruct @@ -62,7 +62,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -81,7 +81,7 @@ var _ = importedStruct{ fn: func(ast_decl ast.DeclStmt) ast.Ellipsis { }, st: ast.CompositeLit{}, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -89,13 +89,13 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_36_30 -- package fillstruct @@ -114,7 +114,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -125,7 +125,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -137,13 +137,13 @@ var _ = pointerBuiltinStruct{ b: new(bool), s: new(string), i: new(int), -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_39_3 -- package fillstruct @@ -162,7 +162,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -173,7 +173,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -181,17 +181,17 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ { ValuePos: 0, Kind: 0, Value: "", - }, //@suggestedfix("}", "refactor.rewrite") + }, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_42_25 -- package fillstruct @@ -210,7 +210,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -221,7 +221,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -229,15 +229,15 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } var _ = []ast.BasicLit{{ ValuePos: 0, Kind: 0, Value: "", -}} //@suggestedfix("}", "refactor.rewrite") +}} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/gopls/internal/lsp/testdata/fillstruct/a4.go b/gopls/internal/lsp/testdata/fillstruct/a4.go new file mode 100644 index 00000000000..5f52a55fa72 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a4.go @@ -0,0 +1,39 @@ +package fillstruct + +import "go/ast" + +type iStruct struct { + X int +} + +type sStruct struct { + str string +} + +type multiFill struct { + num int + strin string + arr []int +} + +type assignStruct struct { + n ast.Node +} + +func fill() { + var x int + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var s string + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var n int + _ = []int{} + if true { + arr := []int{1, 2} + } + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var node *ast.CompositeLit + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/gopls/internal/lsp/testdata/fillstruct/a4.go.golden b/gopls/internal/lsp/testdata/fillstruct/a4.go.golden new file mode 100644 index 00000000000..b1e376f05f1 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/a4.go.golden @@ -0,0 +1,174 @@ +-- suggestedfix_a4_25_18 -- +package fillstruct + +import "go/ast" + +type iStruct struct { + X int +} + +type sStruct struct { + str string +} + +type multiFill struct { + num int + strin string + arr []int +} + +type assignStruct struct { + n ast.Node +} + +func fill() { + var x int + var _ = iStruct{ + X: x, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + + var s string + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var n int + _ = []int{} + if true { + arr := []int{1, 2} + } + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var node *ast.CompositeLit + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_a4_28_18 -- +package fillstruct + +import "go/ast" + +type iStruct struct { + X int +} + +type sStruct struct { + str string +} + +type multiFill struct { + num int + strin string + arr []int +} + +type assignStruct struct { + n ast.Node +} + +func fill() { + var x int + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var s string + var _ = sStruct{ + str: s, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + + var n int + _ = []int{} + if true { + arr := []int{1, 2} + } + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var node *ast.CompositeLit + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_a4_35_20 -- +package fillstruct + +import "go/ast" + +type iStruct struct { + X int +} + +type sStruct struct { + str string +} + +type multiFill struct { + num int + strin string + arr []int +} + +type assignStruct struct { + n ast.Node +} + +func fill() { + var x int + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var s string + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var n int + _ = []int{} + if true { + arr := []int{1, 2} + } + var _ = multiFill{ + num: n, + strin: s, + arr: []int{}, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + + var node *ast.CompositeLit + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_a4_38_23 -- +package fillstruct + +import "go/ast" + +type iStruct struct { + X int +} + +type sStruct struct { + str string +} + +type multiFill struct { + num int + strin string + arr []int +} + +type assignStruct struct { + n ast.Node +} + +func fill() { + var x int + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var s string + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var n int + _ = []int{} + if true { + arr := []int{1, 2} + } + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") + + var node *ast.CompositeLit + var _ = assignStruct{ + n: node, + } //@suggestedfix("}", "refactor.rewrite", "Fill") +} + diff --git a/internal/lsp/testdata/fillstruct/data/a.go b/gopls/internal/lsp/testdata/fillstruct/data/a.go similarity index 100% rename from internal/lsp/testdata/fillstruct/data/a.go rename to gopls/internal/lsp/testdata/fillstruct/data/a.go diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go new file mode 100644 index 00000000000..3da904741d0 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go @@ -0,0 +1,26 @@ +package fillstruct + +type StructA struct { + unexportedIntField int + ExportedIntField int + MapA map[int]string + Array []int + StructB +} + +type StructA2 struct { + B *StructB +} + +type StructA3 struct { + B StructB +} + +func fill() { + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + if true { + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden new file mode 100644 index 00000000000..de01a40f052 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden @@ -0,0 +1,124 @@ +-- suggestedfix_fill_struct_20_15 -- +package fillstruct + +type StructA struct { + unexportedIntField int + ExportedIntField int + MapA map[int]string + Array []int + StructB +} + +type StructA2 struct { + B *StructB +} + +type StructA3 struct { + B StructB +} + +func fill() { + a := StructA{ + unexportedIntField: 0, + ExportedIntField: 0, + MapA: map[int]string{}, + Array: []int{}, + StructB: StructB{}, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + if true { + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} + +-- suggestedfix_fill_struct_21_16 -- +package fillstruct + +type StructA struct { + unexportedIntField int + ExportedIntField int + MapA map[int]string + Array []int + StructB +} + +type StructA2 struct { + B *StructB +} + +type StructA3 struct { + B StructB +} + +func fill() { + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{ + B: &StructB{}, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + if true { + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} + +-- suggestedfix_fill_struct_22_16 -- +package fillstruct + +type StructA struct { + unexportedIntField int + ExportedIntField int + MapA map[int]string + Array []int + StructB +} + +type StructA2 struct { + B *StructB +} + +type StructA3 struct { + B StructB +} + +func fill() { + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{ + B: StructB{}, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + if true { + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} + +-- suggestedfix_fill_struct_24_16 -- +package fillstruct + +type StructA struct { + unexportedIntField int + ExportedIntField int + MapA map[int]string + Array []int + StructB +} + +type StructA2 struct { + B *StructB +} + +type StructA3 struct { + B StructB +} + +func fill() { + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") + if true { + _ = StructA3{ + B: StructB{}, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} + diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go new file mode 100644 index 00000000000..2c099a80ea7 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go @@ -0,0 +1,14 @@ +package fillstruct + +type StructAnon struct { + a struct{} + b map[string]interface{} + c map[string]struct { + d int + e bool + } +} + +func fill() { + _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden similarity index 85% rename from internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden index eb6ffd66136..7cc9ac23d02 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden @@ -15,6 +15,6 @@ func fill() { a: struct{}{}, b: map[string]interface{}{}, c: map[string]struct{d int; e bool}{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go new file mode 100644 index 00000000000..ab7be5a7b58 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go @@ -0,0 +1,15 @@ +package fillstruct + +type StructB struct { + StructC +} + +type StructC struct { + unexportedInt int +} + +func nested() { + c := StructB{ + StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite", "Fill") + } +} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden similarity index 80% rename from internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden index 30061a5d72a..c902ee7f12b 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden @@ -13,7 +13,7 @@ func nested() { c := StructB{ StructC: StructC{ unexportedInt: 0, - }, //@suggestedfix("}", "refactor.rewrite") + }, //@suggestedfix("}", "refactor.rewrite", "Fill") } } diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go new file mode 100644 index 00000000000..ef35627c8ea --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go @@ -0,0 +1,12 @@ +package fillstruct + +import ( + h2 "net/http" + + "golang.org/lsptests/fillstruct/data" +) + +func unexported() { + a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden new file mode 100644 index 00000000000..0cdbfc820ba --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden @@ -0,0 +1,36 @@ +-- suggestedfix_fill_struct_package_10_14 -- +package fillstruct + +import ( + h2 "net/http" + + "golang.org/lsptests/fillstruct/data" +) + +func unexported() { + a := data.B{ + ExportedInt: 0, + } //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_fill_struct_package_11_16 -- +package fillstruct + +import ( + h2 "net/http" + + "golang.org/lsptests/fillstruct/data" +) + +func unexported() { + a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = h2.Client{ + Transport: nil, + CheckRedirect: func(req *h2.Request, via []*h2.Request) error { + }, + Jar: nil, + Timeout: 0, + } //@suggestedfix("}", "refactor.rewrite", "Fill") +} + diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go similarity index 78% rename from internal/lsp/testdata/fillstruct/fill_struct_partial.go rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go index 97b517dcdc3..5de1722c783 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go @@ -14,11 +14,11 @@ type StructPartialB struct { func fill() { a := StructPartialA{ PrefilledInt: 5, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ /* this comment should disappear */ PrefilledInt: 7, // This comment should be blown away. /* As should this one */ - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden similarity index 79% rename from internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden index 2d063c14d39..3aa437a0334 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden @@ -17,13 +17,13 @@ func fill() { PrefilledInt: 5, UnfilledInt: 0, StructPartialB: StructPartialB{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ /* this comment should disappear */ PrefilledInt: 7, // This comment should be blown away. /* As should this one */ - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_fill_struct_partial_23_2 -- @@ -43,10 +43,10 @@ type StructPartialB struct { func fill() { a := StructPartialA{ PrefilledInt: 5, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ PrefilledInt: 7, UnfilledInt: 0, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go new file mode 100644 index 00000000000..6a468cd544c --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go @@ -0,0 +1,9 @@ +package fillstruct + +type StructD struct { + ExportedIntField int +} + +func spaces() { + d := StructD{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden similarity index 76% rename from internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden index 0d755334c99..590c91611d0 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden @@ -8,6 +8,6 @@ type StructD struct { func spaces() { d := StructD{ ExportedIntField: 0, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go new file mode 100644 index 00000000000..f5e42a4f2fe --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go @@ -0,0 +1,12 @@ +package fillstruct + +import "unsafe" + +type unsafeStruct struct { + x int + p unsafe.Pointer +} + +func fill() { + _ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden similarity index 78% rename from internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden rename to gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden index 99369544373..7e8e1952f86 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden +++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden @@ -12,6 +12,6 @@ func fill() { _ := unsafeStruct{ x: 0, p: nil, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/gopls/internal/lsp/testdata/fillstruct/typeparams.go b/gopls/internal/lsp/testdata/fillstruct/typeparams.go new file mode 100644 index 00000000000..c0b702f57c7 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/typeparams.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} diff --git a/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden b/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden new file mode 100644 index 00000000000..625df7577b7 --- /dev/null +++ b/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden @@ -0,0 +1,206 @@ +-- suggestedfix_typeparams_14_40 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{ + foo: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_typeparams_21_49 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{ + foo: "", + bar: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_typeparams_25_1 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + foo: 0, + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_typeparams_32_36 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{ + bar: "", + basic: basicStructWithTypeParams{}, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill") +} + +-- suggestedfix_typeparams_36_8 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} // no suggested fix + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +func _[T any]() { + type S struct{ t T } + _ = S{ + t: *new(T), + } //@suggestedfix("}", "refactor.rewrite", "Fill") +} + diff --git a/internal/lsp/testdata/folding/a.go b/gopls/internal/lsp/testdata/folding/a.go similarity index 100% rename from internal/lsp/testdata/folding/a.go rename to gopls/internal/lsp/testdata/folding/a.go diff --git a/gopls/internal/lsp/testdata/folding/a.go.golden b/gopls/internal/lsp/testdata/folding/a.go.golden new file mode 100644 index 00000000000..b04ca4dab3f --- /dev/null +++ b/gopls/internal/lsp/testdata/folding/a.go.golden @@ -0,0 +1,722 @@ +-- foldingRange-0 -- +package folding //@fold("package") + +import (<>) + +import _ "os" + +// bar is a function.<> +func bar(<>) string {<>} + +-- foldingRange-1 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch {<>} + /* This is a multiline<> + + /* This is a multiline<> + _ = []int{<>} + _ = [2]string{<>} + _ = map[string]int{<>} + type T struct {<>} + _ = T{<>} + x, y := make(<>), make(<>) + select {<>} + // This is a multiline comment<> + return <> +} + +-- foldingRange-2 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true:<> + case false:<> + default:<> + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x:<> + case <-y:<> + default:<> + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-3 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true {<>} else {<>} + case false: + fmt.Println(<>) + default: + fmt.Println(<>) + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val {<>} else {<>} + case <-y: + fmt.Println(<>) + default: + fmt.Println(<>) + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-4 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true { + fmt.Println(<>) + } else { + fmt.Println(<>) + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val { + fmt.Println(<>) + } else { + fmt.Println(<>) + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-comment-0 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function.<> +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true { + fmt.Println("true") + } else { + fmt.Println("false") + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline<> + + /* This is a multiline<> + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val { + fmt.Println("true from x") + } else { + fmt.Println("false from x") + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment<> + return ` +this string +is not indented` +} + +-- foldingRange-imports-0 -- +package folding //@fold("package") + +import (<>) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true { + fmt.Println("true") + } else { + fmt.Println("false") + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val { + fmt.Println("true from x") + } else { + fmt.Println("false from x") + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-lineFolding-0 -- +package folding //@fold("package") + +import (<> +) + +import _ "os" + +// bar is a function.<> +func bar() string {<> +} + +-- foldingRange-lineFolding-1 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch {<> + } + /* This is a multiline<> + + /* This is a multiline<> + _ = []int{<>, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{<>, + } + type T struct {<> + } + _ = T{<>, + } + x, y := make(chan bool), make(chan bool) + select {<> + } + // This is a multiline comment<> + return <> +} + +-- foldingRange-lineFolding-2 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true:<> + case false:<> + default:<> + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x:<> + case <-y:<> + default:<> + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-lineFolding-3 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true {<> + } else {<> + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val {<> + } else {<> + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + +-- foldingRange-lineFolding-comment-0 -- +package folding //@fold("package") + +import ( + "fmt" + _ "log" +) + +import _ "os" + +// bar is a function.<> +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true { + fmt.Println("true") + } else { + fmt.Println("false") + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline<> + + /* This is a multiline<> + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val { + fmt.Println("true from x") + } else { + fmt.Println("false from x") + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment<> + return ` +this string +is not indented` +} + +-- foldingRange-lineFolding-imports-0 -- +package folding //@fold("package") + +import (<> +) + +import _ "os" + +// bar is a function. +// With a multiline doc comment. +func bar() string { + /* This is a single line comment */ + switch { + case true: + if true { + fmt.Println("true") + } else { + fmt.Println("false") + } + case false: + fmt.Println("false") + default: + fmt.Println("default") + } + /* This is a multiline + block + comment */ + + /* This is a multiline + block + comment */ + // Followed by another comment. + _ = []int{ + 1, + 2, + 3, + } + _ = [2]string{"d", + "e", + } + _ = map[string]int{ + "a": 1, + "b": 2, + "c": 3, + } + type T struct { + f string + g int + h string + } + _ = T{ + f: "j", + g: 4, + h: "i", + } + x, y := make(chan bool), make(chan bool) + select { + case val := <-x: + if val { + fmt.Println("true from x") + } else { + fmt.Println("false from x") + } + case <-y: + fmt.Println("y") + default: + fmt.Println("default") + } + // This is a multiline comment + // that is not a doc comment. + return ` +this string +is not indented` +} + diff --git a/gopls/internal/lsp/testdata/folding/bad.go.golden b/gopls/internal/lsp/testdata/folding/bad.go.golden new file mode 100644 index 00000000000..ab274f75ac6 --- /dev/null +++ b/gopls/internal/lsp/testdata/folding/bad.go.golden @@ -0,0 +1,81 @@ +-- foldingRange-0 -- +package folding //@fold("package") + +import (<>) + +import (<>) + +// badBar is a function. +func badBar(<>) string {<>} + +-- foldingRange-1 -- +package folding //@fold("package") + +import ( "fmt" + _ "log" +) + +import ( + _ "os" ) + +// badBar is a function. +func badBar() string { x := true + if x {<>} else {<>} + return +} + +-- foldingRange-2 -- +package folding //@fold("package") + +import ( "fmt" + _ "log" +) + +import ( + _ "os" ) + +// badBar is a function. +func badBar() string { x := true + if x { + // This is the only foldable thing in this file when lineFoldingOnly + fmt.Println(<>) + } else { + fmt.Println(<>) } + return +} + +-- foldingRange-imports-0 -- +package folding //@fold("package") + +import (<>) + +import (<>) + +// badBar is a function. +func badBar() string { x := true + if x { + // This is the only foldable thing in this file when lineFoldingOnly + fmt.Println("true") + } else { + fmt.Println("false") } + return +} + +-- foldingRange-lineFolding-0 -- +package folding //@fold("package") + +import ( "fmt" + _ "log" +) + +import ( + _ "os" ) + +// badBar is a function. +func badBar() string { x := true + if x {<> + } else { + fmt.Println("false") } + return +} + diff --git a/internal/lsp/testdata/folding/bad.go.in b/gopls/internal/lsp/testdata/folding/bad.go.in similarity index 100% rename from internal/lsp/testdata/folding/bad.go.in rename to gopls/internal/lsp/testdata/folding/bad.go.in diff --git a/gopls/internal/lsp/testdata/foo/foo.go b/gopls/internal/lsp/testdata/foo/foo.go new file mode 100644 index 00000000000..66631c58ca9 --- /dev/null +++ b/gopls/internal/lsp/testdata/foo/foo.go @@ -0,0 +1,30 @@ +package foo //@mark(PackageFoo, "foo"),item(PackageFoo, "foo", "\"golang.org/lsptests/foo\"", "package") + +type StructFoo struct { //@item(StructFoo, "StructFoo", "struct{...}", "struct") + Value int //@item(Value, "Value", "int", "field") +} + +// Pre-set this marker, as we don't have a "source" for it in this package. +/* Error() */ //@item(Error, "Error", "func() string", "method") + +func Foo() { //@item(Foo, "Foo", "func()", "func") + var err error + err.Error() //@complete("E", Error) +} + +func _() { + var sFoo StructFoo //@mark(sFoo1, "sFoo"),complete("t", StructFoo) + if x := sFoo; x.Value == 1 { //@mark(sFoo2, "sFoo"),complete("V", Value),typdef("sFoo", StructFoo),refs("sFo", sFoo1, sFoo2) + return + } +} + +func _() { + shadowed := 123 + { + shadowed := "hi" //@item(shadowed, "shadowed", "string", "var"),refs("shadowed", shadowed) + sha //@complete("a", shadowed) + } +} + +type IntFoo int //@item(IntFoo, "IntFoo", "int", "type") diff --git a/gopls/internal/lsp/testdata/format/bad_format.go.golden b/gopls/internal/lsp/testdata/format/bad_format.go.golden new file mode 100644 index 00000000000..f0c24d6356e --- /dev/null +++ b/gopls/internal/lsp/testdata/format/bad_format.go.golden @@ -0,0 +1,21 @@ +-- gofmt -- +package format //@format("package") + +import ( + "fmt" + "log" + "runtime" +) + +func hello() { + + var x int //@diag("x", "compiler", "x declared (and|but) not used", "error") +} + +func hi() { + runtime.GOROOT() + fmt.Printf("") + + log.Printf("") +} + diff --git a/gopls/internal/lsp/testdata/format/bad_format.go.in b/gopls/internal/lsp/testdata/format/bad_format.go.in new file mode 100644 index 00000000000..995ec399a11 --- /dev/null +++ b/gopls/internal/lsp/testdata/format/bad_format.go.in @@ -0,0 +1,22 @@ +package format //@format("package") + +import ( + "runtime" + "fmt" + "log" +) + +func hello() { + + + + + var x int //@diag("x", "compiler", "x declared (and|but) not used", "error") +} + +func hi() { + runtime.GOROOT() + fmt.Printf("") + + log.Printf("") +} diff --git a/internal/lsp/testdata/format/good_format.go b/gopls/internal/lsp/testdata/format/good_format.go similarity index 100% rename from internal/lsp/testdata/format/good_format.go rename to gopls/internal/lsp/testdata/format/good_format.go diff --git a/internal/lsp/testdata/format/good_format.go.golden b/gopls/internal/lsp/testdata/format/good_format.go.golden similarity index 100% rename from internal/lsp/testdata/format/good_format.go.golden rename to gopls/internal/lsp/testdata/format/good_format.go.golden diff --git a/internal/lsp/testdata/format/newline_format.go.golden b/gopls/internal/lsp/testdata/format/newline_format.go.golden similarity index 100% rename from internal/lsp/testdata/format/newline_format.go.golden rename to gopls/internal/lsp/testdata/format/newline_format.go.golden diff --git a/internal/lsp/testdata/format/newline_format.go.in b/gopls/internal/lsp/testdata/format/newline_format.go.in similarity index 100% rename from internal/lsp/testdata/format/newline_format.go.in rename to gopls/internal/lsp/testdata/format/newline_format.go.in diff --git a/internal/lsp/testdata/format/one_line.go.golden b/gopls/internal/lsp/testdata/format/one_line.go.golden similarity index 100% rename from internal/lsp/testdata/format/one_line.go.golden rename to gopls/internal/lsp/testdata/format/one_line.go.golden diff --git a/internal/lsp/testdata/format/one_line.go.in b/gopls/internal/lsp/testdata/format/one_line.go.in similarity index 100% rename from internal/lsp/testdata/format/one_line.go.in rename to gopls/internal/lsp/testdata/format/one_line.go.in diff --git a/internal/lsp/testdata/func_rank/func_rank.go.in b/gopls/internal/lsp/testdata/func_rank/func_rank.go.in similarity index 100% rename from internal/lsp/testdata/func_rank/func_rank.go.in rename to gopls/internal/lsp/testdata/func_rank/func_rank.go.in diff --git a/internal/lsp/testdata/funcsig/func_sig.go b/gopls/internal/lsp/testdata/funcsig/func_sig.go similarity index 100% rename from internal/lsp/testdata/funcsig/func_sig.go rename to gopls/internal/lsp/testdata/funcsig/func_sig.go diff --git a/internal/lsp/testdata/funcvalue/func_value.go b/gopls/internal/lsp/testdata/funcvalue/func_value.go similarity index 100% rename from internal/lsp/testdata/funcvalue/func_value.go rename to gopls/internal/lsp/testdata/funcvalue/func_value.go diff --git a/internal/lsp/testdata/fuzzymatch/fuzzymatch.go b/gopls/internal/lsp/testdata/fuzzymatch/fuzzymatch.go similarity index 100% rename from internal/lsp/testdata/fuzzymatch/fuzzymatch.go rename to gopls/internal/lsp/testdata/fuzzymatch/fuzzymatch.go diff --git a/internal/lsp/testdata/generate/generate.go b/gopls/internal/lsp/testdata/generate/generate.go similarity index 100% rename from internal/lsp/testdata/generate/generate.go rename to gopls/internal/lsp/testdata/generate/generate.go diff --git a/gopls/internal/lsp/testdata/generated/generated.go b/gopls/internal/lsp/testdata/generated/generated.go new file mode 100644 index 00000000000..c7adc180409 --- /dev/null +++ b/gopls/internal/lsp/testdata/generated/generated.go @@ -0,0 +1,7 @@ +package generated + +// Code generated by generator.go. DO NOT EDIT. + +func _() { + var y int //@diag("y", "compiler", "y declared (and|but) not used", "error") +} diff --git a/gopls/internal/lsp/testdata/generated/generator.go b/gopls/internal/lsp/testdata/generated/generator.go new file mode 100644 index 00000000000..8e2a4fab722 --- /dev/null +++ b/gopls/internal/lsp/testdata/generated/generator.go @@ -0,0 +1,5 @@ +package generated + +func _() { + var x int //@diag("x", "compiler", "x declared (and|but) not used", "error") +} diff --git a/gopls/internal/lsp/testdata/godef/a/a_x_test.go b/gopls/internal/lsp/testdata/godef/a/a_x_test.go new file mode 100644 index 00000000000..f166f055084 --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/a/a_x_test.go @@ -0,0 +1,9 @@ +package a_test + +import ( + "testing" +) + +func TestA2(t *testing.T) { //@TestA2,godef(TestA2, TestA2) + Nonexistant() //@diag("Nonexistant", "compiler", "(undeclared name|undefined): Nonexistant", "error") +} diff --git a/internal/lsp/testdata/godef/a/a_x_test.go.golden b/gopls/internal/lsp/testdata/godef/a/a_x_test.go.golden similarity index 100% rename from internal/lsp/testdata/godef/a/a_x_test.go.golden rename to gopls/internal/lsp/testdata/godef/a/a_x_test.go.golden diff --git a/gopls/internal/lsp/testdata/godef/a/d.go b/gopls/internal/lsp/testdata/godef/a/d.go new file mode 100644 index 00000000000..a1d17ad0da3 --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/a/d.go @@ -0,0 +1,69 @@ +package a //@mark(a, "a "),hoverdef("a ", a) + +import "fmt" + +type Thing struct { //@Thing + Member string //@Member +} + +var Other Thing //@Other + +func Things(val []string) []Thing { //@Things + return nil +} + +func (t Thing) Method(i int) string { //@Method + return t.Member +} + +func (t Thing) Method3() { +} + +func (t *Thing) Method2(i int, j int) (error, string) { + return nil, t.Member +} + +func (t *Thing) private() { +} + +func useThings() { + t := Thing{ //@mark(aStructType, "ing") + Member: "string", //@mark(fMember, "ember") + } + fmt.Print(t.Member) //@mark(aMember, "ember") + fmt.Print(Other) //@mark(aVar, "ther") + Things() //@mark(aFunc, "ings") + t.Method() //@mark(aMethod, "eth") +} + +type NextThing struct { //@NextThing + Thing + Value int +} + +func (n NextThing) another() string { + return n.Member +} + +// Shadows Thing.Method3 +func (n *NextThing) Method3() int { + return n.Value +} + +var nextThing NextThing //@hoverdef("NextThing", NextThing) + +/*@ +godef(aStructType, Thing) +godef(aMember, Member) +godef(aVar, Other) +godef(aFunc, Things) +godef(aMethod, Method) +godef(fMember, Member) +godef(Member, Member) + +//param +//package name +//const +//anon field + +*/ diff --git a/gopls/internal/lsp/testdata/godef/a/d.go.golden b/gopls/internal/lsp/testdata/godef/a/d.go.golden new file mode 100644 index 00000000000..ee687750c3e --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/a/d.go.golden @@ -0,0 +1,191 @@ +-- Member-definition -- +godef/a/d.go:6:2-8: defined here as ```go +field Member string +``` + +@Member + + +[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member) +-- Member-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 6, + "column": 2, + "offset": 90 + }, + "end": { + "line": 6, + "column": 8, + "offset": 96 + } + }, + "description": "```go\nfield Member string\n```\n\n@Member\n\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)" +} + +-- Member-hoverdef -- +```go +field Member string +``` + +@Member + + +[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member) +-- Method-definition -- +godef/a/d.go:15:16-22: defined here as ```go +func (Thing).Method(i int) string +``` + +[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method) +-- Method-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 15, + "column": 16, + "offset": 219 + }, + "end": { + "line": 15, + "column": 22, + "offset": 225 + } + }, + "description": "```go\nfunc (Thing).Method(i int) string\n```\n\n[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method)" +} + +-- Method-hoverdef -- +```go +func (Thing).Method(i int) string +``` + +[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method) +-- NextThing-hoverdef -- +```go +type NextThing struct { + Thing + Value int +} + +func (*NextThing).Method3() int +func (NextThing).another() string +``` + +[`a.NextThing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#NextThing) +-- Other-definition -- +godef/a/d.go:9:5-10: defined here as ```go +var Other Thing +``` + +@Other + + +[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other) +-- Other-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 9, + "column": 5, + "offset": 121 + }, + "end": { + "line": 9, + "column": 10, + "offset": 126 + } + }, + "description": "```go\nvar Other Thing\n```\n\n@Other\n\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)" +} + +-- Other-hoverdef -- +```go +var Other Thing +``` + +@Other + + +[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other) +-- Thing-definition -- +godef/a/d.go:5:6-11: defined here as ```go +type Thing struct { + Member string //@Member +} + +func (Thing).Method(i int) string +func (*Thing).Method2(i int, j int) (error, string) +func (Thing).Method3() +func (*Thing).private() +``` + +[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing) +-- Thing-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 5, + "column": 6, + "offset": 65 + }, + "end": { + "line": 5, + "column": 11, + "offset": 70 + } + }, + "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n\nfunc (Thing).Method(i int) string\nfunc (*Thing).Method2(i int, j int) (error, string)\nfunc (Thing).Method3()\nfunc (*Thing).private()\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)" +} + +-- Thing-hoverdef -- +```go +type Thing struct { + Member string //@Member +} + +func (Thing).Method(i int) string +func (*Thing).Method2(i int, j int) (error, string) +func (Thing).Method3() +func (*Thing).private() +``` + +[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing) +-- Things-definition -- +godef/a/d.go:11:6-12: defined here as ```go +func Things(val []string) []Thing +``` + +[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things) +-- Things-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 11, + "column": 6, + "offset": 148 + }, + "end": { + "line": 11, + "column": 12, + "offset": 154 + } + }, + "description": "```go\nfunc Things(val []string) []Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)" +} + +-- Things-hoverdef -- +```go +func Things(val []string) []Thing +``` + +[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things) +-- a-hoverdef -- +Package a is a package for testing go to definition. + diff --git a/internal/lsp/testdata/godef/a/f.go b/gopls/internal/lsp/testdata/godef/a/f.go similarity index 85% rename from internal/lsp/testdata/godef/a/f.go rename to gopls/internal/lsp/testdata/godef/a/f.go index 589c45fc1ae..10f88262a81 100644 --- a/internal/lsp/testdata/godef/a/f.go +++ b/gopls/internal/lsp/testdata/godef/a/f.go @@ -1,3 +1,4 @@ +// Package a is a package for testing go to definition. package a import "fmt" diff --git a/internal/lsp/testdata/godef/a/f.go.golden b/gopls/internal/lsp/testdata/godef/a/f.go.golden similarity index 100% rename from internal/lsp/testdata/godef/a/f.go.golden rename to gopls/internal/lsp/testdata/godef/a/f.go.golden diff --git a/internal/lsp/testdata/godef/a/g.go b/gopls/internal/lsp/testdata/godef/a/g.go similarity index 100% rename from internal/lsp/testdata/godef/a/g.go rename to gopls/internal/lsp/testdata/godef/a/g.go diff --git a/gopls/internal/lsp/testdata/godef/a/g.go.golden b/gopls/internal/lsp/testdata/godef/a/g.go.golden new file mode 100644 index 00000000000..f7a2e1b0775 --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/a/g.go.golden @@ -0,0 +1,7 @@ +-- dur-hoverdef -- +```go +const dur time.Duration = 910350000000 // 15m10.35s +``` + +dur is a constant of type time.Duration. + diff --git a/internal/lsp/testdata/godef/a/h.go b/gopls/internal/lsp/testdata/godef/a/h.go similarity index 100% rename from internal/lsp/testdata/godef/a/h.go rename to gopls/internal/lsp/testdata/godef/a/h.go diff --git a/gopls/internal/lsp/testdata/godef/a/h.go.golden b/gopls/internal/lsp/testdata/godef/a/h.go.golden new file mode 100644 index 00000000000..295876647ab --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/a/h.go.golden @@ -0,0 +1,158 @@ +-- arrD-hoverdef -- +```go +field d int +``` + +d field + +-- arrE-hoverdef -- +```go +field e struct{f int} +``` + +e nested struct + +-- arrF-hoverdef -- +```go +field f int +``` + +f field of nested struct + +-- complexH-hoverdef -- +```go +field h int +``` + +h field + +-- complexI-hoverdef -- +```go +field i struct{j int} +``` + +i nested struct + +-- complexJ-hoverdef -- +```go +field j int +``` + +j field of nested struct + +-- mapStructKeyX-hoverdef -- +```go +field x []string +``` + +X key field + +-- mapStructKeyY-hoverdef -- +```go +field y string +``` +-- mapStructValueX-hoverdef -- +```go +field x string +``` + +X value field + +-- nestedMap-hoverdef -- +```go +field m map[string]float64 +``` + +nested map + +-- nestedNumber-hoverdef -- +```go +field number int64 +``` + +nested number + +-- nestedString-hoverdef -- +```go +field str string +``` + +nested string + +-- openMethod-hoverdef -- +```go +func (interface).open() error +``` + +open method comment + +-- returnX-hoverdef -- +```go +field x int +``` + +X coord + +-- returnY-hoverdef -- +```go +field y int +``` + +Y coord + +-- structA-hoverdef -- +```go +field a int +``` + +a field + +-- structB-hoverdef -- +```go +field b struct{c int} +``` + +b nested struct + +-- structC-hoverdef -- +```go +field c int +``` + +c field of nested struct + +-- testDescription-hoverdef -- +```go +field desc string +``` + +test description + +-- testInput-hoverdef -- +```go +field in map[string][]struct{key string; value interface{}} +``` + +test input + +-- testInputKey-hoverdef -- +```go +field key string +``` + +test key + +-- testInputValue-hoverdef -- +```go +field value interface{} +``` + +test value + +-- testResultValue-hoverdef -- +```go +field value int +``` + +expected test value + diff --git a/internal/lsp/testdata/godef/b/e.go b/gopls/internal/lsp/testdata/godef/b/e.go similarity index 93% rename from internal/lsp/testdata/godef/b/e.go rename to gopls/internal/lsp/testdata/godef/b/e.go index 7b96cd7e8ae..9c81cad3171 100644 --- a/internal/lsp/testdata/godef/b/e.go +++ b/gopls/internal/lsp/testdata/godef/b/e.go @@ -3,7 +3,7 @@ package b import ( "fmt" - "golang.org/x/tools/internal/lsp/godef/a" + "golang.org/lsptests/godef/a" ) func useThings() { diff --git a/gopls/internal/lsp/testdata/godef/b/e.go.golden b/gopls/internal/lsp/testdata/godef/b/e.go.golden new file mode 100644 index 00000000000..3d7d8979771 --- /dev/null +++ b/gopls/internal/lsp/testdata/godef/b/e.go.golden @@ -0,0 +1,156 @@ +-- Member-definition -- +godef/a/d.go:6:2-8: defined here as ```go +field Member string +``` + +@Member + + +[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member) +-- Member-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 6, + "column": 2, + "offset": 90 + }, + "end": { + "line": 6, + "column": 8, + "offset": 96 + } + }, + "description": "```go\nfield Member string\n```\n\n@Member\n\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)" +} + +-- Member-hoverdef -- +```go +field Member string +``` + +@Member + + +[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member) +-- Other-definition -- +godef/a/d.go:9:5-10: defined here as ```go +var a.Other a.Thing +``` + +@Other + + +[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other) +-- Other-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 9, + "column": 5, + "offset": 121 + }, + "end": { + "line": 9, + "column": 10, + "offset": 126 + } + }, + "description": "```go\nvar a.Other a.Thing\n```\n\n@Other\n\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)" +} + +-- Other-hoverdef -- +```go +var a.Other a.Thing +``` + +@Other + + +[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other) +-- Thing-definition -- +godef/a/d.go:5:6-11: defined here as ```go +type Thing struct { + Member string //@Member +} + +func (a.Thing).Method(i int) string +func (*a.Thing).Method2(i int, j int) (error, string) +func (a.Thing).Method3() +``` + +[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing) +-- Thing-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 5, + "column": 6, + "offset": 65 + }, + "end": { + "line": 5, + "column": 11, + "offset": 70 + } + }, + "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n\nfunc (a.Thing).Method(i int) string\nfunc (*a.Thing).Method2(i int, j int) (error, string)\nfunc (a.Thing).Method3()\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)" +} + +-- Thing-hoverdef -- +```go +type Thing struct { + Member string //@Member +} + +func (a.Thing).Method(i int) string +func (*a.Thing).Method2(i int, j int) (error, string) +func (a.Thing).Method3() +``` + +[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing) +-- Things-definition -- +godef/a/d.go:11:6-12: defined here as ```go +func a.Things(val []string) []a.Thing +``` + +[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things) +-- Things-definition-json -- +{ + "span": { + "uri": "file://godef/a/d.go", + "start": { + "line": 11, + "column": 6, + "offset": 148 + }, + "end": { + "line": 11, + "column": 12, + "offset": 154 + } + }, + "description": "```go\nfunc a.Things(val []string) []a.Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)" +} + +-- Things-hoverdef -- +```go +func a.Things(val []string) []a.Thing +``` + +[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things) +-- eInt-hoverdef -- +```go +var x int +``` +-- eInterface-hoverdef -- +```go +var x interface{} +``` +-- eString-hoverdef -- +```go +var x string +``` diff --git a/internal/lsp/testdata/godef/broken/unclosedIf.go.golden b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden similarity index 79% rename from internal/lsp/testdata/godef/broken/unclosedIf.go.golden rename to gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden index 5c3329d8b67..9ce869848cb 100644 --- a/internal/lsp/testdata/godef/broken/unclosedIf.go.golden +++ b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden @@ -3,7 +3,7 @@ godef/broken/unclosedIf.go:7:7-19: defined here as ```go var myUnclosedIf string ``` -\@myUnclosedIf +@myUnclosedIf -- myUnclosedIf-definition-json -- { "span": { @@ -19,7 +19,7 @@ var myUnclosedIf string "offset": 80 } }, - "description": "```go\nvar myUnclosedIf string\n```\n\n\\@myUnclosedIf" + "description": "```go\nvar myUnclosedIf string\n```\n\n@myUnclosedIf" } -- myUnclosedIf-hoverdef -- @@ -27,4 +27,5 @@ var myUnclosedIf string var myUnclosedIf string ``` -\@myUnclosedIf +@myUnclosedIf + diff --git a/internal/lsp/testdata/godef/broken/unclosedIf.go.in b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.in similarity index 100% rename from internal/lsp/testdata/godef/broken/unclosedIf.go.in rename to gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.in diff --git a/internal/lsp/testdata/good/good0.go b/gopls/internal/lsp/testdata/good/good0.go similarity index 100% rename from internal/lsp/testdata/good/good0.go rename to gopls/internal/lsp/testdata/good/good0.go diff --git a/internal/lsp/testdata/good/good1.go b/gopls/internal/lsp/testdata/good/good1.go similarity index 81% rename from internal/lsp/testdata/good/good1.go rename to gopls/internal/lsp/testdata/good/good1.go index c4664a7e5d4..624d8147af2 100644 --- a/internal/lsp/testdata/good/good1.go +++ b/gopls/internal/lsp/testdata/good/good1.go @@ -1,7 +1,7 @@ package good //@diag("package", "no_diagnostics", "", "error") import ( - "golang.org/x/tools/internal/lsp/types" //@item(types_import, "types", "\"golang.org/x/tools/internal/lsp/types\"", "package") + "golang.org/lsptests/types" //@item(types_import, "types", "\"golang.org/lsptests/types\"", "package") ) func random() int { //@item(good_random, "random", "func() int", "func") @@ -14,6 +14,7 @@ func random2(y int) int { //@item(good_random2, "random2", "func(y int) int", "f //@complete("", good_y_param, types_import, good_random, good_random2, good_stuff) var b types.Bob = &types.X{} //@prepare("ypes","types", "types") if _, ok := b.(*types.X); ok { //@complete("X", X_struct, Y_struct, Bob_interface, CoolAlias) + _ = 0 // suppress "empty branch" diagnostic } return y diff --git a/internal/lsp/testdata/highlights/highlights.go b/gopls/internal/lsp/testdata/highlights/highlights.go similarity index 100% rename from internal/lsp/testdata/highlights/highlights.go rename to gopls/internal/lsp/testdata/highlights/highlights.go diff --git a/gopls/internal/lsp/testdata/implementation/implementation.go b/gopls/internal/lsp/testdata/implementation/implementation.go new file mode 100644 index 00000000000..4c1a22dd4f0 --- /dev/null +++ b/gopls/internal/lsp/testdata/implementation/implementation.go @@ -0,0 +1,37 @@ +package implementation + +import "golang.org/lsptests/implementation/other" + +type ImpP struct{} //@ImpP,implementations("ImpP", Laugher, OtherLaugher) + +func (*ImpP) Laugh() { //@mark(LaughP, "Laugh"),implementations("Laugh", Laugh, OtherLaugh) +} + +type ImpS struct{} //@ImpS,implementations("ImpS", Laugher, OtherLaugher) + +func (ImpS) Laugh() { //@mark(LaughS, "Laugh"),implementations("Laugh", Laugh, OtherLaugh) +} + +type Laugher interface { //@Laugher,implementations("Laugher", ImpP, OtherImpP, ImpS, OtherImpS, embedsImpP) + Laugh() //@Laugh,implementations("Laugh", LaughP, OtherLaughP, LaughS, OtherLaughS) +} + +type Foo struct { //@implementations("Foo", Joker) + other.Foo +} + +type Joker interface { //@Joker + Joke() //@Joke,implementations("Joke", ImpJoker) +} + +type cryer int //@implementations("cryer", Cryer) + +func (cryer) Cry(other.CryType) {} //@mark(CryImpl, "Cry"),implementations("Cry", Cry) + +type Empty interface{} //@implementations("Empty") + +var _ interface{ Joke() } //@implementations("Joke", ImpJoker) + +type embedsImpP struct { //@embedsImpP + ImpP //@implementations("ImpP", Laugher, OtherLaugher) +} diff --git a/gopls/internal/lsp/testdata/implementation/implementation_generics.go b/gopls/internal/lsp/testdata/implementation/implementation_generics.go new file mode 100644 index 00000000000..1f02d166b1e --- /dev/null +++ b/gopls/internal/lsp/testdata/implementation/implementation_generics.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +package implementation + +// -- generics -- + +type GenIface[T any] interface { //@mark(GenIface, "GenIface"),implementations("GenIface", GC) + F(int, string, T) //@mark(GenIfaceF, "F"),implementations("F", GCF) +} + +type GenConc[U any] int //@mark(GenConc, "GenConc"),implementations("GenConc", GI) + +func (GenConc[V]) F(int, string, V) {} //@mark(GenConcF, "F"),implementations("F", GIF) + +type GenConcString struct{ GenConc[string] } //@mark(GenConcString, "GenConcString"),implementations(GenConcString, GIString) diff --git a/internal/lsp/testdata/implementation/other/other.go b/gopls/internal/lsp/testdata/implementation/other/other.go similarity index 100% rename from internal/lsp/testdata/implementation/other/other.go rename to gopls/internal/lsp/testdata/implementation/other/other.go diff --git a/gopls/internal/lsp/testdata/implementation/other/other_generics.go b/gopls/internal/lsp/testdata/implementation/other/other_generics.go new file mode 100644 index 00000000000..4b4c29f7d4a --- /dev/null +++ b/gopls/internal/lsp/testdata/implementation/other/other_generics.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +package other + +// -- generics (limited support) -- + +type GI[T any] interface { //@mark(GI, "GI"),implementations("GI", GenConc) + F(int, string, T) //@mark(GIF, "F"),implementations("F", GenConcF) +} + +type GIString GI[string] //@mark(GIString, "GIString"),implementations("GIString", GenConcString) + +type GC[U any] int //@mark(GC, "GC"),implementations("GC", GenIface) + +func (GC[V]) F(int, string, V) {} //@mark(GCF, "F"),implementations("F", GenIfaceF) diff --git a/internal/lsp/testdata/implementation/other/other_test.go b/gopls/internal/lsp/testdata/implementation/other/other_test.go similarity index 100% rename from internal/lsp/testdata/implementation/other/other_test.go rename to gopls/internal/lsp/testdata/implementation/other/other_test.go diff --git a/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in b/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in new file mode 100644 index 00000000000..2f4cbada141 --- /dev/null +++ b/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in @@ -0,0 +1,42 @@ +package importedcomplit + +import ( + "golang.org/lsptests/foo" + + // import completions + "fm" //@complete("\" //", fmtImport) + "go/pars" //@complete("\" //", parserImport) + "golang.org/lsptests/signa" //@complete("na\" //", signatureImport) + "golang.org/lspte" //@complete("\" //", lsptestsImport) + "crypto/elli" //@complete("\" //", cryptoImport) + "golang.org/lsptests/sign" //@complete("\" //", signatureImport) + "golang.org/lsptests/sign" //@complete("ests", lsptestsImport) + namedParser "go/pars" //@complete("\" //", parserImport) +) + +func _() { + var V int //@item(icVVar, "V", "int", "var") + _ = foo.StructFoo{V} //@complete("}", Value, icVVar) +} + +func _() { + var ( + aa string //@item(icAAVar, "aa", "string", "var") + ab int //@item(icABVar, "ab", "int", "var") + ) + + _ = foo.StructFoo{a} //@complete("}", abVar, aaVar) + + var s struct { + AA string //@item(icFieldAA, "AA", "string", "field") + AB int //@item(icFieldAB, "AB", "int", "field") + } + + _ = foo.StructFoo{s.} //@complete("}", icFieldAB, icFieldAA) +} + +/* "fmt" */ //@item(fmtImport, "fmt", "\"fmt\"", "package") +/* "go/parser" */ //@item(parserImport, "parser", "\"go/parser\"", "package") +/* "golang.org/lsptests/signature" */ //@item(signatureImport, "signature", "\"golang.org/lsptests/signature\"", "package") +/* "golang.org/lsptests/" */ //@item(lsptestsImport, "lsptests/", "\"golang.org/lsptests/\"", "package") +/* "crypto/elliptic" */ //@item(cryptoImport, "elliptic", "\"crypto/elliptic\"", "package") diff --git a/internal/lsp/testdata/imports/add_import.go.golden b/gopls/internal/lsp/testdata/imports/add_import.go.golden similarity index 100% rename from internal/lsp/testdata/imports/add_import.go.golden rename to gopls/internal/lsp/testdata/imports/add_import.go.golden diff --git a/internal/lsp/testdata/imports/add_import.go.in b/gopls/internal/lsp/testdata/imports/add_import.go.in similarity index 100% rename from internal/lsp/testdata/imports/add_import.go.in rename to gopls/internal/lsp/testdata/imports/add_import.go.in diff --git a/internal/lsp/testdata/imports/good_imports.go.golden b/gopls/internal/lsp/testdata/imports/good_imports.go.golden similarity index 100% rename from internal/lsp/testdata/imports/good_imports.go.golden rename to gopls/internal/lsp/testdata/imports/good_imports.go.golden diff --git a/internal/lsp/testdata/imports/good_imports.go.in b/gopls/internal/lsp/testdata/imports/good_imports.go.in similarity index 100% rename from internal/lsp/testdata/imports/good_imports.go.in rename to gopls/internal/lsp/testdata/imports/good_imports.go.in diff --git a/internal/lsp/testdata/imports/issue35458.go.golden b/gopls/internal/lsp/testdata/imports/issue35458.go.golden similarity index 100% rename from internal/lsp/testdata/imports/issue35458.go.golden rename to gopls/internal/lsp/testdata/imports/issue35458.go.golden diff --git a/internal/lsp/testdata/imports/issue35458.go.in b/gopls/internal/lsp/testdata/imports/issue35458.go.in similarity index 100% rename from internal/lsp/testdata/imports/issue35458.go.in rename to gopls/internal/lsp/testdata/imports/issue35458.go.in diff --git a/internal/lsp/testdata/imports/multiple_blocks.go.golden b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.golden similarity index 100% rename from internal/lsp/testdata/imports/multiple_blocks.go.golden rename to gopls/internal/lsp/testdata/imports/multiple_blocks.go.golden diff --git a/internal/lsp/testdata/imports/multiple_blocks.go.in b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.in similarity index 100% rename from internal/lsp/testdata/imports/multiple_blocks.go.in rename to gopls/internal/lsp/testdata/imports/multiple_blocks.go.in diff --git a/internal/lsp/testdata/imports/needs_imports.go.golden b/gopls/internal/lsp/testdata/imports/needs_imports.go.golden similarity index 100% rename from internal/lsp/testdata/imports/needs_imports.go.golden rename to gopls/internal/lsp/testdata/imports/needs_imports.go.golden diff --git a/internal/lsp/testdata/imports/needs_imports.go.in b/gopls/internal/lsp/testdata/imports/needs_imports.go.in similarity index 100% rename from internal/lsp/testdata/imports/needs_imports.go.in rename to gopls/internal/lsp/testdata/imports/needs_imports.go.in diff --git a/internal/lsp/testdata/imports/remove_import.go.golden b/gopls/internal/lsp/testdata/imports/remove_import.go.golden similarity index 100% rename from internal/lsp/testdata/imports/remove_import.go.golden rename to gopls/internal/lsp/testdata/imports/remove_import.go.golden diff --git a/internal/lsp/testdata/imports/remove_import.go.in b/gopls/internal/lsp/testdata/imports/remove_import.go.in similarity index 100% rename from internal/lsp/testdata/imports/remove_import.go.in rename to gopls/internal/lsp/testdata/imports/remove_import.go.in diff --git a/internal/lsp/testdata/imports/remove_imports.go.golden b/gopls/internal/lsp/testdata/imports/remove_imports.go.golden similarity index 100% rename from internal/lsp/testdata/imports/remove_imports.go.golden rename to gopls/internal/lsp/testdata/imports/remove_imports.go.golden diff --git a/internal/lsp/testdata/imports/remove_imports.go.in b/gopls/internal/lsp/testdata/imports/remove_imports.go.in similarity index 100% rename from internal/lsp/testdata/imports/remove_imports.go.in rename to gopls/internal/lsp/testdata/imports/remove_imports.go.in diff --git a/internal/lsp/testdata/imports/two_lines.go.golden b/gopls/internal/lsp/testdata/imports/two_lines.go.golden similarity index 100% rename from internal/lsp/testdata/imports/two_lines.go.golden rename to gopls/internal/lsp/testdata/imports/two_lines.go.golden diff --git a/internal/lsp/testdata/imports/two_lines.go.in b/gopls/internal/lsp/testdata/imports/two_lines.go.in similarity index 100% rename from internal/lsp/testdata/imports/two_lines.go.in rename to gopls/internal/lsp/testdata/imports/two_lines.go.in diff --git a/internal/lsp/testdata/index/index.go b/gopls/internal/lsp/testdata/index/index.go similarity index 100% rename from internal/lsp/testdata/index/index.go rename to gopls/internal/lsp/testdata/index/index.go diff --git a/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go new file mode 100644 index 00000000000..b05c95ec800 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go @@ -0,0 +1,27 @@ +package inlayHint //@inlayHint("package") + +import "fmt" + +func fieldNames() { + for _, c := range []struct { + in, want string + }{ + struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + {"Hello, äø–ē•Œ", "ē•Œäø– ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + +func fieldNamesPointers() { + for _, c := range []*struct { + in, want string + }{ + &struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + {"Hello, äø–ē•Œ", "ē•Œäø– ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} diff --git a/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden new file mode 100644 index 00000000000..eb2febdb6a3 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden @@ -0,0 +1,29 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +import "fmt" + +func fieldNames() { + for _< int>, c< struct{in string; want string}> := range []struct { + in, want string + }{ + struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + {"Hello, äø–ē•Œ", "ē•Œäø– ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + +func fieldNamesPointers() { + for _< int>, c< *struct{in string; want string}> := range []*struct { + in, want string + }{ + &struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + <&struct{in string; want string}>{"Hello, äø–ē•Œ", "ē•Œäø– ,olleH"}, + <&struct{in string; want string}>{"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + diff --git a/gopls/internal/lsp/testdata/inlay_hint/constant_values.go b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go new file mode 100644 index 00000000000..e3339b0f303 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go @@ -0,0 +1,45 @@ +package inlayHint //@inlayHint("package") + +const True = true + +type Kind int + +const ( + KindNone Kind = iota + KindPrint + KindPrintf + KindErrorf +) + +const ( + u = iota * 4 + v float64 = iota * 42 + w = iota * 42 +) + +const ( + a, b = 1, 2 + c, d + e, f = 5 * 5, "hello" + "world" + g, h + i, j = true, f +) + +// No hint +const ( + Int = 3 + Float = 3.14 + Bool = true + Rune = '3' + Complex = 2.7i + String = "Hello, world!" +) + +var ( + varInt = 3 + varFloat = 3.14 + varBool = true + varRune = '3' + '4' + varComplex = 2.7i + varString = "Hello, world!" +) diff --git a/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden new file mode 100644 index 00000000000..edc46debc37 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden @@ -0,0 +1,47 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +const True = true + +type Kind int + +const ( + KindNone Kind = iota< = 0> + KindPrint< = 1> + KindPrintf< = 2> + KindErrorf< = 3> +) + +const ( + u = iota * 4< = 0> + v float64 = iota * 42< = 42> + w = iota * 42< = 84> +) + +const ( + a, b = 1, 2 + c, d< = 1, 2> + e, f = 5 * 5, "hello" + "world"< = 25, "helloworld"> + g, h< = 25, "helloworld"> + i, j = true, f< = true, "helloworld"> +) + +// No hint +const ( + Int = 3 + Float = 3.14 + Bool = true + Rune = '3' + Complex = 2.7i + String = "Hello, world!" +) + +var ( + varInt = 3 + varFloat = 3.14 + varBool = true + varRune = '3' + '4' + varComplex = 2.7i + varString = "Hello, world!" +) + diff --git a/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go new file mode 100644 index 00000000000..0d930e5d426 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go @@ -0,0 +1,50 @@ +package inlayHint //@inlayHint("package") + +import "fmt" + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} + +type foo struct{} + +func (*foo) bar(baz string, qux int) int { + if baz != "" { + return qux + 1 + } + return qux +} + +func kase(foo int, bar bool, baz ...string) { + fmt.Println(foo, bar, baz) +} + +func kipp(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func plex(foo, bar string, baz string) { + fmt.Println(foo, bar, baz) +} + +func tars(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func foobar() { + var x foo + x.bar("", 1) + kase(0, true, "c", "d", "e") + kipp("a", "b", "c") + plex("a", "b", "c") + tars("a", "b", "c") + foo, bar, baz := "a", "b", "c" + kipp(foo, bar, baz) + plex("a", bar, baz) + tars(foo+foo, (bar), "c") + +} diff --git a/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden new file mode 100644 index 00000000000..4e93a4f9268 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden @@ -0,0 +1,52 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +import "fmt" + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} + +type foo struct{} + +func (*foo) bar(baz string, qux int) int { + if baz != "" { + return qux + 1 + } + return qux +} + +func kase(foo int, bar bool, baz ...string) { + fmt.Println(foo, bar, baz) +} + +func kipp(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func plex(foo, bar string, baz string) { + fmt.Println(foo, bar, baz) +} + +func tars(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func foobar() { + var x foo + x.bar("", 1) + kase(0, true, "c", "d", "e") + kipp("a", "b", "c") + plex("a", "b", "c") + tars("a", "b", "c") + foo< string>, bar< string>, baz< string> := "a", "b", "c" + kipp(foo, bar, baz) + plex("a", bar, baz) + tars(foo+foo, (bar), "c") + +} + diff --git a/gopls/internal/lsp/testdata/inlay_hint/type_params.go b/gopls/internal/lsp/testdata/inlay_hint/type_params.go new file mode 100644 index 00000000000..3a3c7e53734 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/type_params.go @@ -0,0 +1,45 @@ +//go:build go1.18 +// +build go1.18 + +package inlayHint //@inlayHint("package") + +func main() { + ints := map[string]int64{ + "first": 34, + "second": 12, + } + + floats := map[string]float64{ + "first": 35.98, + "second": 26.99, + } + + SumIntsOrFloats[string, int64](ints) + SumIntsOrFloats[string, float64](floats) + + SumIntsOrFloats(ints) + SumIntsOrFloats(floats) + + SumNumbers(ints) + SumNumbers(floats) +} + +type Number interface { + int64 | float64 +} + +func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V { + var s V + for _, v := range m { + s += v + } + return s +} + +func SumNumbers[K comparable, V Number](m map[K]V) V { + var s V + for _, v := range m { + s += v + } + return s +} diff --git a/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden b/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden new file mode 100644 index 00000000000..4819963b7a4 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden @@ -0,0 +1,47 @@ +-- inlayHint -- +//go:build go1.18 +// +build go1.18 + +package inlayHint //@inlayHint("package") + +func main() { + ints< map[string]int64> := map[string]int64{ + "first": 34, + "second": 12, + } + + floats< map[string]float64> := map[string]float64{ + "first": 35.98, + "second": 26.99, + } + + SumIntsOrFloats[string, int64](ints) + SumIntsOrFloats[string, float64](floats) + + SumIntsOrFloats<[string, int64]>(ints) + SumIntsOrFloats<[string, float64]>(floats) + + SumNumbers<[string, int64]>(ints) + SumNumbers<[string, float64]>(floats) +} + +type Number interface { + int64 | float64 +} + +func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V { + var s V + for _< K>, v< V> := range m { + s += v + } + return s +} + +func SumNumbers[K comparable, V Number](m map[K]V) V { + var s V + for _< K>, v< V> := range m { + s += v + } + return s +} + diff --git a/gopls/internal/lsp/testdata/inlay_hint/variable_types.go b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go new file mode 100644 index 00000000000..219af7059c7 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go @@ -0,0 +1,20 @@ +package inlayHint //@inlayHint("package") + +func assignTypes() { + i, j := 0, len([]string{})-1 + println(i, j) +} + +func rangeTypes() { + for k, v := range []string{} { + println(k, v) + } +} + +func funcLitType() { + myFunc := func(a string) string { return "" } +} + +func compositeLitType() { + foo := map[string]interface{}{"": ""} +} diff --git a/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden new file mode 100644 index 00000000000..6039950d5f3 --- /dev/null +++ b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden @@ -0,0 +1,22 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +func assignTypes() { + i< int>, j< int> := 0, len([]string{})-1 + println(i, j) +} + +func rangeTypes() { + for k< int>, v< string> := range []string{} { + println(k, v) + } +} + +func funcLitType() { + myFunc< func(a string) string> := func(a string) string { return "" } +} + +func compositeLitType() { + foo< map[string]interface{}> := map[string]interface{}{"": ""} +} + diff --git a/internal/lsp/testdata/interfacerank/interface_rank.go b/gopls/internal/lsp/testdata/interfacerank/interface_rank.go similarity index 100% rename from internal/lsp/testdata/interfacerank/interface_rank.go rename to gopls/internal/lsp/testdata/interfacerank/interface_rank.go diff --git a/gopls/internal/lsp/testdata/issues/issue56505.go b/gopls/internal/lsp/testdata/issues/issue56505.go new file mode 100644 index 00000000000..8c641bfb852 --- /dev/null +++ b/gopls/internal/lsp/testdata/issues/issue56505.go @@ -0,0 +1,8 @@ +package issues + +// Test for golang/go#56505: completion on variables of type *error should not +// panic. +func _() { + var e *error + e.x //@complete(" //") +} diff --git a/internal/lsp/testdata/keywords/accidental_keywords.go.in b/gopls/internal/lsp/testdata/keywords/accidental_keywords.go.in similarity index 100% rename from internal/lsp/testdata/keywords/accidental_keywords.go.in rename to gopls/internal/lsp/testdata/keywords/accidental_keywords.go.in diff --git a/internal/lsp/testdata/keywords/empty_select.go b/gopls/internal/lsp/testdata/keywords/empty_select.go similarity index 100% rename from internal/lsp/testdata/keywords/empty_select.go rename to gopls/internal/lsp/testdata/keywords/empty_select.go diff --git a/internal/lsp/testdata/keywords/empty_switch.go b/gopls/internal/lsp/testdata/keywords/empty_switch.go similarity index 100% rename from internal/lsp/testdata/keywords/empty_switch.go rename to gopls/internal/lsp/testdata/keywords/empty_switch.go diff --git a/gopls/internal/lsp/testdata/keywords/keywords.go b/gopls/internal/lsp/testdata/keywords/keywords.go new file mode 100644 index 00000000000..0bcaa63bffb --- /dev/null +++ b/gopls/internal/lsp/testdata/keywords/keywords.go @@ -0,0 +1,100 @@ +package keywords + +//@rank("", type),rank("", func),rank("", var),rank("", const),rank("", import) + +func _() { + var test int //@rank(" //", int, interface) + var tChan chan int + var _ m //@complete(" //", map) + var _ f //@complete(" //", func) + var _ c //@complete(" //", chan) + + var _ str //@rank(" //", string, struct) + + type _ int //@rank(" //", interface, int) + + type _ str //@rank(" //", struct, string) + + switch test { + case 1: // TODO: trying to complete case here will break because the parser won't return *ast.Ident + b //@complete(" //", break) + case 2: + f //@complete(" //", fallthrough, for) + r //@complete(" //", return) + d //@complete(" //", default, defer) + c //@complete(" //", case, const) + } + + switch test.(type) { + case fo: //@complete(":") + case int: + b //@complete(" //", break) + case int32: + f //@complete(" //", for) + d //@complete(" //", default, defer) + r //@complete(" //", return) + c //@complete(" //", case, const) + } + + select { + case <-tChan: + b //@complete(" //", break) + c //@complete(" //", case, const) + } + + for index := 0; index < test; index++ { + c //@complete(" //", const, continue) + b //@complete(" //", break) + } + + for range []int{} { + c //@complete(" //", const, continue) + b //@complete(" //", break) + } + + // Test function level keywords + + //Using 2 characters to test because map output order is random + sw //@complete(" //", switch) + se //@complete(" //", select) + + f //@complete(" //", for) + d //@complete(" //", defer) + g //@rank(" //", go),rank(" //", goto) + r //@complete(" //", return) + i //@complete(" //", if) + e //@complete(" //", else) + v //@complete(" //", var) + c //@complete(" //", const) + + for i := r //@complete(" //", range) +} + +/* package */ //@item(package, "package", "", "keyword") +/* import */ //@item(import, "import", "", "keyword") +/* func */ //@item(func, "func", "", "keyword") +/* type */ //@item(type, "type", "", "keyword") +/* var */ //@item(var, "var", "", "keyword") +/* const */ //@item(const, "const", "", "keyword") +/* break */ //@item(break, "break", "", "keyword") +/* default */ //@item(default, "default", "", "keyword") +/* case */ //@item(case, "case", "", "keyword") +/* defer */ //@item(defer, "defer", "", "keyword") +/* go */ //@item(go, "go", "", "keyword") +/* for */ //@item(for, "for", "", "keyword") +/* if */ //@item(if, "if", "", "keyword") +/* else */ //@item(else, "else", "", "keyword") +/* switch */ //@item(switch, "switch", "", "keyword") +/* select */ //@item(select, "select", "", "keyword") +/* fallthrough */ //@item(fallthrough, "fallthrough", "", "keyword") +/* continue */ //@item(continue, "continue", "", "keyword") +/* return */ //@item(return, "return", "", "keyword") +/* var */ //@item(var, "var", "", "keyword") +/* const */ //@item(const, "const", "", "keyword") +/* goto */ //@item(goto, "goto", "", "keyword") +/* struct */ //@item(struct, "struct", "", "keyword") +/* interface */ //@item(interface, "interface", "", "keyword") +/* map */ //@item(map, "map", "", "keyword") +/* func */ //@item(func, "func", "", "keyword") +/* chan */ //@item(chan, "chan", "", "keyword") +/* range */ //@item(range, "range", "", "keyword") diff --git a/internal/lsp/testdata/labels/labels.go b/gopls/internal/lsp/testdata/labels/labels.go similarity index 100% rename from internal/lsp/testdata/labels/labels.go rename to gopls/internal/lsp/testdata/labels/labels.go diff --git a/gopls/internal/lsp/testdata/links/links.go b/gopls/internal/lsp/testdata/links/links.go new file mode 100644 index 00000000000..378134341b4 --- /dev/null +++ b/gopls/internal/lsp/testdata/links/links.go @@ -0,0 +1,26 @@ +package links + +import ( + "fmt" //@link(`fmt`,"https://pkg.go.dev/fmt") + + "golang.org/lsptests/foo" //@link(`golang.org/lsptests/foo`,`https://pkg.go.dev/golang.org/lsptests/foo`) + + _ "database/sql" //@link(`database/sql`, `https://pkg.go.dev/database/sql`) +) + +var ( + _ fmt.Formatter + _ foo.StructFoo + _ errors.Formatter +) + +// Foo function +func Foo() string { + /*https://example.com/comment */ //@link("https://example.com/comment","https://example.com/comment") + + url := "https://example.com/string_literal" //@link("https://example.com/string_literal","https://example.com/string_literal") + return url + + // TODO(golang/go#1234): Link the relevant issue. //@link("golang/go#1234", "https://github.com/golang/go/issues/1234") + // TODO(microsoft/vscode-go#12): Another issue. //@link("microsoft/vscode-go#12", "https://github.com/microsoft/vscode-go/issues/12") +} diff --git a/internal/lsp/testdata/maps/maps.go.in b/gopls/internal/lsp/testdata/maps/maps.go.in similarity index 100% rename from internal/lsp/testdata/maps/maps.go.in rename to gopls/internal/lsp/testdata/maps/maps.go.in diff --git a/gopls/internal/lsp/testdata/missingfunction/channels.go b/gopls/internal/lsp/testdata/missingfunction/channels.go new file mode 100644 index 00000000000..303770cd7aa --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/channels.go @@ -0,0 +1,9 @@ +package missingfunction + +func channels(s string) { + undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "") +} + +func c() (<-chan string, chan string) { + return make(<-chan string), make(chan string) +} diff --git a/internal/lsp/testdata/missingfunction/channels.go.golden b/gopls/internal/lsp/testdata/missingfunction/channels.go.golden similarity index 96% rename from internal/lsp/testdata/missingfunction/channels.go.golden rename to gopls/internal/lsp/testdata/missingfunction/channels.go.golden index f5078fed17a..998ce589e1d 100644 --- a/internal/lsp/testdata/missingfunction/channels.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/channels.go.golden @@ -2,7 +2,7 @@ package missingfunction func channels(s string) { - undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix") + undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "") } func undefinedChannels(ch1 <-chan string, ch2 chan string) { diff --git a/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go new file mode 100644 index 00000000000..f2fb3c04132 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go @@ -0,0 +1,6 @@ +package missingfunction + +func consecutiveParams() { + var s string + undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "") +} diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden similarity index 88% rename from internal/lsp/testdata/missingfunction/consecutive_params.go.golden rename to gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden index 14a766496fb..4b852ce141b 100644 --- a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden @@ -3,7 +3,7 @@ package missingfunction func consecutiveParams() { var s string - undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix") + undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "") } func undefinedConsecutiveParams(s1, s2 string) { diff --git a/gopls/internal/lsp/testdata/missingfunction/error_param.go b/gopls/internal/lsp/testdata/missingfunction/error_param.go new file mode 100644 index 00000000000..d0484f0ff56 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/error_param.go @@ -0,0 +1,6 @@ +package missingfunction + +func errorParam() { + var err error + undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "") +} diff --git a/internal/lsp/testdata/missingfunction/error_param.go.golden b/gopls/internal/lsp/testdata/missingfunction/error_param.go.golden similarity index 93% rename from internal/lsp/testdata/missingfunction/error_param.go.golden rename to gopls/internal/lsp/testdata/missingfunction/error_param.go.golden index 2e12711817d..de78646a5f1 100644 --- a/internal/lsp/testdata/missingfunction/error_param.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/error_param.go.golden @@ -3,7 +3,7 @@ package missingfunction func errorParam() { var err error - undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix") + undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "") } func undefinedErrorParam(err error) { diff --git a/gopls/internal/lsp/testdata/missingfunction/literals.go b/gopls/internal/lsp/testdata/missingfunction/literals.go new file mode 100644 index 00000000000..0099b1a08ad --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/literals.go @@ -0,0 +1,7 @@ +package missingfunction + +type T struct{} + +func literals() { + undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "") +} diff --git a/gopls/internal/lsp/testdata/missingfunction/literals.go.golden b/gopls/internal/lsp/testdata/missingfunction/literals.go.golden new file mode 100644 index 00000000000..cb85de4eb11 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/literals.go.golden @@ -0,0 +1,13 @@ +-- suggestedfix_literals_6_2 -- +package missingfunction + +type T struct{} + +func literals() { + undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "") +} + +func undefinedLiterals(s string, t1 T, t2 *T) { + panic("unimplemented") +} + diff --git a/gopls/internal/lsp/testdata/missingfunction/operation.go b/gopls/internal/lsp/testdata/missingfunction/operation.go new file mode 100644 index 00000000000..a4913ec10b2 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/operation.go @@ -0,0 +1,7 @@ +package missingfunction + +import "time" + +func operation() { + undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "") +} diff --git a/gopls/internal/lsp/testdata/missingfunction/operation.go.golden b/gopls/internal/lsp/testdata/missingfunction/operation.go.golden new file mode 100644 index 00000000000..6f9e6ffab6d --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/operation.go.golden @@ -0,0 +1,13 @@ +-- suggestedfix_operation_6_2 -- +package missingfunction + +import "time" + +func operation() { + undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "") +} + +func undefinedOperation(duration time.Duration) { + panic("unimplemented") +} + diff --git a/gopls/internal/lsp/testdata/missingfunction/selector.go b/gopls/internal/lsp/testdata/missingfunction/selector.go new file mode 100644 index 00000000000..93a04027138 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/selector.go @@ -0,0 +1,6 @@ +package missingfunction + +func selector() { + m := map[int]bool{} + undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "") +} diff --git a/internal/lsp/testdata/missingfunction/selector.go.golden b/gopls/internal/lsp/testdata/missingfunction/selector.go.golden similarity index 94% rename from internal/lsp/testdata/missingfunction/selector.go.golden rename to gopls/internal/lsp/testdata/missingfunction/selector.go.golden index c48691c4ed5..44e2dde3aa7 100644 --- a/internal/lsp/testdata/missingfunction/selector.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/selector.go.golden @@ -3,7 +3,7 @@ package missingfunction func selector() { m := map[int]bool{} - undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix") + undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "") } func undefinedSelector(b bool) { diff --git a/gopls/internal/lsp/testdata/missingfunction/slice.go b/gopls/internal/lsp/testdata/missingfunction/slice.go new file mode 100644 index 00000000000..48b1a52b3f3 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/slice.go @@ -0,0 +1,5 @@ +package missingfunction + +func slice() { + undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "") +} diff --git a/internal/lsp/testdata/missingfunction/slice.go.golden b/gopls/internal/lsp/testdata/missingfunction/slice.go.golden similarity index 92% rename from internal/lsp/testdata/missingfunction/slice.go.golden rename to gopls/internal/lsp/testdata/missingfunction/slice.go.golden index 0ccb8611b6c..2a05d9a0f54 100644 --- a/internal/lsp/testdata/missingfunction/slice.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/slice.go.golden @@ -2,7 +2,7 @@ package missingfunction func slice() { - undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix") + undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "") } func undefinedSlice(i []int) { diff --git a/gopls/internal/lsp/testdata/missingfunction/tuple.go b/gopls/internal/lsp/testdata/missingfunction/tuple.go new file mode 100644 index 00000000000..4059ced983a --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/tuple.go @@ -0,0 +1,9 @@ +package missingfunction + +func tuple() { + undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "") +} + +func b() (string, error) { + return "", nil +} diff --git a/internal/lsp/testdata/missingfunction/tuple.go.golden b/gopls/internal/lsp/testdata/missingfunction/tuple.go.golden similarity index 97% rename from internal/lsp/testdata/missingfunction/tuple.go.golden rename to gopls/internal/lsp/testdata/missingfunction/tuple.go.golden index 1e12bb70860..e1118a3f348 100644 --- a/internal/lsp/testdata/missingfunction/tuple.go.golden +++ b/gopls/internal/lsp/testdata/missingfunction/tuple.go.golden @@ -2,7 +2,7 @@ package missingfunction func tuple() { - undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix") + undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "") } func undefinedTuple(s string, err error) { diff --git a/gopls/internal/lsp/testdata/missingfunction/unique_params.go b/gopls/internal/lsp/testdata/missingfunction/unique_params.go new file mode 100644 index 00000000000..00479bf7554 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/unique_params.go @@ -0,0 +1,7 @@ +package missingfunction + +func uniqueArguments() { + var s string + var i int + undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "") +} diff --git a/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden b/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden new file mode 100644 index 00000000000..8d6352cded4 --- /dev/null +++ b/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden @@ -0,0 +1,13 @@ +-- suggestedfix_unique_params_6_2 -- +package missingfunction + +func uniqueArguments() { + var s string + var i int + undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "") +} + +func undefinedUniqueArguments(s1 string, i int, s2 string) { + panic("unimplemented") +} + diff --git a/internal/lsp/testdata/multireturn/multi_return.go.in b/gopls/internal/lsp/testdata/multireturn/multi_return.go.in similarity index 100% rename from internal/lsp/testdata/multireturn/multi_return.go.in rename to gopls/internal/lsp/testdata/multireturn/multi_return.go.in diff --git a/internal/lsp/testdata/nested_complit/nested_complit.go.in b/gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in similarity index 75% rename from internal/lsp/testdata/nested_complit/nested_complit.go.in rename to gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in index 1dddd5b1b53..3ad2d213e98 100644 --- a/internal/lsp/testdata/nested_complit/nested_complit.go.in +++ b/gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in @@ -9,6 +9,7 @@ type ncBar struct { //@item(structNCBar, "ncBar", "struct{...}", "struct") func _() { []ncFoo{} //@item(litNCFoo, "[]ncFoo{}", "", "var") _ := ncBar{ - baz: [] //@complete(" //", structNCFoo, structNCBar) + // disabled - see issue #54822 + baz: [] // complete(" //", structNCFoo, structNCBar) } } diff --git a/internal/lsp/testdata/nodisk/empty b/gopls/internal/lsp/testdata/nodisk/empty similarity index 100% rename from internal/lsp/testdata/nodisk/empty rename to gopls/internal/lsp/testdata/nodisk/empty diff --git a/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go b/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go new file mode 100644 index 00000000000..08aebd12f7b --- /dev/null +++ b/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go @@ -0,0 +1,9 @@ +package nodisk + +import ( + "golang.org/lsptests/foo" +) + +func _() { + foo.Foo() //@complete("F", Foo, IntFoo, StructFoo) +} diff --git a/gopls/internal/lsp/testdata/noparse/noparse.go.in b/gopls/internal/lsp/testdata/noparse/noparse.go.in new file mode 100644 index 00000000000..8b0bfaa035c --- /dev/null +++ b/gopls/internal/lsp/testdata/noparse/noparse.go.in @@ -0,0 +1,24 @@ +package noparse + +// The type error was chosen carefully to exercise a type-error analyzer. +// We use the 'nonewvars' analyzer because the other candidates are tricky: +// +// - The 'unusedvariable' analyzer is disabled by default, so it is not +// consistently enabled across Test{LSP,CommandLine} tests, which +// both process this file. +// - The 'undeclaredname' analyzer depends on the text of the go/types +// "undeclared name" error, which changed in go1.20. +// - The 'noresultvalues' analyzer produces a diagnostic containing newlines, +// which breaks the parser used by TestCommandLine. +// +// This comment is all that remains of my afternoon. + +func bye(x int) { + x := 123 //@diag(":=", "nonewvars", "no new variables", "warning") +} + +func stuff() { + +} + +func .() {} //@diag(".", "syntax", "expected 'IDENT', found '.'", "error") diff --git a/internal/lsp/testdata/noparse_format/noparse_format.go.golden b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.golden similarity index 100% rename from internal/lsp/testdata/noparse_format/noparse_format.go.golden rename to gopls/internal/lsp/testdata/noparse_format/noparse_format.go.golden diff --git a/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in new file mode 100644 index 00000000000..311a99aafb3 --- /dev/null +++ b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in @@ -0,0 +1,14 @@ +// +build go1.11 + +package noparse_format //@format("package") + +// The nonewvars expectation asserts that the go/analysis framework ran. +// See comments in badstmt. + +func what() { + var hi func() + if { hi() //@diag("{", "syntax", "missing condition in if statement", "error") + } + hi := nil //@diag(":=", "nonewvars", "no new variables", "warning") +} + diff --git a/internal/lsp/testdata/noparse_format/parse_format.go.golden b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.golden similarity index 100% rename from internal/lsp/testdata/noparse_format/parse_format.go.golden rename to gopls/internal/lsp/testdata/noparse_format/parse_format.go.golden diff --git a/internal/lsp/testdata/noparse_format/parse_format.go.in b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.in similarity index 100% rename from internal/lsp/testdata/noparse_format/parse_format.go.in rename to gopls/internal/lsp/testdata/noparse_format/parse_format.go.in diff --git a/internal/lsp/testdata/printf/printf.go b/gopls/internal/lsp/testdata/printf/printf.go similarity index 100% rename from internal/lsp/testdata/printf/printf.go rename to gopls/internal/lsp/testdata/printf/printf.go diff --git a/internal/lsp/testdata/rank/assign_rank.go.in b/gopls/internal/lsp/testdata/rank/assign_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/assign_rank.go.in rename to gopls/internal/lsp/testdata/rank/assign_rank.go.in diff --git a/internal/lsp/testdata/rank/binexpr_rank.go.in b/gopls/internal/lsp/testdata/rank/binexpr_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/binexpr_rank.go.in rename to gopls/internal/lsp/testdata/rank/binexpr_rank.go.in diff --git a/internal/lsp/testdata/rank/boolexpr_rank.go b/gopls/internal/lsp/testdata/rank/boolexpr_rank.go similarity index 100% rename from internal/lsp/testdata/rank/boolexpr_rank.go rename to gopls/internal/lsp/testdata/rank/boolexpr_rank.go diff --git a/internal/lsp/testdata/rank/convert_rank.go.in b/gopls/internal/lsp/testdata/rank/convert_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/convert_rank.go.in rename to gopls/internal/lsp/testdata/rank/convert_rank.go.in diff --git a/internal/lsp/testdata/rank/struct/struct_rank.go b/gopls/internal/lsp/testdata/rank/struct/struct_rank.go similarity index 100% rename from internal/lsp/testdata/rank/struct/struct_rank.go rename to gopls/internal/lsp/testdata/rank/struct/struct_rank.go diff --git a/internal/lsp/testdata/rank/switch_rank.go.in b/gopls/internal/lsp/testdata/rank/switch_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/switch_rank.go.in rename to gopls/internal/lsp/testdata/rank/switch_rank.go.in diff --git a/internal/lsp/testdata/rank/type_assert_rank.go.in b/gopls/internal/lsp/testdata/rank/type_assert_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/type_assert_rank.go.in rename to gopls/internal/lsp/testdata/rank/type_assert_rank.go.in diff --git a/internal/lsp/testdata/rank/type_switch_rank.go.in b/gopls/internal/lsp/testdata/rank/type_switch_rank.go.in similarity index 100% rename from internal/lsp/testdata/rank/type_switch_rank.go.in rename to gopls/internal/lsp/testdata/rank/type_switch_rank.go.in diff --git a/internal/lsp/testdata/references/another/another.go b/gopls/internal/lsp/testdata/references/another/another.go similarity index 82% rename from internal/lsp/testdata/references/another/another.go rename to gopls/internal/lsp/testdata/references/another/another.go index 47bda1e4acf..20e3ebca1cb 100644 --- a/internal/lsp/testdata/references/another/another.go +++ b/gopls/internal/lsp/testdata/references/another/another.go @@ -2,7 +2,7 @@ package another import ( - other "golang.org/x/tools/internal/lsp/references/other" + other "golang.org/lsptests/references/other" ) func _() { diff --git a/internal/lsp/testdata/references/interfaces/interfaces.go b/gopls/internal/lsp/testdata/references/interfaces/interfaces.go similarity index 100% rename from internal/lsp/testdata/references/interfaces/interfaces.go rename to gopls/internal/lsp/testdata/references/interfaces/interfaces.go diff --git a/gopls/internal/lsp/testdata/references/other/other.go b/gopls/internal/lsp/testdata/references/other/other.go new file mode 100644 index 00000000000..daac1a0282b --- /dev/null +++ b/gopls/internal/lsp/testdata/references/other/other.go @@ -0,0 +1,19 @@ +package other + +import ( + references "golang.org/lsptests/references" +) + +func GetXes() []references.X { + return []references.X{ + { + Y: 1, //@mark(GetXesY, "Y"),refs("Y", typeXY, GetXesY, anotherXY) + }, + } +} + +func _() { + references.Q = "hello" //@mark(assignExpQ, "Q") + bob := func(_ string) {} + bob(references.Q) //@mark(bobExpQ, "Q") +} diff --git a/gopls/internal/lsp/testdata/references/refs.go b/gopls/internal/lsp/testdata/references/refs.go new file mode 100644 index 00000000000..e7ff5049430 --- /dev/null +++ b/gopls/internal/lsp/testdata/references/refs.go @@ -0,0 +1,53 @@ +// Package refs is a package used to test find references. +package refs + +import "os" //@mark(osDecl, `"os"`),refs("os", osDecl, osUse) + +type i int //@mark(typeI, "i"),refs("i", typeI, argI, returnI, embeddedI) + +type X struct { + Y int //@mark(typeXY, "Y") +} + +func _(_ i) []bool { //@mark(argI, "i") + return nil +} + +func _(_ []byte) i { //@mark(returnI, "i") + return 0 +} + +var q string //@mark(declQ, "q"),refs("q", declQ, assignQ, bobQ) + +var Q string //@mark(declExpQ, "Q"),refs("Q", declExpQ, assignExpQ, bobExpQ) + +func _() { + q = "hello" //@mark(assignQ, "q") + bob := func(_ string) {} + bob(q) //@mark(bobQ, "q") +} + +type e struct { + i //@mark(embeddedI, "i"),refs("i", embeddedI, embeddedIUse) +} + +func _() { + _ = e{}.i //@mark(embeddedIUse, "i") +} + +const ( + foo = iota //@refs("iota") +) + +func _(x interface{}) { + // We use the _ prefix because the markers inhabit a single + // namespace and yDecl is already used in ../highlights/highlights.go. + switch _y := x.(type) { //@mark(_yDecl, "_y"),refs("_y", _yDecl, _yInt, _yDefault) + case int: + println(_y) //@mark(_yInt, "_y"),refs("_y", _yDecl, _yInt, _yDefault) + default: + println(_y) //@mark(_yDefault, "_y") + } + + os.Getwd() //@mark(osUse, "os") +} diff --git a/internal/lsp/testdata/references/refs_test.go b/gopls/internal/lsp/testdata/references/refs_test.go similarity index 100% rename from internal/lsp/testdata/references/refs_test.go rename to gopls/internal/lsp/testdata/references/refs_test.go diff --git a/internal/lsp/testdata/rename/a/random.go.golden b/gopls/internal/lsp/testdata/rename/a/random.go.golden similarity index 100% rename from internal/lsp/testdata/rename/a/random.go.golden rename to gopls/internal/lsp/testdata/rename/a/random.go.golden diff --git a/internal/lsp/testdata/rename/a/random.go.in b/gopls/internal/lsp/testdata/rename/a/random.go.in similarity index 100% rename from internal/lsp/testdata/rename/a/random.go.in rename to gopls/internal/lsp/testdata/rename/a/random.go.in diff --git a/internal/lsp/testdata/rename/b/b.go b/gopls/internal/lsp/testdata/rename/b/b.go similarity index 100% rename from internal/lsp/testdata/rename/b/b.go rename to gopls/internal/lsp/testdata/rename/b/b.go diff --git a/gopls/internal/lsp/testdata/rename/b/b.go.golden b/gopls/internal/lsp/testdata/rename/b/b.go.golden new file mode 100644 index 00000000000..36c6d39d0e8 --- /dev/null +++ b/gopls/internal/lsp/testdata/rename/b/b.go.golden @@ -0,0 +1,78 @@ +-- Bob-rename -- +package b + +var c int //@rename("int", "uint") + +func _() { + a := 1 //@rename("a", "error") + a = 2 + _ = a +} + +var ( + // Hello there. + // Bob does the thing. + Bob int //@rename("Foo", "Bob") +) + +/* +Hello description +*/ +func Hello() {} //@rename("Hello", "Goodbye") + +-- Goodbye-rename -- +b.go: +package b + +var c int //@rename("int", "uint") + +func _() { + a := 1 //@rename("a", "error") + a = 2 + _ = a +} + +var ( + // Hello there. + // Foo does the thing. + Foo int //@rename("Foo", "Bob") +) + +/* +Goodbye description +*/ +func Goodbye() {} //@rename("Hello", "Goodbye") + +c.go: +package c + +import "golang.org/lsptests/rename/b" + +func _() { + b.Goodbye() //@rename("Hello", "Goodbye") +} + +-- error-rename -- +package b + +var c int //@rename("int", "uint") + +func _() { + error := 1 //@rename("a", "error") + error = 2 + _ = error +} + +var ( + // Hello there. + // Foo does the thing. + Foo int //@rename("Foo", "Bob") +) + +/* +Hello description +*/ +func Hello() {} //@rename("Hello", "Goodbye") + +-- uint-rename -- +"int": builtin object diff --git a/gopls/internal/lsp/testdata/rename/bad/bad.go.golden b/gopls/internal/lsp/testdata/rename/bad/bad.go.golden new file mode 100644 index 00000000000..1b27e1782f3 --- /dev/null +++ b/gopls/internal/lsp/testdata/rename/bad/bad.go.golden @@ -0,0 +1,2 @@ +-- rFunc-rename -- +renaming "sFunc" to "rFunc" not possible because "golang.org/lsptests/rename/bad" has errors diff --git a/internal/lsp/testdata/rename/bad/bad.go.in b/gopls/internal/lsp/testdata/rename/bad/bad.go.in similarity index 100% rename from internal/lsp/testdata/rename/bad/bad.go.in rename to gopls/internal/lsp/testdata/rename/bad/bad.go.in diff --git a/internal/lsp/testdata/rename/bad/bad_test.go.in b/gopls/internal/lsp/testdata/rename/bad/bad_test.go.in similarity index 100% rename from internal/lsp/testdata/rename/bad/bad_test.go.in rename to gopls/internal/lsp/testdata/rename/bad/bad_test.go.in diff --git a/gopls/internal/lsp/testdata/rename/c/c.go b/gopls/internal/lsp/testdata/rename/c/c.go new file mode 100644 index 00000000000..6332c78f3f9 --- /dev/null +++ b/gopls/internal/lsp/testdata/rename/c/c.go @@ -0,0 +1,7 @@ +package c + +import "golang.org/lsptests/rename/b" + +func _() { + b.Hello() //@rename("Hello", "Goodbye") +} diff --git a/gopls/internal/lsp/testdata/rename/c/c.go.golden b/gopls/internal/lsp/testdata/rename/c/c.go.golden new file mode 100644 index 00000000000..d56250693a9 --- /dev/null +++ b/gopls/internal/lsp/testdata/rename/c/c.go.golden @@ -0,0 +1,32 @@ +-- Goodbye-rename -- +b.go: +package b + +var c int //@rename("int", "uint") + +func _() { + a := 1 //@rename("a", "error") + a = 2 + _ = a +} + +var ( + // Hello there. + // Foo does the thing. + Foo int //@rename("Foo", "Bob") +) + +/* +Goodbye description +*/ +func Goodbye() {} //@rename("Hello", "Goodbye") + +c.go: +package c + +import "golang.org/lsptests/rename/b" + +func _() { + b.Goodbye() //@rename("Hello", "Goodbye") +} + diff --git a/internal/lsp/testdata/rename/c/c2.go b/gopls/internal/lsp/testdata/rename/c/c2.go similarity index 100% rename from internal/lsp/testdata/rename/c/c2.go rename to gopls/internal/lsp/testdata/rename/c/c2.go diff --git a/internal/lsp/testdata/rename/c/c2.go.golden b/gopls/internal/lsp/testdata/rename/c/c2.go.golden similarity index 100% rename from internal/lsp/testdata/rename/c/c2.go.golden rename to gopls/internal/lsp/testdata/rename/c/c2.go.golden diff --git a/internal/lsp/testdata/rename/crosspkg/another/another.go b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go similarity index 100% rename from internal/lsp/testdata/rename/crosspkg/another/another.go rename to gopls/internal/lsp/testdata/rename/crosspkg/another/another.go diff --git a/internal/lsp/testdata/rename/crosspkg/another/another.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go.golden similarity index 100% rename from internal/lsp/testdata/rename/crosspkg/another/another.go.golden rename to gopls/internal/lsp/testdata/rename/crosspkg/another/another.go.golden diff --git a/internal/lsp/testdata/rename/crosspkg/crosspkg.go b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go similarity index 100% rename from internal/lsp/testdata/rename/crosspkg/crosspkg.go rename to gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go diff --git a/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden similarity index 81% rename from internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden rename to gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden index 810926de627..49ff7f841cf 100644 --- a/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden +++ b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden @@ -11,7 +11,7 @@ var Bar int //@rename("Bar", "Tomato") other.go: package other -import "golang.org/x/tools/internal/lsp/rename/crosspkg" +import "golang.org/lsptests/rename/crosspkg" func Other() { crosspkg.Bar @@ -31,7 +31,7 @@ var Tomato int //@rename("Bar", "Tomato") other.go: package other -import "golang.org/x/tools/internal/lsp/rename/crosspkg" +import "golang.org/lsptests/rename/crosspkg" func Other() { crosspkg.Tomato diff --git a/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go new file mode 100644 index 00000000000..5fd147da62e --- /dev/null +++ b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go @@ -0,0 +1,8 @@ +package other + +import "golang.org/lsptests/rename/crosspkg" + +func Other() { + crosspkg.Bar + crosspkg.Foo() //@rename("Foo", "Flamingo") +} diff --git a/internal/lsp/testdata/rename/crosspkg/other/other.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden similarity index 81% rename from internal/lsp/testdata/rename/crosspkg/other/other.go.golden rename to gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden index 2722ad96e61..f7b4aaad42f 100644 --- a/internal/lsp/testdata/rename/crosspkg/other/other.go.golden +++ b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden @@ -11,7 +11,7 @@ var Bar int //@rename("Bar", "Tomato") other.go: package other -import "golang.org/x/tools/internal/lsp/rename/crosspkg" +import "golang.org/lsptests/rename/crosspkg" func Other() { crosspkg.Bar diff --git a/internal/lsp/testdata/rename/generics/embedded.go b/gopls/internal/lsp/testdata/rename/generics/embedded.go similarity index 100% rename from internal/lsp/testdata/rename/generics/embedded.go rename to gopls/internal/lsp/testdata/rename/generics/embedded.go diff --git a/internal/lsp/testdata/rename/generics/embedded.go.golden b/gopls/internal/lsp/testdata/rename/generics/embedded.go.golden similarity index 100% rename from internal/lsp/testdata/rename/generics/embedded.go.golden rename to gopls/internal/lsp/testdata/rename/generics/embedded.go.golden diff --git a/internal/lsp/testdata/rename/generics/generics.go b/gopls/internal/lsp/testdata/rename/generics/generics.go similarity index 100% rename from internal/lsp/testdata/rename/generics/generics.go rename to gopls/internal/lsp/testdata/rename/generics/generics.go diff --git a/internal/lsp/testdata/rename/generics/generics.go.golden b/gopls/internal/lsp/testdata/rename/generics/generics.go.golden similarity index 100% rename from internal/lsp/testdata/rename/generics/generics.go.golden rename to gopls/internal/lsp/testdata/rename/generics/generics.go.golden diff --git a/internal/lsp/testdata/rename/generics/unions.go b/gopls/internal/lsp/testdata/rename/generics/unions.go similarity index 100% rename from internal/lsp/testdata/rename/generics/unions.go rename to gopls/internal/lsp/testdata/rename/generics/unions.go diff --git a/internal/lsp/testdata/rename/generics/unions.go.golden b/gopls/internal/lsp/testdata/rename/generics/unions.go.golden similarity index 100% rename from internal/lsp/testdata/rename/generics/unions.go.golden rename to gopls/internal/lsp/testdata/rename/generics/unions.go.golden diff --git a/internal/lsp/testdata/rename/issue39614/issue39614.go.golden b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue39614/issue39614.go.golden rename to gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.golden diff --git a/internal/lsp/testdata/rename/issue39614/issue39614.go.in b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.in similarity index 100% rename from internal/lsp/testdata/rename/issue39614/issue39614.go.in rename to gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.in diff --git a/internal/lsp/testdata/rename/issue42134/1.go b/gopls/internal/lsp/testdata/rename/issue42134/1.go similarity index 100% rename from internal/lsp/testdata/rename/issue42134/1.go rename to gopls/internal/lsp/testdata/rename/issue42134/1.go diff --git a/internal/lsp/testdata/rename/issue42134/1.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/1.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue42134/1.go.golden rename to gopls/internal/lsp/testdata/rename/issue42134/1.go.golden diff --git a/internal/lsp/testdata/rename/issue42134/2.go b/gopls/internal/lsp/testdata/rename/issue42134/2.go similarity index 100% rename from internal/lsp/testdata/rename/issue42134/2.go rename to gopls/internal/lsp/testdata/rename/issue42134/2.go diff --git a/internal/lsp/testdata/rename/issue42134/2.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/2.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue42134/2.go.golden rename to gopls/internal/lsp/testdata/rename/issue42134/2.go.golden diff --git a/internal/lsp/testdata/rename/issue42134/3.go b/gopls/internal/lsp/testdata/rename/issue42134/3.go similarity index 100% rename from internal/lsp/testdata/rename/issue42134/3.go rename to gopls/internal/lsp/testdata/rename/issue42134/3.go diff --git a/internal/lsp/testdata/rename/issue42134/3.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/3.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue42134/3.go.golden rename to gopls/internal/lsp/testdata/rename/issue42134/3.go.golden diff --git a/internal/lsp/testdata/rename/issue42134/4.go b/gopls/internal/lsp/testdata/rename/issue42134/4.go similarity index 100% rename from internal/lsp/testdata/rename/issue42134/4.go rename to gopls/internal/lsp/testdata/rename/issue42134/4.go diff --git a/internal/lsp/testdata/rename/issue42134/4.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/4.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue42134/4.go.golden rename to gopls/internal/lsp/testdata/rename/issue42134/4.go.golden diff --git a/internal/lsp/testdata/rename/issue43616/issue43616.go.golden b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.golden similarity index 100% rename from internal/lsp/testdata/rename/issue43616/issue43616.go.golden rename to gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.golden diff --git a/internal/lsp/testdata/rename/issue43616/issue43616.go.in b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.in similarity index 100% rename from internal/lsp/testdata/rename/issue43616/issue43616.go.in rename to gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.in diff --git a/internal/lsp/testdata/rename/shadow/shadow.go b/gopls/internal/lsp/testdata/rename/shadow/shadow.go similarity index 100% rename from internal/lsp/testdata/rename/shadow/shadow.go rename to gopls/internal/lsp/testdata/rename/shadow/shadow.go diff --git a/internal/lsp/testdata/rename/shadow/shadow.go.golden b/gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden similarity index 100% rename from internal/lsp/testdata/rename/shadow/shadow.go.golden rename to gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden diff --git a/internal/lsp/testdata/rename/testy/testy.go b/gopls/internal/lsp/testdata/rename/testy/testy.go similarity index 100% rename from internal/lsp/testdata/rename/testy/testy.go rename to gopls/internal/lsp/testdata/rename/testy/testy.go diff --git a/internal/lsp/testdata/rename/testy/testy.go.golden b/gopls/internal/lsp/testdata/rename/testy/testy.go.golden similarity index 100% rename from internal/lsp/testdata/rename/testy/testy.go.golden rename to gopls/internal/lsp/testdata/rename/testy/testy.go.golden diff --git a/internal/lsp/testdata/rename/testy/testy_test.go b/gopls/internal/lsp/testdata/rename/testy/testy_test.go similarity index 100% rename from internal/lsp/testdata/rename/testy/testy_test.go rename to gopls/internal/lsp/testdata/rename/testy/testy_test.go diff --git a/internal/lsp/testdata/rename/testy/testy_test.go.golden b/gopls/internal/lsp/testdata/rename/testy/testy_test.go.golden similarity index 100% rename from internal/lsp/testdata/rename/testy/testy_test.go.golden rename to gopls/internal/lsp/testdata/rename/testy/testy_test.go.golden diff --git a/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go b/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go new file mode 100644 index 00000000000..783e9a55f17 --- /dev/null +++ b/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go @@ -0,0 +1,14 @@ +package rundespiteerrors + +// This test verifies that analyzers without RunDespiteErrors are not +// executed on a package containing type errors (see issue #54762). +func _() { + // A type error. + _ = 1 + "" //@diag("1", "compiler", "mismatched types|cannot convert", "error") + + // A violation of an analyzer for which RunDespiteErrors=false: + // no diagnostic is produced; the diag comment is merely illustrative. + for _ = range "" { //diag("for _", "simplifyrange", "simplify range expression", "warning") + + } +} diff --git a/gopls/internal/lsp/testdata/selectionrange/foo.go b/gopls/internal/lsp/testdata/selectionrange/foo.go new file mode 100644 index 00000000000..1bf41340ce6 --- /dev/null +++ b/gopls/internal/lsp/testdata/selectionrange/foo.go @@ -0,0 +1,13 @@ +package foo + +import "time" + +func Bar(x, y int, t time.Time) int { + zs := []int{1, 2, 3} //@selectionrange("1") + + for _, z := range zs { + x = x + z + y + zs[1] //@selectionrange("1") + } + + return x + y //@selectionrange("+") +} diff --git a/gopls/internal/lsp/testdata/selectionrange/foo.go.golden b/gopls/internal/lsp/testdata/selectionrange/foo.go.golden new file mode 100644 index 00000000000..fe70b30b711 --- /dev/null +++ b/gopls/internal/lsp/testdata/selectionrange/foo.go.golden @@ -0,0 +1,29 @@ +-- selectionrange_foo_12_11 -- +Ranges 0: + 11:8-11:13 "x + y" + 11:1-11:13 "return x + y" + 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}" + 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}" + 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}" + +-- selectionrange_foo_6_14 -- +Ranges 0: + 5:13-5:14 "1" + 5:7-5:21 "[]int{1, 2, 3}" + 5:1-5:21 "zs := []int{1, 2, 3}" + 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}" + 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}" + 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}" + +-- selectionrange_foo_9_22 -- +Ranges 0: + 8:21-8:22 "1" + 8:18-8:23 "zs[1]" + 8:6-8:23 "x + z + y + zs[1]" + 8:2-8:23 "x = x + z + y + zs[1]" + 7:22-9:2 "{\\n\t\tx = x + z +...onrange(\"1\")\\n\t}" + 7:1-9:2 "for _, z := ran...onrange(\"1\")\\n\t}" + 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}" + 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}" + 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}" + diff --git a/internal/lsp/testdata/selector/selector.go.in b/gopls/internal/lsp/testdata/selector/selector.go.in similarity index 96% rename from internal/lsp/testdata/selector/selector.go.in rename to gopls/internal/lsp/testdata/selector/selector.go.in index 277f98bde7c..b1498a08c77 100644 --- a/internal/lsp/testdata/selector/selector.go.in +++ b/gopls/internal/lsp/testdata/selector/selector.go.in @@ -3,7 +3,7 @@ package selector import ( - "golang.org/x/tools/internal/lsp/bar" + "golang.org/lsptests/bar" ) type S struct { diff --git a/internal/lsp/testdata/semantic/README.md b/gopls/internal/lsp/testdata/semantic/README.md similarity index 100% rename from internal/lsp/testdata/semantic/README.md rename to gopls/internal/lsp/testdata/semantic/README.md diff --git a/internal/lsp/testdata/semantic/a.go b/gopls/internal/lsp/testdata/semantic/a.go similarity index 100% rename from internal/lsp/testdata/semantic/a.go rename to gopls/internal/lsp/testdata/semantic/a.go diff --git a/gopls/internal/lsp/testdata/semantic/a.go.golden b/gopls/internal/lsp/testdata/semantic/a.go.golden new file mode 100644 index 00000000000..047a031a784 --- /dev/null +++ b/gopls/internal/lsp/testdata/semantic/a.go.golden @@ -0,0 +1,83 @@ +-- semantic -- +/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("") + +/*⇒6,keyword,[]*/import ( + _ "encoding/utf8" + /*⇒3,namespace,[]*/utf "encoding/utf8" + "fmt"/*⇐3,namespace,[]*/ /*⇒19,comment,[]*///@ semantic("fmt") + . "fmt" + "unicode/utf8"/*⇐4,namespace,[]*/ +) + +/*⇒3,keyword,[]*/var ( + /*⇒1,variable,[definition]*/a = /*⇒3,namespace,[]*/fmt./*⇒5,function,[]*/Print + /*⇒1,variable,[definition]*/b []/*⇒6,type,[defaultLibrary]*/string = []/*⇒6,type,[defaultLibrary]*/string{/*⇒5,string,[]*/"foo"} + /*⇒2,variable,[definition]*/c1 /*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int + /*⇒2,variable,[definition]*/c2 /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int + /*⇒2,variable,[definition]*/c3 = /*⇒4,function,[defaultLibrary]*/make([]/*⇒4,keyword,[]*/chan/*⇒2,operator,[]*/<- /*⇒3,type,[defaultLibrary]*/int) + /*⇒1,variable,[definition]*/b = /*⇒1,type,[]*/A{/*⇒1,variable,[]*/X: /*⇒2,number,[]*/23} + /*⇒1,variable,[definition]*/m /*⇒3,keyword,[]*/map[/*⇒4,type,[defaultLibrary]*/bool][/*⇒1,number,[]*/3]/*⇒1,operator,[]*/*/*⇒7,type,[defaultLibrary]*/float64 +) + +/*⇒5,keyword,[]*/const ( + /*⇒2,variable,[definition readonly]*/xx /*⇒1,type,[]*/F = /*⇒4,variable,[readonly]*/iota + /*⇒2,variable,[definition readonly]*/yy = /*⇒2,variable,[readonly]*/xx /*⇒1,operator,[]*/+ /*⇒1,number,[]*/3 + /*⇒2,variable,[definition readonly]*/zz = /*⇒2,string,[]*/"" + /*⇒2,variable,[definition readonly]*/ww = /*⇒6,string,[]*/"not " /*⇒1,operator,[]*/+ /*⇒2,variable,[readonly]*/zz +) + +/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct { + /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,string,[]*/`foof` +} +/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface { + /*⇒1,type,[]*/A + /*⇒3,method,[definition]*/sad(/*⇒3,type,[defaultLibrary]*/int) /*⇒4,type,[defaultLibrary]*/bool +} + +/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/F /*⇒3,type,[defaultLibrary]*/int + +/*⇒4,keyword,[]*/func (/*⇒1,variable,[]*/a /*⇒1,operator,[]*/*/*⇒1,type,[]*/A) /*⇒1,method,[definition]*/f() /*⇒4,type,[defaultLibrary]*/bool { + /*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/z /*⇒6,type,[defaultLibrary]*/string + /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"foo" + /*⇒1,variable,[]*/a(/*⇒1,variable,[]*/x) + /*⇒1,variable,[definition]*/y /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"bar" /*⇒1,operator,[]*/+ /*⇒1,variable,[]*/x + /*⇒6,keyword,[]*/switch /*⇒1,variable,[]*/z { + /*⇒4,keyword,[]*/case /*⇒4,string,[]*/"xx": + /*⇒7,keyword,[]*/default: + } + /*⇒6,keyword,[]*/select { + /*⇒4,keyword,[]*/case /*⇒1,variable,[definition]*/z /*⇒2,operator,[]*/:= /*⇒2,operator,[]*/<-/*⇒2,variable,[]*/c3[/*⇒1,number,[]*/0]: + /*⇒7,keyword,[]*/default: + } + /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/k, /*⇒1,variable,[definition]*/v := /*⇒5,keyword,[]*/range /*⇒1,variable,[]*/m { + /*⇒6,keyword,[]*/return (/*⇒1,operator,[]*/!/*⇒1,variable,[]*/k) /*⇒2,operator,[]*/&& /*⇒1,variable,[]*/v[/*⇒1,number,[]*/0] /*⇒2,operator,[]*/== /*⇒3,variable,[readonly defaultLibrary]*/nil + } + /*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[]*/X + /*⇒1,variable,[definition]*/w /*⇒2,operator,[]*/:= /*⇒1,variable,[]*/b[/*⇒1,number,[]*/4:] + /*⇒1,variable,[definition]*/j /*⇒2,operator,[]*/:= /*⇒3,function,[defaultLibrary]*/len(/*⇒1,variable,[]*/x) + /*⇒1,variable,[]*/j/*⇒2,operator,[]*/-- + /*⇒1,variable,[definition]*/q /*⇒2,operator,[]*/:= []/*⇒9,keyword,[]*/interface{}{/*⇒1,variable,[]*/j, /*⇒3,number,[]*/23i, /*⇒1,operator,[]*/&/*⇒1,variable,[]*/y} + /*⇒1,function,[]*/g(/*⇒1,variable,[]*/q/*⇒3,operator,[]*/...) + /*⇒6,keyword,[]*/return /*⇒4,variable,[readonly]*/true +} + +/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/g(/*⇒2,parameter,[definition]*/vv /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) { + /*⇒2,variable,[definition]*/ff /*⇒2,operator,[]*/:= /*⇒4,keyword,[]*/func() {} + /*⇒5,keyword,[]*/defer /*⇒2,function,[]*/ff() + /*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,function,[]*/RuneCount(/*⇒2,string,[]*/"") + /*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,parameter,[]*/vv.(/*⇒6,type,[]*/string)) + /*⇒2,keyword,[]*/if /*⇒4,variable,[readonly]*/true { + } /*⇒4,keyword,[]*/else { + } +/*⇒5,parameter,[definition]*/Never: + /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/i /*⇒2,operator,[]*/:= /*⇒1,number,[]*/0; /*⇒1,variable,[]*/i /*⇒1,operator,[]*/< /*⇒2,number,[]*/10; { + /*⇒5,keyword,[]*/break Never + } + _, /*⇒2,variable,[definition]*/ok /*⇒2,operator,[]*/:= /*⇒2,parameter,[]*/vv[/*⇒1,number,[]*/0].(/*⇒1,type,[]*/A) + /*⇒2,keyword,[]*/if /*⇒1,operator,[]*/!/*⇒2,variable,[]*/ok { + /*⇒6,keyword,[]*/switch /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒2,parameter,[]*/vv[/*⇒1,number,[]*/0].(/*⇒4,keyword,[]*/type) { + } + /*⇒4,keyword,[]*/goto Never + } +} + diff --git a/internal/lsp/testdata/semantic/b.go b/gopls/internal/lsp/testdata/semantic/b.go similarity index 100% rename from internal/lsp/testdata/semantic/b.go rename to gopls/internal/lsp/testdata/semantic/b.go diff --git a/internal/lsp/testdata/semantic/b.go.golden b/gopls/internal/lsp/testdata/semantic/b.go.golden similarity index 100% rename from internal/lsp/testdata/semantic/b.go.golden rename to gopls/internal/lsp/testdata/semantic/b.go.golden diff --git a/internal/lsp/testdata/semantic/semantic_test.go b/gopls/internal/lsp/testdata/semantic/semantic_test.go similarity index 100% rename from internal/lsp/testdata/semantic/semantic_test.go rename to gopls/internal/lsp/testdata/semantic/semantic_test.go diff --git a/internal/lsp/testdata/signature/signature.go b/gopls/internal/lsp/testdata/signature/signature.go similarity index 100% rename from internal/lsp/testdata/signature/signature.go rename to gopls/internal/lsp/testdata/signature/signature.go diff --git a/internal/lsp/testdata/signature/signature.go.golden b/gopls/internal/lsp/testdata/signature/signature.go.golden similarity index 86% rename from internal/lsp/testdata/signature/signature.go.golden rename to gopls/internal/lsp/testdata/signature/signature.go.golden index d7a65b3b873..90a4facf9a7 100644 --- a/internal/lsp/testdata/signature/signature.go.golden +++ b/gopls/internal/lsp/testdata/signature/signature.go.golden @@ -10,12 +10,6 @@ Bar(float64, ...byte) -- Foo(a string, b int) (c bool)-signature -- Foo(a string, b int) (c bool) --- GetAlias() Alias-signature -- -GetAlias() Alias - --- GetAliasPtr() *Alias-signature -- -GetAliasPtr() *Alias - -- Next(n int) []byte-signature -- Next(n int) []byte @@ -24,12 +18,6 @@ Next returns a slice containing the next n bytes from the buffer, advancing the -- OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias-signature -- OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias --- SetAliasSlice(a []*Alias)-signature -- -SetAliasSlice(a []*Alias) - --- SetOtherAliasMap(a map[*Alias]OtherAlias)-signature -- -SetOtherAliasMap(a map[*Alias]OtherAlias) - -- fn(hi string, there string) func(i int) rune-signature -- fn(hi string, there string) func(i int) rune diff --git a/internal/lsp/testdata/signature/signature2.go.golden b/gopls/internal/lsp/testdata/signature/signature2.go.golden similarity index 100% rename from internal/lsp/testdata/signature/signature2.go.golden rename to gopls/internal/lsp/testdata/signature/signature2.go.golden diff --git a/internal/lsp/testdata/signature/signature2.go.in b/gopls/internal/lsp/testdata/signature/signature2.go.in similarity index 100% rename from internal/lsp/testdata/signature/signature2.go.in rename to gopls/internal/lsp/testdata/signature/signature2.go.in diff --git a/internal/lsp/testdata/signature/signature3.go.golden b/gopls/internal/lsp/testdata/signature/signature3.go.golden similarity index 100% rename from internal/lsp/testdata/signature/signature3.go.golden rename to gopls/internal/lsp/testdata/signature/signature3.go.golden diff --git a/internal/lsp/testdata/signature/signature3.go.in b/gopls/internal/lsp/testdata/signature/signature3.go.in similarity index 100% rename from internal/lsp/testdata/signature/signature3.go.in rename to gopls/internal/lsp/testdata/signature/signature3.go.in diff --git a/internal/lsp/testdata/signature/signature_test.go b/gopls/internal/lsp/testdata/signature/signature_test.go similarity index 90% rename from internal/lsp/testdata/signature/signature_test.go rename to gopls/internal/lsp/testdata/signature/signature_test.go index 62e54a23834..500247dbdec 100644 --- a/internal/lsp/testdata/signature/signature_test.go +++ b/gopls/internal/lsp/testdata/signature/signature_test.go @@ -3,7 +3,7 @@ package signature_test import ( "testing" - sig "golang.org/x/tools/internal/lsp/signature" + sig "golang.org/lsptests/signature" ) func TestSignature(t *testing.T) { diff --git a/gopls/internal/lsp/testdata/signature/signature_test.go.golden b/gopls/internal/lsp/testdata/signature/signature_test.go.golden new file mode 100644 index 00000000000..9e6561ac529 --- /dev/null +++ b/gopls/internal/lsp/testdata/signature/signature_test.go.golden @@ -0,0 +1,9 @@ +-- AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)-signature -- +AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias) + +-- AliasSlice(a []*sig.Alias) (b sig.Alias)-signature -- +AliasSlice(a []*sig.Alias) (b sig.Alias) + +-- OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias-signature -- +OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias + diff --git a/internal/lsp/testdata/snippets/func_snippets118.go.in b/gopls/internal/lsp/testdata/snippets/func_snippets118.go.in similarity index 100% rename from internal/lsp/testdata/snippets/func_snippets118.go.in rename to gopls/internal/lsp/testdata/snippets/func_snippets118.go.in diff --git a/gopls/internal/lsp/testdata/snippets/literal.go b/gopls/internal/lsp/testdata/snippets/literal.go new file mode 100644 index 00000000000..fbb642f08a5 --- /dev/null +++ b/gopls/internal/lsp/testdata/snippets/literal.go @@ -0,0 +1,22 @@ +package snippets + +import ( + "golang.org/lsptests/signature" + t "golang.org/lsptests/types" +) + +type structy struct { + x signature.MyType +} + +func X(_ map[signature.Alias]t.CoolAlias) (map[signature.Alias]t.CoolAlias) { + return nil +} + +func _() { + X() //@signature(")", "X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias", 0) + _ = signature.MyType{} //@item(literalMyType, "signature.MyType{}", "", "var") + s := structy{ + x: //@snippet(" //", literalMyType, "signature.MyType{\\}", "signature.MyType{\\}") + } +} \ No newline at end of file diff --git a/gopls/internal/lsp/testdata/snippets/literal.go.golden b/gopls/internal/lsp/testdata/snippets/literal.go.golden new file mode 100644 index 00000000000..c91e5e9e086 --- /dev/null +++ b/gopls/internal/lsp/testdata/snippets/literal.go.golden @@ -0,0 +1,3 @@ +-- X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias-signature -- +X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias + diff --git a/internal/lsp/testdata/snippets/literal_snippets.go.in b/gopls/internal/lsp/testdata/snippets/literal_snippets.go.in similarity index 99% rename from internal/lsp/testdata/snippets/literal_snippets.go.in rename to gopls/internal/lsp/testdata/snippets/literal_snippets.go.in index 4a2a01dfa1f..c6e6c0fbd60 100644 --- a/internal/lsp/testdata/snippets/literal_snippets.go.in +++ b/gopls/internal/lsp/testdata/snippets/literal_snippets.go.in @@ -7,7 +7,7 @@ import ( "net/http" "sort" - "golang.org/x/tools/internal/lsp/foo" + "golang.org/lsptests/foo" ) func _() { diff --git a/internal/lsp/testdata/snippets/literal_snippets118.go.in b/gopls/internal/lsp/testdata/snippets/literal_snippets118.go.in similarity index 100% rename from internal/lsp/testdata/snippets/literal_snippets118.go.in rename to gopls/internal/lsp/testdata/snippets/literal_snippets118.go.in diff --git a/internal/lsp/testdata/snippets/postfix.go b/gopls/internal/lsp/testdata/snippets/postfix.go similarity index 100% rename from internal/lsp/testdata/snippets/postfix.go rename to gopls/internal/lsp/testdata/snippets/postfix.go diff --git a/internal/lsp/testdata/snippets/snippets.go.golden b/gopls/internal/lsp/testdata/snippets/snippets.go.golden similarity index 100% rename from internal/lsp/testdata/snippets/snippets.go.golden rename to gopls/internal/lsp/testdata/snippets/snippets.go.golden diff --git a/internal/lsp/testdata/snippets/snippets.go.in b/gopls/internal/lsp/testdata/snippets/snippets.go.in similarity index 100% rename from internal/lsp/testdata/snippets/snippets.go.in rename to gopls/internal/lsp/testdata/snippets/snippets.go.in diff --git a/internal/lsp/testdata/statements/append.go b/gopls/internal/lsp/testdata/statements/append.go similarity index 100% rename from internal/lsp/testdata/statements/append.go rename to gopls/internal/lsp/testdata/statements/append.go diff --git a/internal/lsp/testdata/statements/if_err_check_return.go b/gopls/internal/lsp/testdata/statements/if_err_check_return.go similarity index 100% rename from internal/lsp/testdata/statements/if_err_check_return.go rename to gopls/internal/lsp/testdata/statements/if_err_check_return.go diff --git a/internal/lsp/testdata/statements/if_err_check_return_2.go b/gopls/internal/lsp/testdata/statements/if_err_check_return_2.go similarity index 100% rename from internal/lsp/testdata/statements/if_err_check_return_2.go rename to gopls/internal/lsp/testdata/statements/if_err_check_return_2.go diff --git a/internal/lsp/testdata/statements/if_err_check_test.go b/gopls/internal/lsp/testdata/statements/if_err_check_test.go similarity index 100% rename from internal/lsp/testdata/statements/if_err_check_test.go rename to gopls/internal/lsp/testdata/statements/if_err_check_test.go diff --git a/internal/lsp/testdata/stub/other/other.go b/gopls/internal/lsp/testdata/stub/other/other.go similarity index 100% rename from internal/lsp/testdata/stub/other/other.go rename to gopls/internal/lsp/testdata/stub/other/other.go diff --git a/internal/lsp/testdata/stub/stub_add_selector.go b/gopls/internal/lsp/testdata/stub/stub_add_selector.go similarity index 92% rename from internal/lsp/testdata/stub/stub_add_selector.go rename to gopls/internal/lsp/testdata/stub/stub_add_selector.go index a15afd7c244..4037b7ad3a0 100644 --- a/internal/lsp/testdata/stub/stub_add_selector.go +++ b/gopls/internal/lsp/testdata/stub/stub_add_selector.go @@ -7,6 +7,6 @@ import "io" // then our implementation must add the import/package selector // in the concrete method if the concrete type is outside of the interface // package -var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite") +var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "") type readerFrom struct{} diff --git a/internal/lsp/testdata/stub/stub_add_selector.go.golden b/gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden similarity index 95% rename from internal/lsp/testdata/stub/stub_add_selector.go.golden rename to gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden index e885483eaaf..8f08ca1efe2 100644 --- a/internal/lsp/testdata/stub/stub_add_selector.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden @@ -8,7 +8,7 @@ import "io" // then our implementation must add the import/package selector // in the concrete method if the concrete type is outside of the interface // package -var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite") +var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "") type readerFrom struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_assign.go b/gopls/internal/lsp/testdata/stub/stub_assign.go new file mode 100644 index 00000000000..d3f09313f25 --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_assign.go @@ -0,0 +1,10 @@ +package stub + +import "io" + +func main() { + var br io.ByteWriter + br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "") +} + +type byteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign.go.golden b/gopls/internal/lsp/testdata/stub/stub_assign.go.golden similarity index 78% rename from internal/lsp/testdata/stub/stub_assign.go.golden rename to gopls/internal/lsp/testdata/stub/stub_assign.go.golden index a52a8236798..f1535424114 100644 --- a/internal/lsp/testdata/stub/stub_assign.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_assign.go.golden @@ -5,7 +5,7 @@ import "io" func main() { var br io.ByteWriter - br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite") + br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type byteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go similarity index 93% rename from internal/lsp/testdata/stub/stub_assign_multivars.go rename to gopls/internal/lsp/testdata/stub/stub_assign_multivars.go index 01b330fda54..bd36d6833d1 100644 --- a/internal/lsp/testdata/stub/stub_assign_multivars.go +++ b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go @@ -5,7 +5,7 @@ import "io" func main() { var br io.ByteWriter var i int - i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite") + i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type multiByteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden similarity index 96% rename from internal/lsp/testdata/stub/stub_assign_multivars.go.golden rename to gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden index e1e71adbd50..425d11746a5 100644 --- a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden @@ -6,7 +6,7 @@ import "io" func main() { var br io.ByteWriter var i int - i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite") + i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type multiByteWriter struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_call_expr.go b/gopls/internal/lsp/testdata/stub/stub_call_expr.go new file mode 100644 index 00000000000..0c309466524 --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_call_expr.go @@ -0,0 +1,13 @@ +package stub + +func main() { + check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "") +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +type callExpr struct{} diff --git a/internal/lsp/testdata/stub/stub_call_expr.go.golden b/gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden similarity index 78% rename from internal/lsp/testdata/stub/stub_call_expr.go.golden rename to gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden index 2d12f8651f3..c82d22440f1 100644 --- a/internal/lsp/testdata/stub/stub_call_expr.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden @@ -2,7 +2,7 @@ package stub func main() { - check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite") + check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "") } func check(err error) { diff --git a/internal/lsp/testdata/stub/stub_embedded.go b/gopls/internal/lsp/testdata/stub/stub_embedded.go similarity index 86% rename from internal/lsp/testdata/stub/stub_embedded.go rename to gopls/internal/lsp/testdata/stub/stub_embedded.go index 6d6a986bf24..f66989e9f0f 100644 --- a/internal/lsp/testdata/stub/stub_embedded.go +++ b/gopls/internal/lsp/testdata/stub/stub_embedded.go @@ -5,7 +5,7 @@ import ( "sort" ) -var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite") +var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "") type embeddedConcrete struct{} diff --git a/internal/lsp/testdata/stub/stub_embedded.go.golden b/gopls/internal/lsp/testdata/stub/stub_embedded.go.golden similarity index 95% rename from internal/lsp/testdata/stub/stub_embedded.go.golden rename to gopls/internal/lsp/testdata/stub/stub_embedded.go.golden index c258ebaf46c..3c5347e8c01 100644 --- a/internal/lsp/testdata/stub/stub_embedded.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_embedded.go.golden @@ -6,7 +6,7 @@ import ( "sort" ) -var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite") +var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "") type embeddedConcrete struct{} diff --git a/internal/lsp/testdata/stub/stub_err.go b/gopls/internal/lsp/testdata/stub/stub_err.go similarity index 92% rename from internal/lsp/testdata/stub/stub_err.go rename to gopls/internal/lsp/testdata/stub/stub_err.go index 908c7d3152f..121f0e794d7 100644 --- a/internal/lsp/testdata/stub/stub_err.go +++ b/gopls/internal/lsp/testdata/stub/stub_err.go @@ -1,7 +1,7 @@ package stub func main() { - var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite") + var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "") } type customErr struct{} diff --git a/internal/lsp/testdata/stub/stub_err.go.golden b/gopls/internal/lsp/testdata/stub/stub_err.go.golden similarity index 96% rename from internal/lsp/testdata/stub/stub_err.go.golden rename to gopls/internal/lsp/testdata/stub/stub_err.go.golden index 717aed86293..0b441bdaab1 100644 --- a/internal/lsp/testdata/stub/stub_err.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_err.go.golden @@ -2,7 +2,7 @@ package stub func main() { - var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite") + var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "") } type customErr struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_function_return.go b/gopls/internal/lsp/testdata/stub/stub_function_return.go new file mode 100644 index 00000000000..41f17645e9c --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_function_return.go @@ -0,0 +1,11 @@ +package stub + +import ( + "io" +) + +func newCloser() io.Closer { + return closer{} //@suggestedfix("c", "refactor.rewrite", "") +} + +type closer struct{} diff --git a/internal/lsp/testdata/stub/stub_function_return.go.golden b/gopls/internal/lsp/testdata/stub/stub_function_return.go.golden similarity index 77% rename from internal/lsp/testdata/stub/stub_function_return.go.golden rename to gopls/internal/lsp/testdata/stub/stub_function_return.go.golden index f80874d2b94..e90712e6973 100644 --- a/internal/lsp/testdata/stub/stub_function_return.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_function_return.go.golden @@ -6,7 +6,7 @@ import ( ) func newCloser() io.Closer { - return closer{} //@suggestedfix("c", "refactor.rewrite") + return closer{} //@suggestedfix("c", "refactor.rewrite", "") } type closer struct{} diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go similarity index 82% rename from internal/lsp/testdata/stub/stub_generic_receiver.go rename to gopls/internal/lsp/testdata/stub/stub_generic_receiver.go index 64e90fcf6a7..1c00569ea1c 100644 --- a/internal/lsp/testdata/stub/stub_generic_receiver.go +++ b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go @@ -7,7 +7,7 @@ import "io" // This file tests that that the stub method generator accounts for concrete // types that have type parameters defined. -var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite") +var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom") type genReader[T, Y any] struct { T T diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden similarity index 88% rename from internal/lsp/testdata/stub/stub_generic_receiver.go.golden rename to gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden index 1fc7157b463..97935d47eb3 100644 --- a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden @@ -8,7 +8,7 @@ import "io" // This file tests that that the stub method generator accounts for concrete // types that have type parameters defined. -var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite") +var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom") type genReader[T, Y any] struct { T T diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go similarity index 92% rename from internal/lsp/testdata/stub/stub_ignored_imports.go rename to gopls/internal/lsp/testdata/stub/stub_ignored_imports.go index 8f6ec73de1b..ca95d2a7120 100644 --- a/internal/lsp/testdata/stub/stub_ignored_imports.go +++ b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go @@ -12,7 +12,7 @@ import ( var ( _ Reader - _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite") + _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "") ) type ignoredResetter struct{} diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden similarity index 95% rename from internal/lsp/testdata/stub/stub_ignored_imports.go.golden rename to gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden index a0ddc179353..33aba532662 100644 --- a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden @@ -14,7 +14,7 @@ import ( var ( _ Reader - _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite") + _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "") ) type ignoredResetter struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_issue2606.go b/gopls/internal/lsp/testdata/stub/stub_issue2606.go new file mode 100644 index 00000000000..66ef2b24b97 --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_issue2606.go @@ -0,0 +1,7 @@ +package stub + +type I interface{ error } + +type C int + +var _ I = C(0) //@suggestedfix("C", "refactor.rewrite", "") diff --git a/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden b/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden new file mode 100644 index 00000000000..4db266346e2 --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden @@ -0,0 +1,14 @@ +-- suggestedfix_stub_issue2606_7_11 -- +package stub + +type I interface{ error } + +type C int + +// Error implements I +func (C) Error() string { + panic("unimplemented") +} + +var _ I = C(0) //@suggestedfix("C", "refactor.rewrite", "") + diff --git a/internal/lsp/testdata/stub/stub_multi_var.go b/gopls/internal/lsp/testdata/stub/stub_multi_var.go similarity index 89% rename from internal/lsp/testdata/stub/stub_multi_var.go rename to gopls/internal/lsp/testdata/stub/stub_multi_var.go index 4276b799429..06702b22204 100644 --- a/internal/lsp/testdata/stub/stub_multi_var.go +++ b/gopls/internal/lsp/testdata/stub/stub_multi_var.go @@ -6,6 +6,6 @@ import "io" // has multiple values on the same line can still be // analyzed correctly to target the interface implementation // diagnostic. -var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite") +var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "") type multiVar struct{} diff --git a/internal/lsp/testdata/stub/stub_multi_var.go.golden b/gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden similarity index 92% rename from internal/lsp/testdata/stub/stub_multi_var.go.golden rename to gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden index b9ac4236766..804c7eec65c 100644 --- a/internal/lsp/testdata/stub/stub_multi_var.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden @@ -7,7 +7,7 @@ import "io" // has multiple values on the same line can still be // analyzed correctly to target the interface implementation // diagnostic. -var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite") +var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "") type multiVar struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_pointer.go b/gopls/internal/lsp/testdata/stub/stub_pointer.go new file mode 100644 index 00000000000..e9d8bc688fc --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_pointer.go @@ -0,0 +1,9 @@ +package stub + +import "io" + +func getReaderFrom() io.ReaderFrom { + return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "") +} + +type pointerImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_pointer.go.golden b/gopls/internal/lsp/testdata/stub/stub_pointer.go.golden similarity index 79% rename from internal/lsp/testdata/stub/stub_pointer.go.golden rename to gopls/internal/lsp/testdata/stub/stub_pointer.go.golden index c4133d7a44d..a4d765dd457 100644 --- a/internal/lsp/testdata/stub/stub_pointer.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_pointer.go.golden @@ -4,7 +4,7 @@ package stub import "io" func getReaderFrom() io.ReaderFrom { - return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite") + return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "") } type pointerImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go similarity index 93% rename from internal/lsp/testdata/stub/stub_renamed_import.go rename to gopls/internal/lsp/testdata/stub/stub_renamed_import.go index eaebe251018..54dd598013d 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import.go +++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go @@ -5,7 +5,7 @@ import ( myio "io" ) -var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite") +var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "") var _ myio.Reader type myIO struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go.golden b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden similarity index 96% rename from internal/lsp/testdata/stub/stub_renamed_import.go.golden rename to gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden index 48ff4f1537f..8182d2b3675 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden @@ -6,7 +6,7 @@ import ( myio "io" ) -var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite") +var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "") var _ myio.Reader type myIO struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go similarity index 80% rename from internal/lsp/testdata/stub/stub_renamed_import_iface.go rename to gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go index 96caf540d60..0f175868504 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go +++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go @@ -1,13 +1,13 @@ package stub import ( - "golang.org/x/tools/internal/lsp/stub/other" + "golang.org/lsptests/stub/other" ) // This file tests that if an interface // method references an import from its own package // that the concrete type does not yet import, and that import happens // to be renamed, then we prefer the renaming of the interface. -var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite") +var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "") type otherInterfaceImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden similarity index 86% rename from internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden rename to gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden index 9ba2cb440e8..3d6ac0a551c 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden @@ -4,14 +4,14 @@ package stub import ( "bytes" renamed_context "context" - "golang.org/x/tools/internal/lsp/stub/other" + "golang.org/lsptests/stub/other" ) // This file tests that if an interface // method references an import from its own package // that the concrete type does not yet import, and that import happens // to be renamed, then we prefer the renaming of the interface. -var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite") +var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "") type otherInterfaceImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_stdlib.go b/gopls/internal/lsp/testdata/stub/stub_stdlib.go similarity index 93% rename from internal/lsp/testdata/stub/stub_stdlib.go rename to gopls/internal/lsp/testdata/stub/stub_stdlib.go index 0d54a6daadf..463cf78a344 100644 --- a/internal/lsp/testdata/stub/stub_stdlib.go +++ b/gopls/internal/lsp/testdata/stub/stub_stdlib.go @@ -4,6 +4,6 @@ import ( "io" ) -var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite") +var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "") type writer struct{} diff --git a/internal/lsp/testdata/stub/stub_stdlib.go.golden b/gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden similarity index 97% rename from internal/lsp/testdata/stub/stub_stdlib.go.golden rename to gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden index 8636cead414..55592501a07 100644 --- a/internal/lsp/testdata/stub/stub_stdlib.go.golden +++ b/gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden @@ -5,7 +5,7 @@ import ( "io" ) -var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite") +var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "") type writer struct{} diff --git a/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go new file mode 100644 index 00000000000..f82401fafdd --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go @@ -0,0 +1,27 @@ +package stub + +// Regression test for Issue #56825: file corrupted by insertion of +// methods after TypeSpec in a parenthesized TypeDecl. + +import "io" + +func newReadCloser() io.ReadCloser { + return rdcloser{} //@suggestedfix("rd", "refactor.rewrite", "") +} + +type ( + A int + rdcloser struct{} + B int +) + +func _() { + // Local types can't be stubbed as there's nowhere to put the methods. + // The suggestedfix assertion can't express this yet. TODO(adonovan): support it. + type local struct{} + var _ io.ReadCloser = local{} // want error: `local type "local" cannot be stubbed` +} + +type ( + C int +) diff --git a/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden new file mode 100644 index 00000000000..0848789eaf6 --- /dev/null +++ b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden @@ -0,0 +1,39 @@ +-- suggestedfix_stub_typedecl_group_9_9 -- +package stub + +// Regression test for Issue #56825: file corrupted by insertion of +// methods after TypeSpec in a parenthesized TypeDecl. + +import "io" + +func newReadCloser() io.ReadCloser { + return rdcloser{} //@suggestedfix("rd", "refactor.rewrite", "") +} + +type ( + A int + rdcloser struct{} + B int +) + +// Close implements io.ReadCloser +func (rdcloser) Close() error { + panic("unimplemented") +} + +// Read implements io.ReadCloser +func (rdcloser) Read(p []byte) (n int, err error) { + panic("unimplemented") +} + +func _() { + // Local types can't be stubbed as there's nowhere to put the methods. + // The suggestedfix assertion can't express this yet. TODO(adonovan): support it. + type local struct{} + var _ io.ReadCloser = local{} // want error: `local type "local" cannot be stubbed` +} + +type ( + C int +) + diff --git a/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go new file mode 100644 index 00000000000..7ff524479b4 --- /dev/null +++ b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go @@ -0,0 +1,11 @@ +package suggestedfix + +import ( + "log" +) + +func goodbye() { + s := "hiiiiiii" + s = s //@suggestedfix("s = s", "quickfix", "") + log.Print(s) +} diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden similarity index 75% rename from internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden rename to gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden index 9ccaa199468..e7e84fc227d 100644 --- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden +++ b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden @@ -7,7 +7,7 @@ import ( func goodbye() { s := "hiiiiiii" - //@suggestedfix("s = s", "quickfix") + //@suggestedfix("s = s", "quickfix", "") log.Print(s) } diff --git a/gopls/internal/lsp/testdata/summary.txt.golden b/gopls/internal/lsp/testdata/summary.txt.golden new file mode 100644 index 00000000000..985361ba710 --- /dev/null +++ b/gopls/internal/lsp/testdata/summary.txt.golden @@ -0,0 +1,32 @@ +-- summary -- +CallHierarchyCount = 2 +CodeLensCount = 5 +CompletionsCount = 263 +CompletionSnippetCount = 106 +UnimportedCompletionsCount = 5 +DeepCompletionsCount = 5 +FuzzyCompletionsCount = 8 +RankedCompletionsCount = 164 +CaseSensitiveCompletionsCount = 4 +DiagnosticsCount = 42 +FoldingRangesCount = 2 +FormatCount = 6 +ImportCount = 8 +SemanticTokenCount = 3 +SuggestedFixCount = 65 +FunctionExtractionCount = 27 +MethodExtractionCount = 6 +DefinitionsCount = 47 +TypeDefinitionsCount = 18 +HighlightsCount = 69 +InlayHintsCount = 4 +ReferencesCount = 30 +RenamesCount = 41 +PrepareRenamesCount = 7 +SymbolsCount = 1 +WorkspaceSymbolsCount = 20 +SignaturesCount = 33 +LinksCount = 7 +ImplementationsCount = 16 +SelectionRangesCount = 3 + diff --git a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden new file mode 100644 index 00000000000..9ae4d13649d --- /dev/null +++ b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden @@ -0,0 +1,32 @@ +-- summary -- +CallHierarchyCount = 2 +CodeLensCount = 5 +CompletionsCount = 264 +CompletionSnippetCount = 115 +UnimportedCompletionsCount = 5 +DeepCompletionsCount = 5 +FuzzyCompletionsCount = 8 +RankedCompletionsCount = 174 +CaseSensitiveCompletionsCount = 4 +DiagnosticsCount = 42 +FoldingRangesCount = 2 +FormatCount = 6 +ImportCount = 8 +SemanticTokenCount = 3 +SuggestedFixCount = 71 +FunctionExtractionCount = 27 +MethodExtractionCount = 6 +DefinitionsCount = 47 +TypeDefinitionsCount = 18 +HighlightsCount = 69 +InlayHintsCount = 5 +ReferencesCount = 30 +RenamesCount = 48 +PrepareRenamesCount = 7 +SymbolsCount = 2 +WorkspaceSymbolsCount = 20 +SignaturesCount = 33 +LinksCount = 7 +ImplementationsCount = 26 +SelectionRangesCount = 3 + diff --git a/gopls/internal/lsp/testdata/symbols/go1.18.go b/gopls/internal/lsp/testdata/symbols/go1.18.go new file mode 100644 index 00000000000..cdf99dc20ff --- /dev/null +++ b/gopls/internal/lsp/testdata/symbols/go1.18.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +package main + +type T[P any] struct { //@symbol("T", "T", "Struct", "struct{...}", "T", "") + F P //@symbol("F", "F", "Field", "P", "", "T") +} + +type Constraint interface { //@symbol("Constraint", "Constraint", "Interface", "interface{...}", "Constraint", "") + ~int | struct{ int } //@symbol("~int | struct{int}", "~int | struct{ int }", "Field", "", "", "Constraint") + + // TODO(rfindley): the selection range below is the entire interface field. + // Can we reduce it? + interface{ M() } //@symbol("interface{...}", "interface{ M() }", "Field", "", "iFaceField", "Constraint"), symbol("M", "M", "Method", "func()", "", "iFaceField") +} diff --git a/gopls/internal/lsp/testdata/symbols/go1.18.go.golden b/gopls/internal/lsp/testdata/symbols/go1.18.go.golden new file mode 100644 index 00000000000..5a0c1a94d7a --- /dev/null +++ b/gopls/internal/lsp/testdata/symbols/go1.18.go.golden @@ -0,0 +1,7 @@ +-- symbols -- +T Struct 6:6-6:7 + F Field 7:2-7:3 +Constraint Interface 10:6-10:16 + interface{...} Field 15:2-15:18 + ~int | struct{int} Field 11:2-11:22 + diff --git a/gopls/internal/lsp/testdata/symbols/main.go b/gopls/internal/lsp/testdata/symbols/main.go new file mode 100644 index 00000000000..65e0869fd5f --- /dev/null +++ b/gopls/internal/lsp/testdata/symbols/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "io" +) + +// Each symbol marker in this file defines the following information: +// symbol(name, selectionSpan, kind, detail, id, parentID) +// - name: DocumentSymbol.Name +// - selectionSpan: DocumentSymbol.SelectionRange +// - kind: DocumentSymbol.Kind +// - detail: DocumentSymbol.Detail +// - id: if non-empty, a unique identifier for this symbol +// - parentID: if non-empty, the id of the parent of this symbol +// +// This data in aggregate defines a set of document symbols and their +// parent-child relationships, which is compared against the DocummentSymbols +// response from gopls for the current file. +// +// TODO(rfindley): the symbol annotations here are complicated and difficult to +// maintain. It would be simpler to just write out the full expected response +// in the golden file, perhaps as raw JSON. + +var _ = 1 + +var x = 42 //@symbol("x", "x", "Variable", "", "", "") + +var nested struct { //@symbol("nested", "nested", "Variable", "struct{...}", "nested", "") + nestedField struct { //@symbol("nestedField", "nestedField", "Field", "struct{...}", "nestedField", "nested") + f int //@symbol("f", "f", "Field", "int", "", "nestedField") + } +} + +const y = 43 //@symbol("y", "y", "Constant", "", "", "") + +type Number int //@symbol("Number", "Number", "Class", "int", "", "") + +type Alias = string //@symbol("Alias", "Alias", "Class", "string", "", "") + +type NumberAlias = Number //@symbol("NumberAlias", "NumberAlias", "Class", "Number", "", "") + +type ( + Boolean bool //@symbol("Boolean", "Boolean", "Class", "bool", "", "") + BoolAlias = bool //@symbol("BoolAlias", "BoolAlias", "Class", "bool", "", "") +) + +type Foo struct { //@symbol("Foo", "Foo", "Struct", "struct{...}", "Foo", "") + Quux //@symbol("Quux", "Quux", "Field", "Quux", "", "Foo") + W io.Writer //@symbol("W", "W", "Field", "io.Writer", "", "Foo") + Bar int //@symbol("Bar", "Bar", "Field", "int", "", "Foo") + baz string //@symbol("baz", "baz", "Field", "string", "", "Foo") + funcField func(int) int //@symbol("funcField", "funcField", "Field", "func(int) int", "", "Foo") +} + +type Quux struct { //@symbol("Quux", "Quux", "Struct", "struct{...}", "Quux", "") + X, Y float64 //@symbol("X", "X", "Field", "float64", "", "Quux"), symbol("Y", "Y", "Field", "float64", "", "Quux") +} + +type EmptyStruct struct{} //@symbol("EmptyStruct", "EmptyStruct", "Struct", "struct{}", "", "") + +func (f Foo) Baz() string { //@symbol("(Foo).Baz", "Baz", "Method", "func() string", "", "") + return f.baz +} + +func _() {} + +func (q *Quux) Do() {} //@symbol("(*Quux).Do", "Do", "Method", "func()", "", "") + +func main() { //@symbol("main", "main", "Function", "func()", "", "") +} + +type Stringer interface { //@symbol("Stringer", "Stringer", "Interface", "interface{...}", "Stringer", "") + String() string //@symbol("String", "String", "Method", "func() string", "", "Stringer") +} + +type ABer interface { //@symbol("ABer", "ABer", "Interface", "interface{...}", "ABer", "") + B() //@symbol("B", "B", "Method", "func()", "", "ABer") + A() string //@symbol("A", "A", "Method", "func() string", "", "ABer") +} + +type WithEmbeddeds interface { //@symbol("WithEmbeddeds", "WithEmbeddeds", "Interface", "interface{...}", "WithEmbeddeds", "") + Do() //@symbol("Do", "Do", "Method", "func()", "", "WithEmbeddeds") + ABer //@symbol("ABer", "ABer", "Field", "ABer", "", "WithEmbeddeds") + io.Writer //@symbol("Writer", "Writer", "Field", "io.Writer", "", "WithEmbeddeds") +} + +type EmptyInterface interface{} //@symbol("EmptyInterface", "EmptyInterface", "Interface", "interface{}", "", "") + +func Dunk() int { return 0 } //@symbol("Dunk", "Dunk", "Function", "func() int", "", "") + +func dunk() {} //@symbol("dunk", "dunk", "Function", "func()", "", "") diff --git a/gopls/internal/lsp/testdata/symbols/main.go.golden b/gopls/internal/lsp/testdata/symbols/main.go.golden new file mode 100644 index 00000000000..98009b02d68 --- /dev/null +++ b/gopls/internal/lsp/testdata/symbols/main.go.golden @@ -0,0 +1,36 @@ +-- symbols -- +x Variable 26:5-26:6 +nested Variable 28:5-28:11 + nestedField Field 29:2-29:13 +y Constant 34:7-34:8 +Number Class 36:6-36:12 +Alias Class 38:6-38:11 +NumberAlias Class 40:6-40:17 +Boolean Class 43:2-43:9 +BoolAlias Class 44:2-44:11 +Foo Struct 47:6-47:9 + Bar Field 50:2-50:5 + Quux Field 48:2-48:6 + W Field 49:2-49:3 + baz Field 51:2-51:5 + funcField Field 52:2-52:11 +Quux Struct 55:6-55:10 + X Field 56:2-56:3 + Y Field 56:5-56:6 +EmptyStruct Struct 59:6-59:17 +(Foo).Baz Method 61:14-61:17 +(*Quux).Do Method 67:16-67:18 +main Function 69:6-69:10 +Stringer Interface 72:6-72:14 + String Method 73:2-73:8 +ABer Interface 76:6-76:10 + A Method 78:2-78:3 + B Method 77:2-77:3 +WithEmbeddeds Interface 81:6-81:19 + ABer Field 83:2-83:6 + Do Method 82:2-82:4 + Writer Field 84:5-84:11 +EmptyInterface Interface 87:6-87:20 +Dunk Function 89:6-89:10 +dunk Function 91:6-91:10 + diff --git a/internal/lsp/testdata/testy/testy.go b/gopls/internal/lsp/testdata/testy/testy.go similarity index 100% rename from internal/lsp/testdata/testy/testy.go rename to gopls/internal/lsp/testdata/testy/testy.go diff --git a/gopls/internal/lsp/testdata/testy/testy_test.go b/gopls/internal/lsp/testdata/testy/testy_test.go new file mode 100644 index 00000000000..a7e897840aa --- /dev/null +++ b/gopls/internal/lsp/testdata/testy/testy_test.go @@ -0,0 +1,18 @@ +package testy + +import ( + "testing" + + sig "golang.org/lsptests/signature" + "golang.org/lsptests/snippets" +) + +func TestSomething(t *testing.T) { //@item(TestSomething, "TestSomething(t *testing.T)", "", "func") + var x int //@mark(testyX, "x"),diag("x", "compiler", "x declared (and|but) not used", "error"),refs("x", testyX) + a() //@mark(testyA, "a") +} + +func _() { + _ = snippets.X(nil) //@signature("nil", "X(_ map[sig.Alias]types.CoolAlias) map[sig.Alias]types.CoolAlias", 0) + var _ sig.Alias +} diff --git a/internal/lsp/testdata/testy/testy_test.go.golden b/gopls/internal/lsp/testdata/testy/testy_test.go.golden similarity index 100% rename from internal/lsp/testdata/testy/testy_test.go.golden rename to gopls/internal/lsp/testdata/testy/testy_test.go.golden diff --git a/internal/lsp/testdata/typdef/typdef.go b/gopls/internal/lsp/testdata/typdef/typdef.go similarity index 100% rename from internal/lsp/testdata/typdef/typdef.go rename to gopls/internal/lsp/testdata/typdef/typdef.go diff --git a/internal/lsp/testdata/typeassert/type_assert.go b/gopls/internal/lsp/testdata/typeassert/type_assert.go similarity index 100% rename from internal/lsp/testdata/typeassert/type_assert.go rename to gopls/internal/lsp/testdata/typeassert/type_assert.go diff --git a/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go new file mode 100644 index 00000000000..729e7bbccd4 --- /dev/null +++ b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go @@ -0,0 +1,5 @@ +package typeerrors + +func x() { return nil } //@suggestedfix("nil", "quickfix", "") + +func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "") diff --git a/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden new file mode 100644 index 00000000000..48409a0b7dd --- /dev/null +++ b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden @@ -0,0 +1,14 @@ +-- suggestedfix_noresultvalues_3_19 -- +package typeerrors + +func x() { return } //@suggestedfix("nil", "quickfix", "") + +func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "") + +-- suggestedfix_noresultvalues_5_19 -- +package typeerrors + +func x() { return nil } //@suggestedfix("nil", "quickfix", "") + +func y() { return } //@suggestedfix("nil", "quickfix", "") + diff --git a/internal/lsp/testdata/typemods/type_mods.go b/gopls/internal/lsp/testdata/typemods/type_mods.go similarity index 100% rename from internal/lsp/testdata/typemods/type_mods.go rename to gopls/internal/lsp/testdata/typemods/type_mods.go diff --git a/gopls/internal/lsp/testdata/typeparams/type_params.go b/gopls/internal/lsp/testdata/typeparams/type_params.go new file mode 100644 index 00000000000..21fc7049f5b --- /dev/null +++ b/gopls/internal/lsp/testdata/typeparams/type_params.go @@ -0,0 +1,61 @@ +//go:build go1.18 +// +build go1.18 + +package typeparams + +func one[a int | string]() {} +func two[a int | string, b float64 | int]() {} + +func _() { + one[]() //@rank("]", string, float64) + two[]() //@rank("]", int, float64) + two[int, f]() //@rank("]", float64, float32) +} + +func slices[a []int | []float64]() {} //@item(tpInts, "[]int", "[]int", "type"),item(tpFloats, "[]float64", "[]float64", "type") + +func _() { + slices[]() //@rank("]", tpInts),rank("]", tpFloats) +} + +type s[a int | string] struct{} + +func _() { + s[]{} //@rank("]", int, float64) +} + +func takesGeneric[a int | string](s[a]) { + "s[a]{}" //@item(tpInScopeLit, "s[a]{}", "", "var") + takesGeneric() //@rank(")", tpInScopeLit),snippet(")", tpInScopeLit, "s[a]{\\}", "s[a]{\\}") +} + +func _() { + s[int]{} //@item(tpInstLit, "s[int]{}", "", "var") + takesGeneric[int]() //@rank(")", tpInstLit),snippet(")", tpInstLit, "s[int]{\\}", "s[int]{\\}") + + "s[...]{}" //@item(tpUninstLit, "s[...]{}", "", "var") + takesGeneric() //@rank(")", tpUninstLit),snippet(")", tpUninstLit, "s[${1:}]{\\}", "s[${1:a}]{\\}") +} + +func returnTP[A int | float64](a A) A { //@item(returnTP, "returnTP", "something", "func") + return a +} + +func _() { + // disabled - see issue #54822 + var _ int = returnTP // snippet(" //", returnTP, "returnTP[${1:}](${2:})", "returnTP[${1:A int|float64}](${2:a A})") + + var aa int //@item(tpInt, "aa", "int", "var") + var ab float64 //@item(tpFloat, "ab", "float64", "var") + returnTP[int](a) //@rank(")", tpInt, tpFloat) +} + +func takesFunc[T any](func(T) T) { + var _ func(t T) T = f //@snippet(" //", tpLitFunc, "func(t T) T {$0\\}", "func(t T) T {$0\\}") +} + +func _() { + _ = "func(...) {}" //@item(tpLitFunc, "func(...) {}", "", "var") + takesFunc() //@snippet(")", tpLitFunc, "func(${1:}) ${2:} {$0\\}", "func(${1:t} ${2:T}) ${3:T} {$0\\}") + takesFunc[int]() //@snippet(")", tpLitFunc, "func(i int) int {$0\\}", "func(${1:i} int) int {$0\\}") +} diff --git a/internal/lsp/testdata/types/types.go b/gopls/internal/lsp/testdata/types/types.go similarity index 100% rename from internal/lsp/testdata/types/types.go rename to gopls/internal/lsp/testdata/types/types.go diff --git a/gopls/internal/lsp/testdata/undeclared/var.go b/gopls/internal/lsp/testdata/undeclared/var.go new file mode 100644 index 00000000000..3fda582ce1f --- /dev/null +++ b/gopls/internal/lsp/testdata/undeclared/var.go @@ -0,0 +1,14 @@ +package undeclared + +func m() int { + z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") + if 100 < 90 { + z = 1 + } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") + z = 4 + } + for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") + } + r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") + return z +} diff --git a/gopls/internal/lsp/testdata/undeclared/var.go.golden b/gopls/internal/lsp/testdata/undeclared/var.go.golden new file mode 100644 index 00000000000..de5cbb42fbb --- /dev/null +++ b/gopls/internal/lsp/testdata/undeclared/var.go.golden @@ -0,0 +1,51 @@ +-- suggestedfix_var_10_6 -- +package undeclared + +func m() int { + z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") + if 100 < 90 { + z = 1 + } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") + z = 4 + } + i := + for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") + } + r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") + return z +} + +-- suggestedfix_var_4_12 -- +package undeclared + +func m() int { + y := + z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") + if 100 < 90 { + z = 1 + } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") + z = 4 + } + for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") + } + r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") + return z +} + +-- suggestedfix_var_7_18 -- +package undeclared + +func m() int { + z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") + n := + if 100 < 90 { + z = 1 + } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") + z = 4 + } + for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") + } + r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") + return z +} + diff --git a/gopls/internal/lsp/testdata/unimported/export_test.go b/gopls/internal/lsp/testdata/unimported/export_test.go new file mode 100644 index 00000000000..964d27d3b94 --- /dev/null +++ b/gopls/internal/lsp/testdata/unimported/export_test.go @@ -0,0 +1,3 @@ +package unimported + +var TestExport int //@item(testexport, "TestExport", "(from \"golang.org/lsptests/unimported\")", "var") diff --git a/internal/lsp/testdata/unimported/unimported.go.in b/gopls/internal/lsp/testdata/unimported/unimported.go.in similarity index 91% rename from internal/lsp/testdata/unimported/unimported.go.in rename to gopls/internal/lsp/testdata/unimported/unimported.go.in index c3c0243d901..4d1438d1bd8 100644 --- a/internal/lsp/testdata/unimported/unimported.go.in +++ b/gopls/internal/lsp/testdata/unimported/unimported.go.in @@ -14,7 +14,7 @@ func _() { /* ring.Ring */ //@item(ringring, "Ring", "(from \"container/ring\")", "var") -/* signature.Foo */ //@item(signaturefoo, "Foo", "func(a string, b int) (c bool) (from \"golang.org/x/tools/internal/lsp/signature\")", "func") +/* signature.Foo */ //@item(signaturefoo, "Foo", "func(a string, b int) (c bool) (from \"golang.org/lsptests/signature\")", "func") /* context.Background */ //@item(contextBackground, "Background", "func() context.Context (from \"context\")", "func") /* context.Background().Err */ //@item(contextBackgroundErr, "Background().Err", "func() error (from \"context\")", "method") diff --git a/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go b/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go new file mode 100644 index 00000000000..554c426a998 --- /dev/null +++ b/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go @@ -0,0 +1,16 @@ +package unimported + +import ( + _ "context" + + "golang.org/lsptests/baz" + _ "golang.org/lsptests/signature" // provide type information for unimported completions in the other file +) + +func _() { + foo.StructFoo{} //@item(litFooStructFoo, "foo.StructFoo{}", "struct{...}", "struct") + + // We get the literal completion for "foo.StructFoo{}" even though we haven't + // imported "foo" yet. + baz.FooStruct = f //@snippet(" //", litFooStructFoo, "foo.StructFoo{$0\\}", "foo.StructFoo{$0\\}") +} diff --git a/internal/lsp/testdata/unimported/x_test.go b/gopls/internal/lsp/testdata/unimported/x_test.go similarity index 100% rename from internal/lsp/testdata/unimported/x_test.go rename to gopls/internal/lsp/testdata/unimported/x_test.go diff --git a/internal/lsp/testdata/unresolved/unresolved.go.in b/gopls/internal/lsp/testdata/unresolved/unresolved.go.in similarity index 100% rename from internal/lsp/testdata/unresolved/unresolved.go.in rename to gopls/internal/lsp/testdata/unresolved/unresolved.go.in diff --git a/internal/lsp/testdata/unsafe/unsafe.go b/gopls/internal/lsp/testdata/unsafe/unsafe.go similarity index 100% rename from internal/lsp/testdata/unsafe/unsafe.go rename to gopls/internal/lsp/testdata/unsafe/unsafe.go diff --git a/internal/lsp/testdata/variadic/variadic.go.in b/gopls/internal/lsp/testdata/variadic/variadic.go.in similarity index 100% rename from internal/lsp/testdata/variadic/variadic.go.in rename to gopls/internal/lsp/testdata/variadic/variadic.go.in diff --git a/internal/lsp/testdata/variadic/variadic_intf.go b/gopls/internal/lsp/testdata/variadic/variadic_intf.go similarity index 100% rename from internal/lsp/testdata/variadic/variadic_intf.go rename to gopls/internal/lsp/testdata/variadic/variadic_intf.go diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a.go new file mode 100644 index 00000000000..4ae9997a03e --- /dev/null +++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a.go @@ -0,0 +1,9 @@ +package a + +var RandomGopherVariableA = "a" + +const RandomGopherConstantA = "a" + +const ( + randomgopherinvariable = iota +) diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go new file mode 100644 index 00000000000..0d97c50d623 --- /dev/null +++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go @@ -0,0 +1,3 @@ +package a + +var RandomGopherTestVariableA = "a" diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go new file mode 100644 index 00000000000..747cd17eccd --- /dev/null +++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go @@ -0,0 +1,3 @@ +package a_test + +var RandomGopherXTestVariableA = "a" diff --git a/gopls/internal/lsp/testdata/workspacesymbol/b/b.go b/gopls/internal/lsp/testdata/workspacesymbol/b/b.go new file mode 100644 index 00000000000..b2e2092eed6 --- /dev/null +++ b/gopls/internal/lsp/testdata/workspacesymbol/b/b.go @@ -0,0 +1,7 @@ +package b + +var RandomGopherVariableB = "b" + +type RandomGopherStructB struct { + Bar int +} diff --git a/internal/lsp/testdata/workspacesymbol/issue44806.go b/gopls/internal/lsp/testdata/workspacesymbol/issue44806.go similarity index 100% rename from internal/lsp/testdata/workspacesymbol/issue44806.go rename to gopls/internal/lsp/testdata/workspacesymbol/issue44806.go diff --git a/internal/lsp/testdata/workspacesymbol/main.go b/gopls/internal/lsp/testdata/workspacesymbol/main.go similarity index 100% rename from internal/lsp/testdata/workspacesymbol/main.go rename to gopls/internal/lsp/testdata/workspacesymbol/main.go diff --git a/internal/lsp/testdata/workspacesymbol/p/p.go b/gopls/internal/lsp/testdata/workspacesymbol/p/p.go similarity index 100% rename from internal/lsp/testdata/workspacesymbol/p/p.go rename to gopls/internal/lsp/testdata/workspacesymbol/p/p.go diff --git a/internal/lsp/testdata/workspacesymbol/query.go b/gopls/internal/lsp/testdata/workspacesymbol/query.go similarity index 100% rename from internal/lsp/testdata/workspacesymbol/query.go rename to gopls/internal/lsp/testdata/workspacesymbol/query.go diff --git a/internal/lsp/testdata/workspacesymbol/query.go.golden b/gopls/internal/lsp/testdata/workspacesymbol/query.go.golden similarity index 100% rename from internal/lsp/testdata/workspacesymbol/query.go.golden rename to gopls/internal/lsp/testdata/workspacesymbol/query.go.golden diff --git a/gopls/internal/lsp/tests/README.md b/gopls/internal/lsp/tests/README.md new file mode 100644 index 00000000000..07df28815c1 --- /dev/null +++ b/gopls/internal/lsp/tests/README.md @@ -0,0 +1,66 @@ +# Testing + +LSP has "marker tests" defined in `internal/lsp/testdata`, as well as +traditional tests. + +## Marker tests + +Marker tests have a standard input file, like +`internal/lsp/testdata/foo/bar.go`, and some may have a corresponding golden +file, like `internal/lsp/testdata/foo/bar.go.golden`. The former is the "input" +and the latter is the expected output. + +Each input file contains annotations like +`//@suggestedfix("}", "refactor.rewrite", "Fill anonymous struct")`. These annotations are interpreted by +test runners to perform certain actions. The expected output after those actions +is encoded in the golden file. + +When tests are run, each annotation results in a new subtest, which is encoded +in the golden file with a heading like, + +```bash +-- suggestedfix_bar_11_21 -- +// expected contents go here +-- suggestedfix_bar_13_20 -- +// expected contents go here +``` + +The format of these headings vary: they are defined by the +[`Golden`](https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/tests#Data.Golden) +function for each annotation. In the case above, the format is: annotation +name, file name, annotation line location, annotation character location. + +So, if `internal/lsp/testdata/foo/bar.go` has three `suggestedfix` annotations, +the golden file should have three headers with `suggestedfix_bar_xx_yy` +headings. + +To see a list of all available annotations, see the exported "expectations" in +[tests.go](https://github.com/golang/tools/blob/299f270db45902e93469b1152fafed034bb3f033/internal/lsp/tests/tests.go#L418-L447). + +To run marker tests, + +```bash +cd /path/to/tools + +# The marker tests are located in "internal/lsp", "internal/lsp/cmd, and +# "internal/lsp/source". +go test ./internal/lsp/... +``` + +There are quite a lot of marker tests, so to run one individually, pass the test +path and heading into a -run argument: + +```bash +cd /path/to/tools +go test ./internal/lsp/... -v -run TestLSP/Modules/SuggestedFix/bar_11_21 +``` + +## Resetting marker tests + +Sometimes, a change is made to lsp that requires a change to multiple golden +files. When this happens, you can run, + +```bash +cd /path/to/tools +./internal/lsp/reset_golden.sh +``` diff --git a/gopls/internal/lsp/tests/compare/text.go b/gopls/internal/lsp/tests/compare/text.go new file mode 100644 index 00000000000..9521496feec --- /dev/null +++ b/gopls/internal/lsp/tests/compare/text.go @@ -0,0 +1,33 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package compare + +import ( + "golang.org/x/tools/internal/diff" +) + +// Text returns a formatted unified diff of the edits to go from want to +// got, returning "" if and only if want == got. +// +// This function is intended for use in testing, and panics if any error occurs +// while computing the diff. It is not sufficiently tested for production use. +func Text(want, got string) string { + if want == got { + return "" + } + + // Add newlines to avoid verbose newline messages ("No newline at end of file"). + unified := diff.Unified("want", "got", want+"\n", got+"\n") + + // Defensively assert that we get an actual diff, so that we guarantee the + // invariant that we return "" if and only if want == got. + // + // This is probably unnecessary, but convenient. + if unified == "" { + panic("empty diff for non-identical input") + } + + return unified +} diff --git a/gopls/internal/lsp/tests/compare/text_test.go b/gopls/internal/lsp/tests/compare/text_test.go new file mode 100644 index 00000000000..8f5af48bd11 --- /dev/null +++ b/gopls/internal/lsp/tests/compare/text_test.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package compare_test + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/tests/compare" +) + +func TestText(t *testing.T) { + tests := []struct { + got, want, wantDiff string + }{ + {"", "", ""}, + {"equal", "equal", ""}, + {"a", "b", "--- want\n+++ got\n@@ -1 +1 @@\n-b\n+a\n"}, + {"a\nd\nc\n", "a\nb\nc\n", "--- want\n+++ got\n@@ -1,4 +1,4 @@\n a\n-b\n+d\n c\n \n"}, + } + + for _, test := range tests { + if gotDiff := compare.Text(test.want, test.got); gotDiff != test.wantDiff { + t.Errorf("compare.Text(%q, %q) =\n%q, want\n%q", test.want, test.got, gotDiff, test.wantDiff) + } + } +} diff --git a/gopls/internal/lsp/tests/markdown_go118.go b/gopls/internal/lsp/tests/markdown_go118.go new file mode 100644 index 00000000000..55fe106b8cc --- /dev/null +++ b/gopls/internal/lsp/tests/markdown_go118.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package tests + +import ( + "regexp" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/tests/compare" +) + +// DiffMarkdown compares two markdown strings produced by parsing go doc +// comments. +// +// For go1.19 and later, markdown conversion is done using go/doc/comment. +// Compared to the newer version, the older version has extra escapes, and +// treats code blocks slightly differently. +func DiffMarkdown(want, got string) string { + want = normalizeMarkdown(want) + got = normalizeMarkdown(got) + return compare.Text(want, got) +} + +// normalizeMarkdown normalizes whitespace and escaping of the input string, to +// eliminate differences between the Go 1.18 and Go 1.19 generated markdown for +// doc comments. Note that it does not normalize to either the 1.18 or 1.19 +// formatting: it simplifies both so that they may be compared. +// +// This function may need to be adjusted as we encounter more differences in +// the generated text. +func normalizeMarkdown(input string) string { + input = strings.TrimSpace(input) + + // For simplicity, eliminate blank lines. + input = regexp.MustCompile("\n+").ReplaceAllString(input, "\n") + + // Replace common escaped characters with their unescaped version. + // + // This list may not be exhaustive: it was just sufficient to make tests + // pass. + input = strings.NewReplacer( + `\\`, ``, + `\@`, `@`, + `\(`, `(`, + `\)`, `)`, + `\{`, `{`, + `\}`, `}`, + `\"`, `"`, + `\.`, `.`, + `\-`, `-`, + `\'`, `'`, + `\+`, `+`, + `\~`, `~`, + `\=`, `=`, + `\n\n\n`, `\n\n`, // Note that these are *escaped* newlines. + ).Replace(input) + + return input +} diff --git a/gopls/internal/lsp/tests/markdown_go119.go b/gopls/internal/lsp/tests/markdown_go119.go new file mode 100644 index 00000000000..a7fcf1a42ef --- /dev/null +++ b/gopls/internal/lsp/tests/markdown_go119.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package tests + +import ( + "golang.org/x/tools/gopls/internal/lsp/tests/compare" +) + +// DiffMarkdown compares two markdown strings produced by parsing go doc +// comments. +// +// For go1.19 and later, markdown conversion is done using go/doc/comment. +// Compared to the newer version, the older version has extra escapes, and +// treats code blocks slightly differently. +func DiffMarkdown(want, got string) string { + return compare.Text(want, got) +} diff --git a/internal/lsp/tests/normalizer.go b/gopls/internal/lsp/tests/normalizer.go similarity index 86% rename from internal/lsp/tests/normalizer.go rename to gopls/internal/lsp/tests/normalizer.go index 77d9e66a8ed..9c5d7b9c82f 100644 --- a/internal/lsp/tests/normalizer.go +++ b/gopls/internal/lsp/tests/normalizer.go @@ -41,22 +41,6 @@ func CollectNormalizers(exported *packagestest.Exported) []Normalizer { return normalizers } -// NormalizePrefix normalizes a single path at the front of the input string. -func NormalizePrefix(s string, normalizers []Normalizer) string { - for _, n := range normalizers { - if t := strings.TrimPrefix(s, n.path); t != s { - return n.fragment + t - } - if t := strings.TrimPrefix(s, n.slashed); t != s { - return n.fragment + t - } - if t := strings.TrimPrefix(s, n.escaped); t != s { - return n.fragment + t - } - } - return s -} - // Normalize replaces all paths present in s with just the fragment portion // this is used to make golden files not depend on the temporary paths of the files func Normalize(s string, normalizers []Normalizer) string { diff --git a/internal/lsp/tests/tests.go b/gopls/internal/lsp/tests/tests.go similarity index 82% rename from internal/lsp/tests/tests.go rename to gopls/internal/lsp/tests/tests.go index 8265cf2e9b1..2a370b7b732 100644 --- a/internal/lsp/tests/tests.go +++ b/gopls/internal/lsp/tests/tests.go @@ -27,11 +27,13 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/packages/packagestest" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/source/completion" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/safetoken" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/completion" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/txtar" @@ -41,7 +43,12 @@ const ( overlayFileSuffix = ".overlay" goldenFileSuffix = ".golden" inFileSuffix = ".in" - testModule = "golang.org/x/tools/internal/lsp" + + // The module path containing the testdata packages. + // + // Warning: the length of this module path matters, as we have bumped up + // against command-line limitations on windows (golang/go#54800). + testModule = "golang.org/lsptests" ) var summaryFile = "summary.txt" @@ -54,38 +61,39 @@ func init() { var UpdateGolden = flag.Bool("golden", false, "Update golden files") -type CallHierarchy map[span.Span]*CallHierarchyResult -type CodeLens map[span.URI][]protocol.CodeLens -type Diagnostics map[span.URI][]*source.Diagnostic -type CompletionItems map[token.Pos]*completion.CompletionItem -type Completions map[span.Span][]Completion -type CompletionSnippets map[span.Span][]CompletionSnippet -type UnimportedCompletions map[span.Span][]Completion -type DeepCompletions map[span.Span][]Completion -type FuzzyCompletions map[span.Span][]Completion -type CaseSensitiveCompletions map[span.Span][]Completion -type RankCompletions map[span.Span][]Completion -type FoldingRanges []span.Span -type Formats []span.Span -type Imports []span.Span -type SemanticTokens []span.Span -type SuggestedFixes map[span.Span][]string -type FunctionExtractions map[span.Span]span.Span -type MethodExtractions map[span.Span]span.Span -type Definitions map[span.Span]Definition -type Implementations map[span.Span][]span.Span -type Highlights map[span.Span][]span.Span -type References map[span.Span][]span.Span -type Renames map[span.Span]string -type PrepareRenames map[span.Span]*source.PrepareItem -type Symbols map[span.URI][]protocol.DocumentSymbol -type SymbolsChildren map[string][]protocol.DocumentSymbol -type SymbolInformation map[span.Span]protocol.SymbolInformation -type WorkspaceSymbols map[WorkspaceSymbolsTestType]map[span.URI][]string -type Signatures map[span.Span]*protocol.SignatureHelp -type Links map[span.URI][]Link -type AddImport map[span.URI]string -type Hovers map[span.Span]string +// These type names apparently avoid the need to repeat the +// type in the field name and the make() expression. +type CallHierarchy = map[span.Span]*CallHierarchyResult +type CodeLens = map[span.URI][]protocol.CodeLens +type Diagnostics = map[span.URI][]*source.Diagnostic +type CompletionItems = map[token.Pos]*completion.CompletionItem +type Completions = map[span.Span][]Completion +type CompletionSnippets = map[span.Span][]CompletionSnippet +type UnimportedCompletions = map[span.Span][]Completion +type DeepCompletions = map[span.Span][]Completion +type FuzzyCompletions = map[span.Span][]Completion +type CaseSensitiveCompletions = map[span.Span][]Completion +type RankCompletions = map[span.Span][]Completion +type FoldingRanges = []span.Span +type Formats = []span.Span +type Imports = []span.Span +type SemanticTokens = []span.Span +type SuggestedFixes = map[span.Span][]SuggestedFix +type FunctionExtractions = map[span.Span]span.Span +type MethodExtractions = map[span.Span]span.Span +type Definitions = map[span.Span]Definition +type Implementations = map[span.Span][]span.Span +type Highlights = map[span.Span][]span.Span +type References = map[span.Span][]span.Span +type Renames = map[span.Span]string +type PrepareRenames = map[span.Span]*source.PrepareItem +type Symbols = map[span.URI][]*symbol +type InlayHints = []span.Span +type WorkspaceSymbols = map[WorkspaceSymbolsTestType]map[span.URI][]string +type Signatures = map[span.Span]*protocol.SignatureHelp +type Links = map[span.URI][]Link +type AddImport = map[span.URI]string +type SelectionRanges = []span.Span type Data struct { Config packages.Config @@ -113,17 +121,15 @@ type Data struct { Highlights Highlights References References Renames Renames + InlayHints InlayHints PrepareRenames PrepareRenames Symbols Symbols - symbolsChildren SymbolsChildren - symbolInformation SymbolInformation WorkspaceSymbols WorkspaceSymbols Signatures Signatures Links Links AddImport AddImport - Hovers Hovers + SelectionRanges SelectionRanges - t testing.TB fragments map[string]string dir string golden map[string]*Golden @@ -132,9 +138,15 @@ type Data struct { ModfileFlagAvailable bool mappersMu sync.Mutex - mappers map[span.URI]*protocol.ColumnMapper + mappers map[span.URI]*protocol.Mapper } +// The Tests interface abstracts the LSP-based implementation of the marker +// test operators (such as @codelens) appearing in files beneath ../testdata/. +// +// TODO(adonovan): reduce duplication; see https://github.com/golang/go/issues/54845. +// There is only one implementation (*runner in ../lsp_test.go), so +// we can abolish the interface now. type Tests interface { CallHierarchy(*testing.T, span.Span, *CallHierarchyResult) CodeLens(*testing.T, span.URI, []protocol.CodeLens) @@ -150,12 +162,13 @@ type Tests interface { Format(*testing.T, span.Span) Import(*testing.T, span.Span) SemanticTokens(*testing.T, span.Span) - SuggestedFix(*testing.T, span.Span, []string, int) + SuggestedFix(*testing.T, span.Span, []SuggestedFix, int) FunctionExtraction(*testing.T, span.Span, span.Span) MethodExtraction(*testing.T, span.Span, span.Span) Definition(*testing.T, span.Span, Definition) Implementation(*testing.T, span.Span, []span.Span) Highlight(*testing.T, span.Span, []span.Span) + InlayHints(*testing.T, span.Span) References(*testing.T, span.Span, []span.Span) Rename(*testing.T, span.Span, string) PrepareRename(*testing.T, span.Span, *source.PrepareItem) @@ -164,7 +177,7 @@ type Tests interface { SignatureHelp(*testing.T, span.Span, *protocol.SignatureHelp) Link(*testing.T, span.URI, []Link) AddImport(*testing.T, span.URI, string) - Hover(*testing.T, span.Span, string) + SelectionRanges(*testing.T, span.Span) } type Definition struct { @@ -229,6 +242,16 @@ type Link struct { NotePosition token.Position } +type SuggestedFix struct { + ActionKind, Title string +} + +// A symbol holds a DocumentSymbol along with its parent-child edge. +type symbol struct { + pSymbol protocol.DocumentSymbol + id, parentID string +} + type Golden struct { Filename string Archive *txtar.Archive @@ -260,8 +283,8 @@ func DefaultOptions(o *source.Options) { o.InsertTextFormat = protocol.SnippetTextFormat o.CompletionBudget = time.Minute o.HierarchicalDocumentSymbolSupport = true - o.ExperimentalWorkspaceModule = true o.SemanticTokens = true + o.InternalOptions.NewDiff = "both" } func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*testing.T, *Data)) { @@ -272,10 +295,6 @@ func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*tes } for _, mode := range modes { t.Run(mode, func(t *testing.T) { - if mode == "MultiModule" { - // Some bug in 1.12 breaks reading markers, and it's not worth figuring out. - testenv.NeedsGo1Point(t, 13) - } datum := load(t, mode, dataDir) t.Helper() f(t, datum) @@ -306,20 +325,16 @@ func load(t testing.TB, mode string, dir string) *Data { FunctionExtractions: make(FunctionExtractions), MethodExtractions: make(MethodExtractions), Symbols: make(Symbols), - symbolsChildren: make(SymbolsChildren), - symbolInformation: make(SymbolInformation), WorkspaceSymbols: make(WorkspaceSymbols), Signatures: make(Signatures), Links: make(Links), AddImport: make(AddImport), - Hovers: make(Hovers), - t: t, dir: dir, fragments: map[string]string{}, golden: map[string]*Golden{}, mode: mode, - mappers: map[span.URI]*protocol.ColumnMapper{}, + mappers: map[span.URI]*protocol.Mapper{}, } if !*UpdateGolden { @@ -464,8 +479,8 @@ func load(t testing.TB, mode string, dir string) *Data { "implementations": datum.collectImplementations, "typdef": datum.collectTypeDefinitions, "hoverdef": datum.collectHoverDefinitions, - "hover": datum.collectHovers, "highlight": datum.collectHighlights, + "inlayHint": datum.collectInlayHints, "refs": datum.collectReferences, "rename": datum.collectRenames, "prepare": datum.collectPrepareRenames, @@ -478,15 +493,11 @@ func load(t testing.TB, mode string, dir string) *Data { "incomingcalls": datum.collectIncomingCalls, "outgoingcalls": datum.collectOutgoingCalls, "addimport": datum.collectAddImports, + "selectionrange": datum.collectSelectionRanges, }); err != nil { t.Fatal(err) } - for _, symbols := range datum.Symbols { - for i := range symbols { - children := datum.symbolsChildren[symbols[i].Name] - symbols[i].Children = children - } - } + // Collect names for the entries that require golden files. if err := datum.Exported.Expect(map[string]interface{}{ "godef": datum.collectDefinitionNames, @@ -557,9 +568,6 @@ func Run(t *testing.T, tests Tests, data *Data) { if strings.Contains(t.Name(), "cgo") { testenv.NeedsTool(t, "cgo") } - if strings.Contains(t.Name(), "declarecgo") { - testenv.NeedsGo1Point(t, 15) - } test(t, src, e, data.CompletionItems) }) } @@ -744,9 +752,6 @@ func Run(t *testing.T, tests Tests, data *Data) { if strings.Contains(t.Name(), "cgo") { testenv.NeedsTool(t, "cgo") } - if strings.Contains(t.Name(), "declarecgo") { - testenv.NeedsGo1Point(t, 15) - } tests.Definition(t, spn, d) }) } @@ -772,12 +777,12 @@ func Run(t *testing.T, tests Tests, data *Data) { } }) - t.Run("Hover", func(t *testing.T) { + t.Run("InlayHints", func(t *testing.T) { t.Helper() - for pos, info := range data.Hovers { - t.Run(SpanName(pos), func(t *testing.T) { + for _, src := range data.InlayHints { + t.Run(SpanName(src), func(t *testing.T) { t.Helper() - tests.Hover(t, pos, info) + tests.InlayHints(t, src) }) } }) @@ -814,10 +819,44 @@ func Run(t *testing.T, tests Tests, data *Data) { t.Run("Symbols", func(t *testing.T) { t.Helper() - for uri, expectedSymbols := range data.Symbols { + for uri, allSymbols := range data.Symbols { + byParent := make(map[string][]*symbol) + for _, sym := range allSymbols { + if sym.parentID != "" { + byParent[sym.parentID] = append(byParent[sym.parentID], sym) + } + } + + // collectChildren does a depth-first traversal of the symbol tree, + // computing children of child nodes before returning to their parent. + // This is necessary as the Children field is slice of non-pointer types, + // and therefore we need to be careful to mutate children first before + // assigning them to their parent. + var collectChildren func(id string) []protocol.DocumentSymbol + collectChildren = func(id string) []protocol.DocumentSymbol { + children := byParent[id] + // delete from byParent before recursing, to ensure that + // collectChildren terminates even in the presence of cycles. + delete(byParent, id) + var result []protocol.DocumentSymbol + for _, child := range children { + child.pSymbol.Children = collectChildren(child.id) + result = append(result, child.pSymbol) + } + return result + } + + var topLevel []protocol.DocumentSymbol + for _, sym := range allSymbols { + if sym.parentID == "" { + sym.pSymbol.Children = collectChildren(sym.id) + topLevel = append(topLevel, sym.pSymbol) + } + } + t.Run(uriName(uri), func(t *testing.T) { t.Helper() - tests.Symbols(t, uri, expectedSymbols) + tests.Symbols(t, uri, topLevel) }) } }) @@ -887,6 +926,15 @@ func Run(t *testing.T, tests Tests, data *Data) { } }) + t.Run("SelectionRanges", func(t *testing.T) { + t.Helper() + for _, span := range data.SelectionRanges { + t.Run(SpanName(span), func(t *testing.T) { + tests.SelectionRanges(t, span) + }) + } + }) + if *UpdateGolden { for _, golden := range data.golden { if !golden.Modified { @@ -970,6 +1018,7 @@ func checkData(t *testing.T, data *Data) { fmt.Fprintf(buf, "DefinitionsCount = %v\n", definitionCount) fmt.Fprintf(buf, "TypeDefinitionsCount = %v\n", typeDefinitionCount) fmt.Fprintf(buf, "HighlightsCount = %v\n", len(data.Highlights)) + fmt.Fprintf(buf, "InlayHintsCount = %v\n", len(data.InlayHints)) fmt.Fprintf(buf, "ReferencesCount = %v\n", len(data.References)) fmt.Fprintf(buf, "RenamesCount = %v\n", len(data.Renames)) fmt.Fprintf(buf, "PrepareRenamesCount = %v\n", len(data.PrepareRenames)) @@ -978,17 +1027,21 @@ func checkData(t *testing.T, data *Data) { fmt.Fprintf(buf, "SignaturesCount = %v\n", len(data.Signatures)) fmt.Fprintf(buf, "LinksCount = %v\n", linksCount) fmt.Fprintf(buf, "ImplementationsCount = %v\n", len(data.Implementations)) + fmt.Fprintf(buf, "SelectionRangesCount = %v\n", len(data.SelectionRanges)) - want := string(data.Golden("summary", summaryFile, func() ([]byte, error) { + want := string(data.Golden(t, "summary", summaryFile, func() ([]byte, error) { return buf.Bytes(), nil })) got := buf.String() if want != got { - t.Errorf("test summary does not match:\n%s", Diff(t, want, got)) + // These counters change when assertions are added or removed. + // They act as an independent safety net to ensure that the + // tests didn't spuriously pass because they did no work. + t.Errorf("test summary does not match:\n%s\n(Run with -golden to update golden file; also, there may be one per Go version.)", compare.Text(want, got)) } } -func (data *Data) Mapper(uri span.URI) (*protocol.ColumnMapper, error) { +func (data *Data) Mapper(uri span.URI) (*protocol.Mapper, error) { data.mappersMu.Lock() defer data.mappersMu.Unlock() @@ -997,24 +1050,24 @@ func (data *Data) Mapper(uri span.URI) (*protocol.ColumnMapper, error) { if err != nil { return nil, err } - data.mappers[uri] = protocol.NewColumnMapper(uri, content) + data.mappers[uri] = protocol.NewMapper(uri, content) } return data.mappers[uri], nil } -func (data *Data) Golden(tag string, target string, update func() ([]byte, error)) []byte { - data.t.Helper() +func (data *Data) Golden(t *testing.T, tag, target string, update func() ([]byte, error)) []byte { + t.Helper() fragment, found := data.fragments[target] if !found { if filepath.IsAbs(target) { - data.t.Fatalf("invalid golden file fragment %v", target) + t.Fatalf("invalid golden file fragment %v", target) } fragment = target } golden := data.golden[fragment] if golden == nil { if !*UpdateGolden { - data.t.Fatalf("could not find golden file %v: %v", fragment, tag) + t.Fatalf("could not find golden file %v: %v", fragment, tag) } golden = &Golden{ Filename: filepath.Join(data.dir, fragment+goldenFileSuffix), @@ -1040,14 +1093,14 @@ func (data *Data) Golden(tag string, target string, update func() ([]byte, error } contents, err := update() if err != nil { - data.t.Fatalf("could not update golden file %v: %v", fragment, err) + t.Fatalf("could not update golden file %v: %v", fragment, err) } file.Data = append(contents, '\n') // add trailing \n for txtar golden.Modified = true } if file == nil { - data.t.Fatalf("could not find golden contents %v: %v", fragment, tag) + t.Fatalf("could not find golden contents %v: %v", fragment, tag) } if len(file.Data) == 0 { return file.Data @@ -1056,19 +1109,8 @@ func (data *Data) Golden(tag string, target string, update func() ([]byte, error } func (data *Data) collectCodeLens(spn span.Span, title, cmd string) { - if _, ok := data.CodeLens[spn.URI()]; !ok { - data.CodeLens[spn.URI()] = []protocol.CodeLens{} - } - m, err := data.Mapper(spn.URI()) - if err != nil { - return - } - rng, err := m.Range(spn) - if err != nil { - return - } data.CodeLens[spn.URI()] = append(data.CodeLens[spn.URI()], protocol.CodeLens{ - Range: rng, + Range: data.mustRange(spn), Command: protocol.Command{ Title: title, Command: cmd, @@ -1076,18 +1118,7 @@ func (data *Data) collectCodeLens(spn span.Span, title, cmd string) { }) } -func (data *Data) collectDiagnostics(spn span.Span, msgSource, msg, msgSeverity string) { - if _, ok := data.Diagnostics[spn.URI()]; !ok { - data.Diagnostics[spn.URI()] = []*source.Diagnostic{} - } - m, err := data.Mapper(spn.URI()) - if err != nil { - return - } - rng, err := m.Range(spn) - if err != nil { - return - } +func (data *Data) collectDiagnostics(spn span.Span, msgSource, msgPattern, msgSeverity string) { severity := protocol.SeverityError switch msgSeverity { case "error": @@ -1099,14 +1130,13 @@ func (data *Data) collectDiagnostics(spn span.Span, msgSource, msg, msgSeverity case "information": severity = protocol.SeverityInformation } - // This is not the correct way to do this, but it seems excessive to do the full conversion here. - want := &source.Diagnostic{ - Range: rng, + + data.Diagnostics[spn.URI()] = append(data.Diagnostics[spn.URI()], &source.Diagnostic{ + Range: data.mustRange(spn), Severity: severity, Source: source.DiagnosticSource(msgSource), - Message: msg, - } - data.Diagnostics[spn.URI()] = append(data.Diagnostics[spn.URI()], want) + Message: msgPattern, + }) } func (data *Data) collectCompletions(typ CompletionTestType) func(span.Span, []token.Pos) { @@ -1143,15 +1173,9 @@ func (data *Data) collectCompletions(typ CompletionTestType) func(span.Span, []t } } -func (data *Data) collectCompletionItems(pos token.Pos, args []string) { - if len(args) < 3 { - loc := data.Exported.ExpectFileSet.Position(pos) - data.t.Fatalf("%s:%d: @item expects at least 3 args, got %d", - loc.Filename, loc.Line, len(args)) - } - label, detail, kind := args[0], args[1], args[2] +func (data *Data) collectCompletionItems(pos token.Pos, label, detail, kind string, args []string) { var documentation string - if len(args) == 4 { + if len(args) > 3 { documentation = args[3] } data.CompletionItems[pos] = &completion.CompletionItem{ @@ -1182,11 +1206,8 @@ func (data *Data) collectSemanticTokens(spn span.Span) { data.SemanticTokens = append(data.SemanticTokens, spn) } -func (data *Data) collectSuggestedFixes(spn span.Span, actionKind string) { - if _, ok := data.SuggestedFixes[spn]; !ok { - data.SuggestedFixes[spn] = []string{} - } - data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], actionKind) +func (data *Data) collectSuggestedFixes(spn span.Span, actionKind, fix string) { + data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], SuggestedFix{actionKind, fix}) } func (data *Data) collectFunctionExtractions(start span.Span, end span.Span) { @@ -1208,20 +1229,17 @@ func (data *Data) collectDefinitions(src, target span.Span) { } } +func (data *Data) collectSelectionRanges(spn span.Span) { + data.SelectionRanges = append(data.SelectionRanges, spn) +} + func (data *Data) collectImplementations(src span.Span, targets []span.Span) { data.Implementations[src] = targets } func (data *Data) collectIncomingCalls(src span.Span, calls []span.Span) { for _, call := range calls { - m, err := data.Mapper(call.URI()) - if err != nil { - data.t.Fatal(err) - } - rng, err := m.Range(call) - if err != nil { - data.t.Fatal(err) - } + rng := data.mustRange(call) // we're only comparing protocol.range if data.CallHierarchy[src] != nil { data.CallHierarchy[src].IncomingCalls = append(data.CallHierarchy[src].IncomingCalls, @@ -1244,19 +1262,11 @@ func (data *Data) collectOutgoingCalls(src span.Span, calls []span.Span) { data.CallHierarchy[src] = &CallHierarchyResult{} } for _, call := range calls { - m, err := data.Mapper(call.URI()) - if err != nil { - data.t.Fatal(err) - } - rng, err := m.Range(call) - if err != nil { - data.t.Fatal(err) - } // we're only comparing protocol.range data.CallHierarchy[src].OutgoingCalls = append(data.CallHierarchy[src].OutgoingCalls, protocol.CallHierarchyItem{ URI: protocol.DocumentURI(call.URI()), - Range: rng, + Range: data.mustRange(call), }) } } @@ -1269,10 +1279,6 @@ func (data *Data) collectHoverDefinitions(src, target span.Span) { } } -func (data *Data) collectHovers(src span.Span, expected string) { - data.Hovers[src] = expected -} - func (data *Data) collectTypeDefinitions(src, target span.Span) { data.Definitions[src] = Definition{ Src: src, @@ -1292,6 +1298,10 @@ func (data *Data) collectHighlights(src span.Span, expected []span.Span) { data.Highlights[src] = append(data.Highlights[src], expected...) } +func (data *Data) collectInlayHints(src span.Span) { + data.InlayHints = append(data.InlayHints, src) +} + func (data *Data) collectReferences(src span.Span, expected []span.Span) { data.References[src] = expected } @@ -1300,57 +1310,38 @@ func (data *Data) collectRenames(src span.Span, newText string) { data.Renames[src] = newText } -func (data *Data) collectPrepareRenames(src span.Span, rng span.Range, placeholder string) { - m, err := data.Mapper(src.URI()) - if err != nil { - data.t.Fatal(err) - } - // Convert range to span and then to protocol.Range. - spn, err := rng.Span() - if err != nil { - data.t.Fatal(err) - } - prng, err := m.Range(spn) - if err != nil { - data.t.Fatal(err) - } +func (data *Data) collectPrepareRenames(src, spn span.Span, placeholder string) { data.PrepareRenames[src] = &source.PrepareItem{ - Range: prng, + Range: data.mustRange(spn), Text: placeholder, } } // collectSymbols is responsible for collecting @symbol annotations. -func (data *Data) collectSymbols(name string, spn span.Span, kind string, parentName string, siName string) { +func (data *Data) collectSymbols(name string, selectionRng span.Span, kind, detail, id, parentID string) { + // We don't set 'Range' here as it is difficult (impossible?) to express + // multi-line ranges in the packagestest framework. + uri := selectionRng.URI() + data.Symbols[uri] = append(data.Symbols[uri], &symbol{ + pSymbol: protocol.DocumentSymbol{ + Name: name, + Kind: protocol.ParseSymbolKind(kind), + SelectionRange: data.mustRange(selectionRng), + Detail: detail, + }, + id: id, + parentID: parentID, + }) +} + +// mustRange converts spn into a protocol.Range, panicking on any error. +func (data *Data) mustRange(spn span.Span) protocol.Range { m, err := data.Mapper(spn.URI()) + rng, err := m.SpanRange(spn) if err != nil { - data.t.Fatal(err) - } - rng, err := m.Range(spn) - if err != nil { - data.t.Fatal(err) - } - sym := protocol.DocumentSymbol{ - Name: name, - Kind: protocol.ParseSymbolKind(kind), - SelectionRange: rng, - } - if parentName == "" { - data.Symbols[spn.URI()] = append(data.Symbols[spn.URI()], sym) - } else { - data.symbolsChildren[parentName] = append(data.symbolsChildren[parentName], sym) - } - - // Reuse @symbol in the workspace symbols tests. - si := protocol.SymbolInformation{ - Name: siName, - Kind: sym.Kind, - Location: protocol.Location{ - URI: protocol.URIFromSpanURI(spn.URI()), - Range: sym.SelectionRange, - }, + panic(fmt.Sprintf("converting span %s to range: %v", spn, err)) } - data.symbolInformation[spn] = si + return rng } func (data *Data) collectWorkspaceSymbols(typ WorkspaceSymbolsTestType) func(*expect.Note, string) { @@ -1358,7 +1349,7 @@ func (data *Data) collectWorkspaceSymbols(typ WorkspaceSymbolsTestType) func(*ex if data.WorkspaceSymbols[typ] == nil { data.WorkspaceSymbols[typ] = make(map[span.URI][]string) } - pos := data.Exported.ExpectFileSet.Position(note.Pos) + pos := safetoken.StartPosition(data.Exported.ExpectFileSet, note.Pos) uri := span.URIFromPath(pos.Filename) data.WorkspaceSymbols[typ][uri] = append(data.WorkspaceSymbols[typ][uri], query) } @@ -1388,7 +1379,7 @@ func (data *Data) collectCompletionSnippets(spn span.Span, item token.Pos, plain } func (data *Data) collectLinks(spn span.Span, link string, note *expect.Note, fset *token.FileSet) { - position := fset.Position(note.Pos) + position := safetoken.StartPosition(fset, note.Pos) uri := spn.URI() data.Links[uri] = append(data.Links[uri], Link{ Src: spn, @@ -1401,6 +1392,8 @@ func uriName(uri span.URI) string { return filepath.Base(strings.TrimSuffix(uri.Filename(), ".go")) } +// TODO(golang/go#54845): improve the formatting here to match standard +// line:column position formatting. func SpanName(spn span.Span) string { return fmt.Sprintf("%v_%v_%v", uriName(spn.URI()), spn.Start().Line(), spn.Start().Column()) } diff --git a/gopls/internal/lsp/tests/util.go b/gopls/internal/lsp/tests/util.go new file mode 100644 index 00000000000..fd65ecb55fa --- /dev/null +++ b/gopls/internal/lsp/tests/util.go @@ -0,0 +1,547 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tests + +import ( + "bytes" + "context" + "fmt" + "go/token" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/source/completion" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/gopls/internal/span" +) + +var builtins = map[string]bool{ + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "imag": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "true": true, +} + +// DiffLinks takes the links we got and checks if they are located within the source or a Note. +// If the link is within a Note, the link is removed. +// Returns an diff comment if there are differences and empty string if no diffs. +func DiffLinks(mapper *protocol.Mapper, wantLinks []Link, gotLinks []protocol.DocumentLink) string { + var notePositions []token.Position + links := make(map[span.Span]string, len(wantLinks)) + for _, link := range wantLinks { + links[link.Src] = link.Target + notePositions = append(notePositions, link.NotePosition) + } + + var msg strings.Builder + for _, link := range gotLinks { + spn, err := mapper.RangeSpan(link.Range) + if err != nil { + return fmt.Sprintf("%v", err) + } + linkInNote := false + for _, notePosition := range notePositions { + // Drop the links found inside expectation notes arguments as this links are not collected by expect package. + if notePosition.Line == spn.Start().Line() && + notePosition.Column <= spn.Start().Column() { + delete(links, spn) + linkInNote = true + } + } + if linkInNote { + continue + } + + if target, ok := links[spn]; ok { + delete(links, spn) + if target != link.Target { + fmt.Fprintf(&msg, "%s: want link with target %q, got %q\n", spn, target, link.Target) + } + } else { + fmt.Fprintf(&msg, "%s: got unexpected link with target %q\n", spn, link.Target) + } + } + for spn, target := range links { + fmt.Fprintf(&msg, "%s: expected link with target %q is missing\n", spn, target) + } + return msg.String() +} + +// CompareDiagnostics reports testing errors to t when the diagnostic set got +// does not match want. If the sole expectation has source "no_diagnostics", +// the test expects that no diagnostics were received for the given document. +func CompareDiagnostics(t *testing.T, uri span.URI, want, got []*source.Diagnostic) { + t.Helper() + fileName := path.Base(string(uri)) + + // A special case to test that there are no diagnostics for a file. + if len(want) == 1 && want[0].Source == "no_diagnostics" { + want = nil + } + + // Build a helper function to match an actual diagnostic to an overlapping + // expected diagnostic (if any). + unmatched := make([]*source.Diagnostic, len(want)) + copy(unmatched, want) + source.SortDiagnostics(unmatched) + match := func(g *source.Diagnostic) *source.Diagnostic { + // Find the last expected diagnostic d for which start(d) < end(g), and + // check to see if it overlaps. + i := sort.Search(len(unmatched), func(i int) bool { + d := unmatched[i] + // See rangeOverlaps: if a range is a single point, we consider End to be + // included in the range... + if g.Range.Start == g.Range.End { + return protocol.ComparePosition(d.Range.Start, g.Range.End) > 0 + } + // ...otherwise the end position of a range is not included. + return protocol.ComparePosition(d.Range.Start, g.Range.End) >= 0 + }) + if i == 0 { + return nil + } + w := unmatched[i-1] + if rangeOverlaps(w.Range, g.Range) { + unmatched = append(unmatched[:i-1], unmatched[i:]...) + return w + } + return nil + } + + for _, g := range got { + w := match(g) + if w == nil { + t.Errorf("%s:%s: unexpected diagnostic %q", fileName, g.Range, g.Message) + continue + } + if match, err := regexp.MatchString(w.Message, g.Message); err != nil { + t.Errorf("%s:%s: invalid regular expression %q: %v", fileName, w.Range.Start, w.Message, err) + } else if !match { + t.Errorf("%s:%s: got Message %q, want match for pattern %q", fileName, g.Range.Start, g.Message, w.Message) + } + if w.Severity != g.Severity { + t.Errorf("%s:%s: got Severity %v, want %v", fileName, g.Range.Start, g.Severity, w.Severity) + } + if w.Source != g.Source { + t.Errorf("%s:%s: got Source %v, want %v", fileName, g.Range.Start, g.Source, w.Source) + } + } + + for _, w := range unmatched { + t.Errorf("%s:%s: unmatched diagnostic pattern %q", fileName, w.Range, w.Message) + } +} + +// rangeOverlaps reports whether r1 and r2 overlap. +func rangeOverlaps(r1, r2 protocol.Range) bool { + if inRange(r2.Start, r1) || inRange(r1.Start, r2) { + return true + } + return false +} + +// inRange reports whether p is contained within [r.Start, r.End), or if p == +// r.Start == r.End (special handling for the case where the range is a single +// point). +func inRange(p protocol.Position, r protocol.Range) bool { + if protocol.IsPoint(r) { + return protocol.ComparePosition(r.Start, p) == 0 + } + if protocol.ComparePosition(r.Start, p) <= 0 && protocol.ComparePosition(p, r.End) < 0 { + return true + } + return false +} + +func DiffCodeLens(uri span.URI, want, got []protocol.CodeLens) string { + sortCodeLens(want) + sortCodeLens(got) + + if len(got) != len(want) { + return summarizeCodeLens(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want)) + } + for i, w := range want { + g := got[i] + if w.Command.Command != g.Command.Command { + return summarizeCodeLens(i, uri, want, got, "incorrect Command Name got %v want %v", g.Command.Command, w.Command.Command) + } + if w.Command.Title != g.Command.Title { + return summarizeCodeLens(i, uri, want, got, "incorrect Command Title got %v want %v", g.Command.Title, w.Command.Title) + } + if protocol.ComparePosition(w.Range.Start, g.Range.Start) != 0 { + return summarizeCodeLens(i, uri, want, got, "incorrect Start got %v want %v", g.Range.Start, w.Range.Start) + } + if !protocol.IsPoint(g.Range) { // Accept any 'want' range if the codelens returns a zero-length range. + if protocol.ComparePosition(w.Range.End, g.Range.End) != 0 { + return summarizeCodeLens(i, uri, want, got, "incorrect End got %v want %v", g.Range.End, w.Range.End) + } + } + } + return "" +} + +func sortCodeLens(c []protocol.CodeLens) { + sort.Slice(c, func(i int, j int) bool { + if r := protocol.CompareRange(c[i].Range, c[j].Range); r != 0 { + return r < 0 + } + if c[i].Command.Command < c[j].Command.Command { + return true + } else if c[i].Command.Command == c[j].Command.Command { + return c[i].Command.Title < c[j].Command.Title + } else { + return false + } + }) +} + +func summarizeCodeLens(i int, uri span.URI, want, got []protocol.CodeLens, reason string, args ...interface{}) string { + msg := &bytes.Buffer{} + fmt.Fprint(msg, "codelens failed") + if i >= 0 { + fmt.Fprintf(msg, " at %d", i) + } + fmt.Fprint(msg, " because of ") + fmt.Fprintf(msg, reason, args...) + fmt.Fprint(msg, ":\nexpected:\n") + for _, d := range want { + fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title) + } + fmt.Fprintf(msg, "got:\n") + for _, d := range got { + fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title) + } + return msg.String() +} + +func DiffSignatures(spn span.Span, want, got *protocol.SignatureHelp) string { + decorate := func(f string, args ...interface{}) string { + return fmt.Sprintf("invalid signature at %s: %s", spn, fmt.Sprintf(f, args...)) + } + if len(got.Signatures) != 1 { + return decorate("wanted 1 signature, got %d", len(got.Signatures)) + } + if got.ActiveSignature != 0 { + return decorate("wanted active signature of 0, got %d", int(got.ActiveSignature)) + } + if want.ActiveParameter != got.ActiveParameter { + return decorate("wanted active parameter of %d, got %d", want.ActiveParameter, int(got.ActiveParameter)) + } + g := got.Signatures[0] + w := want.Signatures[0] + if diff := compare.Text(NormalizeAny(w.Label), NormalizeAny(g.Label)); diff != "" { + return decorate("mismatched labels:\n%s", diff) + } + var paramParts []string + for _, p := range g.Parameters { + paramParts = append(paramParts, p.Label) + } + paramsStr := strings.Join(paramParts, ", ") + if !strings.Contains(g.Label, paramsStr) { + return decorate("expected signature %q to contain params %q", g.Label, paramsStr) + } + return "" +} + +// NormalizeAny replaces occurrences of interface{} in input with any. +// +// In Go 1.18, standard library functions were changed to use the 'any' +// alias in place of interface{}, which affects their type string. +func NormalizeAny(input string) string { + return strings.ReplaceAll(input, "interface{}", "any") +} + +// DiffCallHierarchyItems returns the diff between expected and actual call locations for incoming/outgoing call hierarchies +func DiffCallHierarchyItems(gotCalls []protocol.CallHierarchyItem, expectedCalls []protocol.CallHierarchyItem) string { + expected := make(map[protocol.Location]bool) + for _, call := range expectedCalls { + expected[protocol.Location{URI: call.URI, Range: call.Range}] = true + } + + got := make(map[protocol.Location]bool) + for _, call := range gotCalls { + got[protocol.Location{URI: call.URI, Range: call.Range}] = true + } + if len(got) != len(expected) { + return fmt.Sprintf("expected %d calls but got %d", len(expected), len(got)) + } + for spn := range got { + if !expected[spn] { + return fmt.Sprintf("incorrect calls, expected locations %v but got locations %v", expected, got) + } + } + return "" +} + +func FilterBuiltins(src span.Span, items []protocol.CompletionItem) []protocol.CompletionItem { + var ( + got []protocol.CompletionItem + wantBuiltins = strings.Contains(string(src.URI()), "builtins") + wantKeywords = strings.Contains(string(src.URI()), "keywords") + ) + for _, item := range items { + if !wantBuiltins && isBuiltin(item.Label, item.Detail, item.Kind) { + continue + } + + if !wantKeywords && token.Lookup(item.Label).IsKeyword() { + continue + } + + got = append(got, item) + } + return got +} + +func isBuiltin(label, detail string, kind protocol.CompletionItemKind) bool { + if detail == "" && kind == protocol.ClassCompletion { + return true + } + // Remaining builtin constants, variables, interfaces, and functions. + trimmed := label + if i := strings.Index(trimmed, "("); i >= 0 { + trimmed = trimmed[:i] + } + return builtins[trimmed] +} + +func CheckCompletionOrder(want, got []protocol.CompletionItem, strictScores bool) string { + var ( + matchedIdxs []int + lastGotIdx int + lastGotSort float64 + inOrder = true + errorMsg = "completions out of order" + ) + for _, w := range want { + var found bool + for i, g := range got { + if w.Label == g.Label && NormalizeAny(w.Detail) == NormalizeAny(g.Detail) && w.Kind == g.Kind { + matchedIdxs = append(matchedIdxs, i) + found = true + + if i < lastGotIdx { + inOrder = false + } + lastGotIdx = i + + sort, _ := strconv.ParseFloat(g.SortText, 64) + if strictScores && len(matchedIdxs) > 1 && sort <= lastGotSort { + inOrder = false + errorMsg = "candidate scores not strictly decreasing" + } + lastGotSort = sort + + break + } + } + if !found { + return summarizeCompletionItems(-1, []protocol.CompletionItem{w}, got, "didn't find expected completion") + } + } + + sort.Ints(matchedIdxs) + matched := make([]protocol.CompletionItem, 0, len(matchedIdxs)) + for _, idx := range matchedIdxs { + matched = append(matched, got[idx]) + } + + if !inOrder { + return summarizeCompletionItems(-1, want, matched, errorMsg) + } + + return "" +} + +func DiffSnippets(want string, got *protocol.CompletionItem) string { + if want == "" { + if got != nil { + x := got.TextEdit + return fmt.Sprintf("expected no snippet but got %s", x.NewText) + } + } else { + if got == nil { + return fmt.Sprintf("couldn't find completion matching %q", want) + } + x := got.TextEdit + if want != x.NewText { + return fmt.Sprintf("expected snippet %q, got %q", want, x.NewText) + } + } + return "" +} + +func FindItem(list []protocol.CompletionItem, want completion.CompletionItem) *protocol.CompletionItem { + for _, item := range list { + if item.Label == want.Label { + return &item + } + } + return nil +} + +// DiffCompletionItems prints the diff between expected and actual completion +// test results. +// +// The diff will be formatted using '-' and '+' for want and got, respectively. +func DiffCompletionItems(want, got []protocol.CompletionItem) string { + // Many fields are not set in the "want" slice. + irrelevantFields := []string{ + "AdditionalTextEdits", + "Documentation", + "TextEdit", + "SortText", + "Preselect", + "FilterText", + "InsertText", + "InsertTextFormat", + } + ignore := cmpopts.IgnoreFields(protocol.CompletionItem{}, irrelevantFields...) + normalizeAny := cmpopts.AcyclicTransformer("NormalizeAny", func(item protocol.CompletionItem) protocol.CompletionItem { + item.Detail = NormalizeAny(item.Detail) + return item + }) + return cmp.Diff(want, got, ignore, normalizeAny) +} + +func summarizeCompletionItems(i int, want, got []protocol.CompletionItem, reason string, args ...interface{}) string { + msg := &bytes.Buffer{} + fmt.Fprint(msg, "completion failed") + if i >= 0 { + fmt.Fprintf(msg, " at %d", i) + } + fmt.Fprint(msg, " because of ") + fmt.Fprintf(msg, reason, args...) + fmt.Fprint(msg, ":\nexpected:\n") + for _, d := range want { + fmt.Fprintf(msg, " %v\n", d) + } + fmt.Fprintf(msg, "got:\n") + for _, d := range got { + fmt.Fprintf(msg, " %v\n", d) + } + return msg.String() +} + +func EnableAllAnalyzers(opts *source.Options) { + if opts.Analyses == nil { + opts.Analyses = make(map[string]bool) + } + for _, a := range opts.DefaultAnalyzers { + if !a.IsEnabled(opts) { + opts.Analyses[a.Analyzer.Name] = true + } + } + for _, a := range opts.TypeErrorAnalyzers { + if !a.IsEnabled(opts) { + opts.Analyses[a.Analyzer.Name] = true + } + } + for _, a := range opts.ConvenienceAnalyzers { + if !a.IsEnabled(opts) { + opts.Analyses[a.Analyzer.Name] = true + } + } + for _, a := range opts.StaticcheckAnalyzers { + if !a.IsEnabled(opts) { + opts.Analyses[a.Analyzer.Name] = true + } + } +} + +func EnableAllInlayHints(opts *source.Options) { + if opts.Hints == nil { + opts.Hints = make(map[string]bool) + } + for name := range source.AllInlayHints { + opts.Hints[name] = true + } +} + +func WorkspaceSymbolsString(ctx context.Context, data *Data, queryURI span.URI, symbols []protocol.SymbolInformation) (string, error) { + queryDir := filepath.Dir(queryURI.Filename()) + var filtered []string + for _, s := range symbols { + uri := s.Location.URI.SpanURI() + dir := filepath.Dir(uri.Filename()) + if !source.InDir(queryDir, dir) { // assume queries always issue from higher directories + continue + } + m, err := data.Mapper(uri) + if err != nil { + return "", err + } + spn, err := m.LocationSpan(s.Location) + if err != nil { + return "", err + } + filtered = append(filtered, fmt.Sprintf("%s %s %s", spn, s.Name, s.Kind)) + } + sort.Strings(filtered) + return strings.Join(filtered, "\n") + "\n", nil +} + +func WorkspaceSymbolsTestTypeToMatcher(typ WorkspaceSymbolsTestType) source.SymbolMatcher { + switch typ { + case WorkspaceSymbolsFuzzy: + return source.SymbolFuzzy + case WorkspaceSymbolsCaseSensitive: + return source.SymbolCaseSensitive + default: + return source.SymbolCaseInsensitive + } +} + +// LocationsToSpans converts protocol location into span form for testing. +func LocationsToSpans(data *Data, locs []protocol.Location) ([]span.Span, error) { + spans := make([]span.Span, len(locs)) + for i, loc := range locs { + m, err := data.Mapper(loc.URI.SpanURI()) + if err != nil { + return nil, err + } + spn, err := m.LocationSpan(loc) + if err != nil { + return nil, fmt.Errorf("failed for %v: %w", loc, err) + } + spans[i] = spn + } + return spans, nil +} + +// SortAndFormatSpans sorts and formats a list of spans for use in an assertion. +func SortAndFormatSpans(spans []span.Span) string { + span.SortSpans(spans) + var buf strings.Builder + for _, spn := range spans { + fmt.Fprintf(&buf, "%v\n", spn) + } + return buf.String() +} diff --git a/gopls/internal/lsp/tests/util_go118.go b/gopls/internal/lsp/tests/util_go118.go new file mode 100644 index 00000000000..6115342df74 --- /dev/null +++ b/gopls/internal/lsp/tests/util_go118.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package tests + +func init() { + builtins["any"] = true + builtins["comparable"] = true +} diff --git a/gopls/internal/lsp/tests/util_go121.go b/gopls/internal/lsp/tests/util_go121.go new file mode 100644 index 00000000000..93065864802 --- /dev/null +++ b/gopls/internal/lsp/tests/util_go121.go @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package tests + +func init() { + builtins["clear"] = true +} diff --git a/gopls/internal/lsp/text_synchronization.go b/gopls/internal/lsp/text_synchronization.go new file mode 100644 index 00000000000..b7be1e1ce11 --- /dev/null +++ b/gopls/internal/lsp/text_synchronization.go @@ -0,0 +1,349 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "bytes" + "context" + "errors" + "fmt" + "path/filepath" + "sync" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/jsonrpc2" +) + +// ModificationSource identifies the originating cause of a file modification. +type ModificationSource int + +const ( + // FromDidOpen is a file modification caused by opening a file. + FromDidOpen = ModificationSource(iota) + + // FromDidChange is a file modification caused by changing a file. + FromDidChange + + // FromDidChangeWatchedFiles is a file modification caused by a change to a + // watched file. + FromDidChangeWatchedFiles + + // FromDidSave is a file modification caused by a file save. + FromDidSave + + // FromDidClose is a file modification caused by closing a file. + FromDidClose + + // TODO: add FromDidChangeConfiguration, once configuration changes cause a + // new snapshot to be created. + + // FromRegenerateCgo refers to file modifications caused by regenerating + // the cgo sources for the workspace. + FromRegenerateCgo + + // FromInitialWorkspaceLoad refers to the loading of all packages in the + // workspace when the view is first created. + FromInitialWorkspaceLoad +) + +func (m ModificationSource) String() string { + switch m { + case FromDidOpen: + return "opened files" + case FromDidChange: + return "changed files" + case FromDidChangeWatchedFiles: + return "files changed on disk" + case FromDidSave: + return "saved files" + case FromDidClose: + return "close files" + case FromRegenerateCgo: + return "regenerate cgo" + case FromInitialWorkspaceLoad: + return "initial workspace load" + default: + return "unknown file modification" + } +} + +func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error { + uri := params.TextDocument.URI.SpanURI() + if !uri.IsFile() { + return nil + } + // There may not be any matching view in the current session. If that's + // the case, try creating a new view based on the opened file path. + // + // TODO(rstambler): This seems like it would continuously add new + // views, but it won't because ViewOf only returns an error when there + // are no views in the session. I don't know if that logic should go + // here, or if we can continue to rely on that implementation detail. + if _, err := s.session.ViewOf(uri); err != nil { + dir := filepath.Dir(uri.Filename()) + if err := s.addFolders(ctx, []protocol.WorkspaceFolder{{ + URI: string(protocol.URIFromPath(dir)), + Name: filepath.Base(dir), + }}); err != nil { + return err + } + } + return s.didModifyFiles(ctx, []source.FileModification{{ + URI: uri, + Action: source.Open, + Version: params.TextDocument.Version, + Text: []byte(params.TextDocument.Text), + LanguageID: params.TextDocument.LanguageID, + }}, FromDidOpen) +} + +func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error { + uri := params.TextDocument.URI.SpanURI() + if !uri.IsFile() { + return nil + } + + text, err := s.changedText(ctx, uri, params.ContentChanges) + if err != nil { + return err + } + c := source.FileModification{ + URI: uri, + Action: source.Change, + Version: params.TextDocument.Version, + Text: text, + } + if err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange); err != nil { + return err + } + return s.warnAboutModifyingGeneratedFiles(ctx, uri) +} + +// warnAboutModifyingGeneratedFiles shows a warning if a user tries to edit a +// generated file for the first time. +func (s *Server) warnAboutModifyingGeneratedFiles(ctx context.Context, uri span.URI) error { + s.changedFilesMu.Lock() + _, ok := s.changedFiles[uri] + if !ok { + s.changedFiles[uri] = struct{}{} + } + s.changedFilesMu.Unlock() + + // This file has already been edited before. + if ok { + return nil + } + + // Ideally, we should be able to specify that a generated file should + // be opened as read-only. Tell the user that they should not be + // editing a generated file. + view, err := s.session.ViewOf(uri) + if err != nil { + return err + } + snapshot, release, err := view.Snapshot() + if err != nil { + return err + } + isGenerated := source.IsGenerated(ctx, snapshot, uri) + release() + + if !isGenerated { + return nil + } + return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Message: fmt.Sprintf("Do not edit this file! %s is a generated file.", uri.Filename()), + Type: protocol.Warning, + }) +} + +func (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error { + var modifications []source.FileModification + for _, change := range params.Changes { + uri := change.URI.SpanURI() + if !uri.IsFile() { + continue + } + action := changeTypeToFileAction(change.Type) + modifications = append(modifications, source.FileModification{ + URI: uri, + Action: action, + OnDisk: true, + }) + } + return s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles) +} + +func (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error { + uri := params.TextDocument.URI.SpanURI() + if !uri.IsFile() { + return nil + } + c := source.FileModification{ + URI: uri, + Action: source.Save, + } + if params.Text != nil { + c.Text = []byte(*params.Text) + } + return s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave) +} + +func (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error { + uri := params.TextDocument.URI.SpanURI() + if !uri.IsFile() { + return nil + } + return s.didModifyFiles(ctx, []source.FileModification{ + { + URI: uri, + Action: source.Close, + Version: -1, + Text: nil, + }, + }, FromDidClose) +} + +func (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) error { + // wg guards two conditions: + // 1. didModifyFiles is complete + // 2. the goroutine diagnosing changes on behalf of didModifyFiles is + // complete, if it was started + // + // Both conditions must be satisfied for the purpose of testing: we don't + // want to observe the completion of change processing until we have received + // all diagnostics as well as all server->client notifications done on behalf + // of this function. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + + if s.session.Options().VerboseWorkDoneProgress { + work := s.progress.Start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil) + go func() { + wg.Wait() + work.End(ctx, "Done.") + }() + } + + onDisk := cause == FromDidChangeWatchedFiles + + s.stateMu.Lock() + if s.state >= serverShutDown { + // This state check does not prevent races below, and exists only to + // produce a better error message. The actual race to the cache should be + // guarded by Session.viewMu. + s.stateMu.Unlock() + return errors.New("server is shut down") + } + s.stateMu.Unlock() + + // If the set of changes included directories, expand those directories + // to their files. + modifications = s.session.ExpandModificationsToDirectories(ctx, modifications) + + // Build a lookup map for file modifications, so that we can later join + // with the snapshot file associations. + modMap := make(map[span.URI]source.FileModification) + for _, mod := range modifications { + modMap[mod.URI] = mod + } + + snapshots, release, err := s.session.DidModifyFiles(ctx, modifications) + if err != nil { + return err + } + + // golang/go#50267: diagnostics should be re-sent after an open or close. For + // some clients, it may be helpful to re-send after each change. + for snapshot, uris := range snapshots { + for _, uri := range uris { + mod := modMap[uri] + if snapshot.View().Options().ChattyDiagnostics || mod.Action == source.Open || mod.Action == source.Close { + s.mustPublishDiagnostics(uri) + } + } + } + + wg.Add(1) + go func() { + s.diagnoseSnapshots(snapshots, onDisk) + release() + wg.Done() + }() + + // After any file modifications, we need to update our watched files, + // in case something changed. Compute the new set of directories to watch, + // and if it differs from the current set, send updated registrations. + return s.updateWatchedDirectories(ctx) +} + +// DiagnosticWorkTitle returns the title of the diagnostic work resulting from a +// file change originating from the given cause. +func DiagnosticWorkTitle(cause ModificationSource) string { + return fmt.Sprintf("diagnosing %v", cause) +} + +func (s *Server) changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) { + if len(changes) == 0 { + return nil, fmt.Errorf("%w: no content changes provided", jsonrpc2.ErrInternal) + } + + // Check if the client sent the full content of the file. + // We accept a full content change even if the server expected incremental changes. + if len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 { + return []byte(changes[0].Text), nil + } + return s.applyIncrementalChanges(ctx, uri, changes) +} + +func (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) { + fh, err := s.session.GetFile(ctx, uri) + if err != nil { + return nil, err + } + content, err := fh.Read() + if err != nil { + return nil, fmt.Errorf("%w: file not found (%v)", jsonrpc2.ErrInternal, err) + } + for _, change := range changes { + // TODO(adonovan): refactor to use diff.Apply, which is robust w.r.t. + // out-of-order or overlapping changes---and much more efficient. + + // Make sure to update mapper along with the content. + m := protocol.NewMapper(uri, content) + if change.Range == nil { + return nil, fmt.Errorf("%w: unexpected nil range for change", jsonrpc2.ErrInternal) + } + spn, err := m.RangeSpan(*change.Range) + if err != nil { + return nil, err + } + start, end := spn.Start().Offset(), spn.End().Offset() + if end < start { + return nil, fmt.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal) + } + var buf bytes.Buffer + buf.Write(content[:start]) + buf.WriteString(change.Text) + buf.Write(content[end:]) + content = buf.Bytes() + } + return content, nil +} + +func changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction { + switch ct { + case protocol.Changed: + return source.Change + case protocol.Created: + return source.Create + case protocol.Deleted: + return source.Delete + } + return source.UnknownFileAction +} diff --git a/gopls/internal/lsp/work/completion.go b/gopls/internal/lsp/work/completion.go new file mode 100644 index 00000000000..bcdc2d1f42e --- /dev/null +++ b/gopls/internal/lsp/work/completion.go @@ -0,0 +1,154 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package work + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.CompletionList, error) { + ctx, done := event.Start(ctx, "work.Completion") + defer done() + + // Get the position of the cursor. + pw, err := snapshot.ParseWork(ctx, fh) + if err != nil { + return nil, fmt.Errorf("getting go.work file handle: %w", err) + } + cursor, err := pw.Mapper.PositionOffset(position) + if err != nil { + return nil, fmt.Errorf("computing cursor offset: %w", err) + } + + // Find the use statement the user is in. + use, pathStart, _ := usePath(pw, cursor) + if use == nil { + return &protocol.CompletionList{}, nil + } + completingFrom := use.Path[:cursor-pathStart] + + // We're going to find the completions of the user input + // (completingFrom) by doing a walk on the innermost directory + // of the given path, and comparing the found paths to make sure + // that they match the component of the path after the + // innermost directory. + // + // We'll maintain two paths when doing this: pathPrefixSlash + // is essentially the path the user typed in, and pathPrefixAbs + // is the path made absolute from the go.work directory. + + pathPrefixSlash := completingFrom + pathPrefixAbs := filepath.FromSlash(pathPrefixSlash) + if !filepath.IsAbs(pathPrefixAbs) { + pathPrefixAbs = filepath.Join(filepath.Dir(pw.URI.Filename()), pathPrefixAbs) + } + + // pathPrefixDir is the directory that will be walked to find matches. + // If pathPrefixSlash is not explicitly a directory boundary (is either equivalent to "." or + // ends in a separator) we need to examine its parent directory to find sibling files that + // match. + depthBound := 5 + pathPrefixDir, pathPrefixBase := pathPrefixAbs, "" + pathPrefixSlashDir := pathPrefixSlash + if filepath.Clean(pathPrefixSlash) != "." && !strings.HasSuffix(pathPrefixSlash, "/") { + depthBound++ + pathPrefixDir, pathPrefixBase = filepath.Split(pathPrefixAbs) + pathPrefixSlashDir = dirNonClean(pathPrefixSlash) + } + + var completions []string + // Stop traversing deeper once we've hit 10k files to try to stay generally under 100ms. + const numSeenBound = 10000 + var numSeen int + stopWalking := errors.New("hit numSeenBound") + err = filepath.Walk(pathPrefixDir, func(wpath string, info os.FileInfo, err error) error { + if numSeen > numSeenBound { + // Stop traversing if we hit bound. + return stopWalking + } + numSeen++ + + // rel is the path relative to pathPrefixDir. + // Make sure that it has pathPrefixBase as a prefix + // otherwise it won't match the beginning of the + // base component of the path the user typed in. + rel := strings.TrimPrefix(wpath[len(pathPrefixDir):], string(filepath.Separator)) + if info.IsDir() && wpath != pathPrefixDir && !strings.HasPrefix(rel, pathPrefixBase) { + return filepath.SkipDir + } + + // Check for a match (a module directory). + if filepath.Base(rel) == "go.mod" { + relDir := strings.TrimSuffix(dirNonClean(rel), string(os.PathSeparator)) + completionPath := join(pathPrefixSlashDir, filepath.ToSlash(relDir)) + + if !strings.HasPrefix(completionPath, completingFrom) { + return nil + } + if strings.HasSuffix(completionPath, "/") { + // Don't suggest paths that end in "/". This happens + // when the input is a path that ends in "/" and + // the completion is empty. + return nil + } + completion := completionPath[len(completingFrom):] + if completingFrom == "" && !strings.HasPrefix(completion, "./") { + // Bias towards "./" prefixes. + completion = join(".", completion) + } + + completions = append(completions, completion) + } + + if depth := strings.Count(rel, string(filepath.Separator)); depth >= depthBound { + return filepath.SkipDir + } + return nil + }) + if err != nil && !errors.Is(err, stopWalking) { + return nil, fmt.Errorf("walking to find completions: %w", err) + } + + sort.Strings(completions) + + var items []protocol.CompletionItem + for _, c := range completions { + items = append(items, protocol.CompletionItem{ + Label: c, + InsertText: c, + }) + } + return &protocol.CompletionList{Items: items}, nil +} + +// dirNonClean is filepath.Dir, without the Clean at the end. +func dirNonClean(path string) string { + vol := filepath.VolumeName(path) + i := len(path) - 1 + for i >= len(vol) && !os.IsPathSeparator(path[i]) { + i-- + } + return path[len(vol) : i+1] +} + +func join(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return strings.TrimSuffix(a, "/") + "/" + b +} diff --git a/gopls/internal/lsp/work/diagnostics.go b/gopls/internal/lsp/work/diagnostics.go new file mode 100644 index 00000000000..cbcc8505512 --- /dev/null +++ b/gopls/internal/lsp/work/diagnostics.go @@ -0,0 +1,92 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package work + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/internal/event" +) + +func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) { + ctx, done := event.Start(ctx, "work.Diagnostics", source.SnapshotLabels(snapshot)...) + defer done() + + reports := map[span.URI][]*source.Diagnostic{} + uri := snapshot.WorkFile() + if uri == "" { + return nil, nil + } + fh, err := snapshot.GetFile(ctx, uri) + if err != nil { + return nil, err + } + reports[fh.URI()] = []*source.Diagnostic{} + diagnostics, err := DiagnosticsForWork(ctx, snapshot, fh) + if err != nil { + return nil, err + } + for _, d := range diagnostics { + fh, err := snapshot.GetFile(ctx, d.URI) + if err != nil { + return nil, err + } + reports[fh.URI()] = append(reports[fh.URI()], d) + } + + return reports, nil +} + +func DiagnosticsForWork(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) { + pw, err := snapshot.ParseWork(ctx, fh) + if err != nil { + if pw == nil || len(pw.ParseErrors) == 0 { + return nil, err + } + return pw.ParseErrors, nil + } + + // Add diagnostic if a directory does not contain a module. + var diagnostics []*source.Diagnostic + for _, use := range pw.File.Use { + rng, err := pw.Mapper.OffsetRange(use.Syntax.Start.Byte, use.Syntax.End.Byte) + if err != nil { + return nil, err + } + + modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use)) + if err != nil { + return nil, err + } + if _, err := modfh.Read(); err != nil && os.IsNotExist(err) { + diagnostics = append(diagnostics, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.WorkFileError, + Message: fmt.Sprintf("directory %v does not contain a module", use.Path), + }) + } + } + return diagnostics, nil +} + +func modFileURI(pw *source.ParsedWorkFile, use *modfile.Use) span.URI { + workdir := filepath.Dir(pw.URI.Filename()) + + modroot := filepath.FromSlash(use.Path) + if !filepath.IsAbs(modroot) { + modroot = filepath.Join(workdir, modroot) + } + + return span.URIFromPath(filepath.Join(modroot, "go.mod")) +} diff --git a/gopls/internal/lsp/work/format.go b/gopls/internal/lsp/work/format.go new file mode 100644 index 00000000000..e852eb4d27e --- /dev/null +++ b/gopls/internal/lsp/work/format.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package work + +import ( + "context" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) { + ctx, done := event.Start(ctx, "work.Format") + defer done() + + pw, err := snapshot.ParseWork(ctx, fh) + if err != nil { + return nil, err + } + formatted := modfile.Format(pw.File.Syntax) + // Calculate the edits to be made due to the change. + diffs := snapshot.View().Options().ComputeEdits(string(pw.Mapper.Content), string(formatted)) + return source.ToProtocolEdits(pw.Mapper, diffs) +} diff --git a/gopls/internal/lsp/work/hover.go b/gopls/internal/lsp/work/hover.go new file mode 100644 index 00000000000..1a1b299fd76 --- /dev/null +++ b/gopls/internal/lsp/work/hover.go @@ -0,0 +1,89 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package work + +import ( + "bytes" + "context" + "fmt" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) { + // We only provide hover information for the view's go.work file. + if fh.URI() != snapshot.WorkFile() { + return nil, nil + } + + ctx, done := event.Start(ctx, "work.Hover") + defer done() + + // Get the position of the cursor. + pw, err := snapshot.ParseWork(ctx, fh) + if err != nil { + return nil, fmt.Errorf("getting go.work file handle: %w", err) + } + offset, err := pw.Mapper.PositionOffset(position) + if err != nil { + return nil, fmt.Errorf("computing cursor offset: %w", err) + } + + // Confirm that the cursor is inside a use statement, and then find + // the position of the use statement's directory path. + use, pathStart, pathEnd := usePath(pw, offset) + + // The cursor position is not on a use statement. + if use == nil { + return nil, nil + } + + // Get the mod file denoted by the use. + modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use)) + if err != nil { + return nil, fmt.Errorf("getting modfile handle: %w", err) + } + pm, err := snapshot.ParseMod(ctx, modfh) + if err != nil { + return nil, fmt.Errorf("getting modfile handle: %w", err) + } + mod := pm.File.Module.Mod + + // Get the range to highlight for the hover. + rng, err := pw.Mapper.OffsetRange(pathStart, pathEnd) + if err != nil { + return nil, err + } + options := snapshot.View().Options() + return &protocol.Hover{ + Contents: protocol.MarkupContent{ + Kind: options.PreferredContentFormat, + Value: mod.Path, + }, + Range: rng, + }, nil +} + +func usePath(pw *source.ParsedWorkFile, offset int) (use *modfile.Use, pathStart, pathEnd int) { + for _, u := range pw.File.Use { + path := []byte(u.Path) + s, e := u.Syntax.Start.Byte, u.Syntax.End.Byte + i := bytes.Index(pw.Mapper.Content[s:e], path) + if i == -1 { + // This should not happen. + continue + } + // Shift the start position to the location of the + // module directory within the use statement. + pathStart, pathEnd = s+i, s+i+len(path) + if pathStart <= offset && offset <= pathEnd { + return u, pathStart, pathEnd + } + } + return nil, 0, 0 +} diff --git a/gopls/internal/lsp/workspace.go b/gopls/internal/lsp/workspace.go new file mode 100644 index 00000000000..c50ae32bb1c --- /dev/null +++ b/gopls/internal/lsp/workspace.go @@ -0,0 +1,95 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + "fmt" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/span" +) + +func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error { + event := params.Event + for _, folder := range event.Removed { + view := s.session.View(folder.Name) + if view != nil { + s.session.RemoveView(view) + } else { + return fmt.Errorf("view %s for %v not found", folder.Name, folder.URI) + } + } + return s.addFolders(ctx, event.Added) +} + +// addView returns a Snapshot and a release function that must be +// called when it is no longer needed. +func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) { + s.stateMu.Lock() + state := s.state + s.stateMu.Unlock() + if state < serverInitialized { + return nil, nil, fmt.Errorf("addView called before server initialized") + } + options := s.session.Options().Clone() + if err := s.fetchConfig(ctx, name, uri, options); err != nil { + return nil, nil, err + } + _, snapshot, release, err := s.session.NewView(ctx, name, uri, options) + return snapshot, release, err +} + +func (s *Server) didChangeConfiguration(ctx context.Context, _ *protocol.DidChangeConfigurationParams) error { + // Apply any changes to the session-level settings. + options := s.session.Options().Clone() + if err := s.fetchConfig(ctx, "", "", options); err != nil { + return err + } + s.session.SetOptions(options) + + // Go through each view, getting and updating its configuration. + for _, view := range s.session.Views() { + options := s.session.Options().Clone() + if err := s.fetchConfig(ctx, view.Name(), view.Folder(), options); err != nil { + return err + } + view, err := s.session.SetViewOptions(ctx, view, options) + if err != nil { + return err + } + go func() { + snapshot, release, err := view.Snapshot() + if err != nil { + return // view is shut down; no need to diagnose + } + defer release() + s.diagnoseDetached(snapshot) + }() + } + + // An options change may have affected the detected Go version. + s.checkViewGoVersions() + + return nil +} + +func semanticTokenRegistration(tokenTypes, tokenModifiers []string) protocol.Registration { + return protocol.Registration{ + ID: "textDocument/semanticTokens", + Method: "textDocument/semanticTokens", + RegisterOptions: &protocol.SemanticTokensOptions{ + Legend: protocol.SemanticTokensLegend{ + // TODO(pjw): trim these to what we use (and an unused one + // at position 0 of TokTypes, to catch typos) + TokenTypes: tokenTypes, + TokenModifiers: tokenModifiers, + }, + Full: true, + Range: true, + }, + } +} diff --git a/gopls/internal/lsp/workspace_symbol.go b/gopls/internal/lsp/workspace_symbol.go new file mode 100644 index 00000000000..88b3e8865ae --- /dev/null +++ b/gopls/internal/lsp/workspace_symbol.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" +) + +func (s *Server) symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) { + ctx, done := event.Start(ctx, "lsp.Server.symbol") + defer done() + + views := s.session.Views() + matcher := s.session.Options().SymbolMatcher + style := s.session.Options().SymbolStyle + // TODO(rfindley): it looks wrong that we need to pass views here. + // + // Evidence: + // - this is the only place we convert views to []source.View + // - workspace symbols is the only place where we call source.View.Snapshot + var sourceViews []source.View + for _, v := range views { + sourceViews = append(sourceViews, v) + } + return source.WorkspaceSymbols(ctx, matcher, style, sourceViews, params.Query) +} diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go index 5e4eb5fc23a..2048e9667b6 100644 --- a/gopls/internal/regtest/bench/bench_test.go +++ b/gopls/internal/regtest/bench/bench_test.go @@ -5,190 +5,273 @@ package bench import ( + "context" "flag" "fmt" + "io/ioutil" + "log" "os" - "runtime/pprof" + "os/exec" + "path/filepath" + "sync" "testing" + "time" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/fake" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/fakenet" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" - "golang.org/x/tools/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) +// This package implements benchmarks that share a common editor session. +// +// It is a work-in-progress. +// +// Remaining TODO(rfindley): +// - add detailed documentation for how to write a benchmark, as a package doc +// - add benchmarks for more features +// - eliminate flags, and just run benchmarks on with a predefined set of +// arguments + func TestMain(m *testing.M) { bug.PanicOnBugs = true - Main(m, hooks.Options) + event.SetExporter(nil) // don't log to stderr + code := doMain(m) + os.Exit(code) } -func benchmarkOptions(dir string) []RunOption { - return []RunOption{ - // Run in an existing directory, since we're trying to simulate known cases - // that cause gopls memory problems. - InExistingDir(dir), - // Skip logs as they buffer up memory unnaturally. - SkipLogs(), - // The Debug server only makes sense if running in singleton mode. - Modes(Singleton), - // Remove the default timeout. Individual tests should control their - // own graceful termination. - NoDefaultTimeout(), - - // Use the actual proxy, since we want our builds to succeed. - GOPROXY("https://proxy.golang.org"), - } +func doMain(m *testing.M) (code int) { + defer func() { + if editor != nil { + if err := editor.Close(context.Background()); err != nil { + fmt.Fprintf(os.Stderr, "closing editor: %v", err) + if code == 0 { + code = 1 + } + } + } + if tempDir != "" { + if err := os.RemoveAll(tempDir); err != nil { + fmt.Fprintf(os.Stderr, "cleaning temp dir: %v", err) + if code == 0 { + code = 1 + } + } + } + }() + return m.Run() } -func printBenchmarkResults(result testing.BenchmarkResult) { - fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString()) -} +var ( + workdir = flag.String("workdir", "", "if set, working directory to use for benchmarks; overrides -repo and -commit") + repo = flag.String("repo", "https://go.googlesource.com/tools", "if set (and -workdir is unset), run benchmarks in this repo") + file = flag.String("file", "go/ast/astutil/util.go", "active file, for benchmarks that operate on a file") + commitish = flag.String("commit", "gopls/v0.9.0", "if set (and -workdir is unset), run benchmarks at this commit") + + goplsPath = flag.String("gopls_path", "", "if set, use this gopls for testing; incompatible with -gopls_commit") + goplsCommit = flag.String("gopls_commit", "", "if set, install and use gopls at this commit for testing; incompatible with -gopls_path") + + // If non-empty, tempDir is a temporary working dir that was created by this + // test suite. + // + // The sync.Once variables guard various modifications of the temp directory. + makeTempDirOnce sync.Once + checkoutRepoOnce sync.Once + installGoplsOnce sync.Once + tempDir string + + setupEditorOnce sync.Once + sandbox *fake.Sandbox + editor *fake.Editor + awaiter *Awaiter +) -var iwlOptions struct { - workdir string +// getTempDir returns the temporary directory to use for benchmark files, +// creating it if necessary. +func getTempDir() string { + makeTempDirOnce.Do(func() { + var err error + tempDir, err = ioutil.TempDir("", "gopls-bench") + if err != nil { + log.Fatal(err) + } + }) + return tempDir } -func init() { - flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory") +// benchmarkDir returns the directory to use for benchmarks. +// +// If -workdir is set, just use that directory. Otherwise, check out a shallow +// copy of -repo at the given -commit, and clean up when the test suite exits. +func benchmarkDir() string { + if *workdir != "" { + return *workdir + } + if *repo == "" { + log.Fatal("-repo must be provided if -workdir is unset") + } + if *commitish == "" { + log.Fatal("-commit must be provided if -workdir is unset") + } + + dir := filepath.Join(getTempDir(), "repo") + checkoutRepoOnce.Do(func() { + log.Printf("creating working dir: checking out %s@%s to %s\n", *repo, *commitish, dir) + if err := shallowClone(dir, *repo, *commitish); err != nil { + log.Fatal(err) + } + }) + return dir } -func TestBenchmarkIWL(t *testing.T) { - if iwlOptions.workdir == "" { - t.Skip("-iwl_workdir not configured") +// shallowClone performs a shallow clone of repo into dir at the given +// 'commitish' ref (any commit reference understood by git). +// +// The directory dir must not already exist. +func shallowClone(dir, repo, commitish string) error { + if err := os.Mkdir(dir, 0750); err != nil { + return fmt.Errorf("creating dir for %s: %v", repo, err) } - opts := stressTestOptions(iwlOptions.workdir) - // Don't skip hooks, so that we can wait for IWL. - opts = append(opts, SkipHooks(false)) + // Set a timeout for git fetch. If this proves flaky, it can be removed. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + // Use a shallow fetch to download just the relevant commit. + shInit := fmt.Sprintf("git init && git fetch --depth=1 %q %q && git checkout FETCH_HEAD", repo, commitish) + initCmd := exec.CommandContext(ctx, "/bin/sh", "-c", shInit) + initCmd.Dir = dir + if output, err := initCmd.CombinedOutput(); err != nil { + return fmt.Errorf("checking out %s: %v\n%s", repo, err, output) + } + return nil +} + +// benchmarkEnv returns a shared benchmark environment +func benchmarkEnv(tb testing.TB) *Env { + setupEditorOnce.Do(func() { + dir := benchmarkDir() - results := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {}) + var err error + sandbox, editor, awaiter, err = connectEditor(dir, fake.EditorConfig{}) + if err != nil { + log.Fatalf("connecting editor: %v", err) + } + + if err := awaiter.Await(context.Background(), InitialWorkspaceLoad); err != nil { + panic(err) } }) - printBenchmarkResults(results) + return &Env{ + T: tb, + Ctx: context.Background(), + Editor: editor, + Sandbox: sandbox, + Awaiter: awaiter, + } } -var symbolOptions struct { - workdir, query, matcher, style string - printResults bool -} +// connectEditor connects a fake editor session in the given dir, using the +// given editor config. +func connectEditor(dir string, config fake.EditorConfig) (*fake.Sandbox, *fake.Editor, *Awaiter, error) { + s, err := fake.NewSandbox(&fake.SandboxConfig{ + Workdir: dir, + GOPROXY: "https://proxy.golang.org", + }) + if err != nil { + return nil, nil, nil, err + } -func init() { - flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory") - flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark") - flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark") - flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark") - flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results") + a := NewAwaiter(s.Workdir) + ts := getServer() + e, err := fake.NewEditor(s, config).Connect(context.Background(), ts, a.Hooks()) + if err != nil { + return nil, nil, nil, err + } + return s, e, a, nil } -func TestBenchmarkSymbols(t *testing.T) { - if symbolOptions.workdir == "" { - t.Skip("-symbol_workdir not configured") +// getServer returns a server connector that either starts a new in-process +// server, or starts a separate gopls process. +func getServer() servertest.Connector { + if *goplsPath != "" && *goplsCommit != "" { + panic("can't set both -gopls_path and -gopls_commit") } - - opts := benchmarkOptions(symbolOptions.workdir) - conf := EditorConfig{} - if symbolOptions.matcher != "" { - conf.SymbolMatcher = &symbolOptions.matcher + if *goplsPath != "" { + return &SidecarServer{*goplsPath} } - if symbolOptions.style != "" { - conf.SymbolStyle = &symbolOptions.style + if *goplsCommit != "" { + path := getInstalledGopls() + return &SidecarServer{path} } - opts = append(opts, conf) - - WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) { - // We can't Await in this test, since we have disabled hooks. Instead, run - // one symbol request to completion to ensure all necessary cache entries - // are populated. - symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{ - Query: symbolOptions.query, - }) - if err != nil { - t.Fatal(err) + server := lsprpc.NewStreamServer(cache.New(nil, nil), false, hooks.Options) + return servertest.NewPipeServer(server, jsonrpc2.NewRawStream) +} + +// getInstalledGopls builds gopls at the given -gopls_commit, returning the +// path to the gopls binary. +func getInstalledGopls() string { + if *goplsCommit == "" { + panic("must provide -gopls_commit") + } + toolsDir := filepath.Join(getTempDir(), "tools") + goplsPath := filepath.Join(toolsDir, "gopls", "gopls") + + installGoplsOnce.Do(func() { + log.Printf("installing gopls: checking out x/tools@%s\n", *goplsCommit) + if err := shallowClone(toolsDir, "https://go.googlesource.com/tools", *goplsCommit); err != nil { + log.Fatal(err) } - if symbolOptions.printResults { - fmt.Println("Results:") - for i := 0; i < len(symbols); i++ { - fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName) - } + log.Println("installing gopls: building...") + bld := exec.Command("go", "build", ".") + bld.Dir = filepath.Join(getTempDir(), "tools", "gopls") + if output, err := bld.CombinedOutput(); err != nil { + log.Fatalf("building gopls: %v\n%s", err, output) } - results := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{ - Query: symbolOptions.query, - }); err != nil { - t.Fatal(err) - } - } - }) - printBenchmarkResults(results) + // Confirm that the resulting path now exists. + if _, err := os.Stat(goplsPath); err != nil { + log.Fatalf("os.Stat(%s): %v", goplsPath, err) + } }) + return goplsPath } -var ( - benchDir = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.") - benchFile = flag.String("didchange_file", "", "The file to modify") - benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to") -) +// A SidecarServer starts (and connects to) a separate gopls process at the +// given path. +type SidecarServer struct { + goplsPath string +} -// TestBenchmarkDidChange benchmarks modifications of a single file by making -// synthetic modifications in a comment. It controls pacing by waiting for the -// server to actually start processing the didChange notification before -// proceeding. Notably it does not wait for diagnostics to complete. -// -// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir -// is the path to a workspace root, and -didchange_file is the -// workspace-relative path to a file to modify. e.g.: -// -// go test -run=TestBenchmarkDidChange \ -// -didchange_dir=path/to/kubernetes \ -// -didchange_file=pkg/util/hash/hash.go -func TestBenchmarkDidChange(t *testing.T) { - if *benchDir == "" { - t.Skip("-didchange_dir is not set") +// Connect creates new io.Pipes and binds them to the underlying StreamServer. +func (s *SidecarServer) Connect(ctx context.Context) jsonrpc2.Conn { + cmd := exec.CommandContext(ctx, s.goplsPath, "serve") + + stdin, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + stdout, err := cmd.StdoutPipe() + if err != nil { + log.Fatal(err) } - if *benchFile == "" { - t.Fatal("-didchange_file must be set if -didchange_dir is set") + cmd.Stderr = os.Stdout + if err := cmd.Start(); err != nil { + log.Fatalf("starting gopls: %v", err) } - opts := benchmarkOptions(*benchDir) - WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) { - env.OpenFile(*benchFile) - env.Await(env.DoneWithOpen()) - // Insert the text we'll be modifying at the top of the file. - env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"}) - - // Run the profiler after the initial load, - // across all benchmark iterations. - if *benchProfile != "" { - profile, err := os.Create(*benchProfile) - if err != nil { - t.Fatal(err) - } - defer profile.Close() - if err := pprof.StartCPUProfile(profile); err != nil { - t.Fatal(err) - } - defer pprof.StopCPUProfile() - } + go cmd.Wait() // to free resources; error is ignored - result := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - env.EditBuffer(*benchFile, fake.Edit{ - Start: fake.Pos{Line: 0, Column: 0}, - End: fake.Pos{Line: 1, Column: 0}, - // Increment - Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1), - }) - env.Await(StartedChange(uint64(i + 1))) - } - }) - printBenchmarkResults(result) - }) + clientStream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", stdout, stdin)) + clientConn := jsonrpc2.NewConn(clientStream) + return clientConn } diff --git a/gopls/internal/regtest/bench/completion_bench_test.go b/gopls/internal/regtest/bench/completion_bench_test.go deleted file mode 100644 index f9b8445891d..00000000000 --- a/gopls/internal/regtest/bench/completion_bench_test.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bench - -import ( - "flag" - "fmt" - "strings" - "testing" - - . "golang.org/x/tools/internal/lsp/regtest" - - "golang.org/x/tools/internal/lsp/fake" -) - -// dummyCompletionFunction to test manually configured completion using CLI. -func dummyCompletionFunction() { const s = "placeholder"; fmt.Printf("%s", s) } - -type completionBenchOptions struct { - workdir, file, locationRegexp string - printResults bool - // hook to run edits before initial completion, not supported for manually - // configured completions. - preCompletionEdits func(*Env) -} - -var completionOptions = completionBenchOptions{} - -func init() { - flag.StringVar(&completionOptions.workdir, "completion_workdir", "", "directory to run completion benchmarks in") - flag.StringVar(&completionOptions.file, "completion_file", "", "relative path to the file to complete in") - flag.StringVar(&completionOptions.locationRegexp, "completion_regexp", "", "regexp location to complete at") - flag.BoolVar(&completionOptions.printResults, "completion_print_results", false, "whether to print completion results") -} - -func benchmarkCompletion(options completionBenchOptions, t *testing.T) { - if completionOptions.workdir == "" { - t.Skip("-completion_workdir not configured, skipping benchmark") - } - - opts := stressTestOptions(options.workdir) - - // Completion gives bad results if IWL is not yet complete, so we must await - // it first (and therefore need hooks). - opts = append(opts, SkipHooks(false)) - - WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) { - env.OpenFile(options.file) - - // Run edits required for this completion. - if options.preCompletionEdits != nil { - options.preCompletionEdits(env) - } - - // Run a completion to make sure the system is warm. - pos := env.RegexpSearch(options.file, options.locationRegexp) - completions := env.Completion(options.file, pos) - - if options.printResults { - fmt.Println("Results:") - for i := 0; i < len(completions.Items); i++ { - fmt.Printf("\t%d. %v\n", i, completions.Items[i]) - } - } - - results := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - env.Completion(options.file, pos) - } - }) - - printBenchmarkResults(results) - }) -} - -// endPosInBuffer returns the position for last character in the buffer for -// the given file. -func endPosInBuffer(env *Env, name string) fake.Pos { - buffer := env.Editor.BufferText(name) - lines := strings.Split(buffer, "\n") - numLines := len(lines) - - return fake.Pos{ - Line: numLines - 1, - Column: len([]rune(lines[numLines-1])), - } -} - -// Benchmark completion at a specified file and location. When no CLI options -// are specified, this test is skipped. -// To Run (from x/tools/gopls) against the dummy function above: -// -// go test -v ./internal/regtest/bench -run=TestBenchmarkConfiguredCompletion -// -completion_workdir="$HOME/Developer/tools" -// -completion_file="gopls/internal/regtest/completion_bench_test.go" -// -completion_regexp="dummyCompletionFunction.*fmt\.Printf\(\"%s\", s(\))" -func TestBenchmarkConfiguredCompletion(t *testing.T) { - benchmarkCompletion(completionOptions, t) -} - -// To run (from x/tools/gopls): -// go test -v ./internal/regtest/bench -run TestBenchmark<>Completion -// -completion_workdir="$HOME/Developer/tools" -// where <> is one of the tests below. completion_workdir should be path to -// x/tools on your system. - -// Benchmark struct completion in tools codebase. -func TestBenchmarkStructCompletion(t *testing.T) { - file := "internal/lsp/cache/session.go" - - preCompletionEdits := func(env *Env) { - env.OpenFile(file) - originalBuffer := env.Editor.BufferText(file) - env.EditBuffer(file, fake.Edit{ - End: endPosInBuffer(env, file), - Text: originalBuffer + "\nvar testVariable map[string]bool = Session{}.\n", - }) - } - - benchmarkCompletion(completionBenchOptions{ - workdir: completionOptions.workdir, - file: file, - locationRegexp: `var testVariable map\[string\]bool = Session{}(\.)`, - preCompletionEdits: preCompletionEdits, - printResults: completionOptions.printResults, - }, t) -} - -// Benchmark import completion in tools codebase. -func TestBenchmarkImportCompletion(t *testing.T) { - benchmarkCompletion(completionBenchOptions{ - workdir: completionOptions.workdir, - file: "internal/lsp/source/completion/completion.go", - locationRegexp: `go\/()`, - printResults: completionOptions.printResults, - }, t) -} - -// Benchmark slice completion in tools codebase. -func TestBenchmarkSliceCompletion(t *testing.T) { - file := "internal/lsp/cache/session.go" - - preCompletionEdits := func(env *Env) { - env.OpenFile(file) - originalBuffer := env.Editor.BufferText(file) - env.EditBuffer(file, fake.Edit{ - End: endPosInBuffer(env, file), - Text: originalBuffer + "\nvar testVariable []byte = \n", - }) - } - - benchmarkCompletion(completionBenchOptions{ - workdir: completionOptions.workdir, - file: file, - locationRegexp: `var testVariable \[\]byte (=)`, - preCompletionEdits: preCompletionEdits, - printResults: completionOptions.printResults, - }, t) -} - -// Benchmark deep completion in function call in tools codebase. -func TestBenchmarkFuncDeepCompletion(t *testing.T) { - file := "internal/lsp/source/completion/completion.go" - fileContent := ` -func (c *completer) _() { - c.inference.kindMatches(c.) -} -` - preCompletionEdits := func(env *Env) { - env.OpenFile(file) - originalBuffer := env.Editor.BufferText(file) - env.EditBuffer(file, fake.Edit{ - End: endPosInBuffer(env, file), - Text: originalBuffer + fileContent, - }) - } - - benchmarkCompletion(completionBenchOptions{ - workdir: completionOptions.workdir, - file: file, - locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`, - preCompletionEdits: preCompletionEdits, - printResults: completionOptions.printResults, - }, t) -} diff --git a/gopls/internal/regtest/bench/completion_test.go b/gopls/internal/regtest/bench/completion_test.go new file mode 100644 index 00000000000..ffccf34b363 --- /dev/null +++ b/gopls/internal/regtest/bench/completion_test.go @@ -0,0 +1,201 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "context" + "fmt" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + + "golang.org/x/tools/gopls/internal/lsp/fake" +) + +type completionBenchOptions struct { + file, locationRegexp string + + // Hooks to run edits before initial completion + setup func(*Env) // run before the benchmark starts + beforeCompletion func(*Env) // run before each completion +} + +func benchmarkCompletion(options completionBenchOptions, b *testing.B) { + dir := benchmarkDir() + + // Use a new environment for each test, to avoid any existing state from the + // previous session. + sandbox, editor, awaiter, err := connectEditor(dir, fake.EditorConfig{ + Settings: map[string]interface{}{ + "completionBudget": "1m", // arbitrary long completion budget + }, + }) + if err != nil { + b.Fatal(err) + } + ctx := context.Background() + defer func() { + if err := editor.Close(ctx); err != nil { + b.Errorf("closing editor: %v", err) + } + }() + + env := &Env{ + T: b, + Ctx: ctx, + Editor: editor, + Sandbox: sandbox, + Awaiter: awaiter, + } + + // Run edits required for this completion. + if options.setup != nil { + options.setup(env) + } + + // Run a completion to make sure the system is warm. + loc := env.RegexpSearch(options.file, options.locationRegexp) + completions := env.Completion(loc) + + if testing.Verbose() { + fmt.Println("Results:") + for i := 0; i < len(completions.Items); i++ { + fmt.Printf("\t%d. %v\n", i, completions.Items[i]) + } + } + + b.ResetTimer() + + // Use a subtest to ensure that benchmarkCompletion does not itself get + // executed multiple times (as it is doing expensive environment + // initialization). + b.Run("completion", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if options.beforeCompletion != nil { + options.beforeCompletion(env) + } + env.Completion(loc) + } + }) +} + +// endPosInBuffer returns the position for last character in the buffer for +// the given file. +func endRangeInBuffer(env *Env, name string) protocol.Range { + buffer := env.BufferText(name) + m := protocol.NewMapper("", []byte(buffer)) + rng, err := m.OffsetRange(len(buffer), len(buffer)) + if err != nil { + env.T.Fatal(err) + } + return rng +} + +// Benchmark struct completion in tools codebase. +func BenchmarkStructCompletion(b *testing.B) { + file := "internal/lsp/cache/session.go" + + setup := func(env *Env) { + env.OpenFile(file) + env.EditBuffer(file, protocol.TextEdit{ + Range: endRangeInBuffer(env, file), + NewText: "\nvar testVariable map[string]bool = Session{}.\n", + }) + } + + benchmarkCompletion(completionBenchOptions{ + file: file, + locationRegexp: `var testVariable map\[string\]bool = Session{}(\.)`, + setup: setup, + }, b) +} + +// Benchmark import completion in tools codebase. +func BenchmarkImportCompletion(b *testing.B) { + const file = "internal/lsp/source/completion/completion.go" + benchmarkCompletion(completionBenchOptions{ + file: file, + locationRegexp: `go\/()`, + setup: func(env *Env) { env.OpenFile(file) }, + }, b) +} + +// Benchmark slice completion in tools codebase. +func BenchmarkSliceCompletion(b *testing.B) { + file := "internal/lsp/cache/session.go" + + setup := func(env *Env) { + env.OpenFile(file) + env.EditBuffer(file, protocol.TextEdit{ + Range: endRangeInBuffer(env, file), + NewText: "\nvar testVariable []byte = \n", + }) + } + + benchmarkCompletion(completionBenchOptions{ + file: file, + locationRegexp: `var testVariable \[\]byte (=)`, + setup: setup, + }, b) +} + +// Benchmark deep completion in function call in tools codebase. +func BenchmarkFuncDeepCompletion(b *testing.B) { + file := "internal/lsp/source/completion/completion.go" + fileContent := ` +func (c *completer) _() { + c.inference.kindMatches(c.) +} +` + setup := func(env *Env) { + env.OpenFile(file) + originalBuffer := env.BufferText(file) + env.EditBuffer(file, protocol.TextEdit{ + Range: endRangeInBuffer(env, file), + NewText: originalBuffer + fileContent, + }) + } + + benchmarkCompletion(completionBenchOptions{ + file: file, + locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`, + setup: setup, + }, b) +} + +// Benchmark completion following an arbitrary edit. +// +// Edits force type-checked packages to be invalidated, so we want to measure +// how long it takes before completion results are available. +func BenchmarkCompletionFollowingEdit(b *testing.B) { + file := "internal/lsp/source/completion/completion2.go" + fileContent := ` +package completion + +func (c *completer) _() { + c.inference.kindMatches(c.) + // __MAGIC_STRING_1 +} +` + setup := func(env *Env) { + env.CreateBuffer(file, fileContent) + } + + n := 1 + beforeCompletion := func(env *Env) { + old := fmt.Sprintf("__MAGIC_STRING_%d", n) + new := fmt.Sprintf("__MAGIC_STRING_%d", n+1) + n++ + env.RegexpReplace(file, old, new) + } + + benchmarkCompletion(completionBenchOptions{ + file: file, + locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`, + setup: setup, + beforeCompletion: beforeCompletion, + }, b) +} diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go new file mode 100644 index 00000000000..d309e3dcebf --- /dev/null +++ b/gopls/internal/regtest/bench/didchange_test.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "fmt" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +// BenchmarkDidChange benchmarks modifications of a single file by making +// synthetic modifications in a comment. It controls pacing by waiting for the +// server to actually start processing the didChange notification before +// proceeding. Notably it does not wait for diagnostics to complete. +// +// Uses -workdir and -file to control where the edits occur. +func BenchmarkDidChange(b *testing.B) { + env := benchmarkEnv(b) + env.OpenFile(*file) + env.Await(env.DoneWithOpen()) + + // Insert the text we'll be modifying at the top of the file. + env.EditBuffer(*file, protocol.TextEdit{NewText: "// __REGTEST_PLACEHOLDER_0__\n"}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + env.EditBuffer(*file, protocol.TextEdit{ + Range: protocol.Range{ + Start: protocol.Position{Line: 0, Character: 0}, + End: protocol.Position{Line: 1, Character: 0}, + }, + // Increment the placeholder text, to ensure cache misses. + NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1), + }) + env.Await(env.StartedChange()) + } +} diff --git a/gopls/internal/regtest/bench/editor_features_test.go b/gopls/internal/regtest/bench/editor_features_test.go new file mode 100644 index 00000000000..ea6727b5c31 --- /dev/null +++ b/gopls/internal/regtest/bench/editor_features_test.go @@ -0,0 +1,83 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "fmt" + "testing" +) + +func BenchmarkGoToDefinition(b *testing.B) { + env := benchmarkEnv(b) + + env.OpenFile("internal/imports/mod.go") + loc := env.RegexpSearch("internal/imports/mod.go", "ModuleJSON") + env.GoToDefinition(loc) + env.Await(env.DoneWithOpen()) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + env.GoToDefinition(loc) + } +} + +func BenchmarkFindAllReferences(b *testing.B) { + env := benchmarkEnv(b) + + env.OpenFile("internal/imports/mod.go") + loc := env.RegexpSearch("internal/imports/mod.go", "gopathwalk") + env.References(loc) + env.Await(env.DoneWithOpen()) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + env.References(loc) + } +} + +func BenchmarkRename(b *testing.B) { + env := benchmarkEnv(b) + + env.OpenFile("internal/imports/mod.go") + env.Await(env.DoneWithOpen()) + + b.ResetTimer() + + for i := 1; i < b.N; i++ { + loc := env.RegexpSearch("internal/imports/mod.go", "gopathwalk") + newName := fmt.Sprintf("%s%d", "gopathwalk", i) + env.Rename(loc, newName) + } +} + +func BenchmarkFindAllImplementations(b *testing.B) { + env := benchmarkEnv(b) + + env.OpenFile("internal/imports/mod.go") + loc := env.RegexpSearch("internal/imports/mod.go", "initAllMods") + env.Await(env.DoneWithOpen()) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + env.Implementations(loc) + } +} + +func BenchmarkHover(b *testing.B) { + env := benchmarkEnv(b) + + env.OpenFile("internal/imports/mod.go") + loc := env.RegexpSearch("internal/imports/mod.go", "bytes") + env.Await(env.DoneWithOpen()) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + env.Hover(loc) + } +} diff --git a/gopls/internal/regtest/bench/iwl_test.go b/gopls/internal/regtest/bench/iwl_test.go new file mode 100644 index 00000000000..baa92fc4b4d --- /dev/null +++ b/gopls/internal/regtest/bench/iwl_test.go @@ -0,0 +1,36 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "context" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/fake" + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +// BenchmarkInitialWorkspaceLoad benchmarks the initial workspace load time for +// a new editing session. +func BenchmarkInitialWorkspaceLoad(b *testing.B) { + dir := benchmarkDir() + b.ResetTimer() + + ctx := context.Background() + for i := 0; i < b.N; i++ { + _, editor, awaiter, err := connectEditor(dir, fake.EditorConfig{}) + if err != nil { + b.Fatal(err) + } + if err := awaiter.Await(ctx, InitialWorkspaceLoad); err != nil { + b.Fatal(err) + } + b.StopTimer() + if err := editor.Close(ctx); err != nil { + b.Fatal(err) + } + b.StartTimer() + } +} diff --git a/gopls/internal/regtest/bench/mem_test.go b/gopls/internal/regtest/bench/mem_test.go new file mode 100644 index 00000000000..19626785acc --- /dev/null +++ b/gopls/internal/regtest/bench/mem_test.go @@ -0,0 +1,39 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "runtime" + "testing" +) + +// TestPrintMemStats measures the memory usage of loading a project. +// It uses the same -didchange_dir flag as above. +// Always run it in isolation since it measures global heap usage. +// +// Kubernetes example: +// +// $ go test -v -run=TestPrintMemStats -workdir=$HOME/w/kubernetes +// TotalAlloc: 5766 MB +// HeapAlloc: 1984 MB +// +// Both figures exhibit variance of less than 1%. +func TestPrintMemStats(t *testing.T) { + // This test only makes sense when run in isolation, so for now it is + // manually skipped. + // + // TODO(rfindley): figure out a better way to capture memstats as a benchmark + // metric. + t.Skip("unskip to run this test manually") + + _ = benchmarkEnv(t) + + runtime.GC() + runtime.GC() + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6) + t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6) +} diff --git a/gopls/internal/regtest/bench/stress_test.go b/gopls/internal/regtest/bench/stress_test.go index f7e59faf97f..11c511f1780 100644 --- a/gopls/internal/regtest/bench/stress_test.go +++ b/gopls/internal/regtest/bench/stress_test.go @@ -11,56 +11,83 @@ import ( "testing" "time" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" ) -// Pilosa is a repository that has historically caused significant memory -// problems for Gopls. We use it for a simple stress test that types -// arbitrarily in a file with lots of dependents. +// github.com/pilosa/pilosa is a repository that has historically caused +// significant memory problems for Gopls. We use it for a simple stress test +// that types arbitrarily in a file with lots of dependents. var pilosaPath = flag.String("pilosa_path", "", "Path to a directory containing "+ "github.com/pilosa/pilosa, for stress testing. Do not set this unless you "+ "know what you're doing!") -func stressTestOptions(dir string) []RunOption { - opts := benchmarkOptions(dir) - opts = append(opts, SkipHooks(true), DebugAddress(":8087")) - return opts -} - func TestPilosaStress(t *testing.T) { + // TODO(rfindley): revisit this test and make it is hermetic: it should check + // out pilosa into a directory. + // + // Note: This stress test has not been run recently, and may no longer + // function properly. if *pilosaPath == "" { t.Skip("-pilosa_path not configured") } - opts := stressTestOptions(*pilosaPath) - WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) { - files := []string{ - "cmd.go", - "internal/private.pb.go", - "roaring/roaring.go", - "roaring/roaring_internal_test.go", - "server/handler_test.go", - } - for _, file := range files { - env.OpenFile(file) + sandbox, err := fake.NewSandbox(&fake.SandboxConfig{ + Workdir: *pilosaPath, + GOPROXY: "https://proxy.golang.org", + }) + if err != nil { + t.Fatal(err) + } + + server := lsprpc.NewStreamServer(cache.New(nil, nil), false, hooks.Options) + ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream) + ctx := context.Background() + + editor, err := fake.NewEditor(sandbox, fake.EditorConfig{}).Connect(ctx, ts, fake.ClientHooks{}) + if err != nil { + t.Fatal(err) + } + + files := []string{ + "cmd.go", + "internal/private.pb.go", + "roaring/roaring.go", + "roaring/roaring_internal_test.go", + "server/handler_test.go", + } + for _, file := range files { + if err := editor.OpenFile(ctx, file); err != nil { + t.Fatal(err) } - ctx, cancel := context.WithTimeout(env.Ctx, 10*time.Minute) - defer cancel() + } + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() - i := 1 - // MagicNumber is an identifier that occurs in roaring.go. Just change it - // arbitrarily. - env.RegexpReplace("roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1)) - for { - select { - case <-ctx.Done(): - return - default: - } - env.RegexpReplace("roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1)) - time.Sleep(20 * time.Millisecond) - i++ + i := 1 + // MagicNumber is an identifier that occurs in roaring.go. Just change it + // arbitrarily. + if err := editor.RegexpReplace(ctx, "roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1)); err != nil { + t.Fatal(err) + } + for { + select { + case <-ctx.Done(): + return + default: } - }) + if err := editor.RegexpReplace(ctx, "roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1)); err != nil { + t.Fatal(err) + } + // Simulate (very fast) typing. + // + // Typing 80 wpm ~150ms per keystroke. + time.Sleep(150 * time.Millisecond) + i++ + } } diff --git a/gopls/internal/regtest/bench/workspace_symbols_test.go b/gopls/internal/regtest/bench/workspace_symbols_test.go new file mode 100644 index 00000000000..a540dfd2cd0 --- /dev/null +++ b/gopls/internal/regtest/bench/workspace_symbols_test.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bench + +import ( + "flag" + "fmt" + "testing" +) + +var symbolQuery = flag.String("symbol_query", "test", "symbol query to use in benchmark") + +// BenchmarkWorkspaceSymbols benchmarks the time to execute a workspace symbols +// request (controlled by the -symbol_query flag). +func BenchmarkWorkspaceSymbols(b *testing.B) { + env := benchmarkEnv(b) + + // Make an initial symbol query to warm the cache. + symbols := env.Symbol(*symbolQuery) + + if testing.Verbose() { + fmt.Println("Results:") + for i := 0; i < len(symbols); i++ { + fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName) + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + env.Symbol(*symbolQuery) + } +} diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go index a64f9c480ae..acd5652811e 100644 --- a/gopls/internal/regtest/codelens/codelens_test.go +++ b/gopls/internal/regtest/codelens/codelens_test.go @@ -6,18 +6,15 @@ package codelens import ( "fmt" - "runtime" - "strings" "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/bug" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/testenv" ) @@ -63,9 +60,7 @@ const ( for _, test := range tests { t.Run(test.label, func(t *testing.T) { WithOptions( - EditorConfig{ - CodeLenses: test.enabled, - }, + Settings{"codelenses": test.enabled}, ).Run(t, workspace, func(t *testing.T, env *Env) { env.OpenFile("lib.go") lens := env.CodeLens("lib.go") @@ -80,8 +75,11 @@ const ( // This test confirms the full functionality of the code lenses for updating // dependencies in a go.mod file. It checks for the code lens that suggests // an update and then executes the command associated with that code lens. A -// regression test for golang/go#39446. +// regression test for golang/go#39446. It also checks that these code lenses +// only affect the diagnostics and contents of the containing go.mod file. func TestUpgradeCodelens(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + const proxyWithLatest = ` -- golang.org/x/hello@v1.3.3/go.mod -- module golang.org/x/hello @@ -102,30 +100,64 @@ var Goodbye error ` const shouldUpdateDep = ` --- go.mod -- -module mod.com +-- go.work -- +go 1.18 + +use ( + ./a + ./b +) +-- a/go.mod -- +module mod.com/a go 1.14 require golang.org/x/hello v1.2.3 --- go.sum -- +-- a/go.sum -- golang.org/x/hello v1.2.3 h1:7Wesfkx/uBd+eFgPrq0irYj/1XfmbvLV8jZ/W7C2Dwg= golang.org/x/hello v1.2.3/go.mod h1:OgtlzsxVMUUdsdQCIDYgaauCTH47B8T8vofouNJfzgY= --- main.go -- +-- a/main.go -- package main import "golang.org/x/hello/hi" +func main() { + _ = hi.Goodbye +} +-- b/go.mod -- +module mod.com/b + +go 1.14 + +require golang.org/x/hello v1.2.3 +-- b/go.sum -- +golang.org/x/hello v1.2.3 h1:7Wesfkx/uBd+eFgPrq0irYj/1XfmbvLV8jZ/W7C2Dwg= +golang.org/x/hello v1.2.3/go.mod h1:OgtlzsxVMUUdsdQCIDYgaauCTH47B8T8vofouNJfzgY= +-- b/main.go -- +package main + +import ( + "golang.org/x/hello/hi" +) + func main() { _ = hi.Goodbye } ` - const wantGoMod = `module mod.com + const wantGoModA = `module mod.com/a go 1.14 require golang.org/x/hello v1.3.3 +` + // Applying the diagnostics or running the codelenses for a/go.mod + // should not change the contents of b/go.mod + const wantGoModB = `module mod.com/b + +go 1.14 + +require golang.org/x/hello v1.2.3 ` for _, commandTitle := range []string{ @@ -136,10 +168,11 @@ require golang.org/x/hello v1.3.3 WithOptions( ProxyFiles(proxyWithLatest), ).Run(t, shouldUpdateDep, func(t *testing.T, env *Env) { - env.OpenFile("go.mod") + env.OpenFile("a/go.mod") + env.OpenFile("b/go.mod") var lens protocol.CodeLens var found bool - for _, l := range env.CodeLens("go.mod") { + for _, l := range env.CodeLens("a/go.mod") { if l.Command.Title == commandTitle { lens = l found = true @@ -154,9 +187,12 @@ require golang.org/x/hello v1.3.3 }); err != nil { t.Fatal(err) } - env.Await(env.DoneWithChangeWatchedFiles()) - if got := env.Editor.BufferText("go.mod"); got != wantGoMod { - t.Fatalf("go.mod upgrade failed:\n%s", tests.Diff(t, wantGoMod, got)) + env.AfterChange() + if got := env.BufferText("a/go.mod"); got != wantGoModA { + t.Fatalf("a/go.mod upgrade failed:\n%s", compare.Text(wantGoModA, got)) + } + if got := env.BufferText("b/go.mod"); got != wantGoModB { + t.Fatalf("b/go.mod changed unexpectedly:\n%s", compare.Text(wantGoModB, got)) } }) }) @@ -165,22 +201,36 @@ require golang.org/x/hello v1.3.3 t.Run(fmt.Sprintf("Upgrade individual dependency vendoring=%v", vendoring), func(t *testing.T) { WithOptions(ProxyFiles(proxyWithLatest)).Run(t, shouldUpdateDep, func(t *testing.T, env *Env) { if vendoring { - env.RunGoCommand("mod", "vendor") + env.RunGoCommandInDir("a", "mod", "vendor") } - env.Await(env.DoneWithChangeWatchedFiles()) - env.OpenFile("go.mod") - env.ExecuteCodeLensCommand("go.mod", command.CheckUpgrades) + env.AfterChange() + env.OpenFile("a/go.mod") + env.OpenFile("b/go.mod") + env.ExecuteCodeLensCommand("a/go.mod", command.CheckUpgrades, nil) d := &protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("go.mod", `require`, "can be upgraded"), - ReadDiagnostics("go.mod", d), - ), + env.OnceMet( + Diagnostics(env.AtRegexp("a/go.mod", `require`), WithMessage("can be upgraded")), + ReadDiagnostics("a/go.mod", d), + // We do not want there to be a diagnostic for b/go.mod, + // but there may be some subtlety in timing here, where this + // should always succeed, but may not actually test the correct + // behavior. + NoDiagnostics(env.AtRegexp("b/go.mod", `require`)), ) - env.ApplyQuickFixes("go.mod", d.Diagnostics) - env.Await(env.DoneWithChangeWatchedFiles()) - if got := env.Editor.BufferText("go.mod"); got != wantGoMod { - t.Fatalf("go.mod upgrade failed:\n%s", tests.Diff(t, wantGoMod, got)) + // Check for upgrades in b/go.mod and then clear them. + env.ExecuteCodeLensCommand("b/go.mod", command.CheckUpgrades, nil) + env.Await(Diagnostics(env.AtRegexp("b/go.mod", `require`), WithMessage("can be upgraded"))) + env.ExecuteCodeLensCommand("b/go.mod", command.ResetGoModDiagnostics, nil) + env.Await(NoDiagnostics(ForFile("b/go.mod"))) + + // Apply the diagnostics to a/go.mod. + env.ApplyQuickFixes("a/go.mod", d.Diagnostics) + env.AfterChange() + if got := env.BufferText("a/go.mod"); got != wantGoModA { + t.Fatalf("a/go.mod upgrade failed:\n%s", compare.Text(wantGoModA, got)) + } + if got := env.BufferText("b/go.mod"); got != wantGoModB { + t.Fatalf("b/go.mod changed unexpectedly:\n%s", compare.Text(wantGoModB, got)) } }) }) @@ -188,7 +238,6 @@ require golang.org/x/hello v1.3.3 } func TestUnusedDependenciesCodelens(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const proxy = ` -- golang.org/x/hello@v1.0.0/go.mod -- module golang.org/x/hello @@ -232,9 +281,9 @@ func main() { ` WithOptions(ProxyFiles(proxy)).Run(t, shouldRemoveDep, func(t *testing.T, env *Env) { env.OpenFile("go.mod") - env.ExecuteCodeLensCommand("go.mod", command.Tidy) + env.ExecuteCodeLensCommand("go.mod", command.Tidy, nil) env.Await(env.DoneWithChangeWatchedFiles()) - got := env.Editor.BufferText("go.mod") + got := env.BufferText("go.mod") const wantGoMod = `module mod.com go 1.14 @@ -242,15 +291,13 @@ go 1.14 require golang.org/x/hello v1.0.0 ` if got != wantGoMod { - t.Fatalf("go.mod tidy failed:\n%s", tests.Diff(t, wantGoMod, got)) + t.Fatalf("go.mod tidy failed:\n%s", compare.Text(wantGoMod, got)) } }) } func TestRegenerateCgo(t *testing.T) { testenv.NeedsTool(t, "cgo") - testenv.NeedsGo1Point(t, 15) - const workspace = ` -- go.mod -- module example.com @@ -271,85 +318,19 @@ func Foo() { Run(t, workspace, func(t *testing.T, env *Env) { // Open the file. We have a nonexistant symbol that will break cgo processing. env.OpenFile("cgo.go") - env.Await(env.DiagnosticAtRegexpWithMessage("cgo.go", ``, "go list failed to return CompiledGoFiles")) + env.AfterChange( + Diagnostics(env.AtRegexp("cgo.go", ``), WithMessage("go list failed to return CompiledGoFiles")), + ) // Fix the C function name. We haven't regenerated cgo, so nothing should be fixed. env.RegexpReplace("cgo.go", `int fortythree`, "int fortytwo") env.SaveBuffer("cgo.go") - env.Await(OnceMet( - env.DoneWithSave(), - env.DiagnosticAtRegexpWithMessage("cgo.go", ``, "go list failed to return CompiledGoFiles"), - )) - - // Regenerate cgo, fixing the diagnostic. - env.ExecuteCodeLensCommand("cgo.go", command.RegenerateCgo) - env.Await(EmptyDiagnostics("cgo.go")) - }) -} - -func TestGCDetails(t *testing.T) { - testenv.NeedsGo1Point(t, 15) - if runtime.GOOS == "android" { - t.Skipf("the gc details code lens doesn't work on Android") - } - - const mod = ` --- go.mod -- -module mod.com - -go 1.15 --- main.go -- -package main - -import "fmt" - -func main() { - fmt.Println(42) -} -` - WithOptions( - EditorConfig{ - CodeLenses: map[string]bool{ - "gc_details": true, - }}, - ).Run(t, mod, func(t *testing.T, env *Env) { - env.OpenFile("main.go") - env.ExecuteCodeLensCommand("main.go", command.GCDetails) - d := &protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - DiagnosticAt("main.go", 5, 13), - ReadDiagnostics("main.go", d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("cgo.go", ``), WithMessage("go list failed to return CompiledGoFiles")), ) - // Confirm that the diagnostics come from the gc details code lens. - var found bool - for _, d := range d.Diagnostics { - if d.Severity != protocol.SeverityInformation { - t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity) - } - if strings.Contains(d.Message, "42 escapes") { - found = true - } - } - if !found { - t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`) - } - - // Editing a buffer should cause gc_details diagnostics to disappear, since - // they only apply to saved buffers. - env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n")) - env.Await(EmptyDiagnostics("main.go")) - // Saving a buffer should re-format back to the original state, and - // re-enable the gc_details diagnostics. - env.SaveBuffer("main.go") - env.Await(DiagnosticAt("main.go", 5, 13)) - - // Toggle the GC details code lens again so now it should be off. - env.ExecuteCodeLensCommand("main.go", command.GCDetails) - env.Await( - EmptyDiagnostics("main.go"), - ) + // Regenerate cgo, fixing the diagnostic. + env.ExecuteCodeLensCommand("cgo.go", command.RegenerateCgo, nil) + env.Await(NoDiagnostics(ForFile("cgo.go"))) }) } diff --git a/gopls/internal/regtest/codelens/gcdetails_test.go b/gopls/internal/regtest/codelens/gcdetails_test.go new file mode 100644 index 00000000000..e0642d65224 --- /dev/null +++ b/gopls/internal/regtest/codelens/gcdetails_test.go @@ -0,0 +1,127 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codelens + +import ( + "runtime" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" +) + +func TestGCDetails_Toggle(t *testing.T) { + if runtime.GOOS == "android" { + t.Skipf("the gc details code lens doesn't work on Android") + } + + const mod = ` +-- go.mod -- +module mod.com + +go 1.15 +-- main.go -- +package main + +import "fmt" + +func main() { + fmt.Println(42) +} +` + WithOptions( + Settings{ + "codelenses": map[string]bool{ + "gc_details": true, + }, + }, + ).Run(t, mod, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + env.ExecuteCodeLensCommand("main.go", command.GCDetails, nil) + d := &protocol.PublishDiagnosticsParams{} + env.OnceMet( + Diagnostics(AtPosition("main.go", 5, 13)), + ReadDiagnostics("main.go", d), + ) + // Confirm that the diagnostics come from the gc details code lens. + var found bool + for _, d := range d.Diagnostics { + if d.Severity != protocol.SeverityInformation { + t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity) + } + if strings.Contains(d.Message, "42 escapes") { + found = true + } + } + if !found { + t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`) + } + + // Editing a buffer should cause gc_details diagnostics to disappear, since + // they only apply to saved buffers. + env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n")) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) + + // Saving a buffer should re-format back to the original state, and + // re-enable the gc_details diagnostics. + env.SaveBuffer("main.go") + env.AfterChange(Diagnostics(AtPosition("main.go", 5, 13))) + + // Toggle the GC details code lens again so now it should be off. + env.ExecuteCodeLensCommand("main.go", command.GCDetails, nil) + env.Await(NoDiagnostics(ForFile("main.go"))) + }) +} + +// Test for the crasher in golang/go#54199 +func TestGCDetails_NewFile(t *testing.T) { + bug.PanicOnBugs = false + const src = ` +-- go.mod -- +module mod.test + +go 1.12 +` + + WithOptions( + Settings{ + "codelenses": map[string]bool{ + "gc_details": true, + }, + }, + ).Run(t, src, func(t *testing.T, env *Env) { + env.CreateBuffer("p_test.go", "") + + const gcDetailsCommand = "gopls." + string(command.GCDetails) + + hasGCDetails := func() bool { + lenses := env.CodeLens("p_test.go") // should not crash + for _, lens := range lenses { + if lens.Command.Command == gcDetailsCommand { + return true + } + } + return false + } + + // With an empty file, we shouldn't get the gc_details codelens because + // there is nowhere to position it (it needs a package name). + if hasGCDetails() { + t.Errorf("got the gc_details codelens for an empty file") + } + + // Edit to provide a package name. + env.EditBuffer("p_test.go", fake.NewEdit(0, 0, 0, 0, "package p")) + + // Now we should get the gc_details codelens. + if !hasGCDetails() { + t.Errorf("didn't get the gc_details codelens for a valid non-empty Go file") + } + }) +} diff --git a/gopls/internal/regtest/completion/completion18_test.go b/gopls/internal/regtest/completion/completion18_test.go index 9683e30c828..18e81bc4b34 100644 --- a/gopls/internal/regtest/completion/completion18_test.go +++ b/gopls/internal/regtest/completion/completion18_test.go @@ -10,7 +10,8 @@ package completion import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) // test generic receivers @@ -41,10 +42,10 @@ func (s SyncMap[XX,string]) g(v UU) {} env.OpenFile("main.go") env.Await(env.DoneWithOpen()) for _, tst := range tests { - pos := env.RegexpSearch("main.go", tst.pat) - pos.Column += len(tst.pat) - completions := env.Completion("main.go", pos) - result := compareCompletionResults(tst.want, completions.Items) + loc := env.RegexpSearch("main.go", tst.pat) + loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte(tst.pat))) + completions := env.Completion(loc) + result := compareCompletionLabels(tst.want, completions.Items) if result != "" { t.Errorf("%s: wanted %v", result, tst.want) for i, g := range completions.Items { @@ -95,7 +96,7 @@ func FuzzHex(f *testing.F) { tests := []struct { file string pat string - offset int // from the beginning of pat to what the user just typed + offset uint32 // UTF16 length from the beginning of pat to what the user just typed want []string }{ {"a_test.go", "f.Ad", 3, []string{"Add"}}, @@ -108,10 +109,10 @@ func FuzzHex(f *testing.F) { for _, test := range tests { env.OpenFile(test.file) env.Await(env.DoneWithOpen()) - pos := env.RegexpSearch(test.file, test.pat) - pos.Column += test.offset // character user just typed? will type? - completions := env.Completion(test.file, pos) - result := compareCompletionResults(test.want, completions.Items) + loc := env.RegexpSearch(test.file, test.pat) + loc.Range.Start.Character += test.offset // character user just typed? will type? + completions := env.Completion(loc) + result := compareCompletionLabels(test.want, completions.Items) if result != "" { t.Errorf("pat %q %q", test.pat, result) for i, it := range completions.Items { diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go index 1ffb0000d3b..9859158ae87 100644 --- a/gopls/internal/regtest/completion/completion_test.go +++ b/gopls/internal/regtest/completion/completion_test.go @@ -9,13 +9,13 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" - - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" "golang.org/x/tools/internal/testenv" + + "golang.org/x/tools/gopls/internal/lsp/protocol" ) func TestMain(m *testing.M) { @@ -43,7 +43,6 @@ const Name = "Hello" ` func TestPackageCompletion(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const files = ` -- go.mod -- module mod.com @@ -174,41 +173,35 @@ package Run(t, files, func(t *testing.T, env *Env) { if tc.content != nil { env.WriteWorkspaceFile(tc.filename, *tc.content) - env.Await( - env.DoneWithChangeWatchedFiles(), - ) + env.Await(env.DoneWithChangeWatchedFiles()) } env.OpenFile(tc.filename) - completions := env.Completion(tc.filename, env.RegexpSearch(tc.filename, tc.triggerRegexp)) + completions := env.Completion(env.RegexpSearch(tc.filename, tc.triggerRegexp)) // Check that the completion item suggestions are in the range - // of the file. - lineCount := len(strings.Split(env.Editor.BufferText(tc.filename), "\n")) + // of the file. {Start,End}.Line are zero-based. + lineCount := len(strings.Split(env.BufferText(tc.filename), "\n")) for _, item := range completions.Items { - if start := int(item.TextEdit.Range.Start.Line); start >= lineCount { - t.Fatalf("unexpected text edit range start line number: got %d, want less than %d", start, lineCount) + if start := int(item.TextEdit.Range.Start.Line); start > lineCount { + t.Fatalf("unexpected text edit range start line number: got %d, want <= %d", start, lineCount) } - if end := int(item.TextEdit.Range.End.Line); end >= lineCount { - t.Fatalf("unexpected text edit range end line number: got %d, want less than %d", end, lineCount) + if end := int(item.TextEdit.Range.End.Line); end > lineCount { + t.Fatalf("unexpected text edit range end line number: got %d, want <= %d", end, lineCount) } } if tc.want != nil { - start, end := env.RegexpRange(tc.filename, tc.editRegexp) - expectedRng := protocol.Range{ - Start: fake.Pos.ToProtocolPosition(start), - End: fake.Pos.ToProtocolPosition(end), - } + expectedLoc := env.RegexpSearch(tc.filename, tc.editRegexp) for _, item := range completions.Items { gotRng := item.TextEdit.Range - if expectedRng != gotRng { + if expectedLoc.Range != gotRng { t.Errorf("unexpected completion range for completion item %s: got %v, want %v", - item.Label, gotRng, expectedRng) + item.Label, gotRng, expectedLoc.Range) } } } - diff := compareCompletionResults(tc.want, completions.Items) + diff := compareCompletionLabels(tc.want, completions.Items) if diff != "" { t.Error(diff) } @@ -230,19 +223,16 @@ package ma want := []string{"ma", "ma_test", "main", "math", "math_test"} Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("math/add.go") - completions := env.Completion("math/add.go", fake.Pos{ - Line: 0, - Column: 10, - }) + completions := env.Completion(env.RegexpSearch("math/add.go", "package ma()")) - diff := compareCompletionResults(want, completions.Items) + diff := compareCompletionLabels(want, completions.Items) if diff != "" { t.Fatal(diff) } }) } -func compareCompletionResults(want []string, gotItems []protocol.CompletionItem) string { +func compareCompletionLabels(want []string, gotItems []protocol.CompletionItem) string { if len(gotItems) != len(want) { return fmt.Sprintf("got %v completion(s), want %v", len(gotItems), len(want)) } @@ -266,8 +256,6 @@ func compareCompletionResults(want []string, gotItems []protocol.CompletionItem) } func TestUnimportedCompletion(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- go.mod -- module mod.com @@ -305,19 +293,19 @@ func _() { // Trigger unimported completions for the example.com/blah package. env.OpenFile("main.go") env.Await(env.DoneWithOpen()) - pos := env.RegexpSearch("main.go", "ah") - completions := env.Completion("main.go", pos) + loc := env.RegexpSearch("main.go", "ah") + completions := env.Completion(loc) if len(completions.Items) == 0 { t.Fatalf("no completion items") } - env.AcceptCompletion("main.go", pos, completions.Items[0]) + env.AcceptCompletion(loc, completions.Items[0]) env.Await(env.DoneWithChange()) // Trigger completions once again for the blah.<> selector. env.RegexpReplace("main.go", "_ = blah", "_ = blah.") env.Await(env.DoneWithChange()) - pos = env.RegexpSearch("main.go", "\n}") - completions = env.Completion("main.go", pos) + loc = env.RegexpSearch("main.go", "\n}") + completions = env.Completion(loc) if len(completions.Items) != 1 { t.Fatalf("expected 1 completion item, got %v", len(completions.Items)) } @@ -325,11 +313,11 @@ func _() { if item.Label != "Name" { t.Fatalf("expected completion item blah.Name, got %v", item.Label) } - env.AcceptCompletion("main.go", pos, item) + env.AcceptCompletion(loc, item) // Await the diagnostics to add example.com/blah to the go.mod file. - env.Await( - env.DiagnosticAtRegexp("main.go", `"example.com/blah"`), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)), ) }) } @@ -393,8 +381,8 @@ type S struct { Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo.go") - completions := env.Completion("foo.go", env.RegexpSearch("foo.go", `if s\.()`)) - diff := compareCompletionResults([]string{"i"}, completions.Items) + completions := env.Completion(env.RegexpSearch("foo.go", `if s\.()`)) + diff := compareCompletionLabels([]string{"i"}, completions.Items) if diff != "" { t.Fatal(diff) } @@ -453,8 +441,8 @@ func _() { {`var _ e = xxxx()`, []string{"xxxxc", "xxxxd", "xxxxe"}}, } for _, tt := range tests { - completions := env.Completion("main.go", env.RegexpSearch("main.go", tt.re)) - diff := compareCompletionResults(tt.want, completions.Items) + completions := env.Completion(env.RegexpSearch("main.go", tt.re)) + diff := compareCompletionLabels(tt.want, completions.Items) if diff != "" { t.Errorf("%s: %s", tt.re, diff) } @@ -486,32 +474,30 @@ func doit() { ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("prog.go") - pos := env.RegexpSearch("prog.go", "if fooF") - pos.Column += len("if fooF") - completions := env.Completion("prog.go", pos) - diff := compareCompletionResults([]string{"fooFunc"}, completions.Items) + loc := env.RegexpSearch("prog.go", "if fooF") + loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte("if fooF"))) + completions := env.Completion(loc) + diff := compareCompletionLabels([]string{"fooFunc"}, completions.Items) if diff != "" { t.Error(diff) } if completions.Items[0].Tags == nil { - t.Errorf("expected Tags to show deprecation %#v", diff[0]) + t.Errorf("expected Tags to show deprecation %#v", completions.Items[0].Tags) } - pos = env.RegexpSearch("prog.go", "= badP") - pos.Column += len("= badP") - completions = env.Completion("prog.go", pos) - diff = compareCompletionResults([]string{"badPi"}, completions.Items) + loc = env.RegexpSearch("prog.go", "= badP") + loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte("= badP"))) + completions = env.Completion(loc) + diff = compareCompletionLabels([]string{"badPi"}, completions.Items) if diff != "" { t.Error(diff) } if completions.Items[0].Tags == nil { - t.Errorf("expected Tags to show deprecation %#v", diff[0]) + t.Errorf("expected Tags to show deprecation %#v", completions.Items[0].Tags) } }) } func TestUnimportedCompletion_VSCodeIssue1489(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const src = ` -- go.mod -- module mod.com @@ -529,27 +515,28 @@ func main() { } ` WithOptions( - EditorConfig{WindowsLineEndings: true}, + WindowsLineEndings(), ).Run(t, src, func(t *testing.T, env *Env) { // Trigger unimported completions for the example.com/blah package. env.OpenFile("main.go") env.Await(env.DoneWithOpen()) - pos := env.RegexpSearch("main.go", "Sqr()") - completions := env.Completion("main.go", pos) + loc := env.RegexpSearch("main.go", "Sqr()") + completions := env.Completion(loc) if len(completions.Items) == 0 { t.Fatalf("no completion items") } - env.AcceptCompletion("main.go", pos, completions.Items[0]) + env.AcceptCompletion(loc, completions.Items[0]) env.Await(env.DoneWithChange()) - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") want := "package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"math\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"a\")\r\n\tmath.Sqrt(${1:})\r\n}\r\n" - if got != want { - t.Errorf("unimported completion: got %q, want %q", got, want) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unimported completion (-want +got):\n%s", diff) } }) } func TestDefinition(t *testing.T) { + testenv.NeedsGo1Point(t, 17) // in go1.16, The FieldList in func x is not empty stuff := ` -- go.mod -- module mod.com @@ -557,43 +544,42 @@ module mod.com go 1.18 -- a_test.go -- package foo -func T() -func TestG() -func TestM() -func TestMi() -func Ben() -func Fuz() -func Testx() -func TestMe(t *testing.T) -func BenchmarkFoo() ` - // All those parentheses are needed for the completion code to see - // later lines as being definitions tests := []struct { - pat string - want []string + line string // the sole line in the buffer after the package statement + pat string // the pattern to search for + want []string // expected completions }{ - {"T", []string{"TestXxx(t *testing.T)", "TestMain(m *testing.M)"}}, - {"TestM", []string{"TestMain(m *testing.M)", "TestM(t *testing.T)"}}, - {"TestMi", []string{"TestMi(t *testing.T)"}}, - {"TestG", []string{"TestG(t *testing.T)"}}, - {"B", []string{"BenchmarkXxx(b *testing.B)"}}, - {"BenchmarkFoo", []string{"BenchmarkFoo(b *testing.B)"}}, - {"F", []string{"FuzzXxx(f *testing.F)"}}, - {"Testx", nil}, - {"TestMe", []string{"TestMe"}}, + {"func T", "T", []string{"TestXxx(t *testing.T)", "TestMain(m *testing.M)"}}, + {"func T()", "T", []string{"TestMain", "Test"}}, + {"func TestM", "TestM", []string{"TestMain(m *testing.M)", "TestM(t *testing.T)"}}, + {"func TestM()", "TestM", []string{"TestMain"}}, + {"func TestMi", "TestMi", []string{"TestMi(t *testing.T)"}}, + {"func TestMi()", "TestMi", nil}, + {"func TestG", "TestG", []string{"TestG(t *testing.T)"}}, + {"func TestG(", "TestG", nil}, + {"func Ben", "B", []string{"BenchmarkXxx(b *testing.B)"}}, + {"func Ben(", "Ben", []string{"Benchmark"}}, + {"func BenchmarkFoo", "BenchmarkFoo", []string{"BenchmarkFoo(b *testing.B)"}}, + {"func BenchmarkFoo(", "BenchmarkFoo", nil}, + {"func Fuz", "F", []string{"FuzzXxx(f *testing.F)"}}, + {"func Fuz(", "Fuz", []string{"Fuzz"}}, + {"func Testx", "Testx", nil}, + {"func TestMe(t *testing.T)", "TestMe", nil}, + {"func Te(t *testing.T)", "Te", []string{"TestMain", "Test"}}, } fname := "a_test.go" Run(t, stuff, func(t *testing.T, env *Env) { env.OpenFile(fname) env.Await(env.DoneWithOpen()) for _, tst := range tests { - pos := env.RegexpSearch(fname, tst.pat) - pos.Column += len(tst.pat) - completions := env.Completion(fname, pos) - result := compareCompletionResults(tst.want, completions.Items) + env.SetBufferContent(fname, "package foo\n"+tst.line) + loc := env.RegexpSearch(fname, tst.pat) + loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte(tst.pat))) + completions := env.Completion(loc) + result := compareCompletionLabels(tst.want, completions.Items) if result != "" { - t.Errorf("%s failed: %s:%q", tst.pat, result, tst.want) + t.Errorf("\npat:%q line:%q failed: %s:%q", tst.pat, tst.line, result, tst.want) for i, it := range completions.Items { t.Errorf("%d got %q %q", i, it.Label, it.Detail) } @@ -602,6 +588,90 @@ func BenchmarkFoo() }) } +// Test that completing a definition replaces source text when applied, golang/go#56852. +// Note: With go <= 1.16 the completions does not add parameters and fails these tests. +func TestDefinitionReplaceRange(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + + const mod = ` +-- go.mod -- +module mod.com + +go 1.17 +` + + tests := []struct { + name string + before, after string + }{ + { + name: "func TestMa", + before: ` +package foo_test + +func TestMa +`, + after: ` +package foo_test + +func TestMain(m *testing.M) +`, + }, + { + name: "func TestSome", + before: ` +package foo_test + +func TestSome +`, + after: ` +package foo_test + +func TestSome(t *testing.T) +`, + }, + { + name: "func Bench", + before: ` +package foo_test + +func Bench +`, + // Note: Snippet with escaped }. + after: ` +package foo_test + +func Benchmark${1:Xxx}(b *testing.B) { + $0 +\} +`, + }, + } + + Run(t, mod, func(t *testing.T, env *Env) { + env.CreateBuffer("foo_test.go", "") + + for _, tst := range tests { + tst.before = strings.Trim(tst.before, "\n") + tst.after = strings.Trim(tst.after, "\n") + env.SetBufferContent("foo_test.go", tst.before) + + loc := env.RegexpSearch("foo_test.go", tst.name) + loc.Range.Start.Character = uint32(protocol.UTF16Len([]byte(tst.name))) + completions := env.Completion(loc) + if len(completions.Items) == 0 { + t.Fatalf("no completion items") + } + + env.AcceptCompletion(loc, completions.Items[0]) + env.Await(env.DoneWithChange()) + if buf := env.BufferText("foo_test.go"); buf != tst.after { + t.Errorf("%s:incorrect completion: got %q, want %q", tst.name, buf, tst.after) + } + } + }) +} + func TestGoWorkCompletion(t *testing.T) { const files = ` -- go.work -- @@ -638,8 +708,8 @@ use ./dir/foobar/ {`use ./dir/foobar/()`, []string{}}, } for _, tt := range tests { - completions := env.Completion("go.work", env.RegexpSearch("go.work", tt.re)) - diff := compareCompletionResults(tt.want, completions.Items) + completions := env.Completion(env.RegexpSearch("go.work", tt.re)) + diff := compareCompletionLabels(tt.want, completions.Items) if diff != "" { t.Errorf("%s: %s", tt.re, diff) } diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go index 2674d555c5a..df69703ee26 100644 --- a/gopls/internal/regtest/completion/postfix_snippet_test.go +++ b/gopls/internal/regtest/completion/postfix_snippet_test.go @@ -8,13 +8,10 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/source" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestPostfixSnippetCompletion(t *testing.T) { - t.Skipf("skipping test due to suspected synchronization bug; see https://go.dev/issue/50707") - const mod = ` -- go.mod -- module mod.com @@ -264,6 +261,27 @@ for k := range foo { keys = append(keys, k) } +} +`, + }, + { + name: "channel_range", + before: ` +package foo + +func _() { + foo := make(chan int) + foo.range +} +`, + after: ` +package foo + +func _() { + foo := make(chan int) + for e := range foo { + $0 +} } `, }, @@ -379,7 +397,7 @@ func _() { before: ` package foo -func foo() []string { +func foo() []string { x := "test" return x.split }`, @@ -388,7 +406,7 @@ package foo import "strings" -func foo() []string { +func foo() []string { x := "test" return strings.Split(x, "$0") }`, @@ -414,26 +432,30 @@ func foo() string { }, } - r := WithOptions(Options(func(o *source.Options) { - o.ExperimentalPostfixCompletions = true - })) + r := WithOptions( + Settings{ + "experimentalPostfixCompletions": true, + }, + ) r.Run(t, mod, func(t *testing.T, env *Env) { + env.CreateBuffer("foo.go", "") + for _, c := range cases { t.Run(c.name, func(t *testing.T) { c.before = strings.Trim(c.before, "\n") c.after = strings.Trim(c.after, "\n") - env.CreateBuffer("foo.go", c.before) + env.SetBufferContent("foo.go", c.before) - pos := env.RegexpSearch("foo.go", "\n}") - completions := env.Completion("foo.go", pos) + loc := env.RegexpSearch("foo.go", "\n}") + completions := env.Completion(loc) if len(completions.Items) != 1 { t.Fatalf("expected one completion, got %v", completions.Items) } - env.AcceptCompletion("foo.go", pos, completions.Items[0]) + env.AcceptCompletion(loc, completions.Items[0]) - if buf := env.Editor.BufferText("foo.go"); buf != c.after { + if buf := env.BufferText("foo.go"); buf != c.after { t.Errorf("\nGOT:\n%s\nEXPECTED:\n%s", buf, c.after) } }) diff --git a/gopls/internal/regtest/debug/debug_test.go b/gopls/internal/regtest/debug/debug_test.go index d60b3f780d7..f8efb8f5d30 100644 --- a/gopls/internal/regtest/debug/debug_test.go +++ b/gopls/internal/regtest/debug/debug_test.go @@ -8,8 +8,8 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" ) func TestMain(m *testing.M) { @@ -20,12 +20,8 @@ func TestBugNotification(t *testing.T) { // Verify that a properly configured session gets notified of a bug on the // server. WithOptions( - Modes(Singleton), // must be in-process to receive the bug report below - EditorConfig{ - Settings: map[string]interface{}{ - "showBugReports": true, - }, - }, + Modes(Default), // must be in-process to receive the bug report below + Settings{"showBugReports": true}, ).Run(t, "", func(t *testing.T, env *Env) { const desc = "got a bug" bug.Report(desc, nil) diff --git a/gopls/internal/regtest/diagnostics/analysis_test.go b/gopls/internal/regtest/diagnostics/analysis_test.go new file mode 100644 index 00000000000..308c25f13f6 --- /dev/null +++ b/gopls/internal/regtest/diagnostics/analysis_test.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diagnostics + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +// Test for the timeformat analyzer, following golang/vscode-go#2406. +// +// This test checks that applying the suggested fix from the analyzer resolves +// the diagnostic warning. +func TestTimeFormatAnalyzer(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- main.go -- +package main + +import ( + "fmt" + "time" +) + +func main() { + now := time.Now() + fmt.Println(now.Format("2006-02-01")) +}` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + + var d protocol.PublishDiagnosticsParams + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "2006-02-01")), + ReadDiagnostics("main.go", &d), + ) + + env.ApplyQuickFixes("main.go", d.Diagnostics) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) + }) +} diff --git a/gopls/internal/regtest/diagnostics/builtin_test.go b/gopls/internal/regtest/diagnostics/builtin_test.go index 775e7ec0b14..935a7f9b831 100644 --- a/gopls/internal/regtest/diagnostics/builtin_test.go +++ b/gopls/internal/regtest/diagnostics/builtin_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestIssue44866(t *testing.T) { @@ -26,13 +26,10 @@ const ( ` Run(t, src, func(t *testing.T, env *Env) { env.OpenFile("a.go") - name, _ := env.GoToDefinition("a.go", env.RegexpSearch("a.go", "iota")) - if !strings.HasSuffix(name, "builtin.go") { - t.Fatalf("jumped to %q, want builtin.go", name) + loc := env.GoToDefinition(env.RegexpSearch("a.go", "iota")) + if !strings.HasSuffix(string(loc.URI), "builtin.go") { + t.Fatalf("jumped to %q, want builtin.go", loc.URI) } - env.Await(OnceMet( - env.DoneWithOpen(), - NoDiagnostics("builtin.go"), - )) + env.AfterChange(NoDiagnostics(ForFile("builtin.go"))) }) } diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index 6f5db4cd419..f96a0aa04fa 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -11,12 +11,11 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" - - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" "golang.org/x/tools/internal/testenv" ) @@ -49,13 +48,8 @@ func TestDiagnosticErrorInEditedFile(t *testing.T) { // diagnostic. env.OpenFile("main.go") env.RegexpReplace("main.go", "Printl(n)", "") - env.Await( - // Once we have gotten diagnostics for the change above, we should - // satisfy the DiagnosticAtRegexp assertion. - OnceMet( - env.DoneWithChange(), - env.DiagnosticAtRegexp("main.go", "Printl"), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "Printl")), // Assert that this test has sent no error logs to the client. This is not // strictly necessary for testing this regression, but is included here // as an example of using the NoErrorLogs() expectation. Feel free to @@ -79,13 +73,9 @@ func m() { log.Println() } `) - env.Await( - env.DiagnosticAtRegexp("main.go", "log"), - ) + env.AfterChange(Diagnostics(env.AtRegexp("main.go", "log"))) env.SaveBuffer("main.go") - env.Await( - EmptyDiagnostics("main.go"), - ) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) }) } @@ -96,7 +86,7 @@ const Foo = "abc ` Run(t, brokenFile, func(t *testing.T, env *Env) { env.CreateBuffer("broken.go", brokenFile) - env.Await(env.DiagnosticAtRegexp("broken.go", "\"abc")) + env.AfterChange(Diagnostics(env.AtRegexp("broken.go", "\"abc"))) }) } @@ -119,13 +109,16 @@ const a = 2 func TestDiagnosticClearingOnEdit(t *testing.T) { Run(t, badPackage, func(t *testing.T, env *Env) { env.OpenFile("b.go") - env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2")) + env.AfterChange( + Diagnostics(env.AtRegexp("a.go", "a = 1")), + Diagnostics(env.AtRegexp("b.go", "a = 2")), + ) // Fix the error by editing the const name in b.go to `b`. env.RegexpReplace("b.go", "(a) = 2", "b") - env.Await( - EmptyDiagnostics("a.go"), - EmptyDiagnostics("b.go"), + env.AfterChange( + NoDiagnostics(ForFile("a.go")), + NoDiagnostics(ForFile("b.go")), ) }) } @@ -133,10 +126,16 @@ func TestDiagnosticClearingOnEdit(t *testing.T) { func TestDiagnosticClearingOnDelete_Issue37049(t *testing.T) { Run(t, badPackage, func(t *testing.T, env *Env) { env.OpenFile("a.go") - env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2")) + env.AfterChange( + Diagnostics(env.AtRegexp("a.go", "a = 1")), + Diagnostics(env.AtRegexp("b.go", "a = 2")), + ) env.RemoveWorkspaceFile("b.go") - env.Await(EmptyDiagnostics("a.go"), EmptyDiagnostics("b.go")) + env.AfterChange( + NoDiagnostics(ForFile("a.go")), + NoDiagnostics(ForFile("b.go")), + ) }) } @@ -145,16 +144,16 @@ func TestDiagnosticClearingOnClose(t *testing.T) { env.CreateBuffer("c.go", `package consts const a = 3`) - env.Await( - env.DiagnosticAtRegexp("a.go", "a = 1"), - env.DiagnosticAtRegexp("b.go", "a = 2"), - env.DiagnosticAtRegexp("c.go", "a = 3"), + env.AfterChange( + Diagnostics(env.AtRegexp("a.go", "a = 1")), + Diagnostics(env.AtRegexp("b.go", "a = 2")), + Diagnostics(env.AtRegexp("c.go", "a = 3")), ) env.CloseBuffer("c.go") - env.Await( - env.DiagnosticAtRegexp("a.go", "a = 1"), - env.DiagnosticAtRegexp("b.go", "a = 2"), - EmptyDiagnostics("c.go"), + env.AfterChange( + Diagnostics(env.AtRegexp("a.go", "a = 1")), + Diagnostics(env.AtRegexp("b.go", "a = 2")), + NoDiagnostics(ForFile("c.go")), ) }) } @@ -166,20 +165,20 @@ func TestIssue37978(t *testing.T) { env.CreateBuffer("c/c.go", "") // Write the file contents with a missing import. - env.EditBuffer("c/c.go", fake.Edit{ - Text: `package c + env.EditBuffer("c/c.go", protocol.TextEdit{ + NewText: `package c const a = http.MethodGet `, }) - env.Await( - env.DiagnosticAtRegexp("c/c.go", "http.MethodGet"), + env.AfterChange( + Diagnostics(env.AtRegexp("c/c.go", "http.MethodGet")), ) // Save file, which will organize imports, adding the expected import. // Expect the diagnostics to clear. env.SaveBuffer("c/c.go") - env.Await( - EmptyDiagnostics("c/c.go"), + env.AfterChange( + NoDiagnostics(ForFile("c/c.go")), ) }) } @@ -213,15 +212,15 @@ func TestA(t *testing.T) { // not break the workspace. func TestDeleteTestVariant(t *testing.T) { Run(t, test38878, func(t *testing.T, env *Env) { - env.Await(env.DiagnosticAtRegexp("a_test.go", `f\((3)\)`)) + env.AfterChange(Diagnostics(env.AtRegexp("a_test.go", `f\((3)\)`))) env.RemoveWorkspaceFile("a_test.go") - env.Await(EmptyDiagnostics("a_test.go")) + env.AfterChange(NoDiagnostics(ForFile("a_test.go"))) // Make sure the test variant has been removed from the workspace by // triggering a metadata load. env.OpenFile("a.go") env.RegexpReplace("a.go", `// import`, "import") - env.Await(env.DiagnosticAtRegexp("a.go", `"fmt"`)) + env.AfterChange(Diagnostics(env.AtRegexp("a.go", `"fmt"`))) }) } @@ -230,11 +229,9 @@ func TestDeleteTestVariant(t *testing.T) { func TestDeleteTestVariant_DiskOnly(t *testing.T) { Run(t, test38878, func(t *testing.T, env *Env) { env.OpenFile("a_test.go") - env.Await(DiagnosticAt("a_test.go", 5, 3)) + env.AfterChange(Diagnostics(AtPosition("a_test.go", 5, 3))) env.Sandbox.Workdir.RemoveFile(context.Background(), "a_test.go") - env.Await(OnceMet( - env.DoneWithChangeWatchedFiles(), - DiagnosticAt("a_test.go", 5, 3))) + env.AfterChange(Diagnostics(AtPosition("a_test.go", 5, 3))) }) } @@ -260,23 +257,20 @@ func Hello() { t.Run("manual", func(t *testing.T) { Run(t, noMod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)), ) env.CreateBuffer("go.mod", `module mod.com go 1.12 `) env.SaveBuffer("go.mod") - env.Await( - EmptyDiagnostics("main.go"), - ) var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("bob/bob.go", "x"), - ReadDiagnostics("bob/bob.go", &d), - ), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(env.AtRegexp("bob/bob.go", "x")), + ReadDiagnostics("bob/bob.go", &d), ) if len(d.Diagnostics) != 1 { t.Fatalf("expected 1 diagnostic, got %v", len(d.Diagnostics)) @@ -285,30 +279,32 @@ func Hello() { }) t.Run("initialized", func(t *testing.T) { Run(t, noMod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)), ) env.RunGoCommand("mod", "init", "mod.com") - env.Await( - EmptyDiagnostics("main.go"), - env.DiagnosticAtRegexp("bob/bob.go", "x"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(env.AtRegexp("bob/bob.go", "x")), ) }) }) t.Run("without workspace module", func(t *testing.T) { WithOptions( - Modes(Singleton), + Modes(Default), ).Run(t, noMod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)), ) if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil { t.Fatal(err) } - env.Await( - EmptyDiagnostics("main.go"), - env.DiagnosticAtRegexp("bob/bob.go", "x"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(env.AtRegexp("bob/bob.go", "x")), ) }) }) @@ -349,15 +345,15 @@ func TestHello(t *testing.T) { Run(t, testPackage, func(t *testing.T, env *Env) { env.OpenFile("lib_test.go") - env.Await( - DiagnosticAt("lib_test.go", 10, 2), - DiagnosticAt("lib_test.go", 11, 2), + env.AfterChange( + Diagnostics(AtPosition("lib_test.go", 10, 2)), + Diagnostics(AtPosition("lib_test.go", 11, 2)), ) env.OpenFile("lib.go") env.RegexpReplace("lib.go", "_ = x", "var y int") - env.Await( - env.DiagnosticAtRegexp("lib.go", "y int"), - EmptyDiagnostics("lib_test.go"), + env.AfterChange( + Diagnostics(env.AtRegexp("lib.go", "y int")), + NoDiagnostics(ForFile("lib_test.go")), ) }) } @@ -376,16 +372,8 @@ func main() {} Run(t, packageChange, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.RegexpReplace("a.go", "foo", "foox") - env.Await( - // When the bug reported in #38328 was present, we didn't get erroneous - // file diagnostics until after the didChange message generated by the - // package renaming was fully processed. Therefore, in order for this - // test to actually exercise the bug, we must wait until that work has - // completed. - OnceMet( - env.DoneWithChange(), - NoDiagnostics("a.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("a.go")), ) }) } @@ -432,8 +420,12 @@ func TestResolveDiagnosticWithDownload(t *testing.T) { env.OpenFile("print.go") // Check that gopackages correctly loaded this dependency. We should get a // diagnostic for the wrong formatting type. - // TODO: we should be able to easily also match the diagnostic message. - env.Await(env.DiagnosticAtRegexp("print.go", "fmt.Printf")) + env.AfterChange( + Diagnostics( + env.AtRegexp("print.go", "fmt.Printf"), + WithMessage("wrong type int"), + ), + ) }) } @@ -456,7 +448,9 @@ func Hello() { ` Run(t, adHoc, func(t *testing.T, env *Env) { env.OpenFile("b/b.go") - env.Await(env.DiagnosticAtRegexp("b/b.go", "x")) + env.AfterChange( + Diagnostics(env.AtRegexp("b/b.go", "x")), + ) }) } @@ -471,16 +465,15 @@ func _() { } ` WithOptions( - EditorConfig{ - Env: map[string]string{ - "GOPATH": "", - "GO111MODULE": "off", - }, - }).Run(t, files, func(t *testing.T, env *Env) { + EnvVars{ + "GOPATH": "", + "GO111MODULE": "off", + }, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") - env.Await(env.DiagnosticAtRegexp("main.go", "fmt")) + env.AfterChange(Diagnostics(env.AtRegexp("main.go", "fmt"))) env.SaveBuffer("main.go") - env.Await(EmptyDiagnostics("main.go")) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) }) } @@ -500,11 +493,12 @@ package x var X = 0 ` - editorConfig := EditorConfig{Env: map[string]string{"GOFLAGS": "-tags=foo"}} - WithOptions(editorConfig).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + EnvVars{"GOFLAGS": "-tags=foo"}, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OrganizeImports("main.go") - env.Await(EmptyDiagnostics("main.go")) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) }) } @@ -529,11 +523,9 @@ func _() { Run(t, generated, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - DiagnosticAt("main.go", 5, 8), - ReadDiagnostics("main.go", &d), - ), + env.AfterChange( + Diagnostics(AtPosition("main.go", 5, 8)), + ReadDiagnostics("main.go", &d), ) if fixes := env.GetQuickFixes("main.go", d.Diagnostics); len(fixes) != 0 { t.Errorf("got quick fixes %v, wanted none", fixes) @@ -543,7 +535,7 @@ func _() { // Expect a module/GOPATH error if there is an error in the file at startup. // Tests golang/go#37279. -func TestShowCriticalError_Issue37279(t *testing.T) { +func TestBrokenWorkspace_OutsideModule(t *testing.T) { const noModule = ` -- a.go -- package foo @@ -556,11 +548,13 @@ func f() { ` Run(t, noModule, func(t *testing.T, env *Env) { env.OpenFile("a.go") - env.Await( + env.AfterChange( + // Expect the adHocPackagesWarning. OutstandingWork(lsp.WorkspaceLoadFailure, "outside of a module"), ) + // Deleting the import dismisses the warning. env.RegexpReplace("a.go", `import "mod.com/hello"`, "") - env.Await( + env.AfterChange( NoOutstandingWork(), ) }) @@ -573,10 +567,11 @@ hi mom ` for _, go111module := range []string{"on", "off", ""} { t.Run(fmt.Sprintf("GO111MODULE_%v", go111module), func(t *testing.T) { - WithOptions(EditorConfig{ - Env: map[string]string{"GO111MODULE": go111module}, - }).Run(t, files, func(t *testing.T, env *Env) { - env.Await( + WithOptions( + EnvVars{"GO111MODULE": go111module}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, NoOutstandingWork(), ) }) @@ -605,24 +600,20 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "off", - }, - }, + EnvVars{"GO111MODULE": "off"}, ).Run(t, collision, func(t *testing.T, env *Env) { env.OpenFile("x/x.go") - env.Await( - env.DiagnosticAtRegexpWithMessage("x/x.go", `^`, "found packages main (main.go) and x (x.go)"), - env.DiagnosticAtRegexpWithMessage("x/main.go", `^`, "found packages main (main.go) and x (x.go)"), + env.AfterChange( + Diagnostics(env.AtRegexp("x/x.go", `^`), WithMessage("found packages main (main.go) and x (x.go)")), + Diagnostics(env.AtRegexp("x/main.go", `^`), WithMessage("found packages main (main.go) and x (x.go)")), ) // We don't recover cleanly from the errors without good overlay support. if testenv.Go1Point() >= 16 { env.RegexpReplace("x/x.go", `package x`, `package main`) - env.Await(OnceMet( - env.DoneWithChange(), - env.DiagnosticAtRegexpWithMessage("x/main.go", `fmt`, "undeclared name"))) + env.AfterChange( + Diagnostics(env.AtRegexp("x/main.go", `fmt`)), + ) } }) } @@ -640,9 +631,6 @@ var ErrHelpWanted error // Test for golang/go#38211. func Test_Issue38211(t *testing.T) { - t.Skipf("Skipping flaky test: https://golang.org/issue/44098") - - testenv.NeedsGo1Point(t, 14) const ardanLabs = ` -- go.mod -- module mod.com @@ -665,49 +653,44 @@ func main() { env.OpenFile("go.mod") env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), - ReadDiagnostics("main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)), + ReadDiagnostics("main.go", &d), ) env.ApplyQuickFixes("main.go", d.Diagnostics) env.SaveBuffer("go.mod") - env.Await( - EmptyDiagnostics("main.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) // Comment out the line that depends on conf and expect a // diagnostic and a fix to remove the import. env.RegexpReplace("main.go", "_ = conf.ErrHelpWanted", "//_ = conf.ErrHelpWanted") - env.Await( - env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)), ) env.SaveBuffer("main.go") // Expect a diagnostic and fix to remove the dependency in the go.mod. - env.Await(EmptyDiagnostics("main.go")) - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("go.mod", "require github.com/ardanlabs/conf", "not used in this module"), - ReadDiagnostics("go.mod", &d), - ), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(env.AtRegexp("go.mod", "require github.com/ardanlabs/conf"), WithMessage("not used in this module")), + ReadDiagnostics("go.mod", &d), ) env.ApplyQuickFixes("go.mod", d.Diagnostics) env.SaveBuffer("go.mod") - env.Await( - EmptyDiagnostics("go.mod"), + env.AfterChange( + NoDiagnostics(ForFile("go.mod")), ) // Uncomment the lines and expect a new diagnostic for the import. env.RegexpReplace("main.go", "//_ = conf.ErrHelpWanted", "_ = conf.ErrHelpWanted") env.SaveBuffer("main.go") - env.Await( - env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)), ) }) } // Test for golang/go#38207. func TestNewModule_Issue38207(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const emptyFile = ` -- go.mod -- module mod.com @@ -728,22 +711,19 @@ func main() { `) env.SaveBuffer("main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("main.go", `"github.com/ardanlabs/conf"`, "no required module"), - ReadDiagnostics("main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`), WithMessage("no required module")), + ReadDiagnostics("main.go", &d), ) env.ApplyQuickFixes("main.go", d.Diagnostics) - env.Await( - EmptyDiagnostics("main.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } // Test for golang/go#36960. func TestNewFileBadImports_Issue36960(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const simplePackage = ` -- go.mod -- module mod.com @@ -762,15 +742,12 @@ func _() { env.OpenFile("a/a1.go") env.CreateBuffer("a/a2.go", ``) env.SaveBufferWithoutActions("a/a2.go") - env.Await( - OnceMet( - env.DoneWithSave(), - NoDiagnostics("a/a1.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("a/a1.go")), ) env.EditBuffer("a/a2.go", fake.NewEdit(0, 0, 0, 0, `package a`)) - env.Await( - OnceMet(env.DoneWithChange(), NoDiagnostics("a/a1.go")), + env.AfterChange( + NoDiagnostics(ForFile("a/a1.go")), ) }) } @@ -778,9 +755,6 @@ func _() { // This test tries to replicate the workflow of a user creating a new x test. // It also tests golang/go#39315. func TestManuallyCreatingXTest(t *testing.T) { - // Only for 1.15 because of golang/go#37971. - testenv.NeedsGo1Point(t, 15) - // Create a package that already has a test variant (in-package test). const testVariant = ` -- go.mod -- @@ -807,9 +781,9 @@ func TestHello(t *testing.T) { // Open the file, triggering the workspace load. // There are errors in the code to ensure all is working as expected. env.OpenFile("hello/hello.go") - env.Await( - env.DiagnosticAtRegexp("hello/hello.go", "x"), - env.DiagnosticAtRegexp("hello/hello_test.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("hello/hello.go", "x")), + Diagnostics(env.AtRegexp("hello/hello_test.go", "x")), ) // Create an empty file with the intention of making it an x test. @@ -836,20 +810,18 @@ func TestHello(t *testing.T) { `)) // Expect a diagnostic for the missing import. Save, which should // trigger import organization. The diagnostic should clear. - env.Await( - env.DiagnosticAtRegexp("hello/hello_x_test.go", "hello.Hello"), + env.AfterChange( + Diagnostics(env.AtRegexp("hello/hello_x_test.go", "hello.Hello")), ) env.SaveBuffer("hello/hello_x_test.go") - env.Await( - EmptyDiagnostics("hello/hello_x_test.go"), + env.AfterChange( + NoDiagnostics(ForFile("hello/hello_x_test.go")), ) }) } // Reproduce golang/go#40690. func TestCreateOnlyXTest(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - const mod = ` -- go.mod -- module mod.com @@ -871,15 +843,13 @@ func TestX(t *testing.T) { var x int } `) - env.Await( - env.DiagnosticAtRegexp("foo/bar_test.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("foo/bar_test.go", "x")), ) }) } func TestChangePackageName(t *testing.T) { - t.Skip("This issue hasn't been fixed yet. See golang.org/issue/41061.") - const mod = ` -- go.mod -- module mod.com @@ -892,17 +862,11 @@ package foo_ ` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("foo/bar_test.go") + env.AfterChange() env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test") - env.SaveBuffer("foo/bar_test.go") - env.Await( - OnceMet( - env.DoneWithSave(), - NoDiagnostics("foo/bar_test.go"), - ), - OnceMet( - env.DoneWithSave(), - NoDiagnostics("foo/foo.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("foo/bar_test.go")), + NoDiagnostics(ForFile("foo/foo.go")), ) }) } @@ -920,11 +884,9 @@ var _ = foo.Bar ` Run(t, ws, func(t *testing.T, env *Env) { env.OpenFile("_foo/x.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - NoDiagnostics("_foo/x.go"), - )) + env.AfterChange( + NoDiagnostics(ForFile("_foo/x.go")), + ) }) } @@ -964,17 +926,15 @@ const C = a.A // We should still get diagnostics for files that exist. env.RegexpReplace("b/b.go", `a.A`, "a.Nonexistant") - env.Await(env.DiagnosticAtRegexp("b/b.go", `Nonexistant`)) + env.AfterChange( + Diagnostics(env.AtRegexp("b/b.go", `Nonexistant`)), + ) }) } // This is a copy of the scenario_default/quickfix_empty_files.txt test from // govim. Reproduces golang/go#39646. func TestQuickFixEmptyFiles(t *testing.T) { - t.Skip("too flaky: golang/go#48773") - - testenv.NeedsGo1Point(t, 15) - const mod = ` -- go.mod -- module mod.com @@ -1014,7 +974,9 @@ func main() { Run(t, mod, func(t *testing.T, env *Env) { writeGoVim(env, "p/p.go", p) writeGoVim(env, "main.go", main) - env.Await(env.DiagnosticAtRegexp("main.go", "5")) + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "5")), + ) }) }) @@ -1043,16 +1005,16 @@ func TestDoIt(t *testing.T) { p.DoIt(5) } `) - env.Await( - env.DiagnosticAtRegexp("main.go", "5"), - env.DiagnosticAtRegexp("p/p_test.go", "5"), - env.DiagnosticAtRegexp("p/x_test.go", "5"), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "5")), + Diagnostics(env.AtRegexp("p/p_test.go", "5")), + Diagnostics(env.AtRegexp("p/x_test.go", "5")), ) env.RegexpReplace("p/p.go", "s string", "i int") - env.Await( - EmptyDiagnostics("main.go"), - EmptyDiagnostics("p/p_test.go"), - EmptyDiagnostics("p/x_test.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + NoDiagnostics(ForFile("p/p_test.go")), + NoDiagnostics(ForFile("p/x_test.go")), ) }) }) @@ -1076,8 +1038,8 @@ func _() { WorkspaceFolders(), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "x")), ) }) } @@ -1100,8 +1062,6 @@ func Foo() { } ` Run(t, basic, func(t *testing.T, env *Env) { - testenv.NeedsGo1Point(t, 16) // We can't recover cleanly from this case without good overlay support. - env.WriteWorkspaceFile("foo/foo_test.go", `package main func main() { @@ -1109,12 +1069,7 @@ func main() { }`) env.OpenFile("foo/foo_test.go") env.RegexpReplace("foo/foo_test.go", `package main`, `package foo`) - env.Await( - OnceMet( - env.DoneWithChange(), - NoDiagnostics("foo/foo.go"), - ), - ) + env.AfterChange(NoDiagnostics(ForFile("foo/foo.go"))) }) } @@ -1131,16 +1086,9 @@ func main() {} ` Run(t, basic, func(t *testing.T, env *Env) { env.Editor.CreateBuffer(env.Ctx, "foo.go", `package main`) - env.Await( - env.DoneWithOpen(), - ) + env.AfterChange() env.CloseBuffer("foo.go") - env.Await( - OnceMet( - env.DoneWithClose(), - NoLogMatching(protocol.Info, "packages=0"), - ), - ) + env.AfterChange(NoLogMatching(protocol.Info, "packages=0")) }) } @@ -1180,16 +1128,14 @@ func main() { var x int } `)) - env.Await( - env.DiagnosticAtRegexp("main.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "x")), ) }) } // Reproduces golang/go#39763. func TestInvalidPackageName(t *testing.T) { - testenv.NeedsGo1Point(t, 15) - const pkgDefault = ` -- go.mod -- module mod.com @@ -1202,8 +1148,11 @@ func main() {} ` Run(t, pkgDefault, func(t *testing.T, env *Env) { env.OpenFile("main.go") - env.Await( - env.DiagnosticAtRegexpWithMessage("main.go", "default", "expected 'IDENT'"), + env.AfterChange( + Diagnostics( + env.AtRegexp("main.go", "default"), + WithMessage("expected 'IDENT'"), + ), ) }) } @@ -1230,17 +1179,17 @@ func main() { WorkspaceFolders("a"), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") - env.Await( - env.DiagnosticAtRegexp("main.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "x")), ) }) WithOptions( WorkspaceFolders("a"), - LimitWorkspaceScope(), + Settings{"expandWorkspaceToModule": false}, ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") - env.Await( - NoDiagnostics("main.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } @@ -1267,23 +1216,19 @@ func main() { ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, - }, - }, + Settings{"staticcheck": true}, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams - env.Await(OnceMet( - env.DiagnosticAtRegexpWithMessage("main.go", `t{"msg"}`, "redundant type"), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `t{"msg"}`), WithMessage("redundant type")), ReadDiagnostics("main.go", &d), - )) + ) if tags := d.Diagnostics[0].Tags; len(tags) == 0 || tags[0] != protocol.Unnecessary { t.Errorf("wanted Unnecessary tag on diagnostic, got %v", tags) } env.ApplyQuickFixes("main.go", d.Diagnostics) - env.Await(EmptyDiagnostics("main.go")) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) }) } @@ -1306,23 +1251,23 @@ func main() {} Run(t, dir, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OpenFile("other.go") - x := env.DiagnosticsFor("main.go") - if x == nil { - t.Fatalf("expected 1 diagnostic, got none") - } - if len(x.Diagnostics) != 1 { - t.Fatalf("main.go, got %d diagnostics, expected 1", len(x.Diagnostics)) + var mainDiags, otherDiags protocol.PublishDiagnosticsParams + env.AfterChange( + ReadDiagnostics("main.go", &mainDiags), + ReadDiagnostics("other.go", &otherDiags), + ) + if len(mainDiags.Diagnostics) != 1 { + t.Fatalf("main.go, got %d diagnostics, expected 1", len(mainDiags.Diagnostics)) } - keep := x.Diagnostics[0] - y := env.DiagnosticsFor("other.go") - if len(y.Diagnostics) != 1 { - t.Fatalf("other.go: got %d diagnostics, expected 1", len(y.Diagnostics)) + keep := mainDiags.Diagnostics[0] + if len(otherDiags.Diagnostics) != 1 { + t.Fatalf("other.go: got %d diagnostics, expected 1", len(otherDiags.Diagnostics)) } - if len(y.Diagnostics[0].RelatedInformation) != 1 { - t.Fatalf("got %d RelatedInformations, expected 1", len(y.Diagnostics[0].RelatedInformation)) + if len(otherDiags.Diagnostics[0].RelatedInformation) != 1 { + t.Fatalf("got %d RelatedInformations, expected 1", len(otherDiags.Diagnostics[0].RelatedInformation)) } // check that the RelatedInformation matches the error from main.go - c := y.Diagnostics[0].RelatedInformation[0] + c := otherDiags.Diagnostics[0].RelatedInformation[0] if c.Location.Range != keep.Range { t.Errorf("locations don't match. Got %v expected %v", c.Location.Range, keep.Range) } @@ -1330,9 +1275,6 @@ func main() {} } func TestNotifyOrphanedFiles(t *testing.T) { - // Need GO111MODULE=on for this test to work with Go 1.12. - testenv.NeedsGo1Point(t, 13) - const files = ` -- go.mod -- module mod.com @@ -1344,8 +1286,8 @@ package a func main() { var x int } --- a/a_ignore.go -- -// +build ignore +-- a/a_exclude.go -- +// +build exclude package a @@ -1355,17 +1297,21 @@ func _() { ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "x")), ) - env.OpenFile("a/a_ignore.go") - env.Await( - DiagnosticAt("a/a_ignore.go", 2, 8), + env.OpenFile("a/a_exclude.go") + env.AfterChange( + Diagnostics(env.AtRegexp("a/a_exclude.go", "package (a)")), ) }) } func TestEnableAllExperiments(t *testing.T) { + // Before the oldest supported Go version, gopls sends a warning to upgrade + // Go, which fails the expectation below. + testenv.NeedsGo1Point(t, lsp.OldestSupportedGoVersion()) + const mod = ` -- go.mod -- module mod.com @@ -1381,12 +1327,13 @@ func b(c bytes.Buffer) { } ` WithOptions( - EditorConfig{ - AllExperiments: true, - }, + Settings{"allExperiments": true}, ).Run(t, mod, func(t *testing.T, env *Env) { // Confirm that the setting doesn't cause any warnings. - env.Await(NoShowMessage()) + env.OnceMet( + InitialWorkspaceLoad, + NoShownMessage(""), // empty substring to match any message + ) }) } @@ -1436,11 +1383,9 @@ func main() { } ` Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - OnceMet( - InitialWorkspaceLoad, - NoDiagnosticWithMessage("", "illegal character U+0023 '#'"), - ), + env.OnceMet( + InitialWorkspaceLoad, + NoDiagnostics(WithMessage("illegal character U+0023 '#'")), ) }) } @@ -1450,8 +1395,6 @@ func main() { // have no more complaints about it. // https://github.com/golang/go/issues/41061 func TestRenamePackage(t *testing.T) { - testenv.NeedsGo1Point(t, 16) - const proxy = ` -- example.com@v1.2.3/go.mod -- module example.com @@ -1495,11 +1438,7 @@ package foo_ WithOptions( ProxyFiles(proxy), InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "off", - }, - }, + EnvVars{"GO111MODULE": "off"}, ).Run(t, contents, func(t *testing.T, env *Env) { // Simulate typing character by character. env.OpenFile("foo/foo_test.go") @@ -1507,10 +1446,8 @@ package foo_ env.RegexpReplace("foo/foo_test.go", "_", "_t") env.Await(env.DoneWithChange()) env.RegexpReplace("foo/foo_test.go", "_t", "_test") - env.Await(env.DoneWithChange()) - - env.Await( - EmptyDiagnostics("foo/foo_test.go"), + env.AfterChange( + NoDiagnostics(ForFile("foo/foo_test.go")), NoOutstandingWork(), ) }) @@ -1519,8 +1456,6 @@ package foo_ // TestProgressBarErrors confirms that critical workspace load errors are shown // and updated via progress reports. func TestProgressBarErrors(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const pkg = ` -- go.mod -- modul mod.com @@ -1531,7 +1466,7 @@ package main ` Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("go.mod") - env.Await( + env.AfterChange( OutstandingWork(lsp.WorkspaceLoadFailure, "unknown directive"), ) env.EditBuffer("go.mod", fake.NewEdit(0, 0, 3, 0, `module mod.com @@ -1541,20 +1476,18 @@ go 1.hello // As of golang/go#42529, go.mod changes do not reload the workspace until // they are saved. env.SaveBufferWithoutActions("go.mod") - env.Await( + env.AfterChange( OutstandingWork(lsp.WorkspaceLoadFailure, "invalid go version"), ) env.RegexpReplace("go.mod", "go 1.hello", "go 1.12") env.SaveBufferWithoutActions("go.mod") - env.Await( + env.AfterChange( NoOutstandingWork(), ) }) } func TestDeleteDirectory(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- bob/bob.go -- package bob @@ -1564,7 +1497,7 @@ func Hello() { } -- go.mod -- module mod.com --- main.go -- +-- cmd/main.go -- package main import "mod.com/bob" @@ -1574,11 +1507,15 @@ func main() { } ` Run(t, mod, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + FileWatchMatching("bob"), + ) env.RemoveWorkspaceFile("bob") - env.Await( - env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), - EmptyDiagnostics("bob/bob.go"), - RegistrationMatching("didChangeWatchedFiles"), + env.AfterChange( + Diagnostics(env.AtRegexp("cmd/main.go", `"mod.com/bob"`)), + NoDiagnostics(ForFile("bob/bob.go")), + NoFileWatchMatching("bob"), ) }) } @@ -1617,10 +1554,11 @@ package c import _ "mod.com/triple/a" ` Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexpWithMessage("self/self.go", `_ "mod.com/self"`, "import cycle not allowed"), - env.DiagnosticAtRegexpWithMessage("double/a/a.go", `_ "mod.com/double/b"`, "import cycle not allowed"), - env.DiagnosticAtRegexpWithMessage("triple/a/a.go", `_ "mod.com/triple/b"`, "import cycle not allowed"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("self/self.go", `_ "mod.com/self"`), WithMessage("import cycle not allowed")), + Diagnostics(env.AtRegexp("double/a/a.go", `_ "mod.com/double/b"`), WithMessage("import cycle not allowed")), + Diagnostics(env.AtRegexp("triple/a/a.go", `_ "mod.com/triple/b"`), WithMessage("import cycle not allowed")), ) }) } @@ -1651,31 +1589,26 @@ const B = a.B Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("b/b.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - // The Go command sometimes tells us about only one of the import cycle - // errors below. For robustness of this test, succeed if we get either. - // - // TODO(golang/go#52904): we should get *both* of these errors. - AnyOf( - env.DiagnosticAtRegexpWithMessage("a/a.go", `"mod.test/b"`, "import cycle"), - env.DiagnosticAtRegexpWithMessage("b/b.go", `"mod.test/a"`, "import cycle"), - ), + env.AfterChange( + // The Go command sometimes tells us about only one of the import cycle + // errors below. For robustness of this test, succeed if we get either. + // + // TODO(golang/go#52904): we should get *both* of these errors. + AnyOf( + Diagnostics(env.AtRegexp("a/a.go", `"mod.test/b"`), WithMessage("import cycle")), + Diagnostics(env.AtRegexp("b/b.go", `"mod.test/a"`), WithMessage("import cycle")), ), ) env.RegexpReplace("b/b.go", `const B = a\.B`, "") env.SaveBuffer("b/b.go") - env.Await( - EmptyOrNoDiagnostics("a/a.go"), - EmptyOrNoDiagnostics("b/b.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + NoDiagnostics(ForFile("b/b.go")), ) }) } func TestBadImport(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- go.mod -- module mod.com @@ -1690,80 +1623,21 @@ import ( ` t.Run("module", func(t *testing.T) { Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `could not import nosuchpkg (no required module provides package "nosuchpkg"`), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"nosuchpkg"`), WithMessage(`could not import nosuchpkg (no required module provides package "nosuchpkg"`)), ) }) }) t.Run("GOPATH", func(t *testing.T) { WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{"GO111MODULE": "off"}, - }, - Modes(Singleton), + EnvVars{"GO111MODULE": "off"}, + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `cannot find package "nosuchpkg" in any of`), - ) - }) - }) -} - -func TestMultipleModules_Warning(t *testing.T) { - const modules = ` --- a/go.mod -- -module a.com - -go 1.12 --- a/a.go -- -package a --- b/go.mod -- -module b.com - -go 1.12 --- b/b.go -- -package b -` - for _, go111module := range []string{"on", "auto"} { - t.Run("GO111MODULE="+go111module, func(t *testing.T) { - WithOptions( - Modes(Singleton), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": go111module, - }, - }, - ).Run(t, modules, func(t *testing.T, env *Env) { - env.OpenFile("a/a.go") - env.OpenFile("b/go.mod") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "package a"), - env.DiagnosticAtRegexp("b/go.mod", "module b.com"), - OutstandingWork(lsp.WorkspaceLoadFailure, "gopls requires a module at the root of your workspace."), - ) - }) - }) - } - - // Expect no warning if GO111MODULE=auto in a directory in GOPATH. - t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) { - WithOptions( - Modes(Singleton), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, - InGOPATH(), - ).Run(t, modules, func(t *testing.T, env *Env) { - env.OpenFile("a/a.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - NoDiagnostics("a/a.go"), - ), - NoOutstandingWork(), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"nosuchpkg"`), WithMessage(`cannot find package "nosuchpkg" in any of`)), ) }) }) @@ -1815,24 +1689,14 @@ func helloHelper() {} ` WithOptions( ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, nested, func(t *testing.T, env *Env) { // Expect a diagnostic in a nested module. env.OpenFile("nested/hello/hello.go") - didOpen := env.DoneWithOpen() - env.Await( - OnceMet( - didOpen, - env.DiagnosticAtRegexp("nested/hello/hello.go", "helloHelper"), - ), - OnceMet( - didOpen, - env.DiagnosticAtRegexpWithMessage("nested/hello/hello.go", "package hello", "nested module"), - ), - OnceMet( - didOpen, - OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("nested/hello/hello.go", "helloHelper")), + Diagnostics(env.AtRegexp("nested/hello/hello.go", "package hello"), WithMessage("nested module")), + OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"), ) }) } @@ -1847,12 +1711,7 @@ func main() {} Run(t, nomod, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.RegexpReplace("main.go", "{}", "{ var x int; }") // simulate typing - env.Await( - OnceMet( - env.DoneWithChange(), - NoLogMatching(protocol.Info, "packages=1"), - ), - ) + env.AfterChange(NoLogMatching(protocol.Info, "packages=1")) }) } @@ -1876,9 +1735,9 @@ var Bar = Foo Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo.go") - env.Await(env.DiagnosticAtRegexpWithMessage("bar.go", `Foo`, "undeclared name")) + env.AfterChange(Diagnostics(env.AtRegexp("bar.go", `Foo`))) env.RegexpReplace("foo.go", `\+build`, "") - env.Await(EmptyDiagnostics("bar.go")) + env.AfterChange(NoDiagnostics(ForFile("bar.go"))) }) } @@ -1906,17 +1765,15 @@ package main Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OpenFile("other.go") - env.Await( - env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"), - env.DiagnosticAtRegexpWithMessage("main.go", "fdas", "undeclared name"), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "asdf")), + Diagnostics(env.AtRegexp("main.go", "fdas")), ) env.SetBufferContent("other.go", "package main\n\nasdf") // The new diagnostic in other.go should not suppress diagnostics in main.go. - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("other.go", "asdf", "expected declaration"), - env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("other.go", "asdf"), WithMessage("expected declaration")), + Diagnostics(env.AtRegexp("main.go", "asdf")), ) }) } @@ -1935,42 +1792,8 @@ package main env.Await(env.DoneWithOpen()) env.RegexpReplace("go.mod", "module", "modul") env.SaveBufferWithoutActions("go.mod") - env.Await( - OnceMet( - env.DoneWithSave(), - NoLogMatching(protocol.Error, "initial workspace load failed"), - ), - ) - }) -} - -// Tests golang/go#45075: A panic in fillreturns broke diagnostics. -// Expect an error log indicating that fillreturns panicked, as well type -// errors for the broken code. -func TestFillReturnsPanic(t *testing.T) { - // At tip, the panic no longer reproduces. - testenv.SkipAfterGo1Point(t, 16) - - const files = ` --- go.mod -- -module mod.com - -go 1.15 --- main.go -- -package main - -func foo() int { - return x, nil -} -` - Run(t, files, func(t *testing.T, env *Env) { - env.OpenFile("main.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - LogMatching(protocol.Error, `.*analysis fillreturns.*panicked.*`, 1, true), - env.DiagnosticAtRegexpWithMessage("main.go", `return x`, "wrong number of return values"), - ), + env.AfterChange( + NoLogMatching(protocol.Error, "initial workspace load failed"), ) }) } @@ -1990,168 +1813,8 @@ func main() {} ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("go.mod") - env.Await( - OnceMet( - env.DoneWithOpen(), - LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false), - ), - ) - }) -} - -func TestUseOfInvalidMetadata(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - - const mod = ` --- go.mod -- -module mod.com - -go 1.12 --- main.go -- -package main - -import ( - "mod.com/a" - //"os" -) - -func _() { - a.Hello() - os.Getenv("") - //var x int -} --- a/a.go -- -package a - -func Hello() {} -` - WithOptions( - EditorConfig{ - ExperimentalUseInvalidMetadata: true, - }, - Modes(Singleton), - ).Run(t, mod, func(t *testing.T, env *Env) { - env.OpenFile("go.mod") - env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file - env.SaveBufferWithoutActions("go.mod") - env.Await( - env.DiagnosticAtRegexp("go.mod", "modul"), - ) - // Confirm that language features work with invalid metadata. - env.OpenFile("main.go") - file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "Hello")) - wantPos := env.RegexpSearch("a/a.go", "Hello") - if file != "a/a.go" && pos != wantPos { - t.Fatalf("expected a/a.go:%s, got %s:%s", wantPos, file, pos) - } - // Confirm that new diagnostics appear with invalid metadata by adding - // an unused variable to the body of the function. - env.RegexpReplace("main.go", "//var x int", "var x int") - env.Await( - env.DiagnosticAtRegexp("main.go", "x"), - ) - // Add an import and confirm that we get a diagnostic for it, since the - // metadata will not have been updated. - env.RegexpReplace("main.go", "//\"os\"", "\"os\"") - env.Await( - env.DiagnosticAtRegexp("main.go", `"os"`), - ) - // Fix the go.mod file and expect the diagnostic to resolve itself. - env.RegexpReplace("go.mod", "modul mod.com", "module mod.com") - env.SaveBuffer("go.mod") - env.Await( - env.DiagnosticAtRegexp("main.go", "x"), - env.NoDiagnosticAtRegexp("main.go", `"os"`), - EmptyDiagnostics("go.mod"), - ) - }) -} - -func TestReloadInvalidMetadata(t *testing.T) { - // We only use invalid metadata for Go versions > 1.12. - testenv.NeedsGo1Point(t, 13) - - const mod = ` --- go.mod -- -module mod.com - -go 1.12 --- main.go -- -package main - -func _() {} -` - WithOptions( - EditorConfig{ - ExperimentalUseInvalidMetadata: true, - }, - // ExperimentalWorkspaceModule has a different failure mode for this - // case. - Modes(Singleton), - ).Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - OnceMet( - InitialWorkspaceLoad, - CompletedWork("Load", 1, false), - ), - ) - - // Break the go.mod file on disk, expecting a reload. - env.WriteWorkspaceFile("go.mod", `modul mod.com - -go 1.12 -`) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - env.DiagnosticAtRegexp("go.mod", "modul"), - CompletedWork("Load", 1, false), - ), - ) - - env.OpenFile("main.go") - env.Await(env.DoneWithOpen()) - // The first edit after the go.mod file invalidation should cause a reload. - // Any subsequent simple edits should not. - content := `package main - -func main() { - _ = 1 -} -` - env.EditBuffer("main.go", fake.NewEdit(0, 0, 3, 0, content)) - env.Await( - OnceMet( - env.DoneWithChange(), - CompletedWork("Load", 2, false), - NoLogMatching(protocol.Error, "error loading file"), - ), - ) - env.RegexpReplace("main.go", "_ = 1", "_ = 2") - env.Await( - OnceMet( - env.DoneWithChange(), - CompletedWork("Load", 2, false), - NoLogMatching(protocol.Error, "error loading file"), - ), - ) - // Add an import to the main.go file and confirm that it does get - // reloaded, but the reload fails, so we see a diagnostic on the new - // "fmt" import. - env.EditBuffer("main.go", fake.NewEdit(0, 0, 5, 0, `package main - -import "fmt" - -func main() { - fmt.Println("") -} -`)) - env.Await( - OnceMet( - env.DoneWithChange(), - env.DiagnosticAtRegexp("main.go", `"fmt"`), - CompletedWork("Load", 3, false), - ), + env.AfterChange( + LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false), ) }) } @@ -2169,9 +1832,14 @@ package main const C = 0b10 ` Run(t, files, func(t *testing.T, env *Env) { - env.Await(env.DiagnosticAtRegexpWithMessage("main.go", `0b10`, "go1.13 or later")) + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `0b10`), WithMessage("go1.13 or later")), + ) env.WriteWorkspaceFile("go.mod", "module mod.com \n\ngo 1.13\n") - env.Await(EmptyDiagnostics("main.go")) + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + ) }) } @@ -2191,11 +1859,10 @@ func F[T C](_ T) { Run(t, files, func(t *testing.T, env *Env) { var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("main.go", `C`, "undeclared name"), - ReadDiagnostics("main.go", &d), - ), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `C`)), + ReadDiagnostics("main.go", &d), ) if fixes := env.GetQuickFixes("main.go", d.Diagnostics); len(fixes) != 0 { t.Errorf("got quick fixes %v, wanted none", fixes) @@ -2218,17 +1885,15 @@ func F[T any](_ T) { ` Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file. var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("main.go", `T any`, "type parameters require"), - ReadDiagnostics("main.go", &d), - ), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `T any`), WithMessage("type parameter")), + ReadDiagnostics("main.go", &d), ) env.ApplyQuickFixes("main.go", d.Diagnostics) - - env.Await( - EmptyDiagnostics("main.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } @@ -2252,17 +1917,132 @@ func F[T any](_ T) { ` Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file. var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("main.go", `T any`, "type parameters require"), - ReadDiagnostics("main.go", &d), - ), + + // We should have a diagnostic because generics are not supported at 1.16. + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `T any`), WithMessage("type parameter")), + ReadDiagnostics("main.go", &d), ) + // This diagnostic should have a quick fix to edit the go version. env.ApplyQuickFixes("main.go", d.Diagnostics) - env.Await( - EmptyDiagnostics("main.go"), + // Once the edit is applied, the problematic diagnostics should be + // resolved. + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } + +// This test demonstrates that analysis facts are correctly propagated +// across packages. +func TestInterpackageAnalysis(t *testing.T) { + const src = ` +-- go.mod -- +module example.com +-- a/a.go -- +package a + +import "example.com/b" + +func _() { + new(b.B).Printf("%d", "s") // printf error +} + +-- b/b.go -- +package b + +import "example.com/c" + +type B struct{} + +func (B) Printf(format string, args ...interface{}) { + c.MyPrintf(format, args...) +} + +-- c/c.go -- +package c + +import "fmt" + +func MyPrintf(format string, args ...interface{}) { + fmt.Printf(format, args...) +} +` + Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("a/a.go") + env.AfterChange( + Diagnostics( + env.AtRegexp("a/a.go", "new.*Printf"), + WithMessage("format %d has arg \"s\" of wrong type string"), + ), + ) + }) +} + +// This test ensures that only Analyzers with RunDespiteErrors=true +// are invoked on a package that would not compile, even if the errors +// are distant and localized. +func TestErrorsThatPreventAnalysis(t *testing.T) { + const src = ` +-- go.mod -- +module example.com +-- a/a.go -- +package a + +import "fmt" +import "sync" +import _ "example.com/b" + +func _() { + // The copylocks analyzer (RunDespiteErrors, FactTypes={}) does run. + var mu sync.Mutex + mu2 := mu // copylocks error, reported + _ = &mu2 + + // The printf analyzer (!RunDespiteErrors, FactTypes!={}) does not run: + // (c, printf) failed because of type error in c + // (b, printf) and (a, printf) do not run because of failed prerequisites. + fmt.Printf("%d", "s") // printf error, unreported + + // The bools analyzer (!RunDespiteErrors, FactTypes={}) does not run: + var cond bool + _ = cond != true && cond != true // bools error, unreported +} + +-- b/b.go -- +package b + +import _ "example.com/c" + +-- c/c.go -- +package c + +var _ = 1 / "" // type error + +` + Run(t, src, func(t *testing.T, env *Env) { + var diags protocol.PublishDiagnosticsParams + env.OpenFile("a/a.go") + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "mu2 := (mu)"), WithMessage("assignment copies lock value")), + ReadDiagnostics("a/a.go", &diags)) + + // Assert that there were no other diagnostics. + // In particular: + // - "fmt.Printf" does not trigger a [printf] finding; + // - "cond != true" does not trigger a [bools] finding. + // + // We use this check in preference to NoDiagnosticAtRegexp + // as it is robust in case of minor mistakes in the position + // regexp, and because it reports unexpected diagnostics. + if got, want := len(diags.Diagnostics), 1; got != want { + t.Errorf("got %d diagnostics in a/a.go, want %d:", got, want) + for i, diag := range diags.Diagnostics { + t.Logf("Diagnostics[%d] = %+v", i, diag) + } + } + }) +} diff --git a/gopls/internal/regtest/diagnostics/golist_test.go b/gopls/internal/regtest/diagnostics/golist_test.go new file mode 100644 index 00000000000..85b35be024f --- /dev/null +++ b/gopls/internal/regtest/diagnostics/golist_test.go @@ -0,0 +1,71 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diagnostics + +import ( + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/testenv" +) + +func TestGoListErrors(t *testing.T) { + testenv.NeedsTool(t, "cgo") + + const src = ` +-- go.mod -- +module a.com + +go 1.18 +-- a/a.go -- +package a + +import +-- c/c.go -- +package c + +/* +int fortythree() { return 42; } +*/ +import "C" + +func Foo() { + print(C.fortytwo()) +} +-- p/p.go -- +package p + +import "a.com/q" + +const P = q.Q + 1 +-- q/q.go -- +package q + +import "a.com/p" + +const Q = p.P + 1 +` + + Run(t, src, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics( + env.AtRegexp("a/a.go", "import\n()"), + FromSource(string(source.ParseError)), + ), + Diagnostics( + AtPosition("c/c.go", 0, 0), + FromSource(string(source.ListError)), + WithMessage("may indicate failure to perform cgo processing"), + ), + Diagnostics( + env.AtRegexp("p/p.go", `"a.com/q"`), + FromSource(string(source.ListError)), + WithMessage("import cycle not allowed"), + ), + ) + }) +} diff --git a/gopls/internal/regtest/diagnostics/invalidation_test.go b/gopls/internal/regtest/diagnostics/invalidation_test.go new file mode 100644 index 00000000000..f5097f32d77 --- /dev/null +++ b/gopls/internal/regtest/diagnostics/invalidation_test.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diagnostics + +import ( + "fmt" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +// Test for golang/go#50267: diagnostics should be re-sent after a file is +// opened. +func TestDiagnosticsAreResentAfterCloseOrOpen(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.16 +-- main.go -- +package main + +func _() { + x := 2 +} +` + Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file. + env.OpenFile("main.go") + var afterOpen protocol.PublishDiagnosticsParams + env.AfterChange( + ReadDiagnostics("main.go", &afterOpen), + ) + env.CloseBuffer("main.go") + var afterClose protocol.PublishDiagnosticsParams + env.AfterChange( + ReadDiagnostics("main.go", &afterClose), + ) + if afterOpen.Version == afterClose.Version { + t.Errorf("publishDiagnostics: got the same version after closing (%d) as after opening", afterOpen.Version) + } + env.OpenFile("main.go") + var afterReopen protocol.PublishDiagnosticsParams + env.AfterChange( + ReadDiagnostics("main.go", &afterReopen), + ) + if afterReopen.Version == afterClose.Version { + t.Errorf("pubslishDiagnostics: got the same version after reopening (%d) as after closing", afterClose.Version) + } + }) +} + +// Test for the "chattyDiagnostics" setting: we should get re-published +// diagnostics after every file change, even if diagnostics did not change. +func TestChattyDiagnostics(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.16 +-- main.go -- +package main + +func _() { + x := 2 +} + +// Irrelevant comment #0 +` + + WithOptions( + Settings{ + "chattyDiagnostics": true, + }, + ).Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file. + + env.OpenFile("main.go") + var d protocol.PublishDiagnosticsParams + env.AfterChange( + ReadDiagnostics("main.go", &d), + ) + + if len(d.Diagnostics) != 1 { + t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics)) + } + msg := d.Diagnostics[0].Message + + for i := 0; i < 5; i++ { + before := d.Version + env.RegexpReplace("main.go", "Irrelevant comment #.", fmt.Sprintf("Irrelevant comment #%d", i)) + env.AfterChange( + ReadDiagnostics("main.go", &d), + ) + + if d.Version == before { + t.Errorf("after change, got version %d, want new version", d.Version) + } + + // As a sanity check, make sure we have the same diagnostic. + if len(d.Diagnostics) != 1 { + t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics)) + } + newMsg := d.Diagnostics[0].Message + if newMsg != msg { + t.Errorf("after change, got message %q, want %q", newMsg, msg) + } + } + }) +} diff --git a/gopls/internal/regtest/diagnostics/undeclared_test.go b/gopls/internal/regtest/diagnostics/undeclared_test.go index 79f7d42675b..ac5f598cc48 100644 --- a/gopls/internal/regtest/diagnostics/undeclared_test.go +++ b/gopls/internal/regtest/diagnostics/undeclared_test.go @@ -7,8 +7,8 @@ package diagnostics import ( "testing" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestUndeclaredDiagnostics(t *testing.T) { @@ -44,23 +44,29 @@ func _() int { // 'x' is undeclared, but still necessary. env.OpenFile("a/a.go") - env.Await(env.DiagnosticAtRegexp("a/a.go", "x")) - diags := env.DiagnosticsFor("a/a.go") - if got := len(diags.Diagnostics); got != 1 { + var adiags protocol.PublishDiagnosticsParams + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "x")), + ReadDiagnostics("a/a.go", &adiags), + ) + if got := len(adiags.Diagnostics); got != 1 { t.Errorf("len(Diagnostics) = %d, want 1", got) } - if diag := diags.Diagnostics[0]; isUnnecessary(diag) { + if diag := adiags.Diagnostics[0]; isUnnecessary(diag) { t.Errorf("%v tagged unnecessary, want necessary", diag) } // 'y = y' is pointless, and should be detected as unnecessary. env.OpenFile("b/b.go") - env.Await(env.DiagnosticAtRegexp("b/b.go", "y = y")) - diags = env.DiagnosticsFor("b/b.go") - if got := len(diags.Diagnostics); got != 1 { + var bdiags protocol.PublishDiagnosticsParams + env.AfterChange( + Diagnostics(env.AtRegexp("b/b.go", "y = y")), + ReadDiagnostics("b/b.go", &bdiags), + ) + if got := len(bdiags.Diagnostics); got != 1 { t.Errorf("len(Diagnostics) = %d, want 1", got) } - if diag := diags.Diagnostics[0]; !isUnnecessary(diag) { + if diag := bdiags.Diagnostics[0]; !isUnnecessary(diag) { t.Errorf("%v tagged necessary, want unnecessary", diag) } }) diff --git a/gopls/internal/regtest/inlayhints/inlayhints_test.go b/gopls/internal/regtest/inlayhints/inlayhints_test.go new file mode 100644 index 00000000000..d4caabe79d3 --- /dev/null +++ b/gopls/internal/regtest/inlayhints/inlayhints_test.go @@ -0,0 +1,69 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package inlayhint + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/hooks" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/bug" +) + +func TestMain(m *testing.M) { + bug.PanicOnBugs = true + Main(m, hooks.Options) +} + +func TestEnablingInlayHints(t *testing.T) { + const workspace = ` +-- go.mod -- +module inlayHint.test +go 1.12 +-- lib.go -- +package lib +type Number int +const ( + Zero Number = iota + One + Two +) +` + tests := []struct { + label string + enabled map[string]bool + wantInlayHint bool + }{ + { + label: "default", + wantInlayHint: false, + }, + { + label: "enable const", + enabled: map[string]bool{source.ConstantValues: true}, + wantInlayHint: true, + }, + { + label: "enable parameter names", + enabled: map[string]bool{source.ParameterNames: true}, + wantInlayHint: false, + }, + } + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + WithOptions( + Settings{ + "hints": test.enabled, + }, + ).Run(t, workspace, func(t *testing.T, env *Env) { + env.OpenFile("lib.go") + lens := env.InlayHints("lib.go") + if gotInlayHint := len(lens) > 0; gotInlayHint != test.wantInlayHint { + t.Errorf("got inlayHint: %t, want %t", gotInlayHint, test.wantInlayHint) + } + }) + }) + } +} diff --git a/gopls/internal/regtest/marker/marker_test.go b/gopls/internal/regtest/marker/marker_test.go new file mode 100644 index 00000000000..ac051a555e0 --- /dev/null +++ b/gopls/internal/regtest/marker/marker_test.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package marker + +import ( + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +// Note: we use a separate package for the marker tests so that we can easily +// compare their performance to the existing marker tests in ./internal/lsp. + +// TestMarkers runs the marker tests from the testdata directory. +// +// See RunMarkerTests for details on how marker tests work. +func TestMarkers(t *testing.T) { + RunMarkerTests(t, "testdata") +} diff --git a/gopls/internal/regtest/marker/testdata/definition/embed.txt b/gopls/internal/regtest/marker/testdata/definition/embed.txt new file mode 100644 index 00000000000..b1131d86362 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/definition/embed.txt @@ -0,0 +1,226 @@ +This test checks definition and hover operations over embedded fields and methods. +-- go.mod -- +module mod.com + +go 1.18 +-- a/a.go -- +package a + +type A string //@loc(AString, "A") + +func (_ A) Hi() {} //@loc(AHi, "Hi") + +type S struct { + Field int //@loc(SField, "Field") + R // embed a struct + H // embed an interface +} + +type R struct { + Field2 int //@loc(RField2, "Field2") +} + +func (_ R) Hey() {} //@loc(RHey, "Hey") + +type H interface { //@loc(H, "H") + Goodbye() //@loc(HGoodbye, "Goodbye") +} + +type I interface { //@loc(I, "I") + B() //@loc(IB, "B") + J +} + +type J interface { //@loc(J, "J") + Hello() //@loc(JHello, "Hello") +} + +-- b/b.go -- +package b + +import "mod.com/a" //@loc(AImport, re"\".*\"") + +type Embed struct { + *a.A + a.I + a.S +} + +func _() { + e := Embed{} + e.Hi() //@def("Hi", AHi),hover("Hi", "Hi", AHi) + e.B() //@def("B", IB),hover("B", "B", IB) + _ = e.Field //@def("Field", SField),hover("Field", "Field", SField) + _ = e.Field2 //@def("Field2", RField2),hover("Field2", "Field2", RField2) + e.Hello() //@def("Hello", JHello),hover("Hello", "Hello",JHello) + e.Hey() //@def("Hey", RHey), hover("Hey", "Hey", RHey) + e.Goodbye() //@def("Goodbye", HGoodbye), hover("Goodbye", "Goodbye", HGoodbye) +} + +type aAlias = a.A //@loc(aAlias, "aAlias") + +type S1 struct { //@loc(S1, "S1") + F1 int //@loc(S1F1, "F1") + S2 //@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2) + a.A //@def("A", AString),hover("A", "A", aA) + aAlias //@def("a", aAlias),hover("a", "aAlias", aAlias) +} + +type S2 struct { //@loc(S2, "S2") + F1 string //@loc(S2F1, "F1") + F2 int //@loc(S2F2, "F2") + *a.A //@def("A", AString),def("a",AImport) +} + +type S3 struct { + F1 struct { + a.A //@def("A", AString) + } +} + +func Bar() { + var x S1 //@def("S1", S1),hover("S1", "S1", S1) + _ = x.S2 //@def("S2", S1S2),hover("S2", "S2", S1S2) + _ = x.F1 //@def("F1", S1F1),hover("F1", "F1", S1F1) + _ = x.F2 //@def("F2", S2F2),hover("F2", "F2", S2F2) + _ = x.S2.F1 //@def("F1", S2F1),hover("F1", "F1", S2F1) +} +-- b/c.go -- +package b + +var _ = S1{ //@def("S1", S1),hover("S1", "S1", S1) + F1: 99, //@def("F1", S1F1),hover("F1", "F1", S1F1) +} +-- @AHi/hover.md -- +```go +func (a.A).Hi() +``` + +[`(a.A).Hi` on pkg.go.dev](https://pkg.go.dev/mod.com/a#A.Hi) +-- @HGoodbye/hover.md -- +```go +func (a.H).Goodbye() +``` + +@loc(HGoodbye, "Goodbye") + + +[`(a.H).Goodbye` on pkg.go.dev](https://pkg.go.dev/mod.com/a#H.Goodbye) +-- @IB/hover.md -- +```go +func (a.I).B() +``` + +@loc(IB, "B") + + +[`(a.I).B` on pkg.go.dev](https://pkg.go.dev/mod.com/a#I.B) +-- @JHello/hover.md -- +```go +func (a.J).Hello() +``` + +@loc(JHello, "Hello") + + +[`(a.J).Hello` on pkg.go.dev](https://pkg.go.dev/mod.com/a#J.Hello) +-- @RField2/hover.md -- +```go +field Field2 int +``` + +@loc(RField2, "Field2") + + +[`(a.R).Field2` on pkg.go.dev](https://pkg.go.dev/mod.com/a#R.Field2) +-- @RHey/hover.md -- +```go +func (a.R).Hey() +``` + +[`(a.R).Hey` on pkg.go.dev](https://pkg.go.dev/mod.com/a#R.Hey) +-- @S1/hover.md -- +```go +type S1 struct { + F1 int //@loc(S1F1, "F1") + S2 //@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2) + a.A //@def("A", AString),hover("A", "A", aA) + aAlias //@def("a", aAlias),hover("a", "aAlias", aAlias) +} +``` + +[`b.S1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1) +-- @S1F1/hover.md -- +```go +field F1 int +``` + +@loc(S1F1, "F1") + + +[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1.F1) +-- @S1S2/hover.md -- +```go +field S2 S2 +``` + +@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2) + + +[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1.S2) +-- @S2/hover.md -- +```go +type S2 struct { + F1 string //@loc(S2F1, "F1") + F2 int //@loc(S2F2, "F2") + *a.A //@def("A", AString),def("a",AImport) +} +``` + +[`b.S2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2) +-- @S2F1/hover.md -- +```go +field F1 string +``` + +@loc(S2F1, "F1") + + +[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2.F1) +-- @S2F2/hover.md -- +```go +field F2 int +``` + +@loc(S2F2, "F2") + + +[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2.F2) +-- @SField/hover.md -- +```go +field Field int +``` + +@loc(SField, "Field") + + +[`(a.S).Field` on pkg.go.dev](https://pkg.go.dev/mod.com/a#S.Field) +-- @aA/hover.md -- +```go +type A string + +func (a.A).Hi() +``` + +@loc(AString, "A") + + +[`a.A` on pkg.go.dev](https://pkg.go.dev/mod.com/a#A) +-- @aAlias/hover.md -- +```go +type aAlias = a.A + +func (a.A).Hi() +``` + +@loc(aAlias, "aAlias") diff --git a/gopls/internal/regtest/marker/testdata/definition/import.txt b/gopls/internal/regtest/marker/testdata/definition/import.txt new file mode 100644 index 00000000000..9e5e5929aa9 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/definition/import.txt @@ -0,0 +1,52 @@ +This test checks definition and hover over imports. +-- go.mod -- +module mod.com + +go 1.18 +-- foo/foo.go -- +package foo + +type Foo struct{} + +// DoFoo does foo. +func DoFoo() {} //@loc(DoFoo, "DoFoo") +-- bar/bar.go -- +package bar + +import ( + myFoo "mod.com/foo" //@loc(myFoo, "myFoo") +) + +var _ *myFoo.Foo //@def("myFoo", myFoo),hover("myFoo", "myFoo", myFoo) +-- bar/dotimport.go -- +package bar + +import . "mod.com/foo" + +func _() { + // variable of type foo.Foo + var _ Foo //@hover("_", "_", FooVar) + + DoFoo() //@hover("DoFoo", "DoFoo", DoFoo) +} +-- @DoFoo/hover.md -- +```go +func DoFoo() +``` + +DoFoo does foo. + + +[`foo.DoFoo` on pkg.go.dev](https://pkg.go.dev/mod.com/foo#DoFoo) +-- @FooVar/hover.md -- +```go +var _ Foo +``` + +variable of type foo.Foo +-- @myFoo/hover.md -- +```go +package myFoo ("mod.com/foo") +``` + +[`myFoo` on pkg.go.dev](https://pkg.go.dev/mod.com/foo) diff --git a/gopls/internal/regtest/marker/testdata/definition/misc.txt b/gopls/internal/regtest/marker/testdata/definition/misc.txt new file mode 100644 index 00000000000..48f5d340c43 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/definition/misc.txt @@ -0,0 +1,230 @@ +This test exercises miscellaneous definition and hover requests. +-- go.mod -- +module mod.com + +go 1.16 +-- a.go -- +package a //@loc(aPackage, re"package (a)"),hover(aPackage, aPackage, aPackage) + +var ( + // x is a variable. + x string //@loc(x, "x"),hover(x, x, hoverx) +) + +// Constant block. When I hover on h, I should see this comment. +const ( + // When I hover on g, I should see this comment. + g = 1 //@hover("g", "g", hoverg) + + h = 2 //@hover("h", "h", hoverh) +) + +// z is a variable too. +var z string //@loc(z, "z"),hover(z, z, hoverz) + +func AStuff() { //@loc(AStuff, "AStuff") + x := 5 + Random2(x) //@def("dom2", Random2) + Random() //@def("()", Random) +} + +type H interface { //@loc(H, "H") + Goodbye() +} + +type I interface { //@loc(I, "I") + B() + J +} + +type J interface { //@loc(J, "J") + Hello() +} + +func _() { + // 1st type declaration block + type ( + a struct { //@hover("a", "a", hoverDeclBlocka) + x string + } + ) + + // 2nd type declaration block + type ( + // b has a comment + b struct{} //@hover("b", "b", hoverDeclBlockb) + ) + + // 3rd type declaration block + type ( + // c is a struct + c struct { //@hover("c", "c", hoverDeclBlockc) + f string + } + + d string //@hover("d", "d", hoverDeclBlockd) + ) + + type ( + e struct { //@hover("e", "e", hoverDeclBlocke) + f float64 + } // e has a comment + ) +} + +var ( + hh H //@hover("H", "H", hoverH) + ii I //@hover("I", "I", hoverI) + jj J //@hover("J", "J", hoverJ) +) +-- a_test.go -- +package a + +import ( + "testing" +) + +func TestA(t *testing.T) { //@hover("TestA", "TestA", hoverTestA) +} +-- random.go -- +package a + +func Random() int { //@loc(Random, "Random") + y := 6 + 7 + return y +} + +func Random2(y int) int { //@loc(Random2, "Random2"),loc(RandomParamY, "y") + return y //@def("y", RandomParamY),hover("y", "y", hovery) +} + +type Pos struct { + x, y int //@loc(PosX, "x"),loc(PosY, "y") +} + +// Typ has a comment. Its fields do not. +type Typ struct{ field string } //@loc(TypField, "field") + +func _() { + x := &Typ{} + _ = x.field //@def("field", TypField),hover("field", "field", hoverfield) +} + +func (p *Pos) Sum() int { //@loc(PosSum, "Sum") + return p.x + p.y //@hover("x", "x", hoverpx) +} + +func _() { + var p Pos + _ = p.Sum() //@def("()", PosSum),hover("()", `Sum`, hoverSum) +} +-- @aPackage/hover.md -- +-- @hoverDeclBlocka/hover.md -- +```go +type a struct { + x string +} +``` + +1st type declaration block +-- @hoverDeclBlockb/hover.md -- +```go +type b struct{} +``` + +b has a comment +-- @hoverDeclBlockc/hover.md -- +```go +type c struct { + f string +} +``` + +c is a struct +-- @hoverDeclBlockd/hover.md -- +```go +type d string +``` + +3rd type declaration block +-- @hoverDeclBlocke/hover.md -- +```go +type e struct { + f float64 +} +``` + +e has a comment +-- @hoverH/hover.md -- +```go +type H interface { + Goodbye() +} +``` + +[`a.H` on pkg.go.dev](https://pkg.go.dev/mod.com#H) +-- @hoverI/hover.md -- +```go +type I interface { + B() + J +} +``` + +[`a.I` on pkg.go.dev](https://pkg.go.dev/mod.com#I) +-- @hoverJ/hover.md -- +```go +type J interface { + Hello() +} +``` + +[`a.J` on pkg.go.dev](https://pkg.go.dev/mod.com#J) +-- @hoverSum/hover.md -- +```go +func (*Pos).Sum() int +``` + +[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/mod.com#Pos.Sum) +-- @hoverTestA/hover.md -- +```go +func TestA(t *testing.T) +``` +-- @hoverfield/hover.md -- +```go +field field string +``` +-- @hoverg/hover.md -- +```go +const g untyped int = 1 +``` + +When I hover on g, I should see this comment. +-- @hoverh/hover.md -- +```go +const h untyped int = 2 +``` + +Constant block. When I hover on h, I should see this comment. +-- @hoverpx/hover.md -- +```go +field x int +``` + +@loc(PosX, "x"),loc(PosY, "y") +-- @hoverx/hover.md -- +```go +var x string +``` + +x is a variable. +-- @hovery/hover.md -- +```go +var y int +``` +-- @hoverz/hover.md -- +```go +var z string +``` + +z is a variable too. diff --git a/gopls/internal/regtest/marker/testdata/hover/basiclit.txt b/gopls/internal/regtest/marker/testdata/hover/basiclit.txt new file mode 100644 index 00000000000..32527420d01 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/basiclit.txt @@ -0,0 +1,60 @@ +This test checks gopls behavior when hovering over basic literals. +-- basiclit.go -- +package basiclit + +func _() { + _ = 'a' //@hover("'a'", "'a'", latinA) + _ = 0x61 //@hover("0x61", "0x61", latinA) + + _ = '\u2211' //@hover("'\\u2211'", "'\\u2211'", summation) + _ = 0x2211 //@hover("0x2211", "0x2211", summation) + _ = "foo \u2211 bar" //@hover("\\u2211", "\\u2211", summation) + + _ = '\a' //@hover("'\\a'", "'\\a'", control) + _ = "foo \a bar" //@hover("\\a", "\\a", control) + + _ = '\U0001F30A' //@hover("'\\U0001F30A'", "'\\U0001F30A'", waterWave) + _ = 0x0001F30A //@hover("0x0001F30A", "0x0001F30A", waterWave) + _ = "foo \U0001F30A bar" //@hover("\\U0001F30A", "\\U0001F30A", waterWave) + + _ = '\x7E' //@hover("'\\x7E'", "'\\x7E'", tilde) + _ = "foo \x7E bar" //@hover("\\x7E", "\\x7E", tilde) + _ = "foo \a bar" //@hover("\\a", "\\a", control) + + _ = '\173' //@hover("'\\173'", "'\\173'", leftCurly) + _ = "foo \173 bar" //@hover("\\173","\\173", leftCurly) + _ = "foo \173 bar \u2211 baz" //@hover("\\173","\\173", leftCurly) + _ = "foo \173 bar \u2211 baz" //@hover("\\u2211","\\u2211", summation) + _ = "foo\173bar\u2211baz" //@hover("\\173","\\173", leftCurly) + _ = "foo\173bar\u2211baz" //@hover("\\u2211","\\u2211", summation) + + // search for runes in string only if there is an escaped sequence + _ = "hello" //@hover(`"hello"`, _, _) + + // incorrect escaped rune sequences + _ = '\0' //@hover("'\\0'", _, _),diag(re`\\0()'`, re"illegal character") + _ = '\u22111' //@hover("'\\u22111'", _, _) + _ = '\U00110000' //@hover("'\\U00110000'", _, _) + _ = '\u12e45'//@hover("'\\u12e45'", _, _) + _ = '\xa' //@hover("'\\xa'", _, _) + _ = 'aa' //@hover("'aa'", _, _) + + // other basic lits + _ = 1 //@hover("1", _, _) + _ = 1.2 //@hover("1.2", _, _) + _ = 1.2i //@hover("1.2i", _, _) + _ = 0123 //@hover("0123", _, _) + _ = 0x1234567890 //@hover("0x1234567890", _, _) +) +-- @control/hover.md -- +U+0007, control +-- @latinA/hover.md -- +'a', U+0061, LATIN SMALL LETTER A +-- @leftCurly/hover.md -- +'{', U+007B, LEFT CURLY BRACKET +-- @summation/hover.md -- +'āˆ‘', U+2211, N-ARY SUMMATION +-- @tilde/hover.md -- +'~', U+007E, TILDE +-- @waterWave/hover.md -- +'🌊', U+1F30A, WATER WAVE diff --git a/gopls/internal/regtest/marker/testdata/hover/const.txt b/gopls/internal/regtest/marker/testdata/hover/const.txt new file mode 100644 index 00000000000..cdb0e51e27d --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/const.txt @@ -0,0 +1,18 @@ +This test checks hovering over constants. +-- go.mod -- +module mod.com + +go 1.18 +-- c.go -- +package c + +const X = 0 //@hover("X", "X", bX) +-- @bX/hover.md -- +```go +const X untyped int = 0 +``` + +@hover("X", "X", bX) + + +[`c.X` on pkg.go.dev](https://pkg.go.dev/mod.com#X) diff --git a/gopls/internal/regtest/marker/testdata/hover/generics.txt b/gopls/internal/regtest/marker/testdata/hover/generics.txt new file mode 100644 index 00000000000..2c526d82b97 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/generics.txt @@ -0,0 +1,72 @@ +This file contains tests for hovering over generic Go code. +-- flags -- +-min_go=go1.18 +-- generics.go -- +package generics + +type value[T any] struct { //hover("lue", "value", value),hover("T", "T", valueT) + val T //@hover("T", "T", valuevalT) + Q int //@hover("Q", "Q", valueQ) +} + +type Value[T any] struct { //@hover("T", "T", ValueT) + val T //@hover("T", "T", ValuevalT) + Q int //@hover("Q", "Q", ValueQ) +} + +// disabled - see issue #54822 +func F[P interface{ ~int | string }]() { // hover("P","P",Ptparam) + // disabled - see issue #54822 + var _ P // hover("P","P",Pvar) +} +-- go.mod -- +// A go.mod is require for correct pkgsite links. +// TODO(rfindley): don't link to ad-hoc or command-line-arguments packages! +module mod.com + +go 1.18 +-- inferred.go -- +package generics + +func app[S interface{ ~[]E }, E interface{}](s S, e E) S { + return append(s, e) +} + +func _() { + _ = app[[]int] //@hover("app", "app", appint) + _ = app[[]int, int] //@hover("app", "app", appint) + // TODO(rfindley): eliminate this diagnostic. + _ = app[[]int]([]int{}, 0) //@hover("app", "app", appint),diag("[[]int]", re"unnecessary type arguments") + _ = app([]int{}, 0) //@hover("app", "app", appint) +} +-- @ValueQ/hover.md -- +```go +field Q int +``` + +@hover("Q", "Q", ValueQ) + + +[`(generics.Value).Q` on pkg.go.dev](https://pkg.go.dev/mod.com#Value.Q) +-- @ValueT/hover.md -- +```go +type parameter T any +``` +-- @ValuevalT/hover.md -- +```go +type parameter T any +``` +-- @appint/hover.md -- +```go +func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S +``` +-- @valueQ/hover.md -- +```go +field Q int +``` + +@hover("Q", "Q", valueQ) +-- @valuevalT/hover.md -- +```go +type parameter T any +``` diff --git a/gopls/internal/regtest/marker/testdata/hover/goprivate.txt b/gopls/internal/regtest/marker/testdata/hover/goprivate.txt new file mode 100644 index 00000000000..4c309ef38cf --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/goprivate.txt @@ -0,0 +1,27 @@ +This test checks that links in hover obey GOPRIVATE. +-- env -- +GOPRIVATE=mod.com +-- go.mod -- +module mod.com +-- p.go -- +package p + +// T should not be linked, as it is private. +type T struct{} //@hover("T", "T", T) +-- lib/lib.go -- +package lib + +// GOPRIVATE should also match nested packages. +type L struct{} //@hover("L", "L", L) +-- @L/hover.md -- +```go +type L struct{} +``` + +GOPRIVATE should also match nested packages. +-- @T/hover.md -- +```go +type T struct{} +``` + +T should not be linked, as it is private. diff --git a/gopls/internal/regtest/marker/testdata/hover/hover.txt b/gopls/internal/regtest/marker/testdata/hover/hover.txt new file mode 100644 index 00000000000..f9cd3311bc8 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/hover.txt @@ -0,0 +1,29 @@ +This test demonstrates some features of the new marker test runner. +-- a.go -- +package a + +const abc = 0x2a //@hover("b", "abc", abc),hover(" =", "abc", abc) +-- typeswitch.go -- +package a + +func _() { + var y interface{} + switch x := y.(type) { //@hover("x", "x", x) + case int: + println(x) //@hover("x", "x", xint),hover(")", "x", xint) + } +} +-- @abc/hover.md -- +```go +const abc untyped int = 42 +``` + +@hover("b", "abc", abc),hover(" =", "abc", abc) +-- @x/hover.md -- +```go +var x interface{} +``` +-- @xint/hover.md -- +```go +var x int +``` diff --git a/gopls/internal/regtest/marker/testdata/hover/linkable.txt b/gopls/internal/regtest/marker/testdata/hover/linkable.txt new file mode 100644 index 00000000000..b82600a1e13 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/linkable.txt @@ -0,0 +1,123 @@ +This test checks that we correctly determine pkgsite links for various +identifiers. + +We should only produce links that work, meaning the object is reachable via the +package's public API. +-- go.mod -- +module mod.com + +go 1.18 +-- p.go -- +package p + +type E struct { + Embed int +} + +// T is in the package scope, and so should be linkable. +type T struct{ //@hover("T", "T", T) + // Only exported fields should be linkable + + f int //@hover("f", "f", f) + F int //@hover("F", "F", F) + + E + + // TODO(rfindley): is the link here correct? It ignores N. + N struct { + // Nested fields should also be linkable. + Nested int //@hover("Nested", "Nested", Nested) + } +} +// M is an exported method, and so should be linkable. +func (T) M() {} + +// m is not exported, and so should not be linkable. +func (T) m() {} + +func _() { + var t T + + // Embedded fields should be linkable. + _ = t.Embed //@hover("Embed", "Embed", Embed) + + // Local variables should not be linkable, even if they are capitalized. + var X int //@hover("X", "X", X) + _ = X + + // Local types should not be linkable, even if they are capitalized. + type Local struct { //@hover("Local", "Local", Local) + E + } + + // But the embedded field should still be linkable. + var l Local + _ = l.Embed //@hover("Embed", "Embed", Embed) +} +-- @Embed/hover.md -- +```go +field Embed int +``` + +[`(p.E).Embed` on pkg.go.dev](https://pkg.go.dev/mod.com#E.Embed) +-- @F/hover.md -- +```go +field F int +``` + +@hover("F", "F", F) + + +[`(p.T).F` on pkg.go.dev](https://pkg.go.dev/mod.com#T.F) +-- @Local/hover.md -- +```go +type Local struct { + E +} +``` + +Local types should not be linkable, even if they are capitalized. +-- @Nested/hover.md -- +```go +field Nested int +``` + +Nested fields should also be linkable. + + +[`(p.T).Nested` on pkg.go.dev](https://pkg.go.dev/mod.com#T.Nested) +-- @T/hover.md -- +```go +type T struct { + f int //@hover("f", "f", f) + F int //@hover("F", "F", F) + + E + + // TODO(rfindley): is the link here correct? It ignores N. + N struct { + // Nested fields should also be linkable. + Nested int //@hover("Nested", "Nested", Nested) + } +} + +func (T).M() +func (T).m() +``` + +T is in the package scope, and so should be linkable. + + +[`p.T` on pkg.go.dev](https://pkg.go.dev/mod.com#T) +-- @X/hover.md -- +```go +var X int +``` + +Local variables should not be linkable, even if they are capitalized. +-- @f/hover.md -- +```go +field f int +``` + +@hover("f", "f", f) diff --git a/gopls/internal/regtest/marker/testdata/hover/std.txt b/gopls/internal/regtest/marker/testdata/hover/std.txt new file mode 100644 index 00000000000..a526b5211eb --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/hover/std.txt @@ -0,0 +1,80 @@ +This test checks hover results for built-in or standard library symbols. + +It uses synopsis documentation as full documentation for some of these +built-ins varies across Go versions, where as it just so happens that the +synopsis does not. + +In the future we may need to limit this test to the latest Go version to avoid +documentation churn. +-- settings.json -- +{ + "hoverKind": "SynopsisDocumentation" +} +-- go.mod -- +module mod.com + +go 1.18 +-- std.go -- +package std + +import ( + "fmt" + "go/types" + "sync" +) + +func _() { + var err error //@loc(err, "err") + fmt.Printf("%v", err) //@def("err", err) + + var _ string //@hover("string", "string", hoverstring) + _ = make([]int, 0) //@hover("make", "make", hovermake) + + var mu sync.Mutex + mu.Lock() //@hover("Lock", "Lock", hoverLock) + + var typ *types.Named //@hover("types", "types", hoverTypes) + typ.Obj().Name() //@hover("Name", "Name", hoverName) +} +-- @hoverLock/hover.md -- +```go +func (*sync.Mutex).Lock() +``` + +Lock locks m. + + +[`(sync.Mutex).Lock` on pkg.go.dev](https://pkg.go.dev/sync#Mutex.Lock) +-- @hoverName/hover.md -- +```go +func (*types.object).Name() string +``` + +Name returns the object's (package-local, unqualified) name. + + +[`(types.TypeName).Name` on pkg.go.dev](https://pkg.go.dev/go/types#TypeName.Name) +-- @hoverTypes/hover.md -- +```go +package types ("go/types") +``` + +[`types` on pkg.go.dev](https://pkg.go.dev/go/types) +-- @hovermake/hover.md -- +```go +func make(t Type, size ...int) Type +``` + +The make built-in function allocates and initializes an object of type slice, map, or chan (only). + + +[`make` on pkg.go.dev](https://pkg.go.dev/builtin#make) +-- @hoverstring/hover.md -- +```go +type string string +``` + +string is the set of all strings of 8-bit bytes, conventionally but not necessarily representing UTF-8-encoded text. + + +[`string` on pkg.go.dev](https://pkg.go.dev/builtin#string) diff --git a/gopls/internal/regtest/misc/call_hierarchy_test.go b/gopls/internal/regtest/misc/call_hierarchy_test.go index 9d98896ce2e..f0f5d4a4117 100644 --- a/gopls/internal/regtest/misc/call_hierarchy_test.go +++ b/gopls/internal/regtest/misc/call_hierarchy_test.go @@ -6,8 +6,8 @@ package misc import ( "testing" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) // Test for golang/go#49125 @@ -23,11 +23,11 @@ package pkg // TODO(rfindley): this could probably just be a marker test. Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("p.go") - pos := env.RegexpSearch("p.go", "pkg") + loc := env.RegexpSearch("p.go", "pkg") var params protocol.CallHierarchyPrepareParams - params.TextDocument.URI = env.Sandbox.Workdir.URI("p.go") - params.Position = pos.ToProtocolPosition() + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start // Check that this doesn't panic. env.Editor.Server.PrepareCallHierarchy(env.Ctx, ¶ms) diff --git a/gopls/internal/regtest/misc/configuration_test.go b/gopls/internal/regtest/misc/configuration_test.go index d9cce96a43e..6e6d9d73388 100644 --- a/gopls/internal/regtest/misc/configuration_test.go +++ b/gopls/internal/regtest/misc/configuration_test.go @@ -7,9 +7,8 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/fake" "golang.org/x/tools/internal/testenv" ) @@ -36,18 +35,67 @@ var FooErr = errors.New("foo") ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + ) + cfg := env.Editor.Config() + cfg.Settings = map[string]interface{}{ + "staticcheck": true, + } + // TODO(rfindley): support waiting on diagnostics following a configuration + // change. + env.ChangeConfiguration(cfg) env.Await( - env.DoneWithOpen(), - NoDiagnostics("a/a.go"), + Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)")), + ) + }) +} + +// TestMajorOptionsChange is like TestChangeConfiguration, but modifies an +// an open buffer before making a major (but inconsequential) change that +// causes gopls to recreate the view. +// +// Gopls should not get confused about buffer content when recreating the view. +func TestMajorOptionsChange(t *testing.T) { + t.Skip("broken due to golang/go#57934") + + testenv.NeedsGo1Point(t, 17) + + const files = ` +-- go.mod -- +module mod.com + +go 1.12 +-- a/a.go -- +package a + +import "errors" + +var ErrFoo = errors.New("foo") +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("a/a.go") + // Introduce a staticcheck diagnostic. It should be detected when we enable + // staticcheck later. + env.RegexpReplace("a/a.go", "ErrFoo", "FooErr") + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) - cfg := &fake.EditorConfig{} - *cfg = env.Editor.Config + cfg := env.Editor.Config() + // Any change to environment recreates the view, but this should not cause + // gopls to get confused about the content of a/a.go: we should get the + // staticcheck diagnostic below. + cfg.Env = map[string]string{ + "AN_ARBITRARY_VAR": "FOO", + } cfg.Settings = map[string]interface{}{ "staticcheck": true, } - env.ChangeConfiguration(t, cfg) + // TODO(rfindley): support waiting on diagnostics following a configuration + // change. + env.ChangeConfiguration(cfg) env.Await( - DiagnosticAt("a/a.go", 5, 4), + Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)")), ) }) } @@ -70,11 +118,42 @@ import "errors" var FooErr = errors.New("foo") ` - WithOptions(EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, + WithOptions( + Settings{"staticcheck": true}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + ShownMessage("staticcheck is not supported"), + ) + }) +} + +func TestGofumptWarning(t *testing.T) { + testenv.SkipAfterGo1Point(t, 17) + + WithOptions( + Settings{"gofumpt": true}, + ).Run(t, "", func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + ShownMessage("gofumpt is not supported"), + ) + }) +} + +func TestDeprecatedSettings(t *testing.T) { + WithOptions( + Settings{ + "experimentalUseInvalidMetadata": true, + "experimentalWatchedFileDelay": "1s", + "experimentalWorkspaceModule": true, }, - }).Run(t, files, func(t *testing.T, env *Env) { - env.Await(ShownMessage("staticcheck is not supported")) + ).Run(t, "", func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + ShownMessage("experimentalWorkspaceModule"), + ShownMessage("experimentalUseInvalidMetadata"), + ShownMessage("experimentalWatchedFileDelay"), + ) }) } diff --git a/gopls/internal/regtest/misc/debugserver_test.go b/gopls/internal/regtest/misc/debugserver_test.go index c0df87070c0..519f7944790 100644 --- a/gopls/internal/regtest/misc/debugserver_test.go +++ b/gopls/internal/regtest/misc/debugserver_test.go @@ -8,10 +8,10 @@ import ( "net/http" "testing" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestStartDebugging(t *testing.T) { diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go index 2f5a54820d0..7767ac5aaa3 100644 --- a/gopls/internal/regtest/misc/definition_test.go +++ b/gopls/internal/regtest/misc/definition_test.go @@ -5,16 +5,15 @@ package misc import ( + "os" "path" + "path/filepath" "strings" "testing" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/testenv" - - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" ) const internalDefinition = ` @@ -39,12 +38,111 @@ const message = "Hello World." func TestGoToInternalDefinition(t *testing.T) { Run(t, internalDefinition, func(t *testing.T, env *Env) { env.OpenFile("main.go") - name, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "message")) + loc := env.GoToDefinition(env.RegexpSearch("main.go", "message")) + name := env.Sandbox.Workdir.URIToPath(loc.URI) if want := "const.go"; name != want { t.Errorf("GoToDefinition: got file %q, want %q", name, want) } - if want := env.RegexpSearch("const.go", "message"); pos != want { - t.Errorf("GoToDefinition: got position %v, want %v", pos, want) + if want := env.RegexpSearch("const.go", "message"); loc != want { + t.Errorf("GoToDefinition: got location %v, want %v", loc, want) + } + }) +} + +const linknameDefinition = ` +-- go.mod -- +module mod.com + +-- upper/upper.go -- +package upper + +import ( + _ "unsafe" + + _ "mod.com/middle" +) + +//go:linkname foo mod.com/lower.bar +func foo() string + +-- middle/middle.go -- +package middle + +import ( + _ "mod.com/lower" +) + +-- lower/lower.s -- + +-- lower/lower.go -- +package lower + +func bar() string { + return "bar as foo" +}` + +func TestGoToLinknameDefinition(t *testing.T) { + Run(t, linknameDefinition, func(t *testing.T, env *Env) { + env.OpenFile("upper/upper.go") + + // Jump from directives 2nd arg. + start := env.RegexpSearch("upper/upper.go", `lower.bar`) + loc := env.GoToDefinition(start) + name := env.Sandbox.Workdir.URIToPath(loc.URI) + if want := "lower/lower.go"; name != want { + t.Errorf("GoToDefinition: got file %q, want %q", name, want) + } + if want := env.RegexpSearch("lower/lower.go", `bar`); loc != want { + t.Errorf("GoToDefinition: got position %v, want %v", loc, want) + } + }) +} + +const linknameDefinitionReverse = ` +-- go.mod -- +module mod.com + +-- upper/upper.s -- + +-- upper/upper.go -- +package upper + +import ( + _ "mod.com/middle" +) + +func foo() string + +-- middle/middle.go -- +package middle + +import ( + _ "mod.com/lower" +) + +-- lower/lower.go -- +package lower + +import _ "unsafe" + +//go:linkname bar mod.com/upper.foo +func bar() string { + return "bar as foo" +}` + +func TestGoToLinknameDefinitionInReverseDep(t *testing.T) { + Run(t, linknameDefinitionReverse, func(t *testing.T, env *Env) { + env.OpenFile("lower/lower.go") + + // Jump from directives 2nd arg. + start := env.RegexpSearch("lower/lower.go", `upper.foo`) + loc := env.GoToDefinition(start) + name := env.Sandbox.Workdir.URIToPath(loc.URI) + if want := "upper/upper.go"; name != want { + t.Errorf("GoToDefinition: got file %q, want %q", name, want) + } + if want := env.RegexpSearch("upper/upper.go", `foo`); loc != want { + t.Errorf("GoToDefinition: got position %v, want %v", loc, want) } }) } @@ -66,19 +164,21 @@ func main() { func TestGoToStdlibDefinition_Issue37045(t *testing.T) { Run(t, stdlibDefinition, func(t *testing.T, env *Env) { env.OpenFile("main.go") - name, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Printf)`)) + loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Printf)`)) + name := env.Sandbox.Workdir.URIToPath(loc.URI) if got, want := path.Base(name), "print.go"; got != want { t.Errorf("GoToDefinition: got file %q, want %q", name, want) } // Test that we can jump to definition from outside our workspace. // See golang.org/issues/37045. - newName, newPos := env.GoToDefinition(name, pos) + newLoc := env.GoToDefinition(loc) + newName := env.Sandbox.Workdir.URIToPath(newLoc.URI) if newName != name { t.Errorf("GoToDefinition is not idempotent: got %q, want %q", newName, name) } - if newPos != pos { - t.Errorf("GoToDefinition is not idempotent: got %v, want %v", newPos, pos) + if newLoc != loc { + t.Errorf("GoToDefinition is not idempotent: got %v, want %v", newLoc, loc) } }) } @@ -86,24 +186,24 @@ func TestGoToStdlibDefinition_Issue37045(t *testing.T) { func TestUnexportedStdlib_Issue40809(t *testing.T) { Run(t, stdlibDefinition, func(t *testing.T, env *Env) { env.OpenFile("main.go") - name, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Printf)`)) - env.OpenFile(name) + loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Printf)`)) + name := env.Sandbox.Workdir.URIToPath(loc.URI) - pos := env.RegexpSearch(name, `:=\s*(newPrinter)\(\)`) + loc = env.RegexpSearch(name, `:=\s*(newPrinter)\(\)`) // Check that we can find references on a reference - refs := env.References(name, pos) + refs := env.References(loc) if len(refs) < 5 { t.Errorf("expected 5+ references to newPrinter, found: %#v", refs) } - name, pos = env.GoToDefinition(name, pos) - content, _ := env.Hover(name, pos) + loc = env.GoToDefinition(loc) + content, _ := env.Hover(loc) if !strings.Contains(content.Value, "newPrinter") { t.Fatal("definition of newPrinter went to the incorrect place") } // And on the definition too. - refs = env.References(name, pos) + refs = env.References(loc) if len(refs) < 5 { t.Errorf("expected 5+ references to newPrinter, found: %#v", refs) } @@ -127,13 +227,13 @@ func main() { }` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error")) + content, _ := env.Hover(env.RegexpSearch("main.go", "Error")) if content == nil { t.Fatalf("nil hover content for Error") } want := "```go\nfunc (error).Error() string\n```" if content.Value != want { - t.Fatalf("hover failed:\n%s", tests.Diff(t, want, content.Value)) + t.Fatalf("hover failed:\n%s", compare.Text(want, content.Value)) } }) } @@ -153,24 +253,19 @@ func main() {} ` for _, tt := range []struct { wantLinks int - wantDef bool importShortcut string }{ - {1, false, "Link"}, - {0, true, "Definition"}, - {1, true, "Both"}, + {1, "Link"}, + {0, "Definition"}, + {1, "Both"}, } { t.Run(tt.importShortcut, func(t *testing.T) { WithOptions( - EditorConfig{ - ImportShortcut: tt.importShortcut, - }, + Settings{"importShortcut": tt.importShortcut}, ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"fmt"`)) - if !tt.wantDef && (file != "" || pos != (fake.Pos{})) { - t.Fatalf("expected no definition, got one: %s:%v", file, pos) - } else if tt.wantDef && file == "" && pos == (fake.Pos{}) { + loc := env.GoToDefinition(env.RegexpSearch("main.go", `"fmt"`)) + if loc == (protocol.Location{}) { t.Fatalf("expected definition, got none") } links := env.DocumentLink("main.go") @@ -217,7 +312,7 @@ func main() {} Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - _, pos, err := env.Editor.GoToTypeDefinition(env.Ctx, "main.go", env.RegexpSearch("main.go", tt.re)) + loc, err := env.Editor.GoToTypeDefinition(env.Ctx, env.RegexpSearch("main.go", tt.re)) if tt.wantError { if err == nil { t.Fatal("expected error, got nil") @@ -228,9 +323,9 @@ func main() {} t.Fatalf("expected nil error, got %s", err) } - typePos := env.RegexpSearch("main.go", tt.wantTypeRe) - if pos != typePos { - t.Errorf("invalid pos: want %+v, got %+v", typePos, pos) + typeLoc := env.RegexpSearch("main.go", tt.wantTypeRe) + if loc != typeLoc { + t.Errorf("invalid pos: want %+v, got %+v", typeLoc, loc) } }) }) @@ -239,8 +334,6 @@ func main() {} // Test for golang/go#47825. func TestImportTestVariant(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - const mod = ` -- go.mod -- module mod.com @@ -275,7 +368,7 @@ package client ` Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("client/client_role_test.go") - env.GoToDefinition("client/client_role_test.go", env.RegexpSearch("client/client_role_test.go", "RoleSetup")) + env.GoToDefinition(env.RegexpSearch("client/client_role_test.go", "RoleSetup")) }) } @@ -289,3 +382,92 @@ func TestGoToCrashingDefinition_Issue49223(t *testing.T) { env.Editor.Server.Definition(env.Ctx, params) }) } + +// TestVendoringInvalidatesMetadata ensures that gopls uses the +// correct metadata even after an external 'go mod vendor' command +// causes packages to move; see issue #55995. +// See also TestImplementationsInVendor, which tests the same fix. +func TestVendoringInvalidatesMetadata(t *testing.T) { + t.Skip("golang/go#56169: file watching does not capture vendor dirs") + + const proxy = ` +-- other.com/b@v1.0.0/go.mod -- +module other.com/b +go 1.14 + +-- other.com/b@v1.0.0/b.go -- +package b +const K = 0 +` + const src = ` +-- go.mod -- +module example.com/a +go 1.14 +require other.com/b v1.0.0 + +-- go.sum -- +other.com/b v1.0.0 h1:1wb3PMGdet5ojzrKl+0iNksRLnOM9Jw+7amBNqmYwqk= +other.com/b v1.0.0/go.mod h1:TgHQFucl04oGT+vrUm/liAzukYHNxCwKNkQZEyn3m9g= + +-- a.go -- +package a +import "other.com/b" +const _ = b.K + +` + WithOptions( + ProxyFiles(proxy), + Modes(Default), // fails in 'experimental' mode + ).Run(t, src, func(t *testing.T, env *Env) { + // Enable to debug go.sum mismatch, which may appear as + // "module lookup disabled by GOPROXY=off", confusingly. + if false { + env.DumpGoSum(".") + } + + env.OpenFile("a.go") + refLoc := env.RegexpSearch("a.go", "K") // find "b.K" reference + + // Initially, b.K is defined in the module cache. + gotLoc := env.GoToDefinition(refLoc) + gotFile := env.Sandbox.Workdir.URIToPath(gotLoc.URI) + wantCache := filepath.ToSlash(env.Sandbox.GOPATH()) + "/pkg/mod/other.com/b@v1.0.0/b.go" + if gotFile != wantCache { + t.Errorf("GoToDefinition, before: got file %q, want %q", gotFile, wantCache) + } + + // Run 'go mod vendor' outside the editor. + if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil { + t.Fatalf("go mod vendor: %v", err) + } + + // Synchronize changes to watched files. + env.Await(env.DoneWithChangeWatchedFiles()) + + // Now, b.K is defined in the vendor tree. + gotLoc = env.GoToDefinition(refLoc) + wantVendor := "vendor/other.com/b/b.go" + if gotFile != wantVendor { + t.Errorf("GoToDefinition, after go mod vendor: got file %q, want %q", gotFile, wantVendor) + } + + // Delete the vendor tree. + if err := os.RemoveAll(env.Sandbox.Workdir.AbsPath("vendor")); err != nil { + t.Fatal(err) + } + // Notify the server of the deletion. + if err := env.Sandbox.Workdir.CheckForFileChanges(env.Ctx); err != nil { + t.Fatal(err) + } + + // Synchronize again. + env.Await(env.DoneWithChangeWatchedFiles()) + + // b.K is once again defined in the module cache. + gotLoc = env.GoToDefinition(gotLoc) + gotFile = env.Sandbox.Workdir.URIToPath(gotLoc.URI) + if gotFile != wantCache { + t.Errorf("GoToDefinition, after rm -rf vendor: got file %q, want %q", gotFile, wantCache) + } + }) +} diff --git a/gopls/internal/regtest/misc/embed_test.go b/gopls/internal/regtest/misc/embed_test.go index 2e66d7866ca..021fbfcc06d 100644 --- a/gopls/internal/regtest/misc/embed_test.go +++ b/gopls/internal/regtest/misc/embed_test.go @@ -6,12 +6,10 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/testenv" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestMissingPatternDiagnostic(t *testing.T) { - testenv.NeedsGo1Point(t, 16) const files = ` -- go.mod -- module example.com @@ -30,8 +28,13 @@ var foo string ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("x.go") - env.Await(env.DiagnosticAtRegexpWithMessage("x.go", `NONEXISTENT`, "no matching files found")) + env.AfterChange( + Diagnostics( + env.AtRegexp("x.go", `NONEXISTENT`), + WithMessage("no matching files found"), + ), + ) env.RegexpReplace("x.go", `NONEXISTENT`, "x.go") - env.Await(EmptyDiagnostics("x.go")) + env.AfterChange(NoDiagnostics(ForFile("x.go"))) }) } diff --git a/gopls/internal/regtest/misc/extract_test.go b/gopls/internal/regtest/misc/extract_test.go new file mode 100644 index 00000000000..23efffbb70e --- /dev/null +++ b/gopls/internal/regtest/misc/extract_test.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package misc + +import ( + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + + "golang.org/x/tools/gopls/internal/lsp/protocol" +) + +func TestExtractFunction(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.12 +-- main.go -- +package main + +func Foo() int { + a := 5 + return a +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + loc := env.RegexpSearch("main.go", `a := 5\n.*return a`) + actions, err := env.Editor.CodeAction(env.Ctx, loc, nil) + if err != nil { + t.Fatal(err) + } + + // Find the extract function code action. + var extractFunc *protocol.CodeAction + for _, action := range actions { + if action.Kind == protocol.RefactorExtract && action.Title == "Extract function" { + extractFunc = &action + break + } + } + if extractFunc == nil { + t.Fatal("could not find extract function action") + } + + env.ApplyCodeAction(*extractFunc) + want := `package main + +func Foo() int { + return newFunction() +} + +func newFunction() int { + a := 5 + return a +} +` + if got := env.BufferText("main.go"); got != want { + t.Fatalf("TestFillStruct failed:\n%s", compare.Text(want, got)) + } + }) +} diff --git a/gopls/internal/regtest/misc/failures_test.go b/gopls/internal/regtest/misc/failures_test.go index 23fccfd628d..42aa3721a34 100644 --- a/gopls/internal/regtest/misc/failures_test.go +++ b/gopls/internal/regtest/misc/failures_test.go @@ -7,12 +7,15 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" ) -// This test passes (TestHoverOnError in definition_test.go) without -// the //line directive +// This is a slight variant of TestHoverOnError in definition_test.go +// that includes a line directive, which makes no difference since +// gopls ignores line directives. func TestHoverFailure(t *testing.T) { + t.Skip("line directives //line ") const mod = ` -- go.mod -- module mod.com @@ -29,19 +32,27 @@ func main() { var err error err.Error() }` - WithOptions(SkipLogs()).Run(t, mod, func(t *testing.T, env *Env) { + Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error")) - // without the //line comment content would be non-nil - if content != nil { - t.Fatalf("expected nil hover content for Error") + content, _ := env.Hover(env.RegexpSearch("main.go", "Error")) + if content == nil { + t.Fatalf("Hover('Error') returned nil") + } + want := "```go\nfunc (error).Error() string\n```" + if content.Value != want { + t.Fatalf("wrong Hover('Error') content:\n%s", compare.Text(want, content.Value)) } }) } -// badPackageDup contains a duplicate definition of the 'a' const. -// this is from diagnostics_test.go, -const badPackageDup = ` +// This test demonstrates a case where gopls is not at all confused by +// line directives, because it completely ignores them. +func TestFailingDiagnosticClearingOnEdit(t *testing.T) { + t.Skip("line directives //line ") + // badPackageDup contains a duplicate definition of the 'a' const. + // This is a minor variant of TestDiagnosticClearingOnEdit from + // diagnostics_test.go, with a line directive, which makes no difference. + const badPackageDup = ` -- go.mod -- module mod.com @@ -56,15 +67,18 @@ package consts const a = 2 ` -func TestFailingDiagnosticClearingOnEdit(t *testing.T) { Run(t, badPackageDup, func(t *testing.T, env *Env) { env.OpenFile("b.go") - // no diagnostics for any files, but there should be - env.Await(NoDiagnostics("a.go"), NoDiagnostics("b.go")) + env.AfterChange( + Diagnostics(env.AtRegexp("b.go", `a = 2`), WithMessage("a redeclared")), + Diagnostics(env.AtRegexp("a.go", `a = 1`), WithMessage("other declaration")), + ) // Fix the error by editing the const name in b.go to `b`. env.RegexpReplace("b.go", "(a) = 2", "b") - - // The diagnostics that weren't sent above should now be cleared. + env.AfterChange( + NoDiagnostics(ForFile("a.go")), + NoDiagnostics(ForFile("b.go")), + ) }) } diff --git a/gopls/internal/regtest/misc/fix_test.go b/gopls/internal/regtest/misc/fix_test.go index 8318ae557da..7a5e530e307 100644 --- a/gopls/internal/regtest/misc/fix_test.go +++ b/gopls/internal/regtest/misc/fix_test.go @@ -7,10 +7,10 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) // A basic test for fillstruct, now that it uses a command. @@ -34,11 +34,7 @@ func Foo() { ` Run(t, basic, func(t *testing.T, env *Env) { env.OpenFile("main.go") - pos := env.RegexpSearch("main.go", "Info{}").ToProtocolPosition() - if err := env.Editor.RefactorRewrite(env.Ctx, "main.go", &protocol.Range{ - Start: pos, - End: pos, - }); err != nil { + if err := env.Editor.RefactorRewrite(env.Ctx, env.RegexpSearch("main.go", "Info{}")); err != nil { t.Fatal(err) } want := `package main @@ -55,8 +51,8 @@ func Foo() { } } ` - if got := env.Editor.BufferText("main.go"); got != want { - t.Fatalf("TestFillStruct failed:\n%s", tests.Diff(t, want, got)) + if got := env.BufferText("main.go"); got != want { + t.Fatalf("TestFillStruct failed:\n%s", compare.Text(want, got)) } }) } @@ -77,11 +73,11 @@ func Foo() error { Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams - env.Await(OnceMet( + env.AfterChange( // The error message here changed in 1.18; "return values" covers both forms. - env.DiagnosticAtRegexpWithMessage("main.go", `return`, "return values"), + Diagnostics(env.AtRegexp("main.go", `return`), WithMessage("return values")), ReadDiagnostics("main.go", &d), - )) + ) codeActions := env.CodeAction("main.go", d.Diagnostics) if len(codeActions) != 2 { t.Fatalf("expected 2 code actions, got %v", len(codeActions)) @@ -102,6 +98,6 @@ func Foo() error { t.Fatalf("expected fixall code action, got none") } env.ApplyQuickFixes("main.go", d.Diagnostics) - env.Await(EmptyDiagnostics("main.go")) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) }) } diff --git a/gopls/internal/regtest/misc/formatting_test.go b/gopls/internal/regtest/misc/formatting_test.go index 75d8f622458..ee8098cc93b 100644 --- a/gopls/internal/regtest/misc/formatting_test.go +++ b/gopls/internal/regtest/misc/formatting_test.go @@ -8,9 +8,9 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" - - "golang.org/x/tools/internal/lsp/tests" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/testenv" ) const unformattedProgram = ` @@ -34,10 +34,10 @@ func TestFormatting(t *testing.T) { Run(t, unformattedProgram, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.FormatBuffer("main.go") - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") want := env.ReadWorkspaceFile("main.go.golden") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -56,10 +56,10 @@ func f() {} Run(t, onelineProgram, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.FormatBuffer("a.go") - got := env.Editor.BufferText("a.go") + got := env.BufferText("a.go") want := env.ReadWorkspaceFile("a.go.formatted") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -80,10 +80,10 @@ func f() { fmt.Println() } Run(t, onelineProgramA, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.OrganizeImports("a.go") - got := env.Editor.BufferText("a.go") + got := env.BufferText("a.go") want := env.ReadWorkspaceFile("a.go.imported") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -101,10 +101,10 @@ func f() {} Run(t, onelineProgramB, func(t *testing.T, env *Env) { env.OpenFile("a.go") env.OrganizeImports("a.go") - got := env.Editor.BufferText("a.go") + got := env.BufferText("a.go") want := env.ReadWorkspaceFile("a.go.imported") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -147,10 +147,10 @@ func TestOrganizeImports(t *testing.T) { Run(t, disorganizedProgram, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OrganizeImports("main.go") - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") want := env.ReadWorkspaceFile("main.go.organized") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -159,10 +159,10 @@ func TestFormattingOnSave(t *testing.T) { Run(t, disorganizedProgram, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.SaveBuffer("main.go") - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") want := env.ReadWorkspaceFile("main.go.formatted") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } @@ -259,10 +259,10 @@ func main() { env.CreateBuffer("main.go", crlf) env.Await(env.DoneWithOpen()) env.OrganizeImports("main.go") - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") got = strings.ReplaceAll(got, "\r\n", "\n") // convert everything to LF for simplicity if tt.want != got { - t.Errorf("unexpected content after save:\n%s", tests.Diff(t, tt.want, got)) + t.Errorf("unexpected content after save:\n%s", compare.Text(tt.want, got)) } }) }) @@ -303,6 +303,7 @@ func main() { } func TestGofumptFormatting(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // Exercise some gofumpt formatting rules: // - No empty lines following an assignment operator @@ -352,18 +353,16 @@ const Bar = 42 ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "gofumpt": true, - }, + Settings{ + "gofumpt": true, }, ).Run(t, input, func(t *testing.T, env *Env) { env.OpenFile("foo.go") env.FormatBuffer("foo.go") - got := env.Editor.BufferText("foo.go") + got := env.BufferText("foo.go") want := env.ReadWorkspaceFile("foo.go.formatted") if got != want { - t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got)) + t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got)) } }) } diff --git a/gopls/internal/regtest/misc/generate_test.go b/gopls/internal/regtest/misc/generate_test.go index 1dc22d737ba..85dd9a732bb 100644 --- a/gopls/internal/regtest/misc/generate_test.go +++ b/gopls/internal/regtest/misc/generate_test.go @@ -12,12 +12,10 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestGenerateProgress(t *testing.T) { - t.Skipf("skipping flaky test: https://golang.org/issue/49901") - const generatedWorkspace = ` -- go.mod -- module fake.test @@ -61,15 +59,14 @@ func main() { ` Run(t, generatedWorkspace, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", "lib1.(Answer)"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", "lib1.(Answer)")), ) env.RunGenerate("./lib1") env.RunGenerate("./lib2") - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - EmptyDiagnostics("main.go")), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } diff --git a/gopls/internal/regtest/misc/highlight_test.go b/gopls/internal/regtest/misc/highlight_test.go index affbffd66f4..8835d608ecf 100644 --- a/gopls/internal/regtest/misc/highlight_test.go +++ b/gopls/internal/regtest/misc/highlight_test.go @@ -8,9 +8,8 @@ import ( "sort" "testing" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestWorkspacePackageHighlight(t *testing.T) { @@ -31,9 +30,9 @@ func main() { Run(t, mod, func(t *testing.T, env *Env) { const file = "main.go" env.OpenFile(file) - _, pos := env.GoToDefinition(file, env.RegexpSearch(file, `var (A) string`)) + loc := env.GoToDefinition(env.RegexpSearch(file, `var (A) string`)) - checkHighlights(env, file, pos, 3) + checkHighlights(env, loc, 3) }) } @@ -54,10 +53,11 @@ func main() { Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt\.(Printf)`)) - pos := env.RegexpSearch(file, `func Printf\((format) string`) + defLoc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt\.(Printf)`)) + file := env.Sandbox.Workdir.URIToPath(defLoc.URI) + loc := env.RegexpSearch(file, `func Printf\((format) string`) - checkHighlights(env, file, pos, 2) + checkHighlights(env, loc, 2) }) } @@ -113,26 +113,28 @@ func main() {}` ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"example.com/global"`)) - pos := env.RegexpSearch(file, `const (A)`) - checkHighlights(env, file, pos, 4) + defLoc := env.GoToDefinition(env.RegexpSearch("main.go", `"example.com/global"`)) + file := env.Sandbox.Workdir.URIToPath(defLoc.URI) + loc := env.RegexpSearch(file, `const (A)`) + checkHighlights(env, loc, 4) - file, _ = env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"example.com/local"`)) - pos = env.RegexpSearch(file, `const (b)`) - checkHighlights(env, file, pos, 5) + defLoc = env.GoToDefinition(env.RegexpSearch("main.go", `"example.com/local"`)) + file = env.Sandbox.Workdir.URIToPath(defLoc.URI) + loc = env.RegexpSearch(file, `const (b)`) + checkHighlights(env, loc, 5) }) } -func checkHighlights(env *Env, file string, pos fake.Pos, highlightCount int) { +func checkHighlights(env *Env, loc protocol.Location, highlightCount int) { t := env.T t.Helper() - highlights := env.DocumentHighlight(file, pos) + highlights := env.DocumentHighlight(loc) if len(highlights) != highlightCount { t.Fatalf("expected %v highlight(s), got %v", highlightCount, len(highlights)) } - references := env.References(file, pos) + references := env.References(loc) if len(highlights) != len(references) { t.Fatalf("number of highlights and references is expected to be equal: %v != %v", len(highlights), len(references)) } diff --git a/gopls/internal/regtest/misc/hover_test.go b/gopls/internal/regtest/misc/hover_test.go index 4701b075acc..8b5f089ab3d 100644 --- a/gopls/internal/regtest/misc/hover_test.go +++ b/gopls/internal/regtest/misc/hover_test.go @@ -9,8 +9,9 @@ import ( "strings" "testing" - "golang.org/x/tools/internal/lsp/fake" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" "golang.org/x/tools/internal/testenv" ) @@ -60,21 +61,22 @@ func main() { ProxyFiles(proxy), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") - mixedPos := env.RegexpSearch("main.go", "Mixed") - got, _ := env.Hover("main.go", mixedPos) + mixedLoc := env.RegexpSearch("main.go", "Mixed") + got, _ := env.Hover(mixedLoc) if !strings.Contains(got.Value, "unexported") { t.Errorf("Workspace hover: missing expected field 'unexported'. Got:\n%q", got.Value) } - cacheFile, _ := env.GoToDefinition("main.go", mixedPos) - argPos := env.RegexpSearch(cacheFile, "printMixed.*(Mixed)") - got, _ = env.Hover(cacheFile, argPos) + cacheLoc := env.GoToDefinition(mixedLoc) + cacheFile := env.Sandbox.Workdir.URIToPath(cacheLoc.URI) + argLoc := env.RegexpSearch(cacheFile, "printMixed.*(Mixed)") + got, _ = env.Hover(argLoc) if !strings.Contains(got.Value, "unexported") { t.Errorf("Non-workspace hover: missing expected field 'unexported'. Got:\n%q", got.Value) } - exportedFieldPos := env.RegexpSearch("main.go", "Exported") - got, _ = env.Hover("main.go", exportedFieldPos) + exportedFieldLoc := env.RegexpSearch("main.go", "Exported") + got, _ = env.Hover(exportedFieldLoc) if !strings.Contains(got.Value, "comment") { t.Errorf("Workspace hover: missing comment for field 'Exported'. Got:\n%q", got.Value) } @@ -82,7 +84,6 @@ func main() { } func TestHoverIntLiteral(t *testing.T) { - testenv.NeedsGo1Point(t, 13) const source = ` -- main.go -- package main @@ -99,13 +100,13 @@ func main() { Run(t, source, func(t *testing.T, env *Env) { env.OpenFile("main.go") hexExpected := "58190" - got, _ := env.Hover("main.go", env.RegexpSearch("main.go", "hex")) + got, _ := env.Hover(env.RegexpSearch("main.go", "hex")) if got != nil && !strings.Contains(got.Value, hexExpected) { t.Errorf("Hover: missing expected field '%s'. Got:\n%q", hexExpected, got.Value) } binExpected := "73" - got, _ = env.Hover("main.go", env.RegexpSearch("main.go", "bigBin")) + got, _ = env.Hover(env.RegexpSearch("main.go", "bigBin")) if got != nil && !strings.Contains(got.Value, binExpected) { t.Errorf("Hover: missing expected field '%s'. Got:\n%q", binExpected, got.Value) } @@ -114,7 +115,8 @@ func main() { // Tests that hovering does not trigger the panic in golang/go#48249. func TestPanicInHoverBrokenCode(t *testing.T) { - testenv.NeedsGo1Point(t, 13) + // Note: this test can not be expressed as a marker test, as it must use + // content without a trailing newline. const source = ` -- main.go -- package main @@ -122,7 +124,7 @@ package main type Example struct` Run(t, source, func(t *testing.T, env *Env) { env.OpenFile("main.go") - env.Editor.Hover(env.Ctx, "main.go", env.RegexpSearch("main.go", "Example")) + env.Editor.Hover(env.Ctx, env.RegexpSearch("main.go", "Example")) }) } @@ -138,14 +140,11 @@ package main Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.EditBuffer("main.go", fake.NewEdit(0, 0, 1, 0, "package main\nfunc main() {\nconst x = `\nfoo\n`\n}")) - env.Editor.Hover(env.Ctx, "main.go", env.RegexpSearch("main.go", "foo")) + env.Editor.Hover(env.Ctx, env.RegexpSearch("main.go", "foo")) }) } func TestHoverImport(t *testing.T) { - // For Go.13 and earlier versions, Go will try to download imported but missing packages. This behavior breaks the - // workspace as Go fails to download non-existent package "mod.com/lib4" - testenv.NeedsGo1Point(t, 14) const packageDoc1 = "Package lib1 hover documentation" const packageDoc2 = "Package lib2 hover documentation" tests := []struct { @@ -209,15 +208,128 @@ func main() { Run(t, source, func(t *testing.T, env *Env) { env.OpenFile("main.go") for _, test := range tests { - got, _ := env.Hover("main.go", env.RegexpSearch("main.go", test.hoverPackage)) + got, _ := env.Hover(env.RegexpSearch("main.go", test.hoverPackage)) + if got == nil { + t.Error("nil hover for", test.hoverPackage) + continue + } if !strings.Contains(got.Value, test.want) { t.Errorf("Hover: got:\n%q\nwant:\n%q", got.Value, test.want) } } - got, _ := env.Hover("main.go", env.RegexpSearch("main.go", "mod.com/lib4")) + got, _ := env.Hover(env.RegexpSearch("main.go", "mod.com/lib4")) if got != nil { t.Errorf("Hover: got:\n%q\nwant:\n%v", got.Value, nil) } }) } + +// for x/tools/gopls: unhandled named anchor on the hover #57048 +func TestHoverTags(t *testing.T) { + const source = ` +-- go.mod -- +module mod.com + +go 1.19 + +-- lib/a.go -- + +// variety of execution modes. +// +// # Test package setup +// +// The regression test package uses a couple of uncommon patterns to reduce +package lib + +-- a.go -- + package main + import "mod.com/lib" + + const A = 1 + +} +` + Run(t, source, func(t *testing.T, env *Env) { + t.Run("tags", func(t *testing.T) { + env.OpenFile("a.go") + z := env.RegexpSearch("a.go", "lib") + t.Logf("%#v", z) + got, _ := env.Hover(env.RegexpSearch("a.go", "lib")) + if strings.Contains(got.Value, "{#hdr-") { + t.Errorf("Hover: got {#hdr- tag:\n%q", got) + } + }) + }) +} + +// This is a regression test for Go issue #57625. +func TestHoverModMissingModuleStmt(t *testing.T) { + const source = ` +-- go.mod -- +go 1.16 +` + Run(t, source, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + env.Hover(env.RegexpSearch("go.mod", "go")) // no panic + }) +} + +func TestHoverCompletionMarkdown(t *testing.T) { + testenv.NeedsGo1Point(t, 19) + const source = ` +-- go.mod -- +module mod.com +go 1.19 +-- main.go -- +package main +// Just says [hello]. +// +// [hello]: https://en.wikipedia.org/wiki/Hello +func Hello() string { + Hello() //Here + return "hello" +} +` + Run(t, source, func(t *testing.T, env *Env) { + // Hover, Completion, and SignatureHelp should all produce markdown + // check that the markdown for SignatureHelp and Completion are + // the same, and contained in that for Hover (up to trailing \n) + env.OpenFile("main.go") + loc := env.RegexpSearch("main.go", "func (Hello)") + hover, _ := env.Hover(loc) + hoverContent := hover.Value + + loc = env.RegexpSearch("main.go", "//Here") + loc.Range.Start.Character -= 3 // Hello(_) //Here + completions := env.Completion(loc) + signatures := env.SignatureHelp(loc) + + if len(completions.Items) != 1 { + t.Errorf("got %d completions, expected 1", len(completions.Items)) + } + if len(signatures.Signatures) != 1 { + t.Errorf("got %d signatures, expected 1", len(signatures.Signatures)) + } + item := completions.Items[0].Documentation.Value + var itemContent string + if x, ok := item.(protocol.MarkupContent); !ok || x.Kind != protocol.Markdown { + t.Fatalf("%#v is not markdown", item) + } else { + itemContent = strings.Trim(x.Value, "\n") + } + sig := signatures.Signatures[0].Documentation.Value + var sigContent string + if x, ok := sig.(protocol.MarkupContent); !ok || x.Kind != protocol.Markdown { + t.Fatalf("%#v is not markdown", item) + } else { + sigContent = x.Value + } + if itemContent != sigContent { + t.Errorf("item:%q not sig:%q", itemContent, sigContent) + } + if !strings.Contains(hoverContent, itemContent) { + t.Errorf("hover:%q does not containt sig;%q", hoverContent, sigContent) + } + }) +} diff --git a/gopls/internal/regtest/misc/import_test.go b/gopls/internal/regtest/misc/import_test.go index d5b6bcf43f1..30986ba5077 100644 --- a/gopls/internal/regtest/misc/import_test.go +++ b/gopls/internal/regtest/misc/import_test.go @@ -8,10 +8,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" ) func TestAddImport(t *testing.T) { @@ -49,9 +49,9 @@ func main() { Command: "gopls.add_import", Arguments: cmd.Arguments, }, nil) - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") if got != want { - t.Fatalf("gopls.add_import failed\n%s", tests.Diff(t, want, got)) + t.Fatalf("gopls.add_import failed\n%s", compare.Text(want, got)) } }) } diff --git a/gopls/internal/regtest/misc/imports_test.go b/gopls/internal/regtest/misc/imports_test.go index 4ae2be6bf10..bea955220a4 100644 --- a/gopls/internal/regtest/misc/imports_test.go +++ b/gopls/internal/regtest/misc/imports_test.go @@ -11,9 +11,9 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/testenv" ) @@ -47,7 +47,7 @@ func TestZ(t *testing.T) { Run(t, needs, func(t *testing.T, env *Env) { env.CreateBuffer("a_test.go", ntest) env.SaveBuffer("a_test.go") - got := env.Editor.BufferText("a_test.go") + got := env.BufferText("a_test.go") if want != got { t.Errorf("got\n%q, wanted\n%q", got, want) } @@ -76,7 +76,7 @@ func main() { env.OrganizeImports("main.go") actions := env.CodeAction("main.go", nil) if len(actions) > 0 { - got := env.Editor.BufferText("main.go") + got := env.BufferText("main.go") t.Errorf("unexpected actions %#v", actions) if got == vim1 { t.Errorf("no changes") @@ -146,23 +146,21 @@ import "example.com/x" var _, _ = x.X, y.Y ` - testenv.NeedsGo1Point(t, 15) - modcache, err := ioutil.TempDir("", "TestGOMODCACHE-modcache") if err != nil { t.Fatal(err) } defer os.RemoveAll(modcache) - editorConfig := EditorConfig{Env: map[string]string{"GOMODCACHE": modcache}} WithOptions( - editorConfig, + EnvVars{"GOMODCACHE": modcache}, ProxyFiles(proxy), ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") - env.Await(env.DiagnosticAtRegexp("main.go", `y.Y`)) + env.AfterChange(Diagnostics(env.AtRegexp("main.go", `y.Y`))) env.SaveBuffer("main.go") - env.Await(EmptyDiagnostics("main.go")) - path, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `y.(Y)`)) + env.AfterChange(NoDiagnostics(ForFile("main.go"))) + loc := env.GoToDefinition(env.RegexpSearch("main.go", `y.(Y)`)) + path := env.Sandbox.Workdir.URIToPath(loc.URI) if !strings.HasPrefix(path, filepath.ToSlash(modcache)) { t.Errorf("found module dependency outside of GOMODCACHE: got %v, wanted subdir of %v", path, filepath.ToSlash(modcache)) } @@ -202,15 +200,59 @@ func TestA(t *testing.T) { Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/a.go", "os.Stat"), - ReadDiagnostics("a/a.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "os.Stat")), + ReadDiagnostics("a/a.go", &d), ) env.ApplyQuickFixes("a/a.go", d.Diagnostics) - env.Await( - EmptyDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) } + +// Test for golang/go#52784 +func TestGoWorkImports(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const pkg = ` +-- go.work -- +go 1.19 + +use ( + ./caller + ./mod +) +-- caller/go.mod -- +module caller.com + +go 1.18 + +require mod.com v0.0.0 + +replace mod.com => ../mod +-- caller/caller.go -- +package main + +func main() { + a.Test() +} +-- mod/go.mod -- +module mod.com + +go 1.18 +-- mod/a/a.go -- +package a + +func Test() { +} +` + Run(t, pkg, func(t *testing.T, env *Env) { + env.OpenFile("caller/caller.go") + env.AfterChange(Diagnostics(env.AtRegexp("caller/caller.go", "a.Test"))) + + // Saving caller.go should trigger goimports, which should find a.Test in + // the mod.com module, thanks to the go.work file. + env.SaveBuffer("caller/caller.go") + env.AfterChange(NoDiagnostics(ForFile("caller/caller.go"))) + }) +} diff --git a/gopls/internal/regtest/misc/leak_test.go b/gopls/internal/regtest/misc/leak_test.go new file mode 100644 index 00000000000..28a5843ec2e --- /dev/null +++ b/gopls/internal/regtest/misc/leak_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package misc + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/lsprpc" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/jsonrpc2" + "golang.org/x/tools/internal/jsonrpc2/servertest" +) + +// Test for golang/go#57222. +func TestCacheLeak(t *testing.T) { + // TODO(rfindley): either fix this test with additional instrumentation, or + // delete it. + t.Skip("This test races with cache eviction.") + const files = `-- a.go -- +package a + +func _() { + println("1") +} +` + c := cache.New(nil, nil) + env := setupEnv(t, files, c) + env.Await(InitialWorkspaceLoad) + env.OpenFile("a.go") + + // Make a couple edits to stabilize cache state. + // + // For some reason, after only one edit we're left with two parsed files + // (perhaps because something had to ParseHeader). If this test proves flaky, + // we'll need to investigate exactly what is causing various parse modes to + // be present (or rewrite the test to be more tolerant, for example make ~100 + // modifications and assert that we're within a few of where we're started). + env.RegexpReplace("a.go", "1", "2") + env.RegexpReplace("a.go", "2", "3") + env.AfterChange() + + // Capture cache state, make an arbitrary change, and wait for gopls to do + // its work. Afterward, we should have the exact same number of parsed + before := c.MemStats() + env.RegexpReplace("a.go", "3", "4") + env.AfterChange() + after := c.MemStats() + + if diff := cmp.Diff(before, after); diff != "" { + t.Errorf("store objects differ after change (-before +after)\n%s", diff) + } +} + +// setupEnv creates a new sandbox environment for editing the txtar encoded +// content of files. It uses a new gopls instance backed by the Cache c. +func setupEnv(t *testing.T, files string, c *cache.Cache) *Env { + ctx := debug.WithInstance(context.Background(), "", "off") + server := lsprpc.NewStreamServer(c, false, hooks.Options) + ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream) + s, err := fake.NewSandbox(&fake.SandboxConfig{ + Files: fake.UnpackTxt(files), + }) + if err != nil { + t.Fatal(err) + } + + a := NewAwaiter(s.Workdir) + e, err := fake.NewEditor(s, fake.EditorConfig{}).Connect(ctx, ts, a.Hooks()) + + return &Env{ + T: t, + Ctx: ctx, + Editor: e, + Sandbox: s, + Awaiter: a, + } +} diff --git a/gopls/internal/regtest/misc/link_test.go b/gopls/internal/regtest/misc/link_test.go index e84f6377eeb..8a64c54e225 100644 --- a/gopls/internal/regtest/misc/link_test.go +++ b/gopls/internal/regtest/misc/link_test.go @@ -8,14 +8,10 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" - - "golang.org/x/tools/internal/testenv" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestHoverAndDocumentLink(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - const program = ` -- go.mod -- module mod.test @@ -53,36 +49,38 @@ const Hello = "Hello" env.OpenFile("main.go") env.OpenFile("go.mod") - modLink := "https://pkg.go.dev/mod/import.test@v1.2.3?utm_source=gopls" - pkgLink := "https://pkg.go.dev/import.test@v1.2.3/pkg?utm_source=gopls" + modLink := "https://pkg.go.dev/mod/import.test@v1.2.3" + pkgLink := "https://pkg.go.dev/import.test@v1.2.3/pkg" // First, check that we get the expected links via hover and documentLink. - content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello")) + content, _ := env.Hover(env.RegexpSearch("main.go", "pkg.Hello")) if content == nil || !strings.Contains(content.Value, pkgLink) { t.Errorf("hover: got %v in main.go, want contains %q", content, pkgLink) } - content, _ = env.Hover("go.mod", env.RegexpSearch("go.mod", "import.test")) + content, _ = env.Hover(env.RegexpSearch("go.mod", "import.test")) if content == nil || !strings.Contains(content.Value, pkgLink) { t.Errorf("hover: got %v in go.mod, want contains %q", content, pkgLink) } links := env.DocumentLink("main.go") if len(links) != 1 || links[0].Target != pkgLink { - t.Errorf("documentLink: got %v for main.go, want link to %q", links, pkgLink) + t.Errorf("documentLink: got links %+v for main.go, want one link with target %q", links, pkgLink) } links = env.DocumentLink("go.mod") if len(links) != 1 || links[0].Target != modLink { - t.Errorf("documentLink: got %v for go.mod, want link to %q", links, modLink) + t.Errorf("documentLink: got links %+v for go.mod, want one link with target %q", links, modLink) } // Then change the environment to make these links private. - env.ChangeEnv(map[string]string{"GOPRIVATE": "import.test"}) + cfg := env.Editor.Config() + cfg.Env = map[string]string{"GOPRIVATE": "import.test"} + env.ChangeConfiguration(cfg) // Finally, verify that the links are gone. - content, _ = env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello")) + content, _ = env.Hover(env.RegexpSearch("main.go", "pkg.Hello")) if content == nil || strings.Contains(content.Value, pkgLink) { t.Errorf("hover: got %v in main.go, want non-empty hover without %q", content, pkgLink) } - content, _ = env.Hover("go.mod", env.RegexpSearch("go.mod", "import.test")) + content, _ = env.Hover(env.RegexpSearch("go.mod", "import.test")) if content == nil || strings.Contains(content.Value, modLink) { t.Errorf("hover: got %v in go.mod, want contains %q", content, modLink) } diff --git a/gopls/internal/regtest/misc/misc_test.go b/gopls/internal/regtest/misc/misc_test.go index c553bdb3780..12aea697c15 100644 --- a/gopls/internal/regtest/misc/misc_test.go +++ b/gopls/internal/regtest/misc/misc_test.go @@ -8,8 +8,8 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" ) func TestMain(m *testing.M) { diff --git a/gopls/internal/regtest/misc/multiple_adhoc_test.go b/gopls/internal/regtest/misc/multiple_adhoc_test.go index 5f803e4e385..981b74efca0 100644 --- a/gopls/internal/regtest/misc/multiple_adhoc_test.go +++ b/gopls/internal/regtest/misc/multiple_adhoc_test.go @@ -7,7 +7,7 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestMultipleAdHocPackages(t *testing.T) { @@ -30,14 +30,14 @@ func main() () { } `, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - if list := env.Completion("a/a.go", env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 { + if list := env.Completion(env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 { t.Fatal("expected completions, got none") } env.OpenFile("a/b.go") - if list := env.Completion("a/b.go", env.RegexpSearch("a/b.go", "Println")); list == nil || len(list.Items) == 0 { + if list := env.Completion(env.RegexpSearch("a/b.go", "Println")); list == nil || len(list.Items) == 0 { t.Fatal("expected completions, got none") } - if list := env.Completion("a/a.go", env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 { + if list := env.Completion(env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 { t.Fatal("expected completions, got none") } }) diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index 768251680f9..e1f5d8e0502 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -5,9 +5,15 @@ package misc import ( + "fmt" + "os" + "sort" + "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestStdlibReferences(t *testing.T) { @@ -28,12 +34,13 @@ func main() { Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") - file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Print)`)) - refs, err := env.Editor.References(env.Ctx, file, pos) + loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Print)`)) + refs, err := env.Editor.References(env.Ctx, loc) if err != nil { t.Fatal(err) } if len(refs) != 2 { + // TODO(adonovan): make this assertion less maintainer-hostile. t.Fatalf("got %v reference(s), want 2", len(refs)) } // The first reference is guaranteed to be the definition. @@ -43,8 +50,10 @@ func main() { }) } -// This reproduces and tests golang/go#48400. -func TestReferencesPanicOnError(t *testing.T) { +// This is a regression test for golang/go#48400 (a panic). +func TestReferencesOnErrorMethod(t *testing.T) { + // Ideally this would actually return the correct answer, + // instead of merely failing gracefully. const files = ` -- go.mod -- module mod.com @@ -70,14 +79,321 @@ func _() { ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") - file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `Error`)) - refs, err := env.Editor.References(env.Ctx, file, pos) - if err == nil { - t.Fatalf("expected error for references, instead got %v", refs) + loc := env.GoToDefinition(env.RegexpSearch("main.go", `Error`)) + refs, err := env.Editor.References(env.Ctx, loc) + if err != nil { + t.Fatalf("references on (*s).Error failed: %v", err) + } + // TODO(adonovan): this test is crying out for marker support in regtests. + var buf strings.Builder + for _, ref := range refs { + fmt.Fprintf(&buf, "%s %s\n", env.Sandbox.Workdir.URIToPath(ref.URI), ref.Range) } - wantErr := "no position for func (error).Error() string" - if err.Error() != wantErr { - t.Fatalf("expected error with message %s, instead got %s", wantErr, err.Error()) + got := buf.String() + want := "main.go 8:10-8:15\n" + // (*s).Error decl + "main.go 14:7-14:12\n" // s.Error() call + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected references on (*s).Error (-want +got):\n%s", diff) + } + }) +} + +func TestPackageReferences(t *testing.T) { + tests := []struct { + packageName string + wantRefCount int + wantFiles []string + }{ + { + "lib1", + 3, + []string{ + "main.go", + "lib1/a.go", + "lib1/b.go", + }, + }, + { + "lib2", + 2, + []string{ + "main.go", + "lib2/a.go", + }, + }, + } + + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib1/a.go -- +package lib1 + +const A = 1 + +-- lib1/b.go -- +package lib1 + +const B = 1 + +-- lib2/a.go -- +package lib2 + +const C = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib1" + "mod.com/lib2" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + for _, test := range tests { + file := fmt.Sprintf("%s/a.go", test.packageName) + env.OpenFile(file) + loc := env.RegexpSearch(file, test.packageName) + refs := env.References(loc) + if len(refs) != test.wantRefCount { + // TODO(adonovan): make this assertion less maintainer-hostile. + t.Fatalf("got %v reference(s), want %d", len(refs), test.wantRefCount) + } + var refURIs []string + for _, ref := range refs { + refURIs = append(refURIs, string(ref.URI)) + } + for _, base := range test.wantFiles { + hasBase := false + for _, ref := range refURIs { + if strings.HasSuffix(ref, base) { + hasBase = true + break + } + } + if !hasBase { + t.Fatalf("got [%v], want reference ends with \"%v\"", strings.Join(refURIs, ","), base) + } + } } }) } + +// Test for golang/go#43144. +// +// Verify that we search for references and implementations in intermediate +// test variants. +func TestReferencesInTestVariants(t *testing.T) { + const files = ` +-- go.mod -- +module foo.mod + +go 1.12 +-- foo/foo.go -- +package foo + +import "foo.mod/bar" + +const Foo = 42 + +type T int +type InterfaceM interface{ M() } +type InterfaceF interface{ F() } + +func _() { + _ = bar.Blah +} + +-- foo/foo_test.go -- +package foo + +type Fer struct{} +func (Fer) F() {} + +-- bar/bar.go -- +package bar + +var Blah = 123 + +-- bar/bar_test.go -- +package bar + +type Mer struct{} +func (Mer) M() {} + +func TestBar() { + _ = Blah +} +-- bar/bar_x_test.go -- +package bar_test + +import ( + "foo.mod/bar" + "foo.mod/foo" +) + +type Mer struct{} +func (Mer) M() {} + +func _() { + _ = bar.Blah + _ = foo.Foo +} +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("foo/foo.go") + + // Helper to map locations relative file paths. + fileLocations := func(locs []protocol.Location) []string { + var got []string + for _, loc := range locs { + got = append(got, env.Sandbox.Workdir.URIToPath(loc.URI)) + } + sort.Strings(got) + return got + } + + refTests := []struct { + re string + wantRefs []string + }{ + // Blah is referenced: + // - inside the foo.mod/bar (ordinary) package + // - inside the foo.mod/bar [foo.mod/bar.test] test variant package + // - from the foo.mod/bar_test [foo.mod/bar.test] x_test package + // - from the foo.mod/foo package + {"Blah", []string{"bar/bar.go", "bar/bar_test.go", "bar/bar_x_test.go", "foo/foo.go"}}, + + // Foo is referenced in bar_x_test.go via the intermediate test variant + // foo.mod/foo [foo.mod/bar.test]. + {"Foo", []string{"bar/bar_x_test.go", "foo/foo.go"}}, + } + + for _, test := range refTests { + loc := env.RegexpSearch("foo/foo.go", test.re) + refs := env.References(loc) + + got := fileLocations(refs) + if diff := cmp.Diff(test.wantRefs, got); diff != "" { + t.Errorf("References(%q) returned unexpected diff (-want +got):\n%s", test.re, diff) + } + } + + implTests := []struct { + re string + wantImpls []string + }{ + // InterfaceM is implemented both in foo.mod/bar [foo.mod/bar.test] (which + // doesn't import foo), and in foo.mod/bar_test [foo.mod/bar.test], which + // imports the test variant of foo. + {"InterfaceM", []string{"bar/bar_test.go", "bar/bar_x_test.go"}}, + + // A search within the ordinary package to should find implementations + // (Fer) within the augmented test package. + {"InterfaceF", []string{"foo/foo_test.go"}}, + } + + for _, test := range implTests { + loc := env.RegexpSearch("foo/foo.go", test.re) + impls := env.Implementations(loc) + + got := fileLocations(impls) + if diff := cmp.Diff(test.wantImpls, got); diff != "" { + t.Errorf("Implementations(%q) returned unexpected diff (-want +got):\n%s", test.re, diff) + } + } + }) +} + +// This is a regression test for Issue #56169, in which interface +// implementations in vendored modules were not found. The actual fix +// was the same as for #55995; see TestVendoringInvalidatesMetadata. +func TestImplementationsInVendor(t *testing.T) { + t.Skip("golang/go#56169: file watching does not capture vendor dirs") + + const proxy = ` +-- other.com/b@v1.0.0/go.mod -- +module other.com/b +go 1.14 + +-- other.com/b@v1.0.0/b.go -- +package b +type B int +func (B) F() {} +` + const src = ` +-- go.mod -- +module example.com/a +go 1.14 +require other.com/b v1.0.0 + +-- go.sum -- +other.com/b v1.0.0 h1:9WyCKS+BLAMRQM0CegP6zqP2beP+ShTbPaARpNY31II= +other.com/b v1.0.0/go.mod h1:TgHQFucl04oGT+vrUm/liAzukYHNxCwKNkQZEyn3m9g= + +-- a.go -- +package a +import "other.com/b" +type I interface { F() } +var _ b.B + +` + WithOptions( + ProxyFiles(proxy), + Modes(Default), // fails in 'experimental' mode + ).Run(t, src, func(t *testing.T, env *Env) { + // Enable to debug go.sum mismatch, which may appear as + // "module lookup disabled by GOPROXY=off", confusingly. + if false { + env.DumpGoSum(".") + } + + checkVendor := func(locs []protocol.Location, wantVendor bool) { + if len(locs) != 1 { + t.Errorf("got %d locations, want 1", len(locs)) + } else if strings.Contains(string(locs[0].URI), "/vendor/") != wantVendor { + t.Errorf("got location %s, wantVendor=%t", locs[0], wantVendor) + } + } + + env.OpenFile("a.go") + refLoc := env.RegexpSearch("a.go", "I") // find "I" reference + + // Initially, a.I has one implementation b.B in + // the module cache, not the vendor tree. + checkVendor(env.Implementations(refLoc), false) + + // Run 'go mod vendor' outside the editor. + if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil { + t.Fatalf("go mod vendor: %v", err) + } + + // Synchronize changes to watched files. + env.Await(env.DoneWithChangeWatchedFiles()) + + // Now, b.B is found in the vendor tree. + checkVendor(env.Implementations(refLoc), true) + + // Delete the vendor tree. + if err := os.RemoveAll(env.Sandbox.Workdir.AbsPath("vendor")); err != nil { + t.Fatal(err) + } + // Notify the server of the deletion. + if err := env.Sandbox.Workdir.CheckForFileChanges(env.Ctx); err != nil { + t.Fatal(err) + } + + // Synchronize again. + env.Await(env.DoneWithChangeWatchedFiles()) + + // b.B is once again defined in the module cache. + checkVendor(env.Implementations(refLoc), false) + }) +} diff --git a/gopls/internal/regtest/misc/rename_test.go b/gopls/internal/regtest/misc/rename_test.go index 121b70725b4..ba5cf7ae8e0 100644 --- a/gopls/internal/regtest/misc/rename_test.go +++ b/gopls/internal/regtest/misc/rename_test.go @@ -8,9 +8,318 @@ import ( "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/testenv" ) +func TestPrepareRenameMainPackage(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- main.go -- +package main + +import ( + "fmt" +) + +func main() { + fmt.Println(1) +} +` + const wantErr = "can't rename package \"main\"" + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + loc := env.RegexpSearch("main.go", `main`) + params := &protocol.PrepareRenameParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + _, err := env.Editor.Server.PrepareRename(env.Ctx, params) + if err == nil { + t.Errorf("missing can't rename package main error from PrepareRename") + } + + if err.Error() != wantErr { + t.Errorf("got %v, want %v", err.Error(), wantErr) + } + }) +} + +// Test case for golang/go#56227 +func TestRenameWithUnsafeSlice(t *testing.T) { + testenv.NeedsGo1Point(t, 17) // unsafe.Slice was added in Go 1.17 + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- p.go -- +package p + +import "unsafe" + +type T struct{} + +func (T) M() {} + +func _() { + x := [3]int{1, 2, 3} + ptr := unsafe.Pointer(&x) + _ = unsafe.Slice((*int)(ptr), 3) +} +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("p.go") + env.Rename(env.RegexpSearch("p.go", "M"), "N") // must not panic + }) +} + +func TestPrepareRenameWithNoPackageDeclaration(t *testing.T) { + const files = ` +go 1.14 +-- lib/a.go -- +import "fmt" + +const A = 1 + +func bar() { + fmt.Println("Bar") +} + +-- main.go -- +package main + +import "fmt" + +func main() { + fmt.Println("Hello") +} +` + const wantErr = "no object found" + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + err := env.Editor.Rename(env.Ctx, env.RegexpSearch("lib/a.go", "fmt"), "fmt1") + if err == nil { + t.Errorf("missing no object found from Rename") + } + + if err.Error() != wantErr { + t.Errorf("got %v, want %v", err.Error(), wantErr) + } + }) +} + +func TestPrepareRenameFailWithUnknownModule(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +go 1.14 +-- lib/a.go -- +package lib + +const A = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" +) + +func main() { + println("Hello") +} +` + const wantErr = "can't rename package: missing module information for package" + Run(t, files, func(t *testing.T, env *Env) { + loc := env.RegexpSearch("lib/a.go", "lib") + params := &protocol.PrepareRenameParams{ + TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc), + } + _, err := env.Editor.Server.PrepareRename(env.Ctx, params) + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Errorf("missing cannot rename packages with unknown module from PrepareRename") + } + }) +} + +func TestRenamePackageWithConflicts(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/nested/a.go -- +package nested + +const B = 1 + +-- lib/x/a.go -- +package nested1 + +const C = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + "mod.com/lib/nested" + nested1 "mod.com/lib/x" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested") + + // Check if the new package name exists. + env.RegexpSearch("nested/a.go", "package nested") + env.RegexpSearch("main.go", `nested2 "mod.com/nested"`) + env.RegexpSearch("main.go", "mod.com/nested/nested") + env.RegexpSearch("main.go", `nested1 "mod.com/nested/x"`) + }) +} + +func TestRenamePackageWithAlias(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/nested/a.go -- +package nested + +const B = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + lib1 "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested") + + // Check if the new package name exists. + env.RegexpSearch("nested/a.go", "package nested") + env.RegexpSearch("main.go", "mod.com/nested") + env.RegexpSearch("main.go", `lib1 "mod.com/nested/nested"`) + }) +} + +func TestRenamePackageWithDifferentDirectoryPath(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/nested/a.go -- +package foo + +const B = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + foo "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested") + + // Check if the new package name exists. + env.RegexpSearch("nested/a.go", "package nested") + env.RegexpSearch("main.go", "mod.com/nested") + env.RegexpSearch("main.go", `foo "mod.com/nested/nested"`) + }) +} + +func TestRenamePackage(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/b.go -- +package lib + +const B = 1 + +-- lib/nested/a.go -- +package nested + +const C = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1") + + // Check if the new package name exists. + env.RegexpSearch("lib1/a.go", "package lib1") + env.RegexpSearch("lib1/b.go", "package lib1") + env.RegexpSearch("main.go", "mod.com/lib1") + env.RegexpSearch("main.go", "mod.com/lib1/nested") + }) +} + // Test for golang/go#47564. func TestRenameInTestVariant(t *testing.T) { const files = ` @@ -48,11 +357,580 @@ func main() { Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") - pos := env.RegexpSearch("main.go", `stringutil\.(Identity)`) - env.Rename("main.go", pos, "Identityx") - text := env.Editor.BufferText("stringutil/stringutil_test.go") + env.Rename(env.RegexpSearch("main.go", `stringutil\.(Identity)`), "Identityx") + text := env.BufferText("stringutil/stringutil_test.go") if !strings.Contains(text, "Identityx") { t.Errorf("stringutil/stringutil_test.go: missing expected token `Identityx` after rename:\n%s", text) } }) } + +// This is a test that rename operation initiated by the editor function as expected. +func TestRenameFileFromEditor(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.16 +-- a/a.go -- +package a + +const X = 1 +-- a/x.go -- +package a + +const X = 2 +-- b/b.go -- +package b +` + + Run(t, files, func(t *testing.T, env *Env) { + // Rename files and verify that diagnostics are affected accordingly. + + // Initially, we should have diagnostics on both X's, for their duplicate declaration. + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.go", "X")), + Diagnostics(env.AtRegexp("a/x.go", "X")), + ) + + // Moving x.go should make the diagnostic go away. + env.RenameFile("a/x.go", "b/x.go") + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), // no more duplicate declarations + Diagnostics(env.AtRegexp("b/b.go", "package")), // as package names mismatch + ) + + // Renaming should also work on open buffers. + env.OpenFile("b/x.go") + + // Moving x.go back to a/ should cause the diagnostics to reappear. + env.RenameFile("b/x.go", "a/x.go") + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "X")), + Diagnostics(env.AtRegexp("a/x.go", "X")), + ) + + // Renaming the entire directory should move both the open and closed file. + env.RenameFile("a", "x") + env.AfterChange( + Diagnostics(env.AtRegexp("x/a.go", "X")), + Diagnostics(env.AtRegexp("x/x.go", "X")), + ) + + // As a sanity check, verify that x/x.go is open. + if text := env.BufferText("x/x.go"); text == "" { + t.Fatal("got empty buffer for x/x.go") + } + }) +} + +func TestRenamePackage_Tests(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/b.go -- +package lib + +const B = 1 + +-- lib/a_test.go -- +package lib_test + +import ( + "mod.com/lib" + "fmt +) + +const C = 1 + +-- lib/b_test.go -- +package lib + +import ( + "fmt +) + +const D = 1 + +-- lib/nested/a.go -- +package nested + +const D = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1") + + // Check if the new package name exists. + env.RegexpSearch("lib1/a.go", "package lib1") + env.RegexpSearch("lib1/b.go", "package lib1") + env.RegexpSearch("main.go", "mod.com/lib1") + env.RegexpSearch("main.go", "mod.com/lib1/nested") + + // Check if the test package is renamed + env.RegexpSearch("lib1/a_test.go", "package lib1_test") + env.RegexpSearch("lib1/b_test.go", "package lib1") + }) +} + +func TestRenamePackage_NestedModule(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const files = ` +-- go.work -- +go 1.18 +use ( + . + ./foo/bar + ./foo/baz +) + +-- go.mod -- +module mod.com + +go 1.18 + +require ( + mod.com/foo/bar v0.0.0 +) + +replace ( + mod.com/foo/bar => ./foo/bar + mod.com/foo/baz => ./foo/baz +) +-- foo/foo.go -- +package foo + +import "fmt" + +func Bar() { + fmt.Println("In foo before renamed to foox.") +} + +-- foo/bar/go.mod -- +module mod.com/foo/bar + +-- foo/bar/bar.go -- +package bar + +const Msg = "Hi from package bar" + +-- foo/baz/go.mod -- +module mod.com/foo/baz + +-- foo/baz/baz.go -- +package baz + +const Msg = "Hi from package baz" + +-- main.go -- +package main + +import ( + "fmt" + "mod.com/foo/bar" + "mod.com/foo/baz" + "mod.com/foo" +) + +func main() { + foo.Bar() + fmt.Println(bar.Msg) + fmt.Println(baz.Msg) +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("foo/foo.go") + env.Rename(env.RegexpSearch("foo/foo.go", "foo"), "foox") + + env.RegexpSearch("foox/foo.go", "package foox") + env.OpenFile("foox/bar/bar.go") + env.OpenFile("foox/bar/go.mod") + + env.RegexpSearch("main.go", "mod.com/foo/bar") + env.RegexpSearch("main.go", "mod.com/foox") + env.RegexpSearch("main.go", "foox.Bar()") + + env.RegexpSearch("go.mod", "./foox/bar") + env.RegexpSearch("go.mod", "./foox/baz") + }) +} + +func TestRenamePackage_DuplicateImport(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/nested/a.go -- +package nested + +const B = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + lib1 "mod.com/lib" + lib2 "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested") + + // Check if the new package name exists. + env.RegexpSearch("nested/a.go", "package nested") + env.RegexpSearch("main.go", "mod.com/nested") + env.RegexpSearch("main.go", `lib1 "mod.com/nested"`) + env.RegexpSearch("main.go", `lib2 "mod.com/nested/nested"`) + }) +} + +func TestRenamePackage_DuplicateBlankImport(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +const A = 1 + +-- lib/nested/a.go -- +package nested + +const B = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib" + _ "mod.com/lib" + lib1 "mod.com/lib/nested" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested") + + // Check if the new package name exists. + env.RegexpSearch("nested/a.go", "package nested") + env.RegexpSearch("main.go", "mod.com/nested") + env.RegexpSearch("main.go", `_ "mod.com/nested"`) + env.RegexpSearch("main.go", `lib1 "mod.com/nested/nested"`) + }) +} + +func TestRenamePackage_TestVariant(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.12 +-- foo/foo.go -- +package foo + +const Foo = 42 +-- bar/bar.go -- +package bar + +import "mod.com/foo" + +const Bar = foo.Foo +-- bar/bar_test.go -- +package bar + +import "mod.com/foo" + +const Baz = foo.Foo +-- testdata/bar/bar.go -- +package bar + +import "mod.com/foox" + +const Bar = foox.Foo +-- testdata/bar/bar_test.go -- +package bar + +import "mod.com/foox" + +const Baz = foox.Foo +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("foo/foo.go") + env.Rename(env.RegexpSearch("foo/foo.go", "package (foo)"), "foox") + + checkTestdata(t, env) + }) +} + +func TestRenamePackage_IntermediateTestVariant(t *testing.T) { + // In this test set up, we have the following import edges: + // bar_test -> baz -> foo -> bar + // bar_test -> foo -> bar + // bar_test -> bar + // + // As a consequence, bar_x_test.go is in the reverse closure of both + // `foo [bar.test]` and `baz [bar.test]`. This test confirms that we don't + // produce duplicate edits in this case. + const files = ` +-- go.mod -- +module foo.mod + +go 1.12 +-- foo/foo.go -- +package foo + +import "foo.mod/bar" + +const Foo = 42 + +const _ = bar.Bar +-- baz/baz.go -- +package baz + +import "foo.mod/foo" + +const Baz = foo.Foo +-- bar/bar.go -- +package bar + +var Bar = 123 +-- bar/bar_test.go -- +package bar + +const _ = Bar +-- bar/bar_x_test.go -- +package bar_test + +import ( + "foo.mod/bar" + "foo.mod/baz" + "foo.mod/foo" +) + +const _ = bar.Bar + baz.Baz + foo.Foo +-- testdata/foox/foo.go -- +package foox + +import "foo.mod/bar" + +const Foo = 42 + +const _ = bar.Bar +-- testdata/baz/baz.go -- +package baz + +import "foo.mod/foox" + +const Baz = foox.Foo +-- testdata/bar/bar_x_test.go -- +package bar_test + +import ( + "foo.mod/bar" + "foo.mod/baz" + "foo.mod/foox" +) + +const _ = bar.Bar + baz.Baz + foox.Foo +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("foo/foo.go") + env.Rename(env.RegexpSearch("foo/foo.go", "package (foo)"), "foox") + + checkTestdata(t, env) + }) +} + +func TestRenamePackage_Nesting(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +import "mod.com/lib/nested" + +const A = 1 + nested.B +-- lib/nested/a.go -- +package nested + +const B = 1 +-- other/other.go -- +package other + +import ( + "mod.com/lib" + "mod.com/lib/nested" +) + +const C = lib.A + nested.B +-- testdata/libx/a.go -- +package libx + +import "mod.com/libx/nested" + +const A = 1 + nested.B +-- testdata/other/other.go -- +package other + +import ( + "mod.com/libx" + "mod.com/libx/nested" +) + +const C = libx.A + nested.B +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "package (lib)"), "libx") + + checkTestdata(t, env) + }) +} + +func TestRenamePackage_InvalidName(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +import "mod.com/lib/nested" + +const A = 1 + nested.B +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/a.go") + loc := env.RegexpSearch("lib/a.go", "package (lib)") + + for _, badName := range []string{"$$$", "lib_test"} { + if err := env.Editor.Rename(env.Ctx, loc, badName); err == nil { + t.Errorf("Rename(lib, libx) succeeded, want non-nil error") + } + } + }) +} + +func TestRenamePackage_InternalPackage(t *testing.T) { + testenv.NeedsGo1Point(t, 17) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib/a.go -- +package lib + +import ( + "fmt" + "mod.com/lib/internal/x" +) + +const A = 1 + +func print() { + fmt.Println(x.B) +} + +-- lib/internal/x/a.go -- +package x + +const B = 1 + +-- main.go -- +package main + +import "mod.com/lib" + +func main() { + lib.print() +} +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("lib/internal/x/a.go") + env.Rename(env.RegexpSearch("lib/internal/x/a.go", "x"), "utils") + + // Check if the new package name exists. + env.RegexpSearch("lib/a.go", "mod.com/lib/internal/utils") + env.RegexpSearch("lib/a.go", "utils.B") + + // Check if the test package is renamed + env.RegexpSearch("lib/internal/utils/a.go", "package utils") + + env.OpenFile("lib/a.go") + env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1") + + // Check if the new package name exists. + env.RegexpSearch("lib1/a.go", "package lib1") + env.RegexpSearch("lib1/a.go", "mod.com/lib1/internal/utils") + env.RegexpSearch("main.go", `import "mod.com/lib1"`) + env.RegexpSearch("main.go", "lib1.print()") + }) +} + +// checkTestdata checks that current buffer contents match their corresponding +// expected content in the testdata directory. +func checkTestdata(t *testing.T, env *Env) { + t.Helper() + files := env.ListFiles("testdata") + if len(files) == 0 { + t.Fatal("no files in testdata directory") + } + for _, file := range files { + suffix := strings.TrimPrefix(file, "testdata/") + got := env.BufferText(suffix) + want := env.ReadWorkspaceFile(file) + if diff := compare.Text(want, got); diff != "" { + t.Errorf("Rename: unexpected buffer content for %s (-want +got):\n%s", suffix, diff) + } + } +} diff --git a/gopls/internal/regtest/misc/semantictokens_test.go b/gopls/internal/regtest/misc/semantictokens_test.go index 79507876a64..a96024b9ca2 100644 --- a/gopls/internal/regtest/misc/semantictokens_test.go +++ b/gopls/internal/regtest/misc/semantictokens_test.go @@ -5,10 +5,14 @@ package misc import ( + "strings" "testing" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/typeparams" ) func TestBadURICrash_VSCodeIssue1498(t *testing.T) { @@ -25,10 +29,8 @@ func main() {} ` WithOptions( - Modes(Singleton), - EditorConfig{ - AllExperiments: true, - }, + Modes(Default), + Settings{"allExperiments": true}, ).Run(t, src, func(t *testing.T, env *Env) { params := &protocol.SemanticTokensParams{} const badURI = "http://foo" @@ -42,3 +44,161 @@ func main() {} } }) } + +// fix bug involving type parameters and regular parameters +// (golang/vscode-go#2527) +func TestSemantic_2527(t *testing.T) { + if !typeparams.Enabled { + t.Skip("type parameters are needed for this test") + } + // these are the expected types of identifiers in text order + want := []result{ + {"package", "keyword", ""}, + {"foo", "namespace", ""}, + {"func", "keyword", ""}, + {"Add", "function", "definition deprecated"}, + {"T", "typeParameter", "definition"}, + {"int", "type", "defaultLibrary"}, + {"target", "parameter", "definition"}, + {"T", "typeParameter", ""}, + {"l", "parameter", "definition"}, + {"T", "typeParameter", ""}, + {"T", "typeParameter", ""}, + {"return", "keyword", ""}, + {"append", "function", "defaultLibrary"}, + {"l", "parameter", ""}, + {"target", "parameter", ""}, + {"for", "keyword", ""}, + {"range", "keyword", ""}, + {"l", "parameter", ""}, + {"return", "keyword", ""}, + {"nil", "variable", "readonly defaultLibrary"}, + } + src := ` +-- go.mod -- +module example.com + +go 1.19 +-- main.go -- +package foo +// Deprecated (for testing) +func Add[T int](target T, l []T) []T { + return append(l, target) + for range l {} // test coverage + return nil +} +` + WithOptions( + Modes(Default), + Settings{"semanticTokens": true}, + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "for range")), + ) + p := &protocol.SemanticTokensParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: env.Sandbox.Workdir.URI("main.go"), + }, + } + v, err := env.Editor.Server.SemanticTokensFull(env.Ctx, p) + if err != nil { + t.Fatal(err) + } + seen := interpret(v.Data, env.BufferText("main.go")) + if x := cmp.Diff(want, seen); x != "" { + t.Errorf("Semantic tokens do not match (-want +got):\n%s", x) + } + }) + +} + +// fix inconsistency in TypeParameters +// https://github.com/golang/go/issues/57619 +func TestSemantic_57619(t *testing.T) { + if !typeparams.Enabled { + t.Skip("type parameters are needed for this test") + } + src := ` +-- go.mod -- +module example.com + +go 1.19 +-- main.go -- +package foo +type Smap[K int, V any] struct { + Store map[K]V +} +func (s *Smap[K, V]) Get(k K) (V, bool) { + v, ok := s.Store[k] + return v, ok +} +func New[K int, V any]() Smap[K, V] { + return Smap[K, V]{Store: make(map[K]V)} +} +` + WithOptions( + Modes(Default), + Settings{"semanticTokens": true}, + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + p := &protocol.SemanticTokensParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: env.Sandbox.Workdir.URI("main.go"), + }, + } + v, err := env.Editor.Server.SemanticTokensFull(env.Ctx, p) + if err != nil { + t.Fatal(err) + } + seen := interpret(v.Data, env.BufferText("main.go")) + for i, s := range seen { + if (s.Token == "K" || s.Token == "V") && s.TokenType != "typeParameter" { + t.Errorf("%d: expected K and V to be type parameters, but got %v", i, s) + } + } + }) +} + +type result struct { + Token string + TokenType string + Mod string +} + +// human-readable version of the semantic tokens +// comment, string, number are elided +// (and in the future, maybe elide other things, like operators) +func interpret(x []uint32, contents string) []result { + lines := strings.Split(contents, "\n") + ans := []result{} + line, col := 1, 1 + for i := 0; i < len(x); i += 5 { + line += int(x[i]) + col += int(x[i+1]) + if x[i] != 0 { // new line + col = int(x[i+1]) + 1 // 1-based column numbers + } + sz := x[i+2] + t := semanticTypes[x[i+3]] + if t == "comment" || t == "string" || t == "number" { + continue + } + l := x[i+4] + var mods []string + for i, mod := range semanticModifiers { + if l&(1< ../a +-- b/b/b.go -- +package b + +import "a.com/a" + +func _() { + a.DoSomething() +} +` + + WithOptions( + WorkspaceFolders("a"), + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("a/a/a.go") + env.OpenFile("b/b/b.go") + signatureHelp := func(filename string) *protocol.SignatureHelp { + loc := env.RegexpSearch(filename, `DoSomething\(()\)`) + var params protocol.SignatureHelpParams + params.TextDocument.URI = loc.URI + params.Position = loc.Range.Start + help, err := env.Editor.Server.SignatureHelp(env.Ctx, ¶ms) + if err != nil { + t.Fatal(err) + } + return help + } + ahelp := signatureHelp("a/a/a.go") + bhelp := signatureHelp("b/b/b.go") + + if diff := cmp.Diff(ahelp, bhelp); diff != "" { + t.Fatal(diff) + } + }) +} diff --git a/gopls/internal/regtest/misc/staticcheck_test.go b/gopls/internal/regtest/misc/staticcheck_test.go index 94bb39903a5..9242e194eb7 100644 --- a/gopls/internal/regtest/misc/staticcheck_test.go +++ b/gopls/internal/regtest/misc/staticcheck_test.go @@ -9,7 +9,7 @@ import ( "golang.org/x/tools/internal/testenv" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestStaticcheckGenerics(t *testing.T) { @@ -60,19 +60,51 @@ func testGenerics[P *T, T any](p P) { var FooErr error = errors.New("foo") ` - WithOptions(EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, - }, - }).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + Settings{"staticcheck": true}, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - env.Await( - env.DiagnosticAtRegexpFromSource("a/a.go", "sort.Slice", "sortslice"), - env.DiagnosticAtRegexpFromSource("a/a.go", "sort.Slice.(slice)", "SA1028"), - env.DiagnosticAtRegexpFromSource("a/a.go", "var (FooErr)", "ST1012"), - env.DiagnosticAtRegexpFromSource("a/a.go", `"12234"`, "SA1024"), - env.DiagnosticAtRegexpFromSource("a/a.go", "testGenerics.*(p P)", "SA4009"), - env.DiagnosticAtRegexpFromSource("a/a.go", "q = (&\\*p)", "SA4001"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "sort.Slice"), FromSource("sortslice")), + Diagnostics(env.AtRegexp("a/a.go", "sort.Slice.(slice)"), FromSource("SA1028")), + Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)"), FromSource("ST1012")), + Diagnostics(env.AtRegexp("a/a.go", `"12234"`), FromSource("SA1024")), + Diagnostics(env.AtRegexp("a/a.go", "testGenerics.*(p P)"), FromSource("SA4009")), + Diagnostics(env.AtRegexp("a/a.go", "q = (&\\*p)"), FromSource("SA4001")), + ) + }) +} + +// Test for golang/go#56270: an analysis with related info should not panic if +// analysis.RelatedInformation.End is not set. +func TestStaticcheckRelatedInfo(t *testing.T) { + testenv.NeedsGo1Point(t, 17) // staticcheck is only supported at Go 1.17+ + const files = ` +-- go.mod -- +module mod.test + +go 1.18 +-- p.go -- +package p + +import ( + "fmt" +) + +func Foo(enabled interface{}) { + if enabled, ok := enabled.(bool); ok { + } else { + _ = fmt.Sprintf("invalid type %T", enabled) // enabled is always bool here + } +} +` + + WithOptions( + Settings{"staticcheck": true}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("p.go") + env.AfterChange( + Diagnostics(env.AtRegexp("p.go", ", (enabled)"), FromSource("SA9008")), ) }) } diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go index 324a8006fa7..4fcf1067a1e 100644 --- a/gopls/internal/regtest/misc/vendor_test.go +++ b/gopls/internal/regtest/misc/vendor_test.go @@ -7,10 +7,9 @@ package misc import ( "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) const basicProxy = ` @@ -25,18 +24,6 @@ var Goodbye error ` func TestInconsistentVendoring(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - - // TODO(golang/go#49646): delete this comment once this test is stable. - // - // In golang/go#49646, this test is reported as flaky on Windows. We believe - // this is due to file contention from go mod vendor that should be resolved. - // If this test proves to still be flaky, skip it. - // - // if runtime.GOOS == "windows" { - // t.Skipf("skipping test due to flakiness on Windows: https://golang.org/issue/49646") - // } - const pkgThatUsesVendoring = ` -- go.mod -- module mod.com @@ -59,21 +46,20 @@ func _() { } ` WithOptions( - Modes(Singleton), + Modes(Default), ProxyFiles(basicProxy), ).Run(t, pkgThatUsesVendoring, func(t *testing.T, env *Env) { env.OpenFile("a/a1.go") d := &protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("go.mod", "module mod.com", "Inconsistent vendoring"), - ReadDiagnostics("go.mod", d), - ), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("go.mod", "module mod.com"), WithMessage("Inconsistent vendoring")), + ReadDiagnostics("go.mod", d), ) env.ApplyQuickFixes("go.mod", d.Diagnostics) - env.Await( - env.DiagnosticAtRegexpWithMessage("a/a1.go", `q int`, "not used"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a1.go", `q int`), WithMessage("not used")), ) }) } diff --git a/gopls/internal/regtest/misc/vuln_test.go b/gopls/internal/regtest/misc/vuln_test.go index 94fde715c77..8badc879e1b 100644 --- a/gopls/internal/regtest/misc/vuln_test.go +++ b/gopls/internal/regtest/misc/vuln_test.go @@ -2,17 +2,32 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.18 +// +build go1.18 + package misc import ( + "context" + "encoding/json" + "path/filepath" + "sort" + "strings" "testing" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/command" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/gopls/internal/vulncheck" + "golang.org/x/tools/gopls/internal/vulncheck/vulntest" + "golang.org/x/tools/internal/testenv" ) -func TestRunVulncheckExpError(t *testing.T) { +func TestRunGovulncheckError(t *testing.T) { const files = ` -- go.mod -- module mod.com @@ -22,15 +37,15 @@ go 1.12 package foo ` Run(t, files, func(t *testing.T, env *Env) { - cmd, err := command.NewRunVulncheckExpCommand("Run Vulncheck Exp", command.VulncheckArgs{ - Dir: "/invalid/file/url", // invalid arg + cmd, err := command.NewRunGovulncheckCommand("Run Vulncheck Exp", command.VulncheckArgs{ + URI: "/invalid/file/url", // invalid arg }) if err != nil { t.Fatal(err) } params := &protocol.ExecuteCommandParams{ - Command: command.RunVulncheckExp.ID(), + Command: command.RunGovulncheck.ID(), Arguments: cmd.Arguments, } @@ -41,3 +56,922 @@ package foo } }) } + +func TestRunGovulncheckError2(t *testing.T) { + const files = ` +-- go.mod -- +module mod.com + +go 1.12 +-- foo.go -- +package foo + +func F() { // build error incomplete +` + WithOptions( + EnvVars{ + "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`. + }, + Settings{ + "codelenses": map[string]bool{ + "run_govulncheck": true, + }, + }, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + var result command.RunVulncheckResult + env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result) + var ws WorkStatus + env.Await( + CompletedProgress(result.Token, &ws), + ) + wantEndMsg, wantMsgPart := "failed", "failed to load packages due to errors" + if ws.EndMsg != "failed" || !strings.Contains(ws.Msg, wantMsgPart) { + t.Errorf("work status = %+v, want {EndMessage: %q, Message: %q}", ws, wantEndMsg, wantMsgPart) + } + }) +} + +const vulnsData = ` +-- GO-2022-01.yaml -- +modules: + - module: golang.org/amod + versions: + - introduced: 1.0.0 + - fixed: 1.0.4 + - introduced: 1.1.2 + packages: + - package: golang.org/amod/avuln + symbols: + - VulnData.Vuln1 + - VulnData.Vuln2 +description: > + vuln in amod +references: + - href: pkg.go.dev/vuln/GO-2022-01 +-- GO-2022-03.yaml -- +modules: + - module: golang.org/amod + versions: + - introduced: 1.0.0 + - fixed: 1.0.6 + packages: + - package: golang.org/amod/avuln + symbols: + - nonExisting +description: > + unaffecting vulnerability +-- GO-2022-02.yaml -- +modules: + - module: golang.org/bmod + packages: + - package: golang.org/bmod/bvuln + symbols: + - Vuln +description: | + vuln in bmod + + This is a long description + of this vulnerability. +references: + - href: pkg.go.dev/vuln/GO-2022-03 +-- GO-2022-04.yaml -- +modules: + - module: golang.org/bmod + packages: + - package: golang.org/bmod/unused + symbols: + - Vuln +description: | + vuln in bmod/somtrhingelse +references: + - href: pkg.go.dev/vuln/GO-2022-04 +-- GOSTDLIB.yaml -- +modules: + - module: stdlib + versions: + - introduced: 1.18.0 + packages: + - package: archive/zip + symbols: + - OpenReader +references: + - href: pkg.go.dev/vuln/GOSTDLIB +` + +func TestRunGovulncheckStd(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- main.go -- +package main + +import ( + "archive/zip" + "fmt" +) + +func main() { + _, err := zip.OpenReader("file.zip") // vulnerability id: GOSTDLIB + fmt.Println(err) +} +` + + db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData)) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + WithOptions( + EnvVars{ + // Let the analyzer read vulnerabilities data from the testdata/vulndb. + "GOVULNDB": db.URI(), + // When fetchinging stdlib package vulnerability info, + // behave as if our go version is go1.18 for this testing. + // The default behavior is to run `go env GOVERSION` (which isn't mutable env var). + vulncheck.GoVersionForVulnTest: "go1.18", + "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`. + }, + Settings{ + "codelenses": map[string]bool{ + "run_govulncheck": true, + }, + }, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + + // Test CodeLens is present. + lenses := env.CodeLens("go.mod") + + const wantCommand = "gopls." + string(command.RunGovulncheck) + var gotCodelens = false + var lens protocol.CodeLens + for _, l := range lenses { + if l.Command.Command == wantCommand { + gotCodelens = true + lens = l + break + } + } + if !gotCodelens { + t.Fatal("got no vulncheck codelens") + } + // Run Command included in the codelens. + var result command.RunVulncheckResult + env.ExecuteCommand(&protocol.ExecuteCommandParams{ + Command: lens.Command.Command, + Arguments: lens.Command.Arguments, + }, &result) + + env.OnceMet( + CompletedProgress(result.Token, nil), + ShownMessage("Found GOSTDLIB"), + NoDiagnostics(ForFile("go.mod")), + ) + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{ + "go.mod": {IDs: []string{"GOSTDLIB"}, Mode: govulncheck.ModeGovulncheck}}) + }) +} + +func TestFetchVulncheckResultStd(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- main.go -- +package main + +import ( + "archive/zip" + "fmt" +) + +func main() { + _, err := zip.OpenReader("file.zip") // vulnerability id: GOSTDLIB + fmt.Println(err) +} +` + + db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData)) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + WithOptions( + EnvVars{ + // Let the analyzer read vulnerabilities data from the testdata/vulndb. + "GOVULNDB": db.URI(), + // When fetchinging stdlib package vulnerability info, + // behave as if our go version is go1.18 for this testing. + vulncheck.GoVersionForVulnTest: "go1.18", + "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`. + }, + Settings{"ui.diagnostic.vulncheck": "Imports"}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + env.AfterChange( + NoDiagnostics(ForFile("go.mod")), + // we don't publish diagnostics for standard library vulnerability yet. + ) + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{ + "go.mod": { + IDs: []string{"GOSTDLIB"}, + Mode: govulncheck.ModeImports, + }, + }) + }) +} + +type fetchVulncheckResult struct { + IDs []string + Mode govulncheck.AnalysisMode +} + +func testFetchVulncheckResult(t *testing.T, env *Env, want map[string]fetchVulncheckResult) { + t.Helper() + + var result map[protocol.DocumentURI]*govulncheck.Result + fetchCmd, err := command.NewFetchVulncheckResultCommand("fetch", command.URIArg{ + URI: env.Sandbox.Workdir.URI("go.mod"), + }) + if err != nil { + t.Fatal(err) + } + env.ExecuteCommand(&protocol.ExecuteCommandParams{ + Command: fetchCmd.Command, + Arguments: fetchCmd.Arguments, + }, &result) + + for _, v := range want { + sort.Strings(v.IDs) + } + got := map[string]fetchVulncheckResult{} + for k, r := range result { + var osv []string + for _, v := range r.Vulns { + osv = append(osv, v.OSV.ID) + } + sort.Strings(osv) + modfile := env.Sandbox.Workdir.RelPath(k.SpanURI().Filename()) + got[modfile] = fetchVulncheckResult{ + IDs: osv, + Mode: r.Mode, + } + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("fetch vulnchheck result = got %v, want %v: diff %v", got, want, diff) + } +} + +const workspace1 = ` +-- go.mod -- +module golang.org/entry + +go 1.18 + +require golang.org/cmod v1.1.3 + +require ( + golang.org/amod v1.0.0 // indirect + golang.org/bmod v0.5.0 // indirect +) +-- go.sum -- +golang.org/amod v1.0.0 h1:EUQOI2m5NhQZijXZf8WimSnnWubaFNrrKUH/PopTN8k= +golang.org/amod v1.0.0/go.mod h1:yvny5/2OtYFomKt8ax+WJGvN6pfN1pqjGnn7DQLUi6E= +golang.org/bmod v0.5.0 h1:KgvUulMyMiYRB7suKA0x+DfWRVdeyPgVJvcishTH+ng= +golang.org/bmod v0.5.0/go.mod h1:f6o+OhF66nz/0BBc/sbCsshyPRKMSxZIlG50B/bsM4c= +golang.org/cmod v1.1.3 h1:PJ7rZFTk7xGAunBRDa0wDe7rZjZ9R/vr1S2QkVVCngQ= +golang.org/cmod v1.1.3/go.mod h1:eCR8dnmvLYQomdeAZRCPgS5JJihXtqOQrpEkNj5feQA= +-- x/x.go -- +package x + +import ( + "golang.org/cmod/c" + "golang.org/entry/y" +) + +func X() { + c.C1().Vuln1() // vuln use: X -> Vuln1 +} + +func CallY() { + y.Y() // vuln use: CallY -> y.Y -> bvuln.Vuln +} + +-- y/y.go -- +package y + +import "golang.org/cmod/c" + +func Y() { + c.C2()() // vuln use: Y -> bvuln.Vuln +} +` + +// cmod/c imports amod/avuln and bmod/bvuln. +const proxy1 = ` +-- golang.org/cmod@v1.1.3/go.mod -- +module golang.org/cmod + +go 1.12 +-- golang.org/cmod@v1.1.3/c/c.go -- +package c + +import ( + "golang.org/amod/avuln" + "golang.org/bmod/bvuln" +) + +type I interface { + Vuln1() +} + +func C1() I { + v := avuln.VulnData{} + v.Vuln2() // vuln use + return v +} + +func C2() func() { + return bvuln.Vuln +} +-- golang.org/amod@v1.0.0/go.mod -- +module golang.org/amod + +go 1.14 +-- golang.org/amod@v1.0.0/avuln/avuln.go -- +package avuln + +type VulnData struct {} +func (v VulnData) Vuln1() {} +func (v VulnData) Vuln2() {} +-- golang.org/amod@v1.0.4/go.mod -- +module golang.org/amod + +go 1.14 +-- golang.org/amod@v1.0.4/avuln/avuln.go -- +package avuln + +type VulnData struct {} +func (v VulnData) Vuln1() {} +func (v VulnData) Vuln2() {} + +-- golang.org/bmod@v0.5.0/go.mod -- +module golang.org/bmod + +go 1.14 +-- golang.org/bmod@v0.5.0/bvuln/bvuln.go -- +package bvuln + +func Vuln() { + // something evil +} +-- golang.org/bmod@v0.5.0/unused/unused.go -- +package unused + +func Vuln() { + // something evil +} +-- golang.org/amod@v1.0.6/go.mod -- +module golang.org/amod + +go 1.14 +-- golang.org/amod@v1.0.6/avuln/avuln.go -- +package avuln + +type VulnData struct {} +func (v VulnData) Vuln1() {} +func (v VulnData) Vuln2() {} +` + +func vulnTestEnv(vulnsDB, proxyData string) (*vulntest.DB, []RunOption, error) { + db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData)) + if err != nil { + return nil, nil, nil + } + settings := Settings{ + "codelenses": map[string]bool{ + "run_govulncheck": true, + }, + } + ev := EnvVars{ + // Let the analyzer read vulnerabilities data from the testdata/vulndb. + "GOVULNDB": db.URI(), + // When fetching stdlib package vulnerability info, + // behave as if our go version is go1.18 for this testing. + // The default behavior is to run `go env GOVERSION` (which isn't mutable env var). + vulncheck.GoVersionForVulnTest: "go1.18", + "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`. + "GOSUMDB": "off", + } + return db, []RunOption{ProxyFiles(proxyData), ev, settings}, nil +} + +func TestRunVulncheckPackageDiagnostics(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + + db, opts0, err := vulnTestEnv(vulnsData, proxy1) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + + checkVulncheckDiagnostics := func(env *Env, t *testing.T) { + env.OpenFile("go.mod") + + gotDiagnostics := &protocol.PublishDiagnosticsParams{} + env.AfterChange( + Diagnostics(env.AtRegexp("go.mod", `golang.org/amod`)), + ReadDiagnostics("go.mod", gotDiagnostics), + ) + + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{ + "go.mod": { + IDs: []string{"GO-2022-01", "GO-2022-02", "GO-2022-03"}, + Mode: govulncheck.ModeImports, + }, + }) + + wantVulncheckDiagnostics := map[string]vulnDiagExpectation{ + "golang.org/amod": { + diagnostics: []vulnDiag{ + { + msg: "golang.org/amod has known vulnerabilities GO-2022-01, GO-2022-03.", + severity: protocol.SeverityInformation, + source: string(source.Vulncheck), + codeActions: []string{ + "Run govulncheck to verify", + "Upgrade to v1.0.6", + "Upgrade to latest", + }, + }, + }, + codeActions: []string{ + "Run govulncheck to verify", + "Upgrade to v1.0.6", + "Upgrade to latest", + }, + hover: []string{"GO-2022-01", "Fixed in v1.0.4.", "GO-2022-03"}, + }, + "golang.org/bmod": { + diagnostics: []vulnDiag{ + { + msg: "golang.org/bmod has a vulnerability GO-2022-02.", + severity: protocol.SeverityInformation, + source: string(source.Vulncheck), + codeActions: []string{ + "Run govulncheck to verify", + }, + }, + }, + codeActions: []string{ + "Run govulncheck to verify", + }, + hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."}, + }, + } + + for pattern, want := range wantVulncheckDiagnostics { + modPathDiagnostics := testVulnDiagnostics(t, env, pattern, want, gotDiagnostics) + + gotActions := env.CodeAction("go.mod", modPathDiagnostics) + if diff := diffCodeActions(gotActions, want.codeActions); diff != "" { + t.Errorf("code actions for %q do not match, got %v, want %v\n%v\n", pattern, gotActions, want.codeActions, diff) + continue + } + } + } + + wantNoVulncheckDiagnostics := func(env *Env, t *testing.T) { + env.OpenFile("go.mod") + + gotDiagnostics := &protocol.PublishDiagnosticsParams{} + env.AfterChange( + ReadDiagnostics("go.mod", gotDiagnostics), + ) + + if len(gotDiagnostics.Diagnostics) > 0 { + t.Errorf("Unexpected diagnostics: %v", stringify(gotDiagnostics)) + } + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{}) + } + + for _, tc := range []struct { + name string + setting Settings + wantDiagnostics bool + }{ + {"imports", Settings{"ui.diagnostic.vulncheck": "Imports"}, true}, + {"default", Settings{}, false}, + {"invalid", Settings{"ui.diagnostic.vulncheck": "invalid"}, false}, + } { + t.Run(tc.name, func(t *testing.T) { + // override the settings options to enable diagnostics + opts := append(opts0, tc.setting) + WithOptions(opts...).Run(t, workspace1, func(t *testing.T, env *Env) { + // TODO(hyangah): implement it, so we see GO-2022-01, GO-2022-02, and GO-2022-03. + // Check that the actions we get when including all diagnostics at a location return the same result + if tc.wantDiagnostics { + checkVulncheckDiagnostics(env, t) + } else { + wantNoVulncheckDiagnostics(env, t) + } + + if tc.name == "imports" && tc.wantDiagnostics { + // test we get only govulncheck-based diagnostics after "run govulncheck". + var result command.RunVulncheckResult + env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result) + gotDiagnostics := &protocol.PublishDiagnosticsParams{} + env.OnceMet( + CompletedProgress(result.Token, nil), + ShownMessage("Found"), + ) + env.OnceMet( + Diagnostics(env.AtRegexp("go.mod", "golang.org/bmod")), + ReadDiagnostics("go.mod", gotDiagnostics), + ) + // We expect only one diagnostic for GO-2022-02. + count := 0 + for _, diag := range gotDiagnostics.Diagnostics { + if strings.Contains(diag.Message, "GO-2022-02") { + count++ + if got, want := diag.Severity, protocol.SeverityWarning; got != want { + t.Errorf("Diagnostic for GO-2022-02 = %v, want %v", got, want) + } + } + } + if count != 1 { + t.Errorf("Unexpected number of diagnostics about GO-2022-02 = %v, want 1:\n%+v", count, stringify(gotDiagnostics)) + } + } + }) + }) + } +} + +func stringify(a interface{}) string { + data, _ := json.Marshal(a) + return string(data) +} + +func TestRunVulncheckWarning(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + + db, opts, err := vulnTestEnv(vulnsData, proxy1) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + WithOptions(opts...).Run(t, workspace1, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + + var result command.RunVulncheckResult + env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result) + gotDiagnostics := &protocol.PublishDiagnosticsParams{} + env.OnceMet( + CompletedProgress(result.Token, nil), + ShownMessage("Found"), + ) + // Vulncheck diagnostics asynchronous to the vulncheck command. + env.OnceMet( + Diagnostics(env.AtRegexp("go.mod", `golang.org/amod`)), + ReadDiagnostics("go.mod", gotDiagnostics), + ) + + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{ + "go.mod": {IDs: []string{"GO-2022-01", "GO-2022-02", "GO-2022-03"}, Mode: govulncheck.ModeGovulncheck}, + }) + env.OpenFile("x/x.go") + lineX := env.RegexpSearch("x/x.go", `c\.C1\(\)\.Vuln1\(\)`).Range.Start + env.OpenFile("y/y.go") + lineY := env.RegexpSearch("y/y.go", `c\.C2\(\)\(\)`).Range.Start + wantDiagnostics := map[string]vulnDiagExpectation{ + "golang.org/amod": { + applyAction: "Upgrade to v1.0.6", + diagnostics: []vulnDiag{ + { + msg: "golang.org/amod has a vulnerability used in the code: GO-2022-01.", + severity: protocol.SeverityWarning, + source: string(source.Govulncheck), + codeActions: []string{ + "Upgrade to v1.0.4", + "Upgrade to latest", + "Reset govulncheck result", + }, + relatedInfo: []vulnRelatedInfo{ + {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln1 + {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln2 + }, + }, + { + msg: "golang.org/amod has a vulnerability GO-2022-03 that is not used in the code.", + severity: protocol.SeverityInformation, + source: string(source.Govulncheck), + codeActions: []string{ + "Upgrade to v1.0.6", + "Upgrade to latest", + "Reset govulncheck result", + }, + relatedInfo: []vulnRelatedInfo{ + {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln1 + {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln2 + }, + }, + }, + codeActions: []string{ + "Upgrade to v1.0.6", + "Upgrade to latest", + "Reset govulncheck result", + }, + hover: []string{"GO-2022-01", "Fixed in v1.0.4.", "GO-2022-03"}, + }, + "golang.org/bmod": { + diagnostics: []vulnDiag{ + { + msg: "golang.org/bmod has a vulnerability used in the code: GO-2022-02.", + severity: protocol.SeverityWarning, + source: string(source.Govulncheck), + codeActions: []string{ + "Reset govulncheck result", // no fix, but we should give an option to reset. + }, + relatedInfo: []vulnRelatedInfo{ + {"y.go", uint32(lineY.Line), "[GO-2022-02]"}, // bvuln.Vuln + }, + }, + }, + codeActions: []string{ + "Reset govulncheck result", // no fix, but we should give an option to reset. + }, + hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."}, + }, + } + + for mod, want := range wantDiagnostics { + modPathDiagnostics := testVulnDiagnostics(t, env, mod, want, gotDiagnostics) + + // Check that the actions we get when including all diagnostics at a location return the same result + gotActions := env.CodeAction("go.mod", modPathDiagnostics) + if diff := diffCodeActions(gotActions, want.codeActions); diff != "" { + t.Errorf("code actions for %q do not match, expected %v, got %v\n%v\n", mod, want.codeActions, gotActions, diff) + continue + } + + // Apply the code action matching applyAction. + if want.applyAction == "" { + continue + } + for _, action := range gotActions { + if action.Title == want.applyAction { + env.ApplyCodeAction(action) + break + } + } + } + + env.Await(env.DoneWithChangeWatchedFiles()) + wantGoMod := `module golang.org/entry + +go 1.18 + +require golang.org/cmod v1.1.3 + +require ( + golang.org/amod v1.0.6 // indirect + golang.org/bmod v0.5.0 // indirect +) +` + if got := env.BufferText("go.mod"); got != wantGoMod { + t.Fatalf("go.mod vulncheck fix failed:\n%s", compare.Text(wantGoMod, got)) + } + }) +} + +func diffCodeActions(gotActions []protocol.CodeAction, want []string) string { + var gotTitles []string + for _, ca := range gotActions { + gotTitles = append(gotTitles, ca.Title) + } + return cmp.Diff(want, gotTitles) +} + +const workspace2 = ` +-- go.mod -- +module golang.org/entry + +go 1.18 + +require golang.org/bmod v0.5.0 + +-- go.sum -- +golang.org/bmod v0.5.0 h1:MT/ysNRGbCiURc5qThRFWaZ5+rK3pQRPo9w7dYZfMDk= +golang.org/bmod v0.5.0/go.mod h1:k+zl+Ucu4yLIjndMIuWzD/MnOHy06wqr3rD++y0abVs= +-- x/x.go -- +package x + +import "golang.org/bmod/bvuln" + +func F() { + // Calls a benign func in bvuln. + bvuln.OK() +} +` + +const proxy2 = ` +-- golang.org/bmod@v0.5.0/bvuln/bvuln.go -- +package bvuln + +func Vuln() {} // vulnerable. +func OK() {} // ok. +` + +func TestGovulncheckInfo(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + + db, opts, err := vulnTestEnv(vulnsData, proxy2) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + WithOptions(opts...).Run(t, workspace2, func(t *testing.T, env *Env) { + env.OpenFile("go.mod") + var result command.RunVulncheckResult + env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result) + gotDiagnostics := &protocol.PublishDiagnosticsParams{} + env.OnceMet( + CompletedProgress(result.Token, nil), + ShownMessage("No vulnerabilities found"), // only count affecting vulnerabilities. + ) + + // Vulncheck diagnostics asynchronous to the vulncheck command. + env.OnceMet( + Diagnostics(env.AtRegexp("go.mod", "golang.org/bmod")), + ReadDiagnostics("go.mod", gotDiagnostics), + ) + + testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{"go.mod": {IDs: []string{"GO-2022-02"}, Mode: govulncheck.ModeGovulncheck}}) + // wantDiagnostics maps a module path in the require + // section of a go.mod to diagnostics that will be returned + // when running vulncheck. + wantDiagnostics := map[string]vulnDiagExpectation{ + "golang.org/bmod": { + diagnostics: []vulnDiag{ + { + msg: "golang.org/bmod has a vulnerability GO-2022-02 that is not used in the code.", + severity: protocol.SeverityInformation, + source: string(source.Govulncheck), + codeActions: []string{ + "Reset govulncheck result", + }, + }, + }, + codeActions: []string{ + "Reset govulncheck result", + }, + hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."}, + }, + } + + var allActions []protocol.CodeAction + for mod, want := range wantDiagnostics { + modPathDiagnostics := testVulnDiagnostics(t, env, mod, want, gotDiagnostics) + // Check that the actions we get when including all diagnostics at a location return the same result + gotActions := env.CodeAction("go.mod", modPathDiagnostics) + allActions = append(allActions, gotActions...) + if diff := diffCodeActions(gotActions, want.codeActions); diff != "" { + t.Errorf("code actions for %q do not match, expected %v, got %v\n%v\n", mod, want.codeActions, gotActions, diff) + continue + } + } + + // Clear Diagnostics by using one of the reset code actions. + var reset protocol.CodeAction + for _, a := range allActions { + if a.Title == "Reset govulncheck result" { + reset = a + break + } + } + if reset.Title != "Reset govulncheck result" { + t.Errorf("failed to find a 'Reset govulncheck result' code action, got %v", allActions) + } + env.ApplyCodeAction(reset) + + env.Await(NoDiagnostics(ForFile("go.mod"))) + }) +} + +// testVulnDiagnostics finds the require or module statement line for the requireMod in go.mod file +// and runs checks if diagnostics and code actions associated with the line match expectation. +func testVulnDiagnostics(t *testing.T, env *Env, pattern string, want vulnDiagExpectation, got *protocol.PublishDiagnosticsParams) []protocol.Diagnostic { + t.Helper() + loc := env.RegexpSearch("go.mod", pattern) + var modPathDiagnostics []protocol.Diagnostic + for _, w := range want.diagnostics { + // Find the diagnostics at loc.start. + var diag *protocol.Diagnostic + for _, g := range got.Diagnostics { + g := g + if g.Range.Start == loc.Range.Start && w.msg == g.Message { + modPathDiagnostics = append(modPathDiagnostics, g) + diag = &g + break + } + } + if diag == nil { + t.Errorf("no diagnostic at %q matching %q found\n", pattern, w.msg) + continue + } + if diag.Severity != w.severity || diag.Source != w.source { + t.Errorf("incorrect (severity, source) for %q, want (%s, %s) got (%s, %s)\n", w.msg, w.severity, w.source, diag.Severity, diag.Source) + } + sort.Slice(w.relatedInfo, func(i, j int) bool { return w.relatedInfo[i].less(w.relatedInfo[j]) }) + if got, want := summarizeRelatedInfo(diag.RelatedInformation), w.relatedInfo; !cmp.Equal(got, want) { + t.Errorf("related info for %q do not match, want %v, got %v\n", w.msg, want, got) + } + // Check expected code actions appear. + gotActions := env.CodeAction("go.mod", []protocol.Diagnostic{*diag}) + if diff := diffCodeActions(gotActions, w.codeActions); diff != "" { + t.Errorf("code actions for %q do not match, want %v, got %v\n%v\n", w.msg, w.codeActions, gotActions, diff) + continue + } + } + // Check that useful info is supplemented as hover. + if len(want.hover) > 0 { + hover, _ := env.Hover(loc) + for _, part := range want.hover { + if !strings.Contains(hover.Value, part) { + t.Errorf("hover contents for %q do not match, want %v, got %v\n", pattern, strings.Join(want.hover, ","), hover.Value) + break + } + } + } + return modPathDiagnostics +} + +// summarizeRelatedInfo converts protocol.DiagnosticRelatedInformation to vulnRelatedInfo +// that captures only the part that we want to test. +func summarizeRelatedInfo(rinfo []protocol.DiagnosticRelatedInformation) []vulnRelatedInfo { + var res []vulnRelatedInfo + for _, r := range rinfo { + filename := filepath.Base(r.Location.URI.SpanURI().Filename()) + message, _, _ := strings.Cut(r.Message, " ") + line := r.Location.Range.Start.Line + res = append(res, vulnRelatedInfo{filename, line, message}) + } + sort.Slice(res, func(i, j int) bool { + return res[i].less(res[j]) + }) + return res +} + +type vulnRelatedInfo struct { + Filename string + Line uint32 + Message string +} + +type vulnDiag struct { + msg string + severity protocol.DiagnosticSeverity + // codeActions is a list titles of code actions that we get with this + // diagnostics as the context. + codeActions []string + // relatedInfo is related info message prefixed by the file base. + // See summarizeRelatedInfo. + relatedInfo []vulnRelatedInfo + // diagnostic source. + source string +} + +func (i vulnRelatedInfo) less(j vulnRelatedInfo) bool { + if i.Filename != j.Filename { + return i.Filename < j.Filename + } + if i.Line != j.Line { + return i.Line < j.Line + } + return i.Message < j.Message +} + +// vulnDiagExpectation maps a module path in the require +// section of a go.mod to diagnostics that will be returned +// when running vulncheck. +type vulnDiagExpectation struct { + // applyAction is the title of the code action to run for this module. + // If empty, no code actions will be executed. + applyAction string + // diagnostics is the list of diagnostics we expect at the require line for + // the module path. + diagnostics []vulnDiag + // codeActions is a list titles of code actions that we get with context + // diagnostics. + codeActions []string + // hover message is the list of expected hover message parts for this go.mod require line. + // all parts must appear in the hover message. + hover []string +} diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go index a21d47312dd..a492e1d4985 100644 --- a/gopls/internal/regtest/misc/workspace_symbol_test.go +++ b/gopls/internal/regtest/misc/workspace_symbol_test.go @@ -7,16 +7,12 @@ package misc import ( "testing" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/source" ) func TestWorkspaceSymbolMissingMetadata(t *testing.T) { - // We get 2 symbols on 1.12, for some reason. - testenv.NeedsGo1Point(t, 13) - const files = ` -- go.mod -- module mod.com @@ -26,26 +22,27 @@ go 1.17 package p const C1 = "a.go" --- ignore.go -- +-- exclude.go -- -// +build ignore +//go:build exclude +// +build exclude -package ignore +package exclude -const C2 = "ignore.go" +const C2 = "exclude.go" ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a.go") - syms := env.WorkspaceSymbol("C") + syms := env.Symbol("C") if got, want := len(syms), 1; got != want { t.Errorf("got %d symbols, want %d", got, want) } // Opening up an ignored file will result in an overlay with missing // metadata, but this shouldn't break workspace symbols requests. - env.OpenFile("ignore.go") - syms = env.WorkspaceSymbol("C") + env.OpenFile("exclude.go") + syms = env.Symbol("C") if got, want := len(syms), 1; got != want { t.Errorf("got %d symbols, want %d", got, want) } @@ -72,9 +69,7 @@ const ( var symbolMatcher = string(source.SymbolFastFuzzy) WithOptions( - EditorConfig{ - SymbolMatcher: &symbolMatcher, - }, + Settings{"symbolMatcher": symbolMatcher}, ).Run(t, files, func(t *testing.T, env *Env) { want := []string{ "Foo", // prefer exact segment matches first @@ -83,8 +78,8 @@ const ( "Fooey", // shorter than Fooest, Foobar "Fooest", } - got := env.WorkspaceSymbol("Foo") - compareSymbols(t, got, want) + got := env.Symbol("Foo") + compareSymbols(t, got, want...) }) } @@ -105,19 +100,17 @@ const ( var symbolMatcher = string(source.SymbolFastFuzzy) WithOptions( - EditorConfig{ - SymbolMatcher: &symbolMatcher, - }, + Settings{"symbolMatcher": symbolMatcher}, ).Run(t, files, func(t *testing.T, env *Env) { - compareSymbols(t, env.WorkspaceSymbol("ABC"), []string{"ABC", "AxxBxxCxx"}) - compareSymbols(t, env.WorkspaceSymbol("'ABC"), []string{"ABC"}) - compareSymbols(t, env.WorkspaceSymbol("^mod.com"), []string{"mod.com/a.ABC", "mod.com/a.AxxBxxCxx"}) - compareSymbols(t, env.WorkspaceSymbol("^mod.com Axx"), []string{"mod.com/a.AxxBxxCxx"}) - compareSymbols(t, env.WorkspaceSymbol("C$"), []string{"ABC"}) + compareSymbols(t, env.Symbol("ABC"), "ABC", "AxxBxxCxx") + compareSymbols(t, env.Symbol("'ABC"), "ABC") + compareSymbols(t, env.Symbol("^mod.com"), "mod.com/a.ABC", "mod.com/a.AxxBxxCxx") + compareSymbols(t, env.Symbol("^mod.com Axx"), "mod.com/a.AxxBxxCxx") + compareSymbols(t, env.Symbol("C$"), "ABC") }) } -func compareSymbols(t *testing.T, got []protocol.SymbolInformation, want []string) { +func compareSymbols(t *testing.T, got []protocol.SymbolInformation, want ...string) { t.Helper() if len(got) != len(want) { t.Errorf("got %d symbols, want %d", len(got), len(want)) diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go index 93d43253044..483118dd3d4 100644 --- a/gopls/internal/regtest/modfile/modfile_test.go +++ b/gopls/internal/regtest/modfile/modfile_test.go @@ -11,11 +11,11 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/bug" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" + "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/testenv" ) @@ -67,8 +67,6 @@ const Name = "Hello" ` func TestModFileModification(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const untidyModule = ` -- a/go.mod -- module mod.com @@ -94,54 +92,59 @@ func main() { // modify the go.mod file. goModContent := env.ReadWorkspaceFile("a/go.mod") env.OpenFile("a/main.go") - env.Await( - env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")), ) if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent { - t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got)) + t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got)) } // Save the buffer, which will format and organize imports. // Confirm that the go.mod file still does not change. env.SaveBuffer("a/main.go") - env.Await( - env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")), ) if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent { - t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got)) + t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got)) } }) }) // Reproduce golang/go#40269 by deleting and recreating main.go. t.Run("delete main.go", func(t *testing.T) { - t.Skip("This test will be flaky until golang/go#40269 is resolved.") - runner.Run(t, untidyModule, func(t *testing.T, env *Env) { goModContent := env.ReadWorkspaceFile("a/go.mod") mainContent := env.ReadWorkspaceFile("a/main.go") env.OpenFile("a/main.go") env.SaveBuffer("a/main.go") + // Ensure that we're done processing all the changes caused by opening + // and saving above. If not, we may run into a file locking issue on + // windows. + // + // If this proves insufficient, env.RemoveWorkspaceFile can be updated to + // retry file lock errors on windows. + env.AfterChange() env.RemoveWorkspaceFile("a/main.go") - env.Await( - env.DoneWithOpen(), - env.DoneWithSave(), - env.DoneWithChangeWatchedFiles(), - ) - env.WriteWorkspaceFile("main.go", mainContent) - env.Await( - env.DiagnosticAtRegexp("main.go", "\"example.com/blah\""), + // TODO(rfindley): awaiting here shouldn't really be necessary. We should + // be consistent eventually. + // + // Probably this was meant to exercise a race with the change below. + env.AfterChange() + + env.WriteWorkspaceFile("a/main.go", mainContent) + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")), ) - if got := env.ReadWorkspaceFile("go.mod"); got != goModContent { - t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got)) + if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent { + t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got)) } }) }) } func TestGoGetFix(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const mod = ` -- a/go.mod -- module mod.com @@ -172,11 +175,9 @@ require example.com v1.2.3 } env.OpenFile("a/main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/main.go", `"example.com/blah"`), - ReadDiagnostics("a/main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", `"example.com/blah"`)), + ReadDiagnostics("a/main.go", &d), ) var goGetDiag protocol.Diagnostic for _, diag := range d.Diagnostics { @@ -186,14 +187,13 @@ require example.com v1.2.3 } env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{goGetDiag}) if got := env.ReadWorkspaceFile("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got)) + t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) } }) } // Tests that multiple missing dependencies gives good single fixes. func TestMissingDependencyFixes(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const mod = ` -- a/go.mod -- module mod.com @@ -222,11 +222,9 @@ require random.org v1.2.3 }.Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/main.go", `"random.org/blah"`), - ReadDiagnostics("a/main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", `"random.org/blah"`)), + ReadDiagnostics("a/main.go", &d), ) var randomDiag protocol.Diagnostic for _, diag := range d.Diagnostics { @@ -236,7 +234,7 @@ require random.org v1.2.3 } env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{randomDiag}) if got := env.ReadWorkspaceFile("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got)) + t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) } }) } @@ -278,11 +276,9 @@ require random.org v1.2.3 }.Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/main.go", `"random.org/blah"`), - ReadDiagnostics("a/main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", `"random.org/blah"`)), + ReadDiagnostics("a/main.go", &d), ) var randomDiag protocol.Diagnostic for _, diag := range d.Diagnostics { @@ -292,14 +288,12 @@ require random.org v1.2.3 } env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{randomDiag}) if got := env.ReadWorkspaceFile("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got)) + t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) } }) } func TestIndirectDependencyFix(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- a/go.mod -- module mod.com @@ -331,21 +325,18 @@ require example.com v1.2.3 }.Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/go.mod") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/go.mod", "// indirect"), - ReadDiagnostics("a/go.mod", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/go.mod", "// indirect")), + ReadDiagnostics("a/go.mod", &d), ) env.ApplyQuickFixes("a/go.mod", d.Diagnostics) - if got := env.Editor.BufferText("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got)) + if got := env.BufferText("a/go.mod"); got != want { + t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) } }) } func TestUnusedDiag(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const proxy = ` -- example.com@v1.0.0/x.go -- @@ -376,15 +367,13 @@ go 1.14 }.Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/go.mod") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/go.mod", `require example.com`), - ReadDiagnostics("a/go.mod", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/go.mod", `require example.com`)), + ReadDiagnostics("a/go.mod", &d), ) env.ApplyQuickFixes("a/go.mod", d.Diagnostics) - if got := env.Editor.BufferText("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got)) + if got := env.BufferText("a/go.mod"); got != want { + t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) } }) } @@ -392,7 +381,6 @@ go 1.14 // Test to reproduce golang/go#39041. It adds a new require to a go.mod file // that already has an unused require. func TestNewDepWithUnusedDep(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const proxy = ` -- github.com/esimov/caire@v1.2.5/go.mod -- @@ -439,11 +427,9 @@ func _() { }.Run(t, repro, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexp("a/main.go", `"github.com/esimov/caire"`), - ReadDiagnostics("a/main.go", &d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", `"github.com/esimov/caire"`)), + ReadDiagnostics("a/main.go", &d), ) env.ApplyQuickFixes("a/main.go", d.Diagnostics) want := `module mod.com @@ -456,7 +442,7 @@ require ( ) ` if got := env.ReadWorkspaceFile("a/go.mod"); got != want { - t.Fatalf("TestNewDepWithUnusedDep failed:\n%s", tests.Diff(t, want, got)) + t.Fatalf("TestNewDepWithUnusedDep failed:\n%s", compare.Text(want, got)) } }) } @@ -465,8 +451,6 @@ require ( // the file watching GlobPattern in the capability registration. See // golang/go#39384. func TestModuleChangesOnDisk(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- a/go.mod -- module mod.com @@ -487,10 +471,13 @@ func main() { {"default", WithOptions(ProxyFiles(proxy), WorkspaceFolders("a"))}, {"nested", WithOptions(ProxyFiles(proxy))}, }.Run(t, mod, func(t *testing.T, env *Env) { - env.Await(env.DiagnosticAtRegexp("a/go.mod", "require")) + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/go.mod", "require")), + ) env.RunGoCommandInDir("a", "mod", "tidy") - env.Await( - EmptyDiagnostics("a/go.mod"), + env.AfterChange( + NoDiagnostics(ForFile("a/go.mod")), ) }) } @@ -498,8 +485,6 @@ func main() { // Tests golang/go#39784: a missing indirect dependency, necessary // due to blah@v2.0.0's incomplete go.mod file. func TestBadlyVersionedModule(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const proxy = ` -- example.com/blah/@v/v1.0.0.mod -- module example.com @@ -546,13 +531,15 @@ var _ = blah.Name }.Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") env.OpenFile("a/go.mod") - env.Await( + var modDiags protocol.PublishDiagnosticsParams + env.AfterChange( // We would like for the error to appear in the v2 module, but // as of writing non-workspace packages are not diagnosed. - env.DiagnosticAtRegexpWithMessage("a/main.go", `"example.com/blah/v2"`, "cannot find module providing"), - env.DiagnosticAtRegexpWithMessage("a/go.mod", `require example.com/blah/v2`, "cannot find module providing"), + Diagnostics(env.AtRegexp("a/main.go", `"example.com/blah/v2"`), WithMessage("cannot find module providing")), + Diagnostics(env.AtRegexp("a/go.mod", `require example.com/blah/v2`), WithMessage("cannot find module providing")), + ReadDiagnostics("a/go.mod", &modDiags), ) - env.ApplyQuickFixes("a/go.mod", env.DiagnosticsFor("a/go.mod").Diagnostics) + env.ApplyQuickFixes("a/go.mod", modDiags.Diagnostics) const want = `module mod.com go 1.12 @@ -563,9 +550,9 @@ require ( ) ` env.SaveBuffer("a/go.mod") - env.Await(EmptyDiagnostics("a/main.go")) - if got := env.Editor.BufferText("a/go.mod"); got != want { - t.Fatalf("suggested fixes failed:\n%s", tests.Diff(t, want, got)) + env.AfterChange(NoDiagnostics(ForFile("a/main.go"))) + if got := env.BufferText("a/go.mod"); got != want { + t.Fatalf("suggested fixes failed:\n%s", compare.Text(want, got)) } }) } @@ -575,9 +562,6 @@ func TestUnknownRevision(t *testing.T) { if runtime.GOOS == "plan9" { t.Skipf("skipping test that fails for unknown reasons on plan9; see https://go.dev/issue/50477") } - - testenv.NeedsGo1Point(t, 14) - const unknown = ` -- a/go.mod -- module mod.com @@ -603,19 +587,17 @@ func main() { t.Run("bad", func(t *testing.T) { runner.Run(t, unknown, func(t *testing.T, env *Env) { env.OpenFile("a/go.mod") - env.Await( - env.DiagnosticAtRegexp("a/go.mod", "example.com v1.2.2"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.2")), ) env.RegexpReplace("a/go.mod", "v1.2.2", "v1.2.3") env.SaveBuffer("a/go.mod") // Save to trigger diagnostics. d := protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - // Make sure the diagnostic mentions the new version -- the old diagnostic is in the same place. - env.DiagnosticAtRegexpWithMessage("a/go.mod", "example.com v1.2.3", "example.com@v1.2.3"), - ReadDiagnostics("a/go.mod", &d), - ), + env.AfterChange( + // Make sure the diagnostic mentions the new version -- the old diagnostic is in the same place. + Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.3"), WithMessage("example.com@v1.2.3")), + ReadDiagnostics("a/go.mod", &d), ) qfs := env.GetQuickFixes("a/go.mod", d.Diagnostics) if len(qfs) == 0 { @@ -623,9 +605,9 @@ func main() { } env.ApplyCodeAction(qfs[0]) // Arbitrarily pick a single fix to apply. Applying all of them seems to cause trouble in this particular test. env.SaveBuffer("a/go.mod") // Save to trigger diagnostics. - env.Await( - EmptyDiagnostics("a/go.mod"), - env.DiagnosticAtRegexp("a/main.go", "x = "), + env.AfterChange( + NoDiagnostics(ForFile("a/go.mod")), + Diagnostics(env.AtRegexp("a/main.go", "x = ")), ) }) }) @@ -654,18 +636,18 @@ func main() { t.Run("good", func(t *testing.T) { runner.Run(t, known, func(t *testing.T, env *Env) { env.OpenFile("a/go.mod") - env.Await( - env.DiagnosticAtRegexp("a/main.go", "x = "), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "x = ")), ) env.RegexpReplace("a/go.mod", "v1.2.3", "v1.2.2") env.Editor.SaveBuffer(env.Ctx, "a/go.mod") // go.mod changes must be on disk - env.Await( - env.DiagnosticAtRegexp("a/go.mod", "example.com v1.2.2"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.2")), ) env.RegexpReplace("a/go.mod", "v1.2.2", "v1.2.3") env.Editor.SaveBuffer(env.Ctx, "a/go.mod") // go.mod changes must be on disk - env.Await( - env.DiagnosticAtRegexp("a/main.go", "x = "), + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "x = ")), ) }) }) @@ -674,8 +656,6 @@ func main() { // Confirm that an error in an indirect dependency of a requirement is surfaced // as a diagnostic in the go.mod file. func TestErrorInIndirectDependency(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const badProxy = ` -- example.com@v1.2.3/go.mod -- module example.com @@ -717,8 +697,8 @@ func main() { {"nested", WithOptions(ProxyFiles(badProxy))}, }.Run(t, module, func(t *testing.T, env *Env) { env.OpenFile("a/go.mod") - env.Await( - env.DiagnosticAtRegexp("a/go.mod", "require example.com v1.2.3"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/go.mod", "require example.com v1.2.3")), ) }) } @@ -740,35 +720,37 @@ func main() { } ` WithOptions( - EditorConfig{ - Env: map[string]string{ - "GOFLAGS": "-mod=readonly", - }, - }, + EnvVars{"GOFLAGS": "-mod=readonly"}, ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") original := env.ReadWorkspaceFile("go.mod") - env.Await( - env.DiagnosticAtRegexp("main.go", `"example.com/blah"`), + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)), ) got := env.ReadWorkspaceFile("go.mod") if got != original { - t.Fatalf("go.mod file modified:\n%s", tests.Diff(t, original, got)) + t.Fatalf("go.mod file modified:\n%s", compare.Text(original, got)) } env.RunGoCommand("get", "example.com/blah@v1.2.3") env.RunGoCommand("mod", "tidy") - env.Await( - EmptyDiagnostics("main.go"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } func TestMultiModuleModDiagnostics(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - + testenv.NeedsGo1Point(t, 18) // uses go.work const mod = ` +-- go.work -- +go 1.18 + +use ( + a + b +) -- a/go.mod -- module moda.com @@ -801,17 +783,17 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - Modes(Experimental), ).Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexpWithMessage("a/go.mod", "example.com v1.2.3", "is not used"), + env.AfterChange( + Diagnostics( + env.AtRegexp("a/go.mod", "example.com v1.2.3"), + WithMessage("is not used"), + ), ) }) } func TestModTidyWithBuildTags(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const mod = ` -- go.mod -- module mod.com @@ -830,12 +812,11 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - EditorConfig{ - BuildFlags: []string{"-tags", "bob"}, - }, + Settings{"buildFlags": []string{"-tags", "bob"}}, ).Run(t, mod, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", `"example.com/blah"`), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)), ) }) } @@ -854,17 +835,13 @@ func main() {} Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.RegexpReplace("go.mod", "module", "modul") - env.Await( - env.DiagnosticAtRegexp("go.mod", "modul"), + env.AfterChange( + Diagnostics(env.AtRegexp("go.mod", "modul")), ) }) } func TestSumUpdateFixesDiagnostics(t *testing.T) { - t.Skipf("Skipping known-flaky test; see https://go.dev/issue/51352.") - - testenv.NeedsGo1Point(t, 14) - const mod = ` -- go.mod -- module mod.com @@ -891,16 +868,17 @@ func main() { ).Run(t, mod, func(t *testing.T, env *Env) { d := &protocol.PublishDiagnosticsParams{} env.OpenFile("go.mod") - env.Await( - OnceMet( - env.GoSumDiagnostic("go.mod", `example.com v1.2.3`), - ReadDiagnostics("go.mod", d), + env.AfterChange( + Diagnostics( + env.AtRegexp("go.mod", `example.com v1.2.3`), + WithMessage("go.sum is out of sync"), ), + ReadDiagnostics("go.mod", d), ) env.ApplyQuickFixes("go.mod", d.Diagnostics) env.SaveBuffer("go.mod") // Save to trigger diagnostics. - env.Await( - EmptyDiagnostics("go.mod"), + env.AfterChange( + NoDiagnostics(ForFile("go.mod")), ) }) } @@ -928,20 +906,20 @@ func hello() {} // TODO(rFindley) this doesn't work in multi-module workspace mode, because // it keeps around the last parsing modfile. Update this test to also // exercise the workspace module. - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.Await(env.DoneWithOpen()) env.RegexpReplace("go.mod", "module", "modul") // Confirm that we still have metadata with only on-disk edits. env.OpenFile("main.go") - file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "hello")) - if filepath.Base(file) != "hello.go" { - t.Fatalf("expected definition in hello.go, got %s", file) + loc := env.GoToDefinition(env.RegexpSearch("main.go", "hello")) + if filepath.Base(string(loc.URI)) != "hello.go" { + t.Fatalf("expected definition in hello.go, got %s", loc.URI) } // Confirm that we no longer have metadata when the file is saved. env.SaveBufferWithoutActions("go.mod") - _, _, err := env.Editor.GoToDefinition(env.Ctx, "main.go", env.RegexpSearch("main.go", "hello")) + _, err := env.Editor.GoToDefinition(env.Ctx, env.RegexpSearch("main.go", "hello")) if err == nil { t.Fatalf("expected error, got none") } @@ -949,8 +927,6 @@ func hello() {} } func TestRemoveUnusedDependency(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const proxy = ` -- hasdep.com@v1.2.3/go.mod -- module hasdep.com @@ -1000,19 +976,17 @@ func main() {} ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") d := &protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - env.DiagnosticAtRegexp("go.mod", "require hasdep.com v1.2.3"), - ReadDiagnostics("go.mod", d), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("go.mod", "require hasdep.com v1.2.3")), + ReadDiagnostics("go.mod", d), ) const want = `module mod.com go 1.12 ` env.ApplyQuickFixes("go.mod", d.Diagnostics) - if got := env.Editor.BufferText("go.mod"); got != want { - t.Fatalf("unexpected content in go.mod:\n%s", tests.Diff(t, want, got)) + if got := env.BufferText("go.mod"); got != want { + t.Fatalf("unexpected content in go.mod:\n%s", compare.Text(want, got)) } }) }) @@ -1043,12 +1017,10 @@ func main() {} ).Run(t, mod, func(t *testing.T, env *Env) { d := &protocol.PublishDiagnosticsParams{} env.OpenFile("go.mod") - pos := env.RegexpSearch("go.mod", "require hasdep.com v1.2.3") - env.Await( - OnceMet( - DiagnosticAt("go.mod", pos.Line, pos.Column), - ReadDiagnostics("go.mod", d), - ), + pos := env.RegexpSearch("go.mod", "require hasdep.com v1.2.3").Range.Start + env.AfterChange( + Diagnostics(AtPosition("go.mod", pos.Line, pos.Character)), + ReadDiagnostics("go.mod", d), ) const want = `module mod.com @@ -1064,15 +1036,14 @@ require random.com v1.2.3 diagnostics = append(diagnostics, d) } env.ApplyQuickFixes("go.mod", diagnostics) - if got := env.Editor.BufferText("go.mod"); got != want { - t.Fatalf("unexpected content in go.mod:\n%s", tests.Diff(t, want, got)) + if got := env.BufferText("go.mod"); got != want { + t.Fatalf("unexpected content in go.mod:\n%s", compare.Text(want, got)) } }) }) } func TestSumUpdateQuickFix(t *testing.T) { - testenv.NeedsGo1Point(t, 14) const mod = ` -- go.mod -- module mod.com @@ -1096,29 +1067,28 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") params := &protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - env.GoSumDiagnostic("go.mod", "example.com"), - ReadDiagnostics("go.mod", params), + env.AfterChange( + Diagnostics( + env.AtRegexp("go.mod", `example.com`), + WithMessage("go.sum is out of sync"), ), + ReadDiagnostics("go.mod", params), ) env.ApplyQuickFixes("go.mod", params.Diagnostics) const want = `example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c= example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo= ` if got := env.ReadWorkspaceFile("go.sum"); got != want { - t.Fatalf("unexpected go.sum contents:\n%s", tests.Diff(t, want, got)) + t.Fatalf("unexpected go.sum contents:\n%s", compare.Text(want, got)) } }) } func TestDownloadDeps(t *testing.T) { - testenv.NeedsGo1Point(t, 14) - const proxy = ` -- example.com@v1.2.3/go.mod -- module example.com @@ -1165,24 +1135,26 @@ func main() { ` WithOptions( ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") d := &protocol.PublishDiagnosticsParams{} - env.Await( - env.DiagnosticAtRegexpWithMessage("main.go", `"example.com/blah"`, `could not import example.com/blah (no required module provides package "example.com/blah")`), + env.AfterChange( + Diagnostics( + env.AtRegexp("main.go", `"example.com/blah"`), + WithMessage(`could not import example.com/blah (no required module provides package "example.com/blah")`), + ), ReadDiagnostics("main.go", d), ) env.ApplyQuickFixes("main.go", d.Diagnostics) - env.Await( - EmptyDiagnostics("main.go"), - NoDiagnostics("go.mod"), + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + NoDiagnostics(ForFile("go.mod")), ) }) } func TestInvalidGoVersion(t *testing.T) { - testenv.NeedsGo1Point(t, 14) // Times out on 1.13 for reasons unclear. Not worth worrying about. const files = ` -- go.mod -- module mod.com @@ -1192,8 +1164,25 @@ go foo package main ` Run(t, files, func(t *testing.T, env *Env) { - env.Await(env.DiagnosticAtRegexpWithMessage("go.mod", `go foo`, "invalid go version")) + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("go.mod", `go foo`), WithMessage("invalid go version")), + ) env.WriteWorkspaceFile("go.mod", "module mod.com \n\ngo 1.12\n") - env.Await(EmptyDiagnostics("go.mod")) + env.AfterChange(NoDiagnostics(ForFile("go.mod"))) + }) +} + +// This is a regression test for a bug in the line-oriented implementation +// of the "apply diffs" operation used by the fake editor. +func TestIssue57627(t *testing.T) { + const files = ` +-- go.work -- +package main +` + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.work") + env.SetBufferContent("go.work", "go 1.18\nuse moda/a") + env.SaveBuffer("go.work") // doesn't fail }) } diff --git a/gopls/internal/regtest/template/template_test.go b/gopls/internal/regtest/template/template_test.go index 9489e9bf7fe..48635643c2d 100644 --- a/gopls/internal/regtest/template/template_test.go +++ b/gopls/internal/regtest/template/template_test.go @@ -9,9 +9,9 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/protocol" - . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" ) func TestMain(m *testing.M) { @@ -35,11 +35,9 @@ go 1.17 {{end}} ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - "semanticTokens": true, - }, + Settings{ + "templateExtensions": []string{"tmpl"}, + "semanticTokens": true, }, ).Run(t, files, func(t *testing.T, env *Env) { var p protocol.SemanticTokensParams @@ -66,16 +64,19 @@ Hello {{}} <-- missing body {{end}} ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - "semanticTokens": true, - }, + Settings{ + "templateExtensions": []string{"tmpl"}, + "semanticTokens": true, }, ).Run(t, files, func(t *testing.T, env *Env) { // TODO: can we move this diagnostic onto {{}}? - env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}")) - d := env.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source + var diags protocol.PublishDiagnosticsParams + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}")), + ReadDiagnostics("hello.tmpl", &diags), + ) + d := diags.Diagnostics // issue 50786: check for Source if len(d) != 1 { t.Errorf("expected 1 diagnostic, got %d", len(d)) return @@ -95,7 +96,7 @@ Hello {{}} <-- missing body } env.WriteWorkspaceFile("hello.tmpl", "{{range .Planets}}\nHello {{.}}\n{{end}}") - env.Await(EmptyDiagnostics("hello.tmpl")) + env.AfterChange(NoDiagnostics(ForFile("hello.tmpl"))) }) } @@ -112,16 +113,15 @@ B {{}} <-- missing body ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - }, - DirectoryFilters: []string{"-b"}, + Settings{ + "directoryFilters": []string{"-b"}, + "templateExtensions": []string{"tmpl"}, }, ).Run(t, files, func(t *testing.T, env *Env) { - env.Await( - OnceMet(env.DiagnosticAtRegexp("a/a.tmpl", "()A")), - NoDiagnostics("b/b.tmpl"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.tmpl", "()A")), + NoDiagnostics(ForFile("b/b.tmpl")), ) }) } @@ -136,16 +136,13 @@ go 1.12 Run(t, files, func(t *testing.T, env *Env) { env.CreateBuffer("hello.tmpl", "") - env.Await( - OnceMet( - env.DoneWithOpen(), - NoDiagnostics("hello.tmpl"), // Don't get spurious errors for empty templates. - ), + env.AfterChange( + NoDiagnostics(ForFile("hello.tmpl")), // Don't get spurious errors for empty templates. ) env.SetBufferContent("hello.tmpl", "{{range .Planets}}\nHello {{}}\n{{end}}") - env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}")) + env.Await(Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}"))) env.RegexpReplace("hello.tmpl", "{{}}", "{{.}}") - env.Await(EmptyOrNoDiagnostics("hello.tmpl")) + env.Await(NoDiagnostics(ForFile("hello.tmpl"))) }) } @@ -163,11 +160,15 @@ Hello {{}} <-- missing body Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("hello.tmpl") - env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}")) + env.AfterChange( + Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}")), + ) // Since we don't have templateExtensions configured, closing hello.tmpl // should make its diagnostics disappear. env.CloseBuffer("hello.tmpl") - env.Await(EmptyDiagnostics("hello.tmpl")) + env.AfterChange( + NoDiagnostics(ForFile("hello.tmpl")), + ) }) } @@ -184,16 +185,14 @@ go 1.12 ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl", "gotmpl"}, - }, + Settings{ + "templateExtensions": []string{"tmpl", "gotmpl"}, }, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a.tmpl") x := env.RegexpSearch("a.tmpl", `A`) - file, pos := env.GoToDefinition("a.tmpl", x) - refs := env.References(file, pos) + loc := env.GoToDefinition(x) + refs := env.References(loc) if len(refs) != 2 { t.Fatalf("got %v reference(s), want 2", len(refs)) } @@ -206,9 +205,9 @@ go 1.12 } } - content, npos := env.Hover(file, pos) - if pos != npos { - t.Errorf("pos? got %v, wanted %v", npos, pos) + content, nloc := env.Hover(loc) + if loc != nloc { + t.Errorf("loc? got %v, wanted %v", nloc, loc) } if content.Value != "template A defined" { t.Errorf("got %s, wanted 'template A defined", content.Value) diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go index e66d08ab125..edb479a9cf2 100644 --- a/gopls/internal/regtest/watch/watch_test.go +++ b/gopls/internal/regtest/watch/watch_test.go @@ -8,12 +8,11 @@ import ( "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/bug" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) func TestMain(m *testing.M) { @@ -38,12 +37,13 @@ func _() { // diagnostics are updated. t.Run("unopened", func(t *testing.T) { Run(t, pkg, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("a/a.go", "x"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.go", "x")), ) env.WriteWorkspaceFile("a/a.go", `package a; func _() {};`) - env.Await( - EmptyDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) }) @@ -56,13 +56,11 @@ func _() { // Insert a trivial edit so that we don't automatically update the buffer // (see CL 267577). env.EditBuffer("a/a.go", fake.NewEdit(0, 0, 0, 0, " ")) - env.Await(env.DoneWithOpen()) + env.AfterChange() env.WriteWorkspaceFile("a/a.go", `package a; func _() {};`) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - env.DiagnosticAtRegexp("a/a.go", "x"), - )) + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "x")), + ) }) }) } @@ -91,10 +89,10 @@ func _() { ` Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - env.Await(env.DoneWithOpen()) + env.AfterChange() env.WriteWorkspaceFile("b/b.go", `package b; func B() {};`) - env.Await( - env.DiagnosticAtRegexp("a/a.go", "b.B"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "b.B")), ) }) } @@ -124,8 +122,9 @@ func _() { } ` Run(t, pkg, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("a/a.go", "x"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.go", "x")), ) env.WriteWorkspaceFiles(map[string]string{ "b/b.go": `package b; func B() {};`, @@ -137,9 +136,9 @@ func _() { b.B() }`, }) - env.Await( - EmptyDiagnostics("a/a.go"), - NoDiagnostics("b/b.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + NoDiagnostics(ForFile("b/b.go")), ) }) } @@ -168,10 +167,10 @@ func _() { ` Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") - env.Await(env.DoneWithOpen()) + env.AfterChange() env.RemoveWorkspaceFile("b/b.go") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "\"mod.com/b\""), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "\"mod.com/b\"")), ) }) } @@ -199,14 +198,13 @@ func _() { } ` Run(t, missing, func(t *testing.T, env *Env) { - t.Skip("the initial workspace load fails and never retries") - - env.Await( - env.DiagnosticAtRegexp("a/a.go", "\"mod.com/c\""), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.go", "\"mod.com/c\"")), ) env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`) - env.Await( - EmptyDiagnostics("c/c.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) } @@ -227,8 +225,8 @@ func _() {} Run(t, original, func(t *testing.T, env *Env) { env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`) env.WriteWorkspaceFile("a/a.go", `package a; import "mod.com/c"; func _() { c.C() }`) - env.Await( - NoDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) } @@ -248,12 +246,13 @@ func _() { } ` Run(t, pkg, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("a/a.go", "hello"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("a/a.go", "hello")), ) env.WriteWorkspaceFile("a/a2.go", `package a; func hello() {};`) - env.Await( - EmptyDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) } @@ -324,15 +323,12 @@ func _() { t.Run("method before implementation", func(t *testing.T) { Run(t, pkg, func(t *testing.T, env *Env) { env.WriteWorkspaceFile("b/b.go", newMethod) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - DiagnosticAt("a/a.go", 12, 12), - ), + env.AfterChange( + Diagnostics(AtPosition("a/a.go", 12, 12)), ) env.WriteWorkspaceFile("a/a.go", implementation) - env.Await( - EmptyDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) }) @@ -340,15 +336,12 @@ func _() { t.Run("implementation before method", func(t *testing.T) { Run(t, pkg, func(t *testing.T, env *Env) { env.WriteWorkspaceFile("a/a.go", implementation) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) env.WriteWorkspaceFile("b/b.go", newMethod) - env.Await( - NoDiagnostics("a/a.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), ) }) }) @@ -359,12 +352,9 @@ func _() { "a/a.go": implementation, "b/b.go": newMethod, }) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a.go"), - ), - NoDiagnostics("b/b.go"), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + NoDiagnostics(ForFile("b/b.go")), ) }) }) @@ -373,7 +363,6 @@ func _() { // Tests golang/go#38498. Delete a file and then force a reload. // Assert that we no longer try to load the file. func TestDeleteFiles(t *testing.T) { - testenv.NeedsGo1Point(t, 13) // Poor overlay support causes problems on 1.12. const pkg = ` -- go.mod -- module mod.com @@ -389,69 +378,57 @@ func _() { package a ` t.Run("close then delete", func(t *testing.T) { - WithOptions(EditorConfig{ - VerboseOutput: true, - }).Run(t, pkg, func(t *testing.T, env *Env) { + WithOptions( + Settings{"verboseOutput": true}, + ).Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("a/a_unneeded.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - LogMatching(protocol.Info, "a_unneeded.go", 1, false), - ), + env.AfterChange( + LogMatching(protocol.Info, "a_unneeded.go", 1, false), ) // Close and delete the open file, mimicking what an editor would do. env.CloseBuffer("a/a_unneeded.go") env.RemoveWorkspaceFile("a/a_unneeded.go") env.RegexpReplace("a/a.go", "var _ int", "fmt.Println(\"\")") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "fmt"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "fmt")), ) env.SaveBuffer("a/a.go") - env.Await( - OnceMet( - env.DoneWithSave(), - // There should only be one log message containing - // a_unneeded.go, from the initial workspace load, which we - // check for earlier. If there are more, there's a bug. - LogMatching(protocol.Info, "a_unneeded.go", 1, false), - ), - EmptyDiagnostics("a/a.go"), + env.AfterChange( + // There should only be one log message containing + // a_unneeded.go, from the initial workspace load, which we + // check for earlier. If there are more, there's a bug. + LogMatching(protocol.Info, "a_unneeded.go", 1, false), + NoDiagnostics(ForFile("a/a.go")), ) }) }) t.Run("delete then close", func(t *testing.T) { WithOptions( - EditorConfig{VerboseOutput: true}, + Settings{"verboseOutput": true}, ).Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("a/a_unneeded.go") - env.Await( - OnceMet( - env.DoneWithOpen(), - LogMatching(protocol.Info, "a_unneeded.go", 1, false), - ), + env.AfterChange( + LogMatching(protocol.Info, "a_unneeded.go", 1, false), ) // Delete and then close the file. env.RemoveWorkspaceFile("a/a_unneeded.go") env.CloseBuffer("a/a_unneeded.go") env.RegexpReplace("a/a.go", "var _ int", "fmt.Println(\"\")") - env.Await( - env.DiagnosticAtRegexp("a/a.go", "fmt"), + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "fmt")), ) env.SaveBuffer("a/a.go") - env.Await( - OnceMet( - env.DoneWithSave(), - // There should only be one log message containing - // a_unneeded.go, from the initial workspace load, which we - // check for earlier. If there are more, there's a bug. - LogMatching(protocol.Info, "a_unneeded.go", 1, false), - ), - EmptyDiagnostics("a/a.go"), + env.AfterChange( + // There should only be one log message containing + // a_unneeded.go, from the initial workspace load, which we + // check for earlier. If there are more, there's a bug. + LogMatching(protocol.Info, "a_unneeded.go", 1, false), + NoDiagnostics(ForFile("a/a.go")), ) }) }) @@ -489,39 +466,11 @@ package a func _() {} ` Run(t, pkg, func(t *testing.T, env *Env) { - env.ChangeFilesOnDisk([]fake.FileEvent{ - { - Path: "a/a3.go", - Content: `package a - -var Hello int -`, - ProtocolEvent: protocol.FileEvent{ - URI: env.Sandbox.Workdir.URI("a/a3.go"), - Type: protocol.Created, - }, - }, - { - Path: "a/a1.go", - ProtocolEvent: protocol.FileEvent{ - URI: env.Sandbox.Workdir.URI("a/a1.go"), - Type: protocol.Deleted, - }, - }, - { - Path: "a/a2.go", - Content: `package a; func _() {};`, - ProtocolEvent: protocol.FileEvent{ - URI: env.Sandbox.Workdir.URI("a/a2.go"), - Type: protocol.Changed, - }, - }, - }) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("main.go"), - ), + env.WriteWorkspaceFile("a/a3.go", "package a\n\nvar Hello int\n") + env.RemoveWorkspaceFile("a/a1.go") + env.WriteWorkspaceFile("a/a2.go", "package a; func _() {};") + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } @@ -565,6 +514,9 @@ module mod.com go 1.12 require example.com v1.2.2 +-- go.sum -- +example.com v1.2.3 h1:OnPPkx+rW63kj9pgILsu12MORKhSlnFa3DVRJq1HZ7g= +example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo= -- main.go -- package main @@ -593,26 +545,24 @@ func main() { } `, }) - env.Await( + env.AfterChange( env.DoneWithChangeWatchedFiles(), - NoDiagnostics("main.go"), + NoDiagnostics(ForFile("main.go")), ) }) } // Reproduces golang/go#40340. -func TestSwitchFromGOPATHToModules(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - +func TestSwitchFromGOPATHToModuleMode(t *testing.T) { const files = ` -- foo/blah/blah.go -- package blah const Name = "" --- foo/main.go -- +-- main.go -- package main -import "blah" +import "foo/blah" func main() { _ = blah.Name @@ -620,29 +570,32 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, - Modes(Experimental), // module is in a subdirectory + Modes(Default), // golang/go#57521: this test is temporarily failing in 'experimental' mode + EnvVars{"GO111MODULE": "auto"}, ).Run(t, files, func(t *testing.T, env *Env) { - env.OpenFile("foo/main.go") - env.Await(env.DiagnosticAtRegexp("foo/main.go", `"blah"`)) - if err := env.Sandbox.RunGoCommand(env.Ctx, "foo", "mod", []string{"init", "mod.com"}, true); err != nil { + env.OpenFile("main.go") + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + ) + if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil { t.Fatal(err) } - env.RegexpReplace("foo/main.go", `"blah"`, `"mod.com/blah"`) - env.Await( - EmptyDiagnostics("foo/main.go"), + + // TODO(golang/go#57558, golang/go#57512): file watching is asynchronous, + // and we must wait for the view to be reconstructed before touching + // main.go, so that the new view "knows" about main.go. This is a bug, but + // awaiting the change here avoids it. + env.AfterChange() + + env.RegexpReplace("main.go", `"foo/blah"`, `"mod.com/foo/blah"`) + env.AfterChange( + NoDiagnostics(ForFile("main.go")), ) }) } // Reproduces golang/go#40487. func TestSwitchFromModulesToGOPATH(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - const files = ` -- foo/go.mod -- module mod.com @@ -663,23 +616,16 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, + EnvVars{"GO111MODULE": "auto"}, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo/main.go") env.RemoveWorkspaceFile("foo/go.mod") - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - env.DiagnosticAtRegexp("foo/main.go", `"mod.com/blah"`), - ), + env.AfterChange( + Diagnostics(env.AtRegexp("foo/main.go", `"mod.com/blah"`)), ) env.RegexpReplace("foo/main.go", `"mod.com/blah"`, `"foo/blah"`) - env.Await( - EmptyDiagnostics("foo/main.go"), + env.AfterChange( + NoDiagnostics(ForFile("foo/main.go")), ) }) } @@ -722,15 +668,9 @@ func TestAll(t *testing.T) { } `, }) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a.go"), - ), - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a_test.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + NoDiagnostics(ForFile("a/a_test.go")), ) // Now, add a new file to the test variant and use its symbol in the // original test file. Expect no diagnostics. @@ -754,15 +694,9 @@ func hi() {} func TestSomething(t *testing.T) {} `, }) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a_test.go"), - ), - OnceMet( - env.DoneWithChangeWatchedFiles(), - NoDiagnostics("a/a2_test.go"), - ), + env.AfterChange( + NoDiagnostics(ForFile("a/a_test.go")), + NoDiagnostics(ForFile("a/a2_test.go")), ) }) } diff --git a/gopls/internal/regtest/workspace/broken_test.go b/gopls/internal/regtest/workspace/broken_test.go new file mode 100644 index 00000000000..005a7e94638 --- /dev/null +++ b/gopls/internal/regtest/workspace/broken_test.go @@ -0,0 +1,264 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp" + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/testenv" +) + +// This file holds various tests for UX with respect to broken workspaces. +// +// TODO: consolidate other tests here. +// +// TODO: write more tests: +// - an explicit GOWORK value that doesn't exist +// - using modules and/or GOWORK inside of GOPATH? + +// Test for golang/go#53933 +func TestBrokenWorkspace_DuplicateModules(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + + // TODO(golang/go#57650): fix this feature. + t.Skip("we no longer detect duplicate modules") + + // This proxy module content is replaced by the workspace, but is still + // required for module resolution to function in the Go command. + const proxy = ` +-- example.com/foo@v0.0.1/go.mod -- +module example.com/foo + +go 1.12 +` + + const src = ` +-- go.work -- +go 1.18 + +use ( + ./package1 + ./package1/vendor/example.com/foo + ./package2 + ./package2/vendor/example.com/foo +) + +-- package1/go.mod -- +module mod.test + +go 1.18 + +require example.com/foo v0.0.1 +-- package1/main.go -- +package main + +import "example.com/foo" + +func main() { + _ = foo.CompleteMe +} +-- package1/vendor/example.com/foo/go.mod -- +module example.com/foo + +go 1.18 +-- package1/vendor/example.com/foo/foo.go -- +package foo + +const CompleteMe = 111 +-- package2/go.mod -- +module mod2.test + +go 1.18 + +require example.com/foo v0.0.1 +-- package2/main.go -- +package main + +import "example.com/foo" + +func main() { + _ = foo.CompleteMe +} +-- package2/vendor/example.com/foo/go.mod -- +module example.com/foo + +go 1.18 +-- package2/vendor/example.com/foo/foo.go -- +package foo + +const CompleteMe = 222 +` + + WithOptions( + ProxyFiles(proxy), + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("package1/main.go") + env.Await( + OutstandingWork(lsp.WorkspaceLoadFailure, `found module "example.com/foo" multiple times in the workspace`), + ) + + // Remove the redundant vendored copy of example.com. + env.WriteWorkspaceFile("go.work", `go 1.18 + use ( + ./package1 + ./package2 + ./package2/vendor/example.com/foo + ) + `) + env.Await(NoOutstandingWork()) + + // Check that definitions in package1 go to the copy vendored in package2. + location := env.GoToDefinition(env.RegexpSearch("package1/main.go", "CompleteMe")).URI.SpanURI().Filename() + const wantLocation = "package2/vendor/example.com/foo/foo.go" + if !strings.HasSuffix(location, wantLocation) { + t.Errorf("got definition of CompleteMe at %q, want %q", location, wantLocation) + } + }) +} + +// Test for golang/go#43186: correcting the module path should fix errors +// without restarting gopls. +func TestBrokenWorkspace_WrongModulePath(t *testing.T) { + const files = ` +-- go.mod -- +module mod.testx + +go 1.18 +-- p/internal/foo/foo.go -- +package foo + +const C = 1 +-- p/internal/bar/bar.go -- +package bar + +import "mod.test/p/internal/foo" + +const D = foo.C + 1 +-- p/internal/bar/bar_test.go -- +package bar_test + +import ( + "mod.test/p/internal/foo" + . "mod.test/p/internal/bar" +) + +const E = D + foo.C +-- p/internal/baz/baz_test.go -- +package baz_test + +import ( + named "mod.test/p/internal/bar" +) + +const F = named.D - 3 +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("p/internal/bar/bar.go") + env.AfterChange( + Diagnostics(env.AtRegexp("p/internal/bar/bar.go", "\"mod.test/p/internal/foo\"")), + ) + env.OpenFile("go.mod") + env.RegexpReplace("go.mod", "mod.testx", "mod.test") + env.SaveBuffer("go.mod") // saving triggers a reload + env.AfterChange(NoDiagnostics()) + }) +} + +func TestMultipleModules_Warning(t *testing.T) { + msgForVersion := func(ver int) string { + if ver >= 18 { + return `gopls was not able to find modules in your workspace.` + } else { + return `gopls requires a module at the root of your workspace.` + } + } + + const modules = ` +-- a/go.mod -- +module a.com + +go 1.12 +-- a/a.go -- +package a +-- a/empty.go -- +// an empty file +-- b/go.mod -- +module b.com + +go 1.12 +-- b/b.go -- +package b +` + for _, go111module := range []string{"on", "auto"} { + t.Run("GO111MODULE="+go111module, func(t *testing.T) { + WithOptions( + Modes(Default), + EnvVars{"GO111MODULE": go111module}, + ).Run(t, modules, func(t *testing.T, env *Env) { + ver := env.GoVersion() + msg := msgForVersion(ver) + env.OpenFile("a/a.go") + env.OpenFile("a/empty.go") + env.OpenFile("b/go.mod") + env.AfterChange( + Diagnostics(env.AtRegexp("a/a.go", "package a")), + Diagnostics(env.AtRegexp("b/go.mod", "module b.com")), + OutstandingWork(lsp.WorkspaceLoadFailure, msg), + ) + + // Changing the workspace folders to the valid modules should resolve + // the workspace errors and diagnostics. + // + // TODO(rfindley): verbose work tracking doesn't follow changing the + // workspace folder, therefore we can't invoke AfterChange here. + env.ChangeWorkspaceFolders("a", "b") + env.Await( + NoDiagnostics(ForFile("a/a.go")), + NoDiagnostics(ForFile("b/go.mod")), + NoOutstandingWork(), + ) + + env.ChangeWorkspaceFolders(".") + + // TODO(rfindley): when GO111MODULE=auto, we need to open or change a + // file here in order to detect a critical error. This is because gopls + // has forgotten about a/a.go, and therefore doesn't hit the heuristic + // "all packages are command-line-arguments". + // + // This is broken, and could be fixed by adjusting the heuristic to + // account for the scenario where there are *no* workspace packages, or + // (better) trying to get workspace packages for each open file. See + // also golang/go#54261. + env.OpenFile("b/b.go") + env.AfterChange( + // TODO(rfindley): fix these missing diagnostics. + // Diagnostics(env.AtRegexp("a/a.go", "package a")), + // Diagnostics(env.AtRegexp("b/go.mod", "module b.com")), + Diagnostics(env.AtRegexp("b/b.go", "package b")), + OutstandingWork(lsp.WorkspaceLoadFailure, msg), + ) + }) + }) + } + + // Expect no warning if GO111MODULE=auto in a directory in GOPATH. + t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) { + WithOptions( + Modes(Default), + EnvVars{"GO111MODULE": "auto"}, + InGOPATH(), + ).Run(t, modules, func(t *testing.T, env *Env) { + env.OpenFile("a/a.go") + env.AfterChange( + NoDiagnostics(ForFile("a/a.go")), + NoOutstandingWork(), + ) + }) + }) +} diff --git a/gopls/internal/regtest/workspace/directoryfilters_test.go b/gopls/internal/regtest/workspace/directoryfilters_test.go new file mode 100644 index 00000000000..6e2a15557fd --- /dev/null +++ b/gopls/internal/regtest/workspace/directoryfilters_test.go @@ -0,0 +1,259 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "sort" + "strings" + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/testenv" +) + +// This file contains regression tests for the directoryFilters setting. +// +// TODO: +// - consolidate some of these tests into a single test +// - add more tests for changing directory filters + +func TestDirectoryFilters(t *testing.T) { + WithOptions( + ProxyFiles(workspaceProxy), + WorkspaceFolders("pkg"), + Settings{ + "directoryFilters": []string{"-inner"}, + }, + ).Run(t, workspaceModule, func(t *testing.T, env *Env) { + syms := env.Symbol("Hi") + sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName }) + for _, s := range syms { + if strings.Contains(s.ContainerName, "inner") { + t.Errorf("WorkspaceSymbol: found symbol %q with container %q, want \"inner\" excluded", s.Name, s.ContainerName) + } + } + }) +} + +func TestDirectoryFiltersLoads(t *testing.T) { + // exclude, and its error, should be excluded from the workspace. + const files = ` +-- go.mod -- +module example.com + +go 1.12 +-- exclude/exclude.go -- +package exclude + +const _ = Nonexistant +` + + WithOptions( + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + NoDiagnostics(ForFile("exclude/x.go")), + ) + }) +} + +func TestDirectoryFiltersTransitiveDep(t *testing.T) { + // Even though exclude is excluded from the workspace, it should + // still be importable as a non-workspace package. + const files = ` +-- go.mod -- +module example.com + +go 1.12 +-- include/include.go -- +package include +import "example.com/exclude" + +const _ = exclude.X +-- exclude/exclude.go -- +package exclude + +const _ = Nonexistant // should be ignored, since this is a non-workspace package +const X = 1 +` + + WithOptions( + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + NoDiagnostics(ForFile("exclude/exclude.go")), // filtered out + NoDiagnostics(ForFile("include/include.go")), // successfully builds + ) + }) +} + +func TestDirectoryFiltersWorkspaceModules(t *testing.T) { + // Define a module include.com which should be in the workspace, plus a + // module exclude.com which should be excluded and therefore come from + // the proxy. + const files = ` +-- include/go.mod -- +module include.com + +go 1.12 + +require exclude.com v1.0.0 + +-- include/go.sum -- +exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I= +exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4= + +-- include/include.go -- +package include + +import "exclude.com" + +var _ = exclude.X // satisfied only by the workspace version +-- exclude/go.mod -- +module exclude.com + +go 1.12 +-- exclude/exclude.go -- +package exclude + +const X = 1 +` + const proxy = ` +-- exclude.com@v1.0.0/go.mod -- +module exclude.com + +go 1.12 +-- exclude.com@v1.0.0/exclude.go -- +package exclude +` + WithOptions( + Modes(Experimental), + ProxyFiles(proxy), + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.Await(Diagnostics(env.AtRegexp("include/include.go", `exclude.(X)`))) + }) +} + +// Test for golang/go#46438: support for '**' in directory filters. +func TestDirectoryFilters_Wildcard(t *testing.T) { + filters := []string{"-**/bye"} + WithOptions( + ProxyFiles(workspaceProxy), + WorkspaceFolders("pkg"), + Settings{ + "directoryFilters": filters, + }, + ).Run(t, workspaceModule, func(t *testing.T, env *Env) { + syms := env.Symbol("Bye") + sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName }) + for _, s := range syms { + if strings.Contains(s.ContainerName, "bye") { + t.Errorf("WorkspaceSymbol: found symbol %q with container %q with filters %v", s.Name, s.ContainerName, filters) + } + } + }) +} + +// Test for golang/go#52993: wildcard directoryFilters should apply to +// goimports scanning as well. +func TestDirectoryFilters_ImportScanning(t *testing.T) { + const files = ` +-- go.mod -- +module mod.test + +go 1.12 +-- main.go -- +package main + +func main() { + bye.Goodbye() +} +-- p/bye/bye.go -- +package bye + +func Goodbye() {} +` + + WithOptions( + Settings{ + "directoryFilters": []string{"-**/bye"}, + }, + // This test breaks in 'Experimental' mode, because with + // experimentalWorkspaceModule set we the goimports scan behaves + // differently. + // + // Since this feature is going away (golang/go#52897), don't investigate. + Modes(Default), + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + beforeSave := env.BufferText("main.go") + env.OrganizeImports("main.go") + got := env.BufferText("main.go") + if got != beforeSave { + t.Errorf("after organizeImports code action, got modified buffer:\n%s", got) + } + }) +} + +// Test for golang/go#52993: non-wildcard directoryFilters should still be +// applied relative to the workspace folder, not the module root. +func TestDirectoryFilters_MultiRootImportScanning(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + + const files = ` +-- go.work -- +go 1.18 + +use ( + a + b +) +-- a/go.mod -- +module mod1.test + +go 1.18 +-- a/main.go -- +package main + +func main() { + hi.Hi() +} +-- a/hi/hi.go -- +package hi + +func Hi() {} +-- b/go.mod -- +module mod2.test + +go 1.18 +-- b/main.go -- +package main + +func main() { + hi.Hi() +} +-- b/hi/hi.go -- +package hi + +func Hi() {} +` + + WithOptions( + Settings{ + "directoryFilters": []string{"-hi"}, // this test fails with -**/hi + }, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("a/main.go") + beforeSave := env.BufferText("a/main.go") + env.OrganizeImports("a/main.go") + got := env.BufferText("a/main.go") + if got == beforeSave { + t.Errorf("after organizeImports code action, got identical buffer:\n%s", got) + } + }) +} diff --git a/gopls/internal/regtest/workspace/fromenv_test.go b/gopls/internal/regtest/workspace/fromenv_test.go new file mode 100644 index 00000000000..c05012d74c7 --- /dev/null +++ b/gopls/internal/regtest/workspace/fromenv_test.go @@ -0,0 +1,68 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/testenv" +) + +// Test that setting go.work via environment variables or settings works. +func TestUseGoWorkOutsideTheWorkspace(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const files = ` +-- work/a/go.mod -- +module a.com + +go 1.12 +-- work/a/a.go -- +package a +-- work/b/go.mod -- +module b.com + +go 1.12 +-- work/b/b.go -- +package b + +func _() { + x := 1 // unused +} +-- other/c/go.mod -- +module c.com + +go 1.18 +-- other/c/c.go -- +package c +-- config/go.work -- +go 1.18 + +use ( + $SANDBOX_WORKDIR/work/a + $SANDBOX_WORKDIR/work/b + $SANDBOX_WORKDIR/other/c +) +` + + WithOptions( + WorkspaceFolders("work"), // use a nested workspace dir, so that GOWORK is outside the workspace + EnvVars{"GOWORK": "$SANDBOX_WORKDIR/config/go.work"}, + ).Run(t, files, func(t *testing.T, env *Env) { + // When we have an explicit GOWORK set, we should get a file watch request. + env.OnceMet( + InitialWorkspaceLoad, + FileWatchMatching(`other`), + FileWatchMatching(`config.go\.work`), + ) + env.Await(FileWatchMatching(`config.go\.work`)) + // Even though work/b is not open, we should get its diagnostics as it is + // included in the workspace. + env.OpenFile("work/a/a.go") + env.AfterChange( + Diagnostics(env.AtRegexp("work/b/b.go", "x := 1"), WithMessage("not used")), + ) + }) +} diff --git a/gopls/internal/regtest/workspace/metadata_test.go b/gopls/internal/regtest/workspace/metadata_test.go index 28291a2e23d..ac64b0758e7 100644 --- a/gopls/internal/regtest/workspace/metadata_test.go +++ b/gopls/internal/regtest/workspace/metadata_test.go @@ -5,9 +5,10 @@ package workspace import ( + "strings" "testing" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" "golang.org/x/tools/internal/testenv" ) @@ -15,9 +16,6 @@ import ( // file. func TestFixImportDecl(t *testing.T) { - // It appears that older Go versions don't even see p.go from the initial - // workspace load. - testenv.NeedsGo1Point(t, 15) const src = ` -- go.mod -- module mod.test @@ -35,9 +33,149 @@ const C = 42 Run(t, src, func(t *testing.T, env *Env) { env.OpenFile("p.go") env.RegexpReplace("p.go", "\"fmt\"", "\"fmt\"\n)") - env.Await(OnceMet( - env.DoneWithChange(), - EmptyDiagnostics("p.go"), - )) + env.AfterChange( + NoDiagnostics(ForFile("p.go")), + ) + }) +} + +// Test that moving ignoring a file via build constraints causes diagnostics to +// be resolved. +func TestIgnoreFile(t *testing.T) { + testenv.NeedsGo1Point(t, 17) // needs native overlays and support for go:build directives + + const src = ` +-- go.mod -- +module mod.test + +go 1.12 +-- foo.go -- +package main + +func main() {} +-- bar.go -- +package main + +func main() {} + ` + + WithOptions( + // TODO(golang/go#54180): we don't run in 'experimental' mode here, because + // with "experimentalUseInvalidMetadata", this test fails because the + // orphaned bar.go is diagnosed using stale metadata, and then not + // re-diagnosed when new metadata arrives. + // + // We could fix this by re-running diagnostics after a load, but should + // consider whether that is worthwhile. + Modes(Default), + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("foo.go") + env.OpenFile("bar.go") + env.OnceMet( + env.DoneWithOpen(), + Diagnostics(env.AtRegexp("foo.go", "func (main)")), + Diagnostics(env.AtRegexp("bar.go", "func (main)")), + ) + + // Ignore bar.go. This should resolve diagnostics. + env.RegexpReplace("bar.go", "package main", "//go:build ignore\n\npackage main") + + // To make this test pass with experimentalUseInvalidMetadata, we could make + // an arbitrary edit that invalidates the snapshot, at which point the + // orphaned diagnostics will be invalidated. + // + // But of course, this should not be necessary: we should invalidate stale + // information when fresh metadata arrives. + // env.RegexpReplace("foo.go", "package main", "package main // test") + env.AfterChange( + NoDiagnostics(ForFile("foo.go")), + NoDiagnostics(ForFile("bar.go")), + ) + + // If instead of 'ignore' (which gopls treats as a standalone package) we + // used a different build tag, we should get a warning about having no + // packages for bar.go + env.RegexpReplace("bar.go", "ignore", "excluded") + env.AfterChange( + Diagnostics(env.AtRegexp("bar.go", "package (main)"), WithMessage("No packages")), + ) + }) +} + +func TestReinitializeRepeatedly(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + + const multiModule = ` +-- go.work -- +go 1.18 + +use ( + moda/a + modb +) +-- moda/a/go.mod -- +module a.com + +require b.com v1.2.3 +-- moda/a/go.sum -- +b.com v1.2.3 h1:tXrlXP0rnjRpKNmkbLYoWBdq0ikb3C3bKK9//moAWBI= +b.com v1.2.3/go.mod h1:D+J7pfFBZK5vdIdZEFquR586vKKIkqG7Qjw9AxG5BQ8= +-- moda/a/a.go -- +package a + +import ( + "b.com/b" +) + +func main() { + var x int + _ = b.Hello() + // AAA +} +-- modb/go.mod -- +module b.com + +-- modb/b/b.go -- +package b + +func Hello() int { + var x int +} +` + WithOptions( + ProxyFiles(workspaceModuleProxy), + Settings{ + // For this test, we want workspace diagnostics to start immediately + // during change processing. + "diagnosticsDelay": "0", + }, + ).Run(t, multiModule, func(t *testing.T, env *Env) { + env.OpenFile("moda/a/a.go") + env.AfterChange() + + // This test verifies that we fully process workspace reinitialization + // (which allows GOPROXY), even when the reinitialized snapshot is + // invalidated by subsequent changes. + // + // First, update go.work to remove modb. This will cause reinitialization + // to fetch b.com from the proxy. + env.WriteWorkspaceFile("go.work", "go 1.18\nuse moda/a") + // Next, wait for gopls to start processing the change. Because we've set + // diagnosticsDelay to zero, this will start diagnosing the workspace (and + // try to reinitialize on the snapshot context). + env.Await(env.StartedChangeWatchedFiles()) + // Finally, immediately make a file change to cancel the previous + // operation. This is racy, but will usually cause initialization to be + // canceled. + env.RegexpReplace("moda/a/a.go", "AAA", "BBB") + env.AfterChange() + // Now, to satisfy a definition request, gopls will try to reload moda. But + // without access to the proxy (because this is no longer a + // reinitialization), this loading will fail. + loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + got := env.Sandbox.Workdir.URIToPath(loc.URI) + if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(got, want) { + t.Errorf("expected %s, got %v", want, got) + } }) } diff --git a/gopls/internal/regtest/workspace/misspelling_test.go b/gopls/internal/regtest/workspace/misspelling_test.go new file mode 100644 index 00000000000..0419a116344 --- /dev/null +++ b/gopls/internal/regtest/workspace/misspelling_test.go @@ -0,0 +1,80 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "runtime" + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" +) + +// Test for golang/go#57081. +func TestFormattingMisspelledURI(t *testing.T) { + if runtime.GOOS != "windows" && runtime.GOOS != "darwin" { + t.Skip("golang/go#57081 only reproduces on case-insensitive filesystems.") + } + const files = ` +-- go.mod -- +module mod.test + +go 1.19 +-- foo.go -- +package foo + +const C = 2 // extra space is intentional +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("Foo.go") + env.FormatBuffer("Foo.go") + want := env.BufferText("Foo.go") + + if want == "" { + t.Fatalf("Foo.go is empty") + } + + // In golang/go#57081, we observed that if overlay cases don't match, gopls + // will find (and format) the on-disk contents rather than the overlay, + // resulting in invalid edits. + // + // Verify that this doesn't happen, by confirming that formatting is + // idempotent. + env.FormatBuffer("Foo.go") + got := env.BufferText("Foo.go") + if diff := compare.Text(want, got); diff != "" { + t.Errorf("invalid content after second formatting:\n%s", diff) + } + }) +} + +// Test that we can find packages for open files with different spelling on +// case-insensitive file systems. +func TestPackageForMisspelledURI(t *testing.T) { + t.Skip("golang/go#57081: this test fails because the Go command does not load Foo.go correctly") + if runtime.GOOS != "windows" && runtime.GOOS != "darwin" { + t.Skip("golang/go#57081 only reproduces on case-insensitive filesystems.") + } + const files = ` +-- go.mod -- +module mod.test + +go 1.19 +-- foo.go -- +package foo + +const C = D +-- bar.go -- +package foo + +const D = 2 +` + + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("Foo.go") + env.AfterChange(NoDiagnostics()) + }) +} diff --git a/gopls/internal/regtest/workspace/standalone_test.go b/gopls/internal/regtest/workspace/standalone_test.go new file mode 100644 index 00000000000..e1021dfbcd1 --- /dev/null +++ b/gopls/internal/regtest/workspace/standalone_test.go @@ -0,0 +1,206 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/gopls/internal/lsp/protocol" + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +func TestStandaloneFiles(t *testing.T) { + const files = ` +-- go.mod -- +module mod.test + +go 1.16 +-- lib/lib.go -- +package lib + +const C = 0 + +type I interface { + M() +} +-- lib/ignore.go -- +//go:build ignore +// +build ignore + +package main + +import ( + "mod.test/lib" +) + +const C = 1 + +type Mer struct{} +func (Mer) M() + +func main() { + println(lib.C + C) +} +` + WithOptions( + // On Go 1.17 and earlier, this test fails with + // experimentalWorkspaceModule. Not investigated, as + // experimentalWorkspaceModule will be removed. + Modes(Default), + ).Run(t, files, func(t *testing.T, env *Env) { + // Initially, gopls should not know about the standalone file as it hasn't + // been opened. Therefore, we should only find one symbol 'C'. + syms := env.Symbol("C") + if got, want := len(syms), 1; got != want { + t.Errorf("got %d symbols, want %d", got, want) + } + + // Similarly, we should only find one reference to "C", and no + // implementations of I. + checkLocations := func(method string, gotLocations []protocol.Location, wantFiles ...string) { + var gotFiles []string + for _, l := range gotLocations { + gotFiles = append(gotFiles, env.Sandbox.Workdir.URIToPath(l.URI)) + } + sort.Strings(gotFiles) + sort.Strings(wantFiles) + if diff := cmp.Diff(wantFiles, gotFiles); diff != "" { + t.Errorf("%s(...): unexpected locations (-want +got):\n%s", method, diff) + } + } + + env.OpenFile("lib/lib.go") + env.AfterChange(NoDiagnostics()) + + // Replacing C with D should not cause any workspace diagnostics, since we + // haven't yet opened the standalone file. + env.RegexpReplace("lib/lib.go", "C", "D") + env.AfterChange(NoDiagnostics()) + env.RegexpReplace("lib/lib.go", "D", "C") + env.AfterChange(NoDiagnostics()) + + refs := env.References(env.RegexpSearch("lib/lib.go", "C")) + checkLocations("References", refs, "lib/lib.go") + + impls := env.Implementations(env.RegexpSearch("lib/lib.go", "I")) + checkLocations("Implementations", impls) // no implementations + + // Opening the standalone file should not result in any diagnostics. + env.OpenFile("lib/ignore.go") + env.AfterChange(NoDiagnostics()) + + // Having opened the standalone file, we should find its symbols in the + // workspace. + syms = env.Symbol("C") + if got, want := len(syms), 2; got != want { + t.Fatalf("got %d symbols, want %d", got, want) + } + + foundMainC := false + var symNames []string + for _, sym := range syms { + symNames = append(symNames, sym.Name) + if sym.Name == "main.C" { + foundMainC = true + } + } + if !foundMainC { + t.Errorf("WorkspaceSymbol(\"C\") = %v, want containing main.C", symNames) + } + + // We should resolve workspace definitions in the standalone file. + fileLoc := env.GoToDefinition(env.RegexpSearch("lib/ignore.go", "lib.(C)")) + file := env.Sandbox.Workdir.URIToPath(fileLoc.URI) + if got, want := file, "lib/lib.go"; got != want { + t.Errorf("GoToDefinition(lib.C) = %v, want %v", got, want) + } + + // ...as well as intra-file definitions + loc := env.GoToDefinition(env.RegexpSearch("lib/ignore.go", "\\+ (C)")) + wantLoc := env.RegexpSearch("lib/ignore.go", "const (C)") + if loc != wantLoc { + t.Errorf("GoToDefinition(C) = %v, want %v", loc, wantLoc) + } + + // Renaming "lib.C" to "lib.D" should cause a diagnostic in the standalone + // file. + env.RegexpReplace("lib/lib.go", "C", "D") + env.AfterChange(Diagnostics(env.AtRegexp("lib/ignore.go", "lib.(C)"))) + + // Undoing the replacement should fix diagnostics + env.RegexpReplace("lib/lib.go", "D", "C") + env.AfterChange(NoDiagnostics()) + + // Now that our workspace has no errors, we should be able to find + // references and rename. + refs = env.References(env.RegexpSearch("lib/lib.go", "C")) + checkLocations("References", refs, "lib/lib.go", "lib/ignore.go") + + impls = env.Implementations(env.RegexpSearch("lib/lib.go", "I")) + checkLocations("Implementations", impls, "lib/ignore.go") + + // Renaming should rename in the standalone package. + env.Rename(env.RegexpSearch("lib/lib.go", "C"), "D") + env.RegexpSearch("lib/ignore.go", "lib.D") + }) +} + +func TestStandaloneFiles_Configuration(t *testing.T) { + const files = ` +-- go.mod -- +module mod.test + +go 1.18 +-- lib.go -- +package lib // without this package, files are loaded as command-line-arguments +-- ignore.go -- +//go:build ignore +// +build ignore + +package main + +// An arbitrary comment. + +func main() {} +-- standalone.go -- +//go:build standalone +// +build standalone + +package main + +func main() {} +` + + WithOptions( + Settings{ + "standaloneTags": []string{"standalone", "script"}, + }, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("ignore.go") + env.OpenFile("standalone.go") + + env.AfterChange( + Diagnostics(env.AtRegexp("ignore.go", "package (main)")), + NoDiagnostics(ForFile("standalone.go")), + ) + + cfg := env.Editor.Config() + cfg.Settings = map[string]interface{}{ + "standaloneTags": []string{"ignore"}, + } + env.ChangeConfiguration(cfg) + + // TODO(golang/go#56158): gopls does not purge previously published + // diagnostice when configuration changes. + env.RegexpReplace("ignore.go", "arbitrary", "meaningless") + + env.AfterChange( + NoDiagnostics(ForFile("ignore.go")), + Diagnostics(env.AtRegexp("standalone.go", "package (main)")), + ) + }) +} diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 5e5bcd13b5d..0aff4713be4 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -5,20 +5,21 @@ package workspace import ( + "context" "fmt" "path/filepath" - "sort" "strings" "testing" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/gopls/internal/lsp" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/internal/bug" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/testenv" - . "golang.org/x/tools/internal/lsp/regtest" + . "golang.org/x/tools/gopls/internal/lsp/regtest" ) func TestMain(m *testing.M) { @@ -34,6 +35,8 @@ go 1.12 -- example.com@v1.2.3/blah/blah.go -- package blah +import "fmt" + func SaySomething() { fmt.Println("something") } @@ -61,7 +64,7 @@ require ( random.org v1.2.3 ) -- pkg/go.sum -- -example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c= +example.com v1.2.3 h1:veRD4tUnatQRgsULqULZPjeoBGFr2qBhevSCZllD2Ds= example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo= random.org v1.2.3 h1:+JE2Fkp7gS0zsHXGEQJ7hraom3pNTlkxC4b2qPfA+/Q= random.org v1.2.3/go.mod h1:E9KM6+bBX2g5ykHZ9H27w16sWo3QwgonyjM44Dnej3I= @@ -128,7 +131,7 @@ func TestReferences(t *testing.T) { WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) { f := "pkg/inner/inner.go" env.OpenFile(f) - locations := env.References(f, env.RegexpSearch(f, `SaySomething`)) + locations := env.References(env.RegexpSearch(f, `SaySomething`)) want := 3 if got := len(locations); got != want { t.Fatalf("expected %v locations, got %v", want, got) @@ -138,38 +141,6 @@ func TestReferences(t *testing.T) { } } -// make sure that directory filters work -func TestFilters(t *testing.T) { - for _, tt := range []struct { - name, rootPath string - }{ - { - name: "module root", - rootPath: "pkg", - }, - } { - t.Run(tt.name, func(t *testing.T) { - opts := []RunOption{ProxyFiles(workspaceProxy)} - if tt.rootPath != "" { - opts = append(opts, WorkspaceFolders(tt.rootPath)) - } - f := func(o *source.Options) { - o.DirectoryFilters = append(o.DirectoryFilters, "-inner") - } - opts = append(opts, Options(f)) - WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) { - syms := env.WorkspaceSymbol("Hi") - sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName }) - for i, s := range syms { - if strings.Contains(s.ContainerName, "/inner") { - t.Errorf("%s %v %s %s %d\n", s.Name, s.Kind, s.ContainerName, tt.name, i) - } - } - }) - }) - } -} - // Make sure that analysis diagnostics are cleared for the whole package when // the only opened file is closed. This test was inspired by the experience in // VS Code, where clicking on a reference result triggers a @@ -180,12 +151,34 @@ func TestClearAnalysisDiagnostics(t *testing.T) { WorkspaceFolders("pkg/inner"), ).Run(t, workspaceModule, func(t *testing.T, env *Env) { env.OpenFile("pkg/main.go") - env.Await( - env.DiagnosticAtRegexp("pkg/main2.go", "fmt.Print"), + env.AfterChange( + Diagnostics(env.AtRegexp("pkg/main2.go", "fmt.Print")), ) env.CloseBuffer("pkg/main.go") - env.Await( - EmptyDiagnostics("pkg/main2.go"), + env.AfterChange( + NoDiagnostics(ForFile("pkg/main2.go")), + ) + }) +} + +// TestReloadOnlyOnce checks that changes to the go.mod file do not result in +// redundant package loads (golang/go#54473). +// +// Note that this test may be fragile, as it depends on specific structure to +// log messages around reinitialization. Nevertheless, it is important for +// guarding against accidentally duplicate reloading. +func TestReloadOnlyOnce(t *testing.T) { + WithOptions( + ProxyFiles(workspaceProxy), + WorkspaceFolders("pkg"), + ).Run(t, workspaceModule, func(t *testing.T, env *Env) { + dir := env.Sandbox.Workdir.URI("goodbye").SpanURI().Filename() + goModWithReplace := fmt.Sprintf(`%s +replace random.org => %s +`, env.ReadWorkspaceFile("pkg/go.mod"), dir) + env.WriteWorkspaceFile("pkg/go.mod", goModWithReplace) + env.AfterChange( + LogMatching(protocol.Info, `packages\.Load #\d+\n`, 2, false), ) }) } @@ -206,8 +199,7 @@ func TestWatchReplaceTargets(t *testing.T) { replace random.org => %s `, env.ReadWorkspaceFile("pkg/go.mod"), dir) env.WriteWorkspaceFile("pkg/go.mod", goModWithReplace) - env.Await( - env.DoneWithChangeWatchedFiles(), + env.AfterChange( UnregistrationMatching("didChangeWatchedFiles"), RegistrationMatching("didChangeWatchedFiles"), ) @@ -222,6 +214,8 @@ go 1.12 -- example.com@v1.2.3/blah/blah.go -- package blah +import "fmt" + func SaySomething() { fmt.Println("something") } @@ -236,6 +230,7 @@ func Hello() {} ` func TestAutomaticWorkspaceModule_Interdependent(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work const multiModule = ` -- moda/a/go.mod -- module a.com @@ -267,19 +262,18 @@ func Hello() int { ` WithOptions( ProxyFiles(workspaceModuleProxy), - Modes(Experimental), ).Run(t, multiModule, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("moda/a/a.go", "x"), - env.DiagnosticAtRegexp("modb/b/b.go", "x"), - env.NoDiagnosticAtRegexp("moda/a/a.go", `"b.com/b"`), + env.RunGoCommand("work", "init") + env.RunGoCommand("work", "use", "-r", ".") + env.AfterChange( + Diagnostics(env.AtRegexp("moda/a/a.go", "x")), + Diagnostics(env.AtRegexp("modb/b/b.go", "x")), + NoDiagnostics(env.AtRegexp("moda/a/a.go", `"b.com/b"`)), ) }) } -func TestMultiModuleWithExclude(t *testing.T) { - testenv.NeedsGo1Point(t, 16) - +func TestModuleWithExclude(t *testing.T) { const proxy = ` -- c.com@v1.2.3/go.mod -- module c.com @@ -290,6 +284,8 @@ require b.com v1.2.3 -- c.com@v1.2.3/blah/blah.go -- package blah +import "fmt" + func SaySomething() { fmt.Println("something") } @@ -305,10 +301,6 @@ func Hello() {} module b.com go 1.12 --- b.com@v1.2.4/b/b.go -- -package b - -func Hello() {} ` const multiModule = ` -- go.mod -- @@ -329,20 +321,29 @@ func main() { ` WithOptions( ProxyFiles(proxy), - Modes(Experimental), ).Run(t, multiModule, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("main.go", "x"), + env.OnceMet( + InitialWorkspaceLoad, + Diagnostics(env.AtRegexp("main.go", "x")), ) }) } // This change tests that the version of the module used changes after it has // been deleted from the workspace. +// +// TODO(golang/go#55331): delete this placeholder along with experimental +// workspace module. func TestDeleteModule_Interdependent(t *testing.T) { - t.Skip("Skipping due to golang/go#46375: race due to orphaned file reloading") - + testenv.NeedsGo1Point(t, 18) // uses go.work const multiModule = ` +-- go.work -- +go 1.18 + +use ( + moda/a + modb +) -- moda/a/go.mod -- module a.com @@ -373,34 +374,25 @@ func Hello() int { ` WithOptions( ProxyFiles(workspaceModuleProxy), - Modes(Experimental), ).Run(t, multiModule, func(t *testing.T, env *Env) { env.OpenFile("moda/a/a.go") env.Await(env.DoneWithOpen()) - original, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) + originalLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + original := env.Sandbox.Workdir.URIToPath(originalLoc.URI) if want := "modb/b/b.go"; !strings.HasSuffix(original, want) { t.Errorf("expected %s, got %v", want, original) } env.CloseBuffer(original) - env.Await(env.DoneWithClose()) + env.AfterChange() env.RemoveWorkspaceFile("modb/b/b.go") env.RemoveWorkspaceFile("modb/go.mod") - env.Await( - env.DoneWithChangeWatchedFiles(), - ) + env.WriteWorkspaceFile("go.work", "go 1.18\nuse moda/a") + env.AfterChange() - d := protocol.PublishDiagnosticsParams{} - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("moda/a/go.mod", "require b.com v1.2.3", "b.com@v1.2.3 has not been downloaded"), - ReadDiagnostics("moda/a/go.mod", &d), - ), - ) - env.ApplyQuickFixes("moda/a/go.mod", d.Diagnostics) - env.Await(env.DoneWithChangeWatchedFiles()) - got, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) + gotLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + got := env.Sandbox.Workdir.URIToPath(gotLoc.URI) if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(got, want) { t.Errorf("expected %s, got %v", want, got) } @@ -410,7 +402,14 @@ func Hello() int { // Tests that the version of the module used changes after it has been added // to the workspace. func TestCreateModule_Interdependent(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work const multiModule = ` +-- go.work -- +go 1.18 + +use ( + moda/a +) -- moda/a/go.mod -- module a.com @@ -431,16 +430,23 @@ func main() { } ` WithOptions( - Modes(Experimental), ProxyFiles(workspaceModuleProxy), ).Run(t, multiModule, func(t *testing.T, env *Env) { env.OpenFile("moda/a/a.go") - original, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) + loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + original := env.Sandbox.Workdir.URIToPath(loc.URI) if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(original, want) { t.Errorf("expected %s, got %v", want, original) } env.CloseBuffer(original) env.WriteWorkspaceFiles(map[string]string{ + "go.work": `go 1.18 + +use ( + moda/a + modb +) +`, "modb/go.mod": "module b.com", "modb/b/b.go": `package b @@ -449,13 +455,9 @@ func Hello() int { } `, }) - env.Await( - OnceMet( - env.DoneWithChangeWatchedFiles(), - env.DiagnosticAtRegexp("modb/b/b.go", "x"), - ), - ) - got, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) + env.AfterChange(Diagnostics(env.AtRegexp("modb/b/b.go", "x"))) + gotLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + got := env.Sandbox.Workdir.URIToPath(gotLoc.URI) if want := "modb/b/b.go"; !strings.HasSuffix(got, want) { t.Errorf("expected %s, got %v", want, original) } @@ -465,7 +467,15 @@ func Hello() int { // This test confirms that a gopls workspace can recover from initialization // with one invalid module. func TestOneBrokenModule(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work const multiModule = ` +-- go.work -- +go 1.18 + +use ( + moda/a + modb +) -- moda/a/go.mod -- module a.com @@ -494,158 +504,19 @@ func Hello() int { ` WithOptions( ProxyFiles(workspaceModuleProxy), - Modes(Experimental), ).Run(t, multiModule, func(t *testing.T, env *Env) { env.OpenFile("modb/go.mod") - env.Await( - OnceMet( - env.DoneWithOpen(), - DiagnosticAt("modb/go.mod", 0, 0), - ), + env.AfterChange( + Diagnostics(AtPosition("modb/go.mod", 0, 0)), ) env.RegexpReplace("modb/go.mod", "modul", "module") env.SaveBufferWithoutActions("modb/go.mod") - env.Await( - env.DiagnosticAtRegexp("modb/b/b.go", "x"), + env.AfterChange( + Diagnostics(env.AtRegexp("modb/b/b.go", "x")), ) }) } -func TestUseGoplsMod(t *testing.T) { - // This test validates certain functionality related to using a gopls.mod - // file to specify workspace modules. - testenv.NeedsGo1Point(t, 14) - const multiModule = ` --- moda/a/go.mod -- -module a.com - -require b.com v1.2.3 --- moda/a/go.sum -- -b.com v1.2.3 h1:tXrlXP0rnjRpKNmkbLYoWBdq0ikb3C3bKK9//moAWBI= -b.com v1.2.3/go.mod h1:D+J7pfFBZK5vdIdZEFquR586vKKIkqG7Qjw9AxG5BQ8= --- moda/a/a.go -- -package a - -import ( - "b.com/b" -) - -func main() { - var x int - _ = b.Hello() -} --- modb/go.mod -- -module b.com - -require example.com v1.2.3 --- modb/go.sum -- -example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c= -example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo= --- modb/b/b.go -- -package b - -func Hello() int { - var x int -} --- gopls.mod -- -module gopls-workspace - -require ( - a.com v0.0.0-goplsworkspace - b.com v1.2.3 -) - -replace a.com => $SANDBOX_WORKDIR/moda/a -` - WithOptions( - ProxyFiles(workspaceModuleProxy), - Modes(Experimental), - ).Run(t, multiModule, func(t *testing.T, env *Env) { - // Initially, the gopls.mod should cause only the a.com module to be - // loaded. Validate this by jumping to a definition in b.com and ensuring - // that we go to the module cache. - env.OpenFile("moda/a/a.go") - env.Await(env.DoneWithOpen()) - - // To verify which modules are loaded, we'll jump to the definition of - // b.Hello. - checkHelloLocation := func(want string) error { - location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) - if !strings.HasSuffix(location, want) { - return fmt.Errorf("expected %s, got %v", want, location) - } - return nil - } - - // Initially this should be in the module cache, as b.com is not replaced. - if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil { - t.Fatal(err) - } - - // Now, modify the gopls.mod file on disk to activate the b.com module in - // the workspace. - workdir := env.Sandbox.Workdir.RootURI().SpanURI().Filename() - env.WriteWorkspaceFile("gopls.mod", fmt.Sprintf(`module gopls-workspace - -require ( - a.com v1.9999999.0-goplsworkspace - b.com v1.9999999.0-goplsworkspace -) - -replace a.com => %s/moda/a -replace b.com => %s/modb -`, workdir, workdir)) - env.Await(env.DoneWithChangeWatchedFiles()) - // Check that go.mod diagnostics picked up the newly active mod file. - // The local version of modb has an extra dependency we need to download. - env.OpenFile("modb/go.mod") - env.Await(env.DoneWithOpen()) - - var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"), - ReadDiagnostics("modb/go.mod", &d), - ), - ) - env.ApplyQuickFixes("modb/go.mod", d.Diagnostics) - env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x")) - // Jumping to definition should now go to b.com in the workspace. - if err := checkHelloLocation("modb/b/b.go"); err != nil { - t.Fatal(err) - } - - // Now, let's modify the gopls.mod *overlay* (not on disk), and verify that - // this change is only picked up once it is saved. - env.OpenFile("gopls.mod") - env.Await(env.DoneWithOpen()) - env.SetBufferContent("gopls.mod", fmt.Sprintf(`module gopls-workspace - -require ( - a.com v0.0.0-goplsworkspace -) - -replace a.com => %s/moda/a -`, workdir)) - - // Editing the gopls.mod removes modb from the workspace modules, and so - // should clear outstanding diagnostics... - env.Await(OnceMet( - env.DoneWithChange(), - EmptyDiagnostics("modb/go.mod"), - )) - // ...but does not yet cause a workspace reload, so we should still jump to modb. - if err := checkHelloLocation("modb/b/b.go"); err != nil { - t.Fatal(err) - } - // Saving should reload the workspace. - env.SaveBufferWithoutActions("gopls.mod") - if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil { - t.Fatal(err) - } - }) -} - // TestBadGoWork exercises the panic from golang/vscode-go#2121. func TestBadGoWork(t *testing.T) { const files = ` @@ -660,9 +531,9 @@ module example.com/bar } func TestUseGoWork(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work // This test validates certain functionality related to using a go.work // file to specify workspace modules. - testenv.NeedsGo1Point(t, 14) const multiModule = ` -- moda/a/go.mod -- module a.com @@ -714,9 +585,10 @@ use ( // To verify which modules are loaded, we'll jump to the definition of // b.Hello. checkHelloLocation := func(want string) error { - location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) - if !strings.HasSuffix(location, want) { - return fmt.Errorf("expected %s, got %v", want, location) + loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + file := env.Sandbox.Workdir.URIToPath(loc.URI) + if !strings.HasSuffix(file, want) { + return fmt.Errorf("expected %s, got %v", want, file) } return nil } @@ -736,21 +608,12 @@ use ( ./modb ) `) - env.Await(env.DoneWithChangeWatchedFiles()) - // Check that go.mod diagnostics picked up the newly active mod file. - // The local version of modb has an extra dependency we need to download. - env.OpenFile("modb/go.mod") - env.Await(env.DoneWithOpen()) - var d protocol.PublishDiagnosticsParams - env.Await( - OnceMet( - env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"), - ReadDiagnostics("modb/go.mod", &d), - ), + // As of golang/go#54069, writing go.work to the workspace triggers a + // workspace reload. + env.AfterChange( + Diagnostics(env.AtRegexp("modb/b/b.go", "x")), ) - env.ApplyQuickFixes("modb/go.mod", d.Diagnostics) - env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x")) // Jumping to definition should now go to b.com in the workspace. if err := checkHelloLocation("modb/b/b.go"); err != nil { @@ -760,7 +623,7 @@ use ( // Now, let's modify the go.work *overlay* (not on disk), and verify that // this change is only picked up once it is saved. env.OpenFile("go.work") - env.Await(env.DoneWithOpen()) + env.AfterChange() env.SetBufferContent("go.work", `go 1.17 use ( @@ -785,7 +648,7 @@ use ( // This fails if guarded with a OnceMet(DoneWithSave(), ...), because it is // debounced (and therefore not synchronous with the change). - env.Await(EmptyOrNoDiagnostics("modb/go.mod")) + env.Await(NoDiagnostics(ForFile("modb/go.mod"))) // Test Formatting. env.SetBufferContent("go.work", `go 1.18 @@ -812,6 +675,8 @@ use ( } func TestUseGoWorkDiagnosticMissingModule(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + const files = ` -- go.work -- go 1.18 @@ -822,8 +687,8 @@ module example.com/bar ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("go.work") - env.Await( - env.DiagnosticAtRegexpWithMessage("go.work", "use", "directory ./foo does not contain a module"), + env.AfterChange( + Diagnostics(env.AtRegexp("go.work", "use"), WithMessage("directory ./foo does not contain a module")), ) // The following tests is a regression test against an issue where we weren't // copying the workFile struct field on workspace when a new one was created in @@ -832,17 +697,18 @@ module example.com/bar // struct, and then set the content back to the old contents to make sure // the diagnostic still shows up. env.SetBufferContent("go.work", "go 1.18 \n\n use ./bar\n") - env.Await( - env.NoDiagnosticAtRegexp("go.work", "use"), + env.AfterChange( + NoDiagnostics(env.AtRegexp("go.work", "use")), ) env.SetBufferContent("go.work", "go 1.18 \n\n use ./foo\n") - env.Await( - env.DiagnosticAtRegexpWithMessage("go.work", "use", "directory ./foo does not contain a module"), + env.AfterChange( + Diagnostics(env.AtRegexp("go.work", "use"), WithMessage("directory ./foo does not contain a module")), ) }) } func TestUseGoWorkDiagnosticSyntaxError(t *testing.T) { + testenv.NeedsGo1Point(t, 18) const files = ` -- go.work -- go 1.18 @@ -852,14 +718,16 @@ replace ` Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("go.work") - env.Await( - env.DiagnosticAtRegexpWithMessage("go.work", "usa", "unknown directive: usa"), - env.DiagnosticAtRegexpWithMessage("go.work", "replace", "usage: replace"), + env.AfterChange( + Diagnostics(env.AtRegexp("go.work", "usa"), WithMessage("unknown directive: usa")), + Diagnostics(env.AtRegexp("go.work", "replace"), WithMessage("usage: replace")), ) }) } func TestUseGoWorkHover(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const files = ` -- go.work -- go 1.18 @@ -886,8 +754,7 @@ module example.com/bar/baz } for hoverRE, want := range tcs { - pos := env.RegexpSearch("go.work", hoverRE) - got, _ := env.Hover("go.work", pos) + got, _ := env.Hover(env.RegexpSearch("go.work", hoverRE)) if got.Value != want { t.Errorf(`hover on %q: got %q, want %q`, hoverRE, got, want) } @@ -936,23 +803,22 @@ use ( ).Run(t, workspace, func(t *testing.T, env *Env) { env.OpenFile("moda/a/a.go") env.Await(env.DoneWithOpen()) - location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello")) + loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello")) + file := env.Sandbox.Workdir.URIToPath(loc.URI) want := "modb/b/b.go" - if !strings.HasSuffix(location, want) { - t.Errorf("expected %s, got %v", want, location) + if !strings.HasSuffix(file, want) { + t.Errorf("expected %s, got %v", want, file) } }) } func TestNonWorkspaceFileCreation(t *testing.T) { - testenv.NeedsGo1Point(t, 13) - const files = ` --- go.mod -- +-- work/go.mod -- module mod.com go 1.12 --- x.go -- +-- work/x.go -- package x ` @@ -961,15 +827,41 @@ package foo import "fmt" var _ = fmt.Printf ` - Run(t, files, func(t *testing.T, env *Env) { - env.CreateBuffer("/tmp/foo.go", "") - env.EditBuffer("/tmp/foo.go", fake.NewEdit(0, 0, 0, 0, code)) - env.GoToDefinition("/tmp/foo.go", env.RegexpSearch("/tmp/foo.go", `Printf`)) + WithOptions( + WorkspaceFolders("work"), // so that outside/... is outside the workspace + ).Run(t, files, func(t *testing.T, env *Env) { + env.CreateBuffer("outside/foo.go", "") + env.EditBuffer("outside/foo.go", fake.NewEdit(0, 0, 0, 0, code)) + env.GoToDefinition(env.RegexpSearch("outside/foo.go", `Printf`)) }) } -func TestMultiModuleV2(t *testing.T) { +func TestGoWork_V2Module(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + // When using a go.work, we must have proxy content even if it is replaced. + const proxy = ` +-- b.com/v2@v2.1.9/go.mod -- +module b.com/v2 + +go 1.12 +-- b.com/v2@v2.1.9/b/b.go -- +package b + +func Ciao()() int { + return 0 +} +` + const multiModule = ` +-- go.work -- +go 1.18 + +use ( + moda/a + modb + modb/v2 + modc +) -- moda/a/go.mod -- module a.com @@ -1012,122 +904,40 @@ func main() { var x int } ` + WithOptions( - Modes(Experimental), + ProxyFiles(proxy), ).Run(t, multiModule, func(t *testing.T, env *Env) { - env.Await( - env.DiagnosticAtRegexp("moda/a/a.go", "x"), - env.DiagnosticAtRegexp("modb/b/b.go", "x"), - env.DiagnosticAtRegexp("modb/v2/b/b.go", "x"), - env.DiagnosticAtRegexp("modc/main.go", "x"), + env.OnceMet( + InitialWorkspaceLoad, + // TODO(rfindley): assert on the full set of diagnostics here. We + // should ensure that we don't have a diagnostic at b.Hi in a.go. + Diagnostics(env.AtRegexp("moda/a/a.go", "x")), + Diagnostics(env.AtRegexp("modb/b/b.go", "x")), + Diagnostics(env.AtRegexp("modb/v2/b/b.go", "x")), + Diagnostics(env.AtRegexp("modc/main.go", "x")), ) }) } -func TestDirectoryFiltersLoads(t *testing.T) { - // exclude, and its error, should be excluded from the workspace. - const files = ` --- go.mod -- -module example.com - -go 1.12 --- exclude/exclude.go -- -package exclude - -const _ = Nonexistant -` - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) { - env.Await(NoDiagnostics("exclude/x.go")) - }) -} - -func TestDirectoryFiltersTransitiveDep(t *testing.T) { - // Even though exclude is excluded from the workspace, it should - // still be importable as a non-workspace package. - const files = ` --- go.mod -- -module example.com - -go 1.12 --- include/include.go -- -package include -import "example.com/exclude" - -const _ = exclude.X --- exclude/exclude.go -- -package exclude - -const _ = Nonexistant // should be ignored, since this is a non-workspace package -const X = 1 -` - - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) { - env.Await( - NoDiagnostics("exclude/exclude.go"), // filtered out - NoDiagnostics("include/include.go"), // successfully builds - ) - }) -} - -func TestDirectoryFiltersWorkspaceModules(t *testing.T) { - // Define a module include.com which should be in the workspace, plus a - // module exclude.com which should be excluded and therefore come from - // the proxy. - const files = ` --- include/go.mod -- -module include.com - -go 1.12 - -require exclude.com v1.0.0 - --- include/go.sum -- -exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I= -exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4= - --- include/include.go -- -package include - -import "exclude.com" - -var _ = exclude.X // satisfied only by the workspace version --- exclude/go.mod -- -module exclude.com - -go 1.12 --- exclude/exclude.go -- -package exclude - -const X = 1 -` - const proxy = ` --- exclude.com@v1.0.0/go.mod -- -module exclude.com - -go 1.12 --- exclude.com@v1.0.0/exclude.go -- -package exclude -` - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg, Modes(Experimental), ProxyFiles(proxy)).Run(t, files, func(t *testing.T, env *Env) { - env.Await(env.DiagnosticAtRegexp("include/include.go", `exclude.(X)`)) - }) -} - // Confirm that a fix for a tidy module will correct all modules in the // workspace. func TestMultiModule_OneBrokenModule(t *testing.T) { - testenv.NeedsGo1Point(t, 15) + // In the earlier 'experimental workspace mode', gopls would aggregate go.sum + // entries for the workspace module, allowing it to correctly associate + // missing go.sum with diagnostics. With go.work files, this doesn't work: + // the go.command will happily write go.work.sum. + t.Skip("golang/go#57509: go.mod diagnostics do not work in go.work mode") + testenv.NeedsGo1Point(t, 18) // uses go.work + const files = ` +-- go.work -- +go 1.18 - const mod = ` +use ( + a + b +) +-- go.work.sum -- -- a/go.mod -- module a.com @@ -1154,15 +964,15 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - Modes(Experimental), - ).Run(t, mod, func(t *testing.T, env *Env) { + ).Run(t, files, func(t *testing.T, env *Env) { params := &protocol.PublishDiagnosticsParams{} env.OpenFile("b/go.mod") - env.Await( - OnceMet( - env.GoSumDiagnostic("b/go.mod", `example.com v1.2.3`), - ReadDiagnostics("b/go.mod", params), + env.AfterChange( + Diagnostics( + env.AtRegexp("go.mod", `example.com v1.2.3`), + WithMessage("go.sum is out of sync"), ), + ReadDiagnostics("b/go.mod", params), ) for _, d := range params.Diagnostics { if !strings.Contains(d.Message, "go.sum is out of sync") { @@ -1174,8 +984,8 @@ func main() { } env.ApplyQuickFixes("b/go.mod", []protocol.Diagnostic{d}) } - env.Await( - EmptyDiagnostics("b/go.mod"), + env.AfterChange( + NoDiagnostics(ForFile("b/go.mod")), ) }) } @@ -1204,10 +1014,8 @@ go 1.12 package main ` WithOptions( - EditorConfig{Env: map[string]string{ - "GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath"), - }}, - Modes(Singleton), + EnvVars{"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath")}, + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( // Confirm that the build configuration is seen as valid, @@ -1218,13 +1026,18 @@ package main }) } -func TestAddGoWork(t *testing.T) { +func TestAddAndRemoveGoWork(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + // Use a workspace with a module in the root directory to exercise the case + // where a go.work is added to the existing root directory. This verifies + // that we're detecting changes to the module source, not just the root + // directory. const nomod = ` --- a/go.mod -- +-- go.mod -- module a.com go 1.16 --- a/main.go -- +-- main.go -- package main func main() {} @@ -1238,31 +1051,47 @@ package main func main() {} ` WithOptions( - Modes(Singleton), + Modes(Default), ).Run(t, nomod, func(t *testing.T, env *Env) { - env.OpenFile("a/main.go") + env.OpenFile("main.go") env.OpenFile("b/main.go") - env.Await( - DiagnosticAt("a/main.go", 0, 0), - DiagnosticAt("b/main.go", 0, 0), + // Since b/main.go is not in the workspace, it should have a warning on its + // package declaration. + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(AtPosition("b/main.go", 0, 0)), ) env.WriteWorkspaceFile("go.work", `go 1.16 use ( - a + . b ) `) - env.Await(NoOutstandingDiagnostics()) + env.AfterChange(NoDiagnostics()) + // Removing the go.work file should put us back where we started. + env.RemoveWorkspaceFile("go.work") + + // TODO(golang/go#57558, golang/go#57508): file watching is asynchronous, + // and we must wait for the view to be reconstructed before touching + // b/main.go, so that the new view "knows" about b/main.go. This is simply + // a bug, but awaiting the change here avoids it. + env.Await(env.DoneWithChangeWatchedFiles()) + + // TODO(rfindley): fix this bug: reopening b/main.go is necessary here + // because we no longer "see" the file in any view. + env.CloseBuffer("b/main.go") + env.OpenFile("b/main.go") + + env.AfterChange( + NoDiagnostics(ForFile("main.go")), + Diagnostics(AtPosition("b/main.go", 0, 0)), + ) }) } // Tests the fix for golang/go#52500. func TestChangeTestVariant_Issue52500(t *testing.T) { - // This test fails for unknown reasons at Go <= 15. Presumably the loading of - // test variants behaves differently, possibly due to lack of support for - // native overlays. - testenv.NeedsGo1Point(t, 16) const src = ` -- go.mod -- module mod.test @@ -1292,16 +1121,143 @@ func (Server) Foo() {} // as invalid. So we need to wait for the metadata of main_test.go to be // updated before moving other_test.go back to the main_test package. env.Await( - env.DiagnosticAtRegexpWithMessage("other_test.go", "Server", "undeclared"), - env.DiagnosticAtRegexpWithMessage("main_test.go", "otherConst", "undeclared"), + Diagnostics(env.AtRegexp("other_test.go", "Server")), + Diagnostics(env.AtRegexp("main_test.go", "otherConst")), ) env.RegexpReplace("other_test.go", "main", "main_test") - env.Await( - EmptyDiagnostics("other_test.go"), - EmptyDiagnostics("main_test.go"), + env.AfterChange( + NoDiagnostics(ForFile("other_test.go")), + NoDiagnostics(ForFile("main_test.go")), ) // This will cause a test failure if other_test.go is not in any package. - _, _ = env.GoToDefinition("other_test.go", env.RegexpSearch("other_test.go", "Server")) + _ = env.GoToDefinition(env.RegexpSearch("other_test.go", "Server")) + }) +} + +// Test for golang/go#48929. +func TestClearNonWorkspaceDiagnostics(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + + const ws = ` +-- go.work -- +go 1.18 + +use ( + ./b +) +-- a/go.mod -- +module a + +go 1.17 +-- a/main.go -- +package main + +func main() { + var V string +} +-- b/go.mod -- +module b + +go 1.17 +-- b/main.go -- +package b + +import ( + _ "fmt" +) +` + Run(t, ws, func(t *testing.T, env *Env) { + env.OpenFile("b/main.go") + env.AfterChange( + NoDiagnostics(ForFile("a/main.go")), + ) + env.OpenFile("a/main.go") + env.AfterChange( + Diagnostics(env.AtRegexp("a/main.go", "V"), WithMessage("not used")), + ) + env.CloseBuffer("a/main.go") + + // Make an arbitrary edit because gopls explicitly diagnoses a/main.go + // whenever it is "changed". + // + // TODO(rfindley): it should not be necessary to make another edit here. + // Gopls should be smart enough to avoid diagnosing a. + env.RegexpReplace("b/main.go", "package b", "package b // a package") + env.AfterChange( + NoDiagnostics(ForFile("a/main.go")), + ) + }) +} + +// Test that we don't get a version warning when the Go version in PATH is +// supported. +func TestOldGoNotification_SupportedVersion(t *testing.T) { + v := goVersion(t) + if v < lsp.OldestSupportedGoVersion() { + t.Skipf("go version 1.%d is unsupported", v) + } + + Run(t, "", func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + NoShownMessage("upgrade"), + ) }) } + +// Test that we do get a version warning when the Go version in PATH is +// unsupported, though this test may never execute if we stop running CI at +// legacy Go versions (see also TestOldGoNotification_Fake) +func TestOldGoNotification_UnsupportedVersion(t *testing.T) { + v := goVersion(t) + if v >= lsp.OldestSupportedGoVersion() { + t.Skipf("go version 1.%d is supported", v) + } + + Run(t, "", func(t *testing.T, env *Env) { + env.Await( + // Note: cannot use OnceMet(InitialWorkspaceLoad, ...) here, as the + // upgrade message may race with the IWL. + ShownMessage("Please upgrade"), + ) + }) +} + +func TestOldGoNotification_Fake(t *testing.T) { + // Get the Go version from path, and make sure it's unsupported. + // + // In the future we'll stop running CI on legacy Go versions. By mutating the + // oldest supported Go version here, we can at least ensure that the + // ShowMessage pop-up works. + ctx := context.Background() + goversion, err := gocommand.GoVersion(ctx, gocommand.Invocation{}, &gocommand.Runner{}) + if err != nil { + t.Fatal(err) + } + defer func(t []lsp.GoVersionSupport) { + lsp.GoVersionTable = t + }(lsp.GoVersionTable) + lsp.GoVersionTable = []lsp.GoVersionSupport{ + {GoVersion: goversion, InstallGoplsVersion: "v1.0.0"}, + } + + Run(t, "", func(t *testing.T, env *Env) { + env.Await( + // Note: cannot use OnceMet(InitialWorkspaceLoad, ...) here, as the + // upgrade message may race with the IWL. + ShownMessage("Please upgrade"), + ) + }) +} + +// goVersion returns the version of the Go command in PATH. +func goVersion(t *testing.T) int { + t.Helper() + ctx := context.Background() + goversion, err := gocommand.GoVersion(ctx, gocommand.Invocation{}, &gocommand.Runner{}) + if err != nil { + t.Fatal(err) + } + return goversion +} diff --git a/gopls/internal/span/parse.go b/gopls/internal/span/parse.go new file mode 100644 index 00000000000..715d5fe44fd --- /dev/null +++ b/gopls/internal/span/parse.go @@ -0,0 +1,114 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "path/filepath" + "strconv" + "strings" + "unicode/utf8" +) + +// Parse returns the location represented by the input. +// Only file paths are accepted, not URIs. +// The returned span will be normalized, and thus if printed may produce a +// different string. +func Parse(input string) Span { + return ParseInDir(input, ".") +} + +// ParseInDir is like Parse, but interprets paths relative to wd. +func ParseInDir(input, wd string) Span { + uri := func(path string) URI { + if !filepath.IsAbs(path) { + path = filepath.Join(wd, path) + } + return URIFromPath(path) + } + // :0:0#0-0:0#0 + valid := input + var hold, offset int + hadCol := false + suf := rstripSuffix(input) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep == ":" { + valid = suf.remains + hold = suf.num + hadCol = true + suf = rstripSuffix(suf.remains) + } + switch { + case suf.sep == ":": + return New(uri(suf.remains), NewPoint(suf.num, hold, offset), Point{}) + case suf.sep == "-": + // we have a span, fall out of the case to continue + default: + // separator not valid, rewind to either the : or the start + return New(uri(valid), NewPoint(hold, 0, offset), Point{}) + } + // only the span form can get here + // at this point we still don't know what the numbers we have mean + // if have not yet seen a : then we might have either a line or a column depending + // on whether start has a column or not + // we build an end point and will fix it later if needed + end := NewPoint(suf.num, hold, offset) + hold, offset = 0, 0 + suf = rstripSuffix(suf.remains) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep != ":" { + // turns out we don't have a span after all, rewind + return New(uri(valid), end, Point{}) + } + valid = suf.remains + hold = suf.num + suf = rstripSuffix(suf.remains) + if suf.sep != ":" { + // line#offset only + return New(uri(valid), NewPoint(hold, 0, offset), end) + } + // we have a column, so if end only had one number, it is also the column + if !hadCol { + end = NewPoint(suf.num, end.v.Line, end.v.Offset) + } + return New(uri(suf.remains), NewPoint(suf.num, hold, offset), end) +} + +type suffix struct { + remains string + sep string + num int +} + +func rstripSuffix(input string) suffix { + if len(input) == 0 { + return suffix{"", "", -1} + } + remains := input + + // Remove optional trailing decimal number. + num := -1 + last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) + if last >= 0 && last < len(remains)-1 { + number, err := strconv.ParseInt(remains[last+1:], 10, 64) + if err == nil { + num = int(number) + remains = remains[:last+1] + } + } + // now see if we have a trailing separator + r, w := utf8.DecodeLastRuneInString(remains) + // TODO(adonovan): this condition is clearly wrong. Should the third byte be '-'? + if r != ':' && r != '#' && r == '#' { + return suffix{input, "", -1} + } + remains = remains[:len(remains)-w] + return suffix{remains, string(r), num} +} diff --git a/gopls/internal/span/span.go b/gopls/internal/span/span.go new file mode 100644 index 00000000000..07345c8ef50 --- /dev/null +++ b/gopls/internal/span/span.go @@ -0,0 +1,253 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package span contains support for representing with positions and ranges in +// text files. +package span + +import ( + "encoding/json" + "fmt" + "go/token" + "path" + "sort" + "strings" + + "golang.org/x/tools/gopls/internal/lsp/safetoken" +) + +// A Span represents a range of text within a source file. The start +// and end points of a valid span may be hold either its byte offset, +// or its (line, column) pair, or both. Columns are measured in bytes. +// +// Spans are appropriate in user interfaces (e.g. command-line tools) +// and tests where a position is notated without access to the content +// of the file. +// +// Use protocol.Mapper to convert between Span and other +// representations, such as go/token (also UTF-8) or the LSP protocol +// (UTF-16). The latter requires access to file contents. +// +// See overview comments at ../lsp/protocol/mapper.go. +type Span struct { + v span +} + +// Point represents a single point within a file. +// In general this should only be used as part of a Span, as on its own it +// does not carry enough information. +type Point struct { + v point +} + +// The private span/point types have public fields to support JSON +// encoding, but the public Span/Point types hide these fields by +// defining methods that shadow them. (This is used by a few of the +// command-line tool subcommands, which emit spans and have a -json +// flag.) + +type span struct { + URI URI `json:"uri"` + Start point `json:"start"` + End point `json:"end"` +} + +type point struct { + Line int `json:"line"` // 1-based line number + Column int `json:"column"` // 1-based, UTF-8 codes (bytes) + Offset int `json:"offset"` // 0-based byte offset +} + +// Invalid is a span that reports false from IsValid +var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} + +var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} + +func New(uri URI, start, end Point) Span { + s := Span{v: span{URI: uri, Start: start.v, End: end.v}} + s.v.clean() + return s +} + +func NewPoint(line, col, offset int) Point { + p := Point{v: point{Line: line, Column: col, Offset: offset}} + p.v.clean() + return p +} + +// SortSpans sorts spans into a stable but unspecified order. +func SortSpans(spans []Span) { + sort.SliceStable(spans, func(i, j int) bool { + return compare(spans[i], spans[j]) < 0 + }) +} + +// compare implements a three-valued ordered comparison of Spans. +func compare(a, b Span) int { + // This is a textual comparison. It does not perform path + // cleaning, case folding, resolution of symbolic links, + // testing for existence, or any I/O. + if cmp := strings.Compare(string(a.URI()), string(b.URI())); cmp != 0 { + return cmp + } + if cmp := comparePoint(a.v.Start, b.v.Start); cmp != 0 { + return cmp + } + return comparePoint(a.v.End, b.v.End) +} + +func ComparePoint(a, b Point) int { + return comparePoint(a.v, b.v) +} + +func comparePoint(a, b point) int { + if !a.hasPosition() { + if a.Offset < b.Offset { + return -1 + } + if a.Offset > b.Offset { + return 1 + } + return 0 + } + if a.Line < b.Line { + return -1 + } + if a.Line > b.Line { + return 1 + } + if a.Column < b.Column { + return -1 + } + if a.Column > b.Column { + return 1 + } + return 0 +} + +func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } +func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } +func (s Span) IsValid() bool { return s.v.Start.isValid() } +func (s Span) IsPoint() bool { return s.v.Start == s.v.End } +func (s Span) URI() URI { return s.v.URI } +func (s Span) Start() Point { return Point{s.v.Start} } +func (s Span) End() Point { return Point{s.v.End} } +func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } +func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } + +func (p Point) HasPosition() bool { return p.v.hasPosition() } +func (p Point) HasOffset() bool { return p.v.hasOffset() } +func (p Point) IsValid() bool { return p.v.isValid() } +func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } +func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } +func (p Point) Line() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Line +} +func (p Point) Column() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Column +} +func (p Point) Offset() int { + if !p.v.hasOffset() { + panic(fmt.Errorf("offset not set in %v", p.v)) + } + return p.v.Offset +} + +func (p point) hasPosition() bool { return p.Line > 0 } +func (p point) hasOffset() bool { return p.Offset >= 0 } +func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } +func (p point) isZero() bool { + return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) +} + +func (s *span) clean() { + //this presumes the points are already clean + if !s.End.isValid() || (s.End == point{}) { + s.End = s.Start + } +} + +func (p *point) clean() { + if p.Line < 0 { + p.Line = 0 + } + if p.Column <= 0 { + if p.Line > 0 { + p.Column = 1 + } else { + p.Column = 0 + } + } + if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { + p.Offset = -1 + } +} + +// Format implements fmt.Formatter to print the Location in a standard form. +// The format produced is one that can be read back in using Parse. +func (s Span) Format(f fmt.State, c rune) { + fullForm := f.Flag('+') + preferOffset := f.Flag('#') + // we should always have a uri, simplify if it is file format + //TODO: make sure the end of the uri is unambiguous + uri := string(s.v.URI) + if c == 'f' { + uri = path.Base(uri) + } else if !fullForm { + uri = s.v.URI.Filename() + } + fmt.Fprint(f, uri) + if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { + return + } + // see which bits of start to write + printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) + printLine := s.HasPosition() && (fullForm || !printOffset) + printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) + fmt.Fprint(f, ":") + if printLine { + fmt.Fprintf(f, "%d", s.v.Start.Line) + } + if printColumn { + fmt.Fprintf(f, ":%d", s.v.Start.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.Start.Offset) + } + // start is written, do we need end? + if s.IsPoint() { + return + } + // we don't print the line if it did not change + printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) + fmt.Fprint(f, "-") + if printLine { + fmt.Fprintf(f, "%d", s.v.End.Line) + } + if printColumn { + if printLine { + fmt.Fprint(f, ":") + } + fmt.Fprintf(f, "%d", s.v.End.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.End.Offset) + } +} + +// SetRange implements packagestest.rangeSetter, allowing +// gopls' test suites to use Spans instead of Range in parameters. +func (span *Span) SetRange(file *token.File, start, end token.Pos) { + point := func(pos token.Pos) Point { + posn := safetoken.Position(file, pos) + return NewPoint(posn.Line, posn.Column, posn.Offset) + } + *span = New(URIFromPath(file.Name()), point(start), point(end)) +} diff --git a/gopls/internal/span/span_test.go b/gopls/internal/span/span_test.go new file mode 100644 index 00000000000..d2aaff12cab --- /dev/null +++ b/gopls/internal/span/span_test.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span_test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/span" +) + +func TestFormat(t *testing.T) { + formats := []string{"%v", "%#v", "%+v"} + + // Element 0 is the input, and the elements 0-2 are the expected + // output in [%v %#v %+v] formats. Thus the first must be in + // canonical form (invariant under span.Parse + fmt.Sprint). + // The '#' form displays offsets; the '+' form outputs a URI. + // If len=4, element 0 is a noncanonical input and 1-3 are expected outputs. + for _, test := range [][]string{ + {"C:/file_a", "C:/file_a", "file:///C:/file_a:#0"}, + {"C:/file_b:1:2", "C:/file_b:1:2", "file:///C:/file_b:1:2"}, + {"C:/file_c:1000", "C:/file_c:1000", "file:///C:/file_c:1000:1"}, + {"C:/file_d:14:9", "C:/file_d:14:9", "file:///C:/file_d:14:9"}, + {"C:/file_e:1:2-7", "C:/file_e:1:2-7", "file:///C:/file_e:1:2-1:7"}, + {"C:/file_f:500-502", "C:/file_f:500-502", "file:///C:/file_f:500:1-502:1"}, + {"C:/file_g:3:7-8", "C:/file_g:3:7-8", "file:///C:/file_g:3:7-3:8"}, + {"C:/file_h:3:7-4:8", "C:/file_h:3:7-4:8", "file:///C:/file_h:3:7-4:8"}, + {"C:/file_i:#100", "C:/file_i:#100", "file:///C:/file_i:#100"}, + {"C:/file_j:#26-#28", "C:/file_j:#26-#28", "file:///C:/file_j:#26-0#28"}, // 0#28? + {"C:/file_h:3:7#26-4:8#37", // not canonical + "C:/file_h:3:7-4:8", "C:/file_h:#26-#37", "file:///C:/file_h:3:7#26-4:8#37"}} { + input := test[0] + spn := span.Parse(input) + wants := test[0:3] + if len(test) == 4 { + wants = test[1:4] + } + for i, format := range formats { + want := toPath(wants[i]) + if got := fmt.Sprintf(format, spn); got != want { + t.Errorf("Sprintf(%q, %q) = %q, want %q", format, input, got, want) + } + } + } +} + +func toPath(value string) string { + if strings.HasPrefix(value, "file://") { + return value + } + return filepath.FromSlash(value) +} diff --git a/internal/span/uri.go b/gopls/internal/span/uri.go similarity index 75% rename from internal/span/uri.go rename to gopls/internal/span/uri.go index a9777ff8598..e6191f7ab12 100644 --- a/internal/span/uri.go +++ b/gopls/internal/span/uri.go @@ -8,7 +8,6 @@ import ( "fmt" "net/url" "os" - "path" "path/filepath" "runtime" "strings" @@ -38,6 +37,26 @@ func filename(uri URI) (string, error) { if uri == "" { return "", nil } + + // This conservative check for the common case + // of a simple non-empty absolute POSIX filename + // avoids the allocation of a net.URL. + if strings.HasPrefix(string(uri), "file:///") { + rest := string(uri)[len("file://"):] // leave one slash + for i := 0; i < len(rest); i++ { + b := rest[i] + // Reject these cases: + if b < ' ' || b == 0x7f || // control character + b == '%' || b == '+' || // URI escape + b == ':' || // Windows drive letter + b == '@' || b == '&' || b == '?' { // authority or query + goto slow + } + } + return rest, nil + } +slow: + u, err := url.ParseRequestURI(string(uri)) if err != nil { return "", err @@ -50,9 +69,12 @@ func filename(uri URI) (string, error) { if isWindowsDriveURIPath(u.Path) { u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] } + return u.Path, nil } +// TODO(adonovan): document this function, and any invariants of +// span.URI that it is supposed to establish. func URIFromURI(s string) URI { if !strings.HasPrefix(s, "file://") { return URI(s) @@ -80,24 +102,9 @@ func URIFromURI(s string) URI { return URI(u.String()) } -func CompareURI(a, b URI) int { - if equalURI(a, b) { - return 0 - } - if a < b { - return -1 - } - return 1 -} - -func equalURI(a, b URI) bool { - if a == b { - return true - } - // If we have the same URI basename, we may still have the same file URIs. - if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { - return false - } +// SameExistingFile reports whether two spans denote the +// same existing file by querying the file system. +func SameExistingFile(a, b URI) bool { fa, err := filename(a) if err != nil { return false @@ -106,7 +113,6 @@ func equalURI(a, b URI) bool { if err != nil { return false } - // Stat the files to check if they are equal. infoa, err := os.Stat(filepath.FromSlash(fa)) if err != nil { return false @@ -119,7 +125,9 @@ func equalURI(a, b URI) bool { } // URIFromPath returns a span URI for the supplied file path. -// It will always have the file scheme. +// +// For empty paths, URIFromPath returns the empty URI "". +// For non-empty paths, URIFromPath returns a uri with the file:// scheme. func URIFromPath(path string) URI { if path == "" { return "" @@ -158,7 +166,7 @@ func isWindowsDrivePath(path string) bool { return unicode.IsLetter(rune(path[0])) && path[1] == ':' } -// isWindowsDriveURI returns true if the file URI is of the format used by +// isWindowsDriveURIPath returns true if the file URI is of the format used by // Windows URIs. The url.Parse package does not specially handle Windows paths // (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). func isWindowsDriveURIPath(uri string) bool { @@ -167,3 +175,11 @@ func isWindowsDriveURIPath(uri string) bool { } return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' } + +// Dir returns the URI for the directory containing uri. Dir panics if uri is +// not a file uri. +// +// TODO(rfindley): add a unit test for various edge cases. +func Dir(uri URI) URI { + return URIFromPath(filepath.Dir(uri.Filename())) +} diff --git a/internal/span/uri_test.go b/gopls/internal/span/uri_test.go similarity index 98% rename from internal/span/uri_test.go rename to gopls/internal/span/uri_test.go index bcbad87128e..e9904378504 100644 --- a/internal/span/uri_test.go +++ b/gopls/internal/span/uri_test.go @@ -10,7 +10,7 @@ package span_test import ( "testing" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/span" ) // TestURI tests the conversion between URIs and filenames. The test cases diff --git a/internal/span/uri_windows_test.go b/gopls/internal/span/uri_windows_test.go similarity index 98% rename from internal/span/uri_windows_test.go rename to gopls/internal/span/uri_windows_test.go index e50b58f1bb2..3891e0d3e77 100644 --- a/internal/span/uri_windows_test.go +++ b/gopls/internal/span/uri_windows_test.go @@ -10,7 +10,7 @@ package span_test import ( "testing" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/span" ) // TestURI tests the conversion between URIs and filenames. The test cases diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go index a89354f67ee..1f171f09d62 100644 --- a/gopls/internal/vulncheck/command.go +++ b/gopls/internal/vulncheck/command.go @@ -9,115 +9,373 @@ package vulncheck import ( "context" + "encoding/json" + "errors" + "fmt" "log" "os" + "regexp" + "sort" "strings" + "sync" + "golang.org/x/mod/semver" + "golang.org/x/sync/errgroup" "golang.org/x/tools/go/packages" - gvc "golang.org/x/tools/gopls/internal/govulncheck" - "golang.org/x/tools/internal/lsp/command" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/source" "golang.org/x/vuln/client" + gvcapi "golang.org/x/vuln/exp/govulncheck" + "golang.org/x/vuln/osv" + "golang.org/x/vuln/vulncheck" ) func init() { - Govulncheck = govulncheck + VulnerablePackages = vulnerablePackages } -func govulncheck(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) { - if args.Pattern == "" { - args.Pattern = "." +func findGOVULNDB(env []string) []string { + for _, kv := range env { + if strings.HasPrefix(kv, "GOVULNDB=") { + return strings.Split(kv[len("GOVULNDB="):], ",") + } + } + if GOVULNDB := os.Getenv("GOVULNDB"); GOVULNDB != "" { + return strings.Split(GOVULNDB, ",") } + return []string{"https://vuln.go.dev"} +} - dbClient, err := client.NewClient(findGOVULNDB(cfg), client.Options{HTTPCache: gvc.DefaultCache()}) - if err != nil { - return res, err +// GoVersionForVulnTest is an internal environment variable used in gopls +// testing to examine govulncheck behavior with a go version different +// than what `go version` returns in the system. +const GoVersionForVulnTest = "_GOPLS_TEST_VULNCHECK_GOVERSION" + +func init() { + Main = func(cfg packages.Config, patterns ...string) error { + // Set the mode that Source needs. + cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedTypes | + packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps | + packages.NeedModule + logf := log.New(os.Stderr, "", log.Ltime).Printf + logf("Loading packages...") + pkgs, err := packages.Load(&cfg, patterns...) + if err != nil { + logf("Failed to load packages: %v", err) + return err + } + if n := packages.PrintErrors(pkgs); n > 0 { + err := errors.New("failed to load packages due to errors") + logf("%v", err) + return err + } + logf("Loaded %d packages and their dependencies", len(pkgs)) + cache, err := govulncheck.DefaultCache() + if err != nil { + return err + } + cli, err := client.NewClient(findGOVULNDB(cfg.Env), client.Options{ + HTTPCache: cache, + }) + if err != nil { + return err + } + res, err := gvcapi.Source(context.Background(), &gvcapi.Config{ + Client: cli, + GoVersion: os.Getenv(GoVersionForVulnTest), + }, vulncheck.Convert(pkgs)) + if err != nil { + return err + } + affecting := 0 + for _, v := range res.Vulns { + if v.IsCalled() { + affecting++ + } + } + logf("Found %d affecting vulns and %d unaffecting vulns in imported packages", affecting, len(res.Vulns)-affecting) + if err := json.NewEncoder(os.Stdout).Encode(res); err != nil { + return err + } + return nil } +} - c := cmd{Client: dbClient} - vulns, err := c.Run(ctx, cfg, args.Pattern) - if err != nil { - return res, err +var ( + // Regexp for matching go tags. The groups are: + // 1 the major.minor version + // 2 the patch version, or empty if none + // 3 the entire prerelease, if present + // 4 the prerelease type ("beta" or "rc") + // 5 the prerelease number + tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`) +) + +// This is a modified copy of pkgsite/internal/stdlib:VersionForTag. +func GoTagToSemver(tag string) string { + if tag == "" { + return "" } - res.Vuln = vulns - return res, err + tag = strings.Fields(tag)[0] + // Special cases for go1. + if tag == "go1" { + return "v1.0.0" + } + if tag == "go1.0" { + return "" + } + m := tagRegexp.FindStringSubmatch(tag) + if m == nil { + return "" + } + version := "v" + m[1] + if m[2] != "" { + version += m[2] + } else { + version += ".0" + } + if m[3] != "" { + if !strings.HasPrefix(m[4], "-") { + version += "-" + } + version += m[4] + "." + m[5] + } + return version } -func findGOVULNDB(cfg *packages.Config) []string { - for _, kv := range cfg.Env { - if strings.HasPrefix(kv, "GOVULNDB=") { - return strings.Split(kv[len("GOVULNDB="):], ",") +// semverToGoTag returns the Go standard library repository tag corresponding +// to semver, a version string without the initial "v". +// Go tags differ from standard semantic versions in a few ways, +// such as beginning with "go" instead of "v". +func semverToGoTag(v string) string { + if strings.HasPrefix(v, "v0.0.0") { + return "master" + } + // Special case: v1.0.0 => go1. + if v == "v1.0.0" { + return "go1" + } + if !semver.IsValid(v) { + return fmt.Sprintf("", v) + } + goVersion := semver.Canonical(v) + prerelease := semver.Prerelease(goVersion) + versionWithoutPrerelease := strings.TrimSuffix(goVersion, prerelease) + patch := strings.TrimPrefix(versionWithoutPrerelease, semver.MajorMinor(goVersion)+".") + if patch == "0" { + versionWithoutPrerelease = strings.TrimSuffix(versionWithoutPrerelease, ".0") + } + goVersion = fmt.Sprintf("go%s", strings.TrimPrefix(versionWithoutPrerelease, "v")) + if prerelease != "" { + // Go prereleases look like "beta1" instead of "beta.1". + // "beta1" is bad for sorting (since beta10 comes before beta9), so + // require the dot form. + i := finalDigitsIndex(prerelease) + if i >= 1 { + if prerelease[i-1] != '.' { + return fmt.Sprintf("", v) + } + // Remove the dot. + prerelease = prerelease[:i-1] + prerelease[i:] } + goVersion += strings.TrimPrefix(prerelease, "-") } - if GOVULNDB := os.Getenv("GOVULNDB"); GOVULNDB != "" { - return strings.Split(GOVULNDB, ",") + return goVersion +} + +// finalDigitsIndex returns the index of the first digit in the sequence of digits ending s. +// If s doesn't end in digits, it returns -1. +func finalDigitsIndex(s string) int { + // Assume ASCII (since the semver package does anyway). + var i int + for i = len(s) - 1; i >= 0; i-- { + if s[i] < '0' || s[i] > '9' { + break + } } - return []string{"https://vuln.go.dev"} + if i == len(s)-1 { + return -1 + } + return i + 1 } -type Vuln = command.Vuln -type CallStack = command.CallStack -type StackEntry = command.StackEntry +// vulnerablePackages queries the vulndb and reports which vulnerabilities +// apply to this snapshot. The result contains a set of packages, +// grouped by vuln ID and by module. +func vulnerablePackages(ctx context.Context, snapshot source.Snapshot, modfile source.FileHandle) (*govulncheck.Result, error) { + // We want to report the intersection of vulnerable packages in the vulndb + // and packages transitively imported by this module ('go list -deps all'). + // We use snapshot.AllMetadata to retrieve the list of packages + // as an approximation. + // + // TODO(hyangah): snapshot.AllMetadata is a superset of + // `go list all` - e.g. when the workspace has multiple main modules + // (multiple go.mod files), that can include packages that are not + // used by this module. Vulncheck behavior with go.work is not well + // defined. Figure out the meaning, and if we decide to present + // the result as if each module is analyzed independently, make + // gopls track a separate build list for each module and use that + // information instead of snapshot.AllMetadata. + metadata, err := snapshot.AllMetadata(ctx) + if err != nil { + return nil, err + } -// cmd is an in-process govulncheck command runner -// that uses the provided client.Client. -type cmd struct { - Client client.Client -} + // TODO(hyangah): handle vulnerabilities in the standard library. -// Run runs the govulncheck after loading packages using the provided packages.Config. -func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) (_ []Vuln, err error) { - cfg.Mode |= packages.NeedModule | packages.NeedName | packages.NeedFiles | - packages.NeedCompiledGoFiles | packages.NeedImports | packages.NeedTypes | - packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps + // Group packages by modules since vuln db is keyed by module. + metadataByModule := map[source.PackagePath][]*source.Metadata{} + for _, md := range metadata { + mi := md.Module + modulePath := source.PackagePath("stdlib") + if mi != nil { + modulePath = source.PackagePath(mi.Path) + } + metadataByModule[modulePath] = append(metadataByModule[modulePath], md) + } - log.Println("loading packages...") - loadedPkgs, err := gvc.LoadPackages(cfg, patterns...) + // Request vuln entries from remote service. + fsCache, err := govulncheck.DefaultCache() if err != nil { - log.Printf("package load failed: %v", err) return nil, err } - log.Printf("loaded %d packages\n", len(loadedPkgs)) - - r, err := gvc.Source(ctx, loadedPkgs, c.Client) + cli, err := client.NewClient( + findGOVULNDB(snapshot.View().Options().EnvSlice()), + client.Options{HTTPCache: govulncheck.NewInMemoryCache(fsCache)}) if err != nil { return nil, err } - callInfo := gvc.GetCallInfo(r, loadedPkgs) - return toVulns(callInfo) - // TODO: add import graphs. -} + // Keys are osv.Entry.IDs + vulnsResult := map[string]*govulncheck.Vuln{} + var ( + group errgroup.Group + mu sync.Mutex + ) -func toVulns(ci *gvc.CallInfo) ([]Vuln, error) { - var vulns []Vuln + goVersion := snapshot.View().Options().Env[GoVersionForVulnTest] + if goVersion == "" { + goVersion = snapshot.View().GoVersionString() + } + group.SetLimit(10) + stdlibModule := &packages.Module{ + Path: "stdlib", + Version: goVersion, + } + for path, mds := range metadataByModule { + path, mds := path, mds + group.Go(func() error { + effectiveModule := stdlibModule + if m := mds[0].Module; m != nil { + effectiveModule = m + } + for effectiveModule.Replace != nil { + effectiveModule = effectiveModule.Replace + } + ver := effectiveModule.Version - for _, vg := range ci.VulnGroups { - v0 := vg[0] - lf := gvc.LatestFixed(v0.OSV.Affected) - if lf != "" && lf[0] != 'v' { - lf = "v" + lf - } - vuln := Vuln{ - ID: v0.OSV.ID, - PkgPath: v0.PkgPath, - CurrentVersion: ci.ModuleVersions[v0.ModPath], - FixedVersion: lf, - Details: v0.OSV.Details, - - Aliases: v0.OSV.Aliases, - Symbol: v0.Symbol, - ModPath: v0.ModPath, - URL: href(v0.OSV), - } + // TODO(go.dev/issues/56312): batch these requests for efficiency. + vulns, err := cli.GetByModule(ctx, effectiveModule.Path) + if err != nil { + return err + } + if len(vulns) == 0 { // No known vulnerability. + return nil + } + + // set of packages in this module known to gopls. + // This will be lazily initialized when we need it. + var knownPkgs map[source.PackagePath]bool + + // Report vulnerabilities that affect packages of this module. + for _, entry := range vulns { + var vulnerablePkgs []*govulncheck.Package - // Keep first call stack for each vuln. - for _, v := range vg { - if css := ci.CallStacks[v]; len(css) > 0 { - vuln.CallStacks = append(vuln.CallStacks, toCallStack(css[0])) - vuln.CallStackSummaries = append(vuln.CallStackSummaries, gvc.SummarizeCallStack(css[0], ci.TopPackages, v.PkgPath)) + for _, a := range entry.Affected { + if a.Package.Ecosystem != osv.GoEcosystem || a.Package.Name != effectiveModule.Path { + continue + } + if !a.Ranges.AffectsSemver(ver) { + continue + } + for _, imp := range a.EcosystemSpecific.Imports { + if knownPkgs == nil { + knownPkgs = toPackagePathSet(mds) + } + if knownPkgs[source.PackagePath(imp.Path)] { + vulnerablePkgs = append(vulnerablePkgs, &govulncheck.Package{ + Path: imp.Path, + }) + } + } + } + if len(vulnerablePkgs) == 0 { + continue + } + mu.Lock() + vuln, ok := vulnsResult[entry.ID] + if !ok { + vuln = &govulncheck.Vuln{OSV: entry} + vulnsResult[entry.ID] = vuln + } + vuln.Modules = append(vuln.Modules, &govulncheck.Module{ + Path: string(path), + FoundVersion: ver, + FixedVersion: fixedVersion(effectiveModule.Path, entry.Affected), + Packages: vulnerablePkgs, + }) + mu.Unlock() } - } - vulns = append(vulns, vuln) + return nil + }) + } + if err := group.Wait(); err != nil { + return nil, err + } + + vulns := make([]*govulncheck.Vuln, 0, len(vulnsResult)) + for _, v := range vulnsResult { + vulns = append(vulns, v) + } + // Sort so the results are deterministic. + sort.Slice(vulns, func(i, j int) bool { + return vulns[i].OSV.ID < vulns[j].OSV.ID + }) + ret := &govulncheck.Result{ + Vulns: vulns, + Mode: govulncheck.ModeImports, + } + return ret, nil +} + +// toPackagePathSet transforms the metadata to a set of package paths. +func toPackagePathSet(mds []*source.Metadata) map[source.PackagePath]bool { + pkgPaths := make(map[source.PackagePath]bool, len(mds)) + for _, md := range mds { + pkgPaths[md.PkgPath] = true + } + return pkgPaths +} + +func fixedVersion(modulePath string, affected []osv.Affected) string { + fixed := govulncheck.LatestFixed(modulePath, affected) + if fixed != "" { + fixed = versionString(modulePath, fixed) + } + return fixed +} + +// versionString prepends a version string prefix (`v` or `go` +// depending on the modulePath) to the given semver-style version string. +func versionString(modulePath, version string) string { + if version == "" { + return "" + } + v := "v" + version + // These are internal Go module paths used by the vuln DB + // when listing vulns in standard library and the go command. + if modulePath == "stdlib" || modulePath == "toolchain" { + return semverToGoTag(v) } - return vulns, nil + return v } diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go deleted file mode 100644 index f689ab96722..00000000000 --- a/gopls/internal/vulncheck/command_test.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package vulncheck - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/vuln/client" - "golang.org/x/vuln/osv" -) - -func TestCmd_Run(t *testing.T) { - runTest(t, workspace1, proxy1, func(ctx context.Context, snapshot source.Snapshot) { - cmd := &cmd{Client: testClient1} - cfg := packagesCfg(ctx, snapshot) - result, err := cmd.Run(ctx, cfg, "./...") - if err != nil { - t.Fatal(err) - } - // Check that we find the right number of vulnerabilities. - // There should be three entries as there are three vulnerable - // symbols in the two import-reachable OSVs. - var got []report - for _, v := range result { - got = append(got, toReport(v)) - } - - var want = []report{ - { - Vuln: Vuln{ - ID: "GO-2022-01", - Symbol: "VulnData.Vuln1", - PkgPath: "golang.org/amod/avuln", - ModPath: "golang.org/amod", - URL: "https://pkg.go.dev/vuln/GO-2022-01", - CurrentVersion: "v1.1.3", - FixedVersion: "v1.0.4", - CallStackSummaries: []string{ - "golang.org/entry/x.X calls golang.org/amod/avuln.VulnData.Vuln1", - "golang.org/entry/x.X calls golang.org/cmod/c.C1, which eventually calls golang.org/amod/avuln.VulnData.Vuln2", - }, - }, - CallStacksStr: []string{ - "golang.org/entry/x.X [approx.] (x.go:8)\n" + - "golang.org/amod/avuln.VulnData.Vuln1 (avuln.go:3)\n", - "golang.org/entry/x.X (x.go:8)\n" + - "golang.org/cmod/c.C1 (c.go:13)\n" + - "golang.org/amod/avuln.VulnData.Vuln2 (avuln.go:4)\n", - }, - }, - { - Vuln: Vuln{ - ID: "GO-2022-02", - Symbol: "Vuln", - PkgPath: "golang.org/bmod/bvuln", - ModPath: "golang.org/bmod", - URL: "https://pkg.go.dev/vuln/GO-2022-02", - CurrentVersion: "v0.5.0", - CallStackSummaries: []string{"golang.org/entry/y.Y calls golang.org/bmod/bvuln.Vuln"}, - }, - CallStacksStr: []string{ - "golang.org/entry/y.Y [approx.] (y.go:5)\n" + - "golang.org/bmod/bvuln.Vuln (bvuln.go:2)\n", - }, - }, - } - // sort reports for stability before comparison. - for _, rpts := range [][]report{got, want} { - sort.Slice(rpts, func(i, j int) bool { - a, b := rpts[i], rpts[j] - if a.ID != b.ID { - return a.ID < b.ID - } - if a.PkgPath != b.PkgPath { - return a.PkgPath < b.PkgPath - } - return a.Symbol < b.Symbol - }) - } - if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(report{}, "Vuln.CallStacks")); diff != "" { - t.Error(diff) - } - - }) -} - -type report struct { - Vuln - // Trace is stringified Vuln.CallStacks - CallStacksStr []string -} - -func toReport(v Vuln) report { - var r = report{Vuln: v} - for _, s := range v.CallStacks { - r.CallStacksStr = append(r.CallStacksStr, CallStackString(s)) - } - return r -} - -func CallStackString(callstack CallStack) string { - var b bytes.Buffer - for _, entry := range callstack { - fname := filepath.Base(entry.URI.SpanURI().Filename()) - fmt.Fprintf(&b, "%v (%v:%d)\n", entry.Name, fname, entry.Pos.Line) - } - return b.String() -} - -const workspace1 = ` --- go.mod -- -module golang.org/entry - -require ( - golang.org/cmod v1.1.3 -) -go 1.18 --- x/x.go -- -package x - -import ( - "golang.org/cmod/c" - "golang.org/entry/y" -) - -func X() { - c.C1().Vuln1() // vuln use: X -> Vuln1 -} - -func CallY() { - y.Y() // vuln use: CallY -> y.Y -> bvuln.Vuln -} - --- y/y.go -- -package y - -import "golang.org/cmod/c" - -func Y() { - c.C2()() // vuln use: Y -> bvuln.Vuln -} -` - -const proxy1 = ` --- golang.org/cmod@v1.1.3/go.mod -- -module golang.org/cmod - -go 1.12 --- golang.org/cmod@v1.1.3/c/c.go -- -package c - -import ( - "golang.org/amod/avuln" - "golang.org/bmod/bvuln" -) - -type I interface { - Vuln1() -} - -func C1() I { - v := avuln.VulnData{} - v.Vuln2() // vuln use - return v -} - -func C2() func() { - return bvuln.Vuln -} --- golang.org/amod@v1.1.3/go.mod -- -module golang.org/amod - -go 1.14 --- golang.org/amod@v1.1.3/avuln/avuln.go -- -package avuln - -type VulnData struct {} -func (v VulnData) Vuln1() {} -func (v VulnData) Vuln2() {} --- golang.org/bmod@v0.5.0/go.mod -- -module golang.org/bmod - -go 1.14 --- golang.org/bmod@v0.5.0/bvuln/bvuln.go -- -package bvuln - -func Vuln() { - // something evil -} -` - -// testClient contains the following test vulnerabilities -// -// golang.org/amod/avuln.{VulnData.Vuln1, vulnData.Vuln2} -// golang.org/bmod/bvuln.{Vuln} -var testClient1 = &mockClient{ - ret: map[string][]*osv.Entry{ - "golang.org/amod": { - { - ID: "GO-2022-01", - References: []osv.Reference{ - { - Type: "href", - URL: "pkg.go.dev/vuln/GO-2022-01", - }, - }, - Affected: []osv.Affected{{ - Package: osv.Package{Name: "golang.org/amod/avuln"}, - Ranges: osv.Affects{{Type: osv.TypeSemver, Events: []osv.RangeEvent{{Introduced: "1.0.0"}, {Fixed: "1.0.4"}, {Introduced: "1.1.2"}}}}, - EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"VulnData.Vuln1", "VulnData.Vuln2"}}, - }}, - }, - }, - "golang.org/bmod": { - { - ID: "GO-2022-02", - Affected: []osv.Affected{{ - Package: osv.Package{Name: "golang.org/bmod/bvuln"}, - Ranges: osv.Affects{{Type: osv.TypeSemver}}, - EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"Vuln"}}, - }}, - }, - }, - }, -} - -type mockClient struct { - client.Client - ret map[string][]*osv.Entry -} - -func (mc *mockClient) GetByModule(ctx context.Context, a string) ([]*osv.Entry, error) { - return mc.ret[a], nil -} - -func runTest(t *testing.T, workspaceData, proxyData string, test func(context.Context, source.Snapshot)) { - ws, err := fake.NewSandbox(&fake.SandboxConfig{ - Files: fake.UnpackTxt(workspaceData), - ProxyFiles: fake.UnpackTxt(proxyData), - }) - if err != nil { - t.Fatal(err) - } - defer ws.Close() - - ctx := tests.Context(t) - - // get the module cache populated and the go.sum file at the root auto-generated. - dir := ws.Workdir.RootURI().SpanURI().Filename() - if err := ws.RunGoCommand(ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil { - t.Fatal(err) - } - - cache := cache.New(nil) - session := cache.NewSession(ctx) - options := source.DefaultOptions().Clone() - tests.DefaultOptions(options) - session.SetOptions(options) - envs := []string{} - for k, v := range ws.GoEnv() { - envs = append(envs, k+"="+v) - } - options.SetEnvSlice(envs) - name := ws.RootDir() - folder := ws.Workdir.RootURI().SpanURI() - view, snapshot, release, err := session.NewView(ctx, name, folder, options) - if err != nil { - t.Fatal(err) - } - defer release() - defer view.Shutdown(ctx) - - test(ctx, snapshot) -} - -// TODO: expose this as a method of Snapshot. -func packagesCfg(ctx context.Context, snapshot source.Snapshot) *packages.Config { - view := snapshot.View() - viewBuildFlags := view.Options().BuildFlags - var viewEnv []string - if e := view.Options().EnvSlice(); e != nil { - viewEnv = append(os.Environ(), e...) - } - return &packages.Config{ - // Mode will be set by cmd.Run. - Context: ctx, - Tests: true, - BuildFlags: viewBuildFlags, - Env: viewEnv, - Dir: view.Folder().Filename(), - } -} diff --git a/gopls/internal/vulncheck/util.go b/gopls/internal/vulncheck/util.go deleted file mode 100644 index c329461894e..00000000000 --- a/gopls/internal/vulncheck/util.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package vulncheck - -import ( - "fmt" - "go/token" - - gvc "golang.org/x/tools/gopls/internal/govulncheck" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/vuln/osv" - "golang.org/x/vuln/vulncheck" -) - -func toCallStack(src vulncheck.CallStack) CallStack { - var dest []StackEntry - for _, e := range src { - dest = append(dest, toStackEntry(e)) - } - return dest -} - -func toStackEntry(src vulncheck.StackEntry) StackEntry { - f, call := src.Function, src.Call - pos := f.Pos - desc := gvc.FuncName(f) - if src.Call != nil { - pos = src.Call.Pos // Exact call site position is helpful. - if !call.Resolved { - // In case of a statically unresolved call site, communicate to the client - // that this was approximately resolved to f - - desc += " [approx.]" - } - } - return StackEntry{ - Name: desc, - URI: filenameToURI(pos), - Pos: posToPosition(pos), - } -} - -// href returns a URL embedded in the entry if any. -// If no suitable URL is found, it returns a default entry in -// pkg.go.dev/vuln. -func href(vuln *osv.Entry) string { - for _, affected := range vuln.Affected { - if url := affected.DatabaseSpecific.URL; url != "" { - return url - } - } - for _, r := range vuln.References { - if r.Type == "WEB" { - return r.URL - } - } - return fmt.Sprintf("https://pkg.go.dev/vuln/%s", vuln.ID) -} - -func filenameToURI(pos *token.Position) protocol.DocumentURI { - if pos == nil || pos.Filename == "" { - return "" - } - return protocol.URIFromPath(pos.Filename) -} - -func posToPosition(pos *token.Position) (p protocol.Position) { - // token.Position.Line starts from 1, and - // LSP protocol's position line is 0-based. - if pos != nil { - p.Line = uint32(pos.Line - 1) - // TODO(hyangah): LSP uses UTF16 column. - // We need utility like span.ToUTF16Column, - // but somthing that does not require file contents. - } - return p -} diff --git a/gopls/internal/vulncheck/vulncheck.go b/gopls/internal/vulncheck/vulncheck.go index 2c4d0d2978d..3c361bd01e4 100644 --- a/gopls/internal/vulncheck/vulncheck.go +++ b/gopls/internal/vulncheck/vulncheck.go @@ -10,14 +10,16 @@ package vulncheck import ( "context" - "errors" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/command" + "golang.org/x/tools/gopls/internal/govulncheck" + "golang.org/x/tools/gopls/internal/lsp/source" ) -// Govulncheck runs the in-process govulncheck implementation. // With go1.18+, this is swapped with the real implementation. -var Govulncheck = func(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) { - return res, errors.New("not implemented") -} +var Main func(cfg packages.Config, patterns ...string) error = nil + +// VulnerablePackages queries the vulndb and reports which vulnerabilities +// apply to this snapshot. The result contains a set of packages, +// grouped by vuln ID and by module. +var VulnerablePackages func(ctx context.Context, snapshot source.Snapshot, modfile source.FileHandle) (*govulncheck.Result, error) = nil diff --git a/gopls/internal/vulncheck/vulntest/db.go b/gopls/internal/vulncheck/vulntest/db.go new file mode 100644 index 00000000000..511a47e1ba9 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/db.go @@ -0,0 +1,303 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +// Package vulntest provides helpers for vulncheck functionality testing. +package vulntest + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "golang.org/x/tools/gopls/internal/span" + "golang.org/x/tools/txtar" + "golang.org/x/vuln/client" + "golang.org/x/vuln/osv" +) + +// NewDatabase returns a read-only DB containing the provided +// txtar-format collection of vulnerability reports. +// Each vulnerability report is a YAML file whose format +// is defined in golang.org/x/vulndb/doc/format.md. +// A report file name must have the id as its base name, +// and have .yaml as its extension. +// +// db, err := NewDatabase(ctx, reports) +// ... +// defer db.Clean() +// client, err := NewClient(db) +// ... +// +// The returned DB's Clean method must be called to clean up the +// generated database. +func NewDatabase(ctx context.Context, txtarReports []byte) (*DB, error) { + disk, err := ioutil.TempDir("", "vulndb-test") + if err != nil { + return nil, err + } + if err := generateDB(ctx, txtarReports, disk, false); err != nil { + os.RemoveAll(disk) + return nil, err + } + + return &DB{disk: disk}, nil +} + +// DB is a read-only vulnerability database on disk. +// Users can use this database with golang.org/x/vuln APIs +// by setting the `VULNDBā€œ environment variable. +type DB struct { + disk string +} + +// URI returns the file URI that can be used for VULNDB environment +// variable. +func (db *DB) URI() string { + u := span.URIFromPath(db.disk) + return string(u) +} + +// Clean deletes the database. +func (db *DB) Clean() error { + return os.RemoveAll(db.disk) +} + +// NewClient returns a vuln DB client that works with the given DB. +func NewClient(db *DB) (client.Client, error) { + return client.NewClient([]string{db.URI()}, client.Options{}) +} + +// +// The following was selectively copied from golang.org/x/vulndb/internal/database +// + +const ( + dbURL = "https://pkg.go.dev/vuln/" + + // idDirectory is the name of the directory that contains entries + // listed by their IDs. + idDirectory = "ID" + + // stdFileName is the name of the .json file in the vulndb repo + // that will contain info on standard library vulnerabilities. + stdFileName = "stdlib" + + // toolchainFileName is the name of the .json file in the vulndb repo + // that will contain info on toolchain (cmd/...) vulnerabilities. + toolchainFileName = "toolchain" + + // cmdModule is the name of the module containing Go toolchain + // binaries. + cmdModule = "cmd" + + // stdModule is the name of the module containing Go std packages. + stdModule = "std" +) + +// generateDB generates the file-based vuln DB in the directory jsonDir. +func generateDB(ctx context.Context, txtarData []byte, jsonDir string, indent bool) error { + archive := txtar.Parse(txtarData) + + jsonVulns, entries, err := generateEntries(ctx, archive) + if err != nil { + return err + } + + index := make(client.DBIndex, len(jsonVulns)) + for modulePath, vulns := range jsonVulns { + epath, err := client.EscapeModulePath(modulePath) + if err != nil { + return err + } + if err := writeVulns(filepath.Join(jsonDir, epath), vulns, indent); err != nil { + return err + } + for _, v := range vulns { + if v.Modified.After(index[modulePath]) { + index[modulePath] = v.Modified + } + } + } + if err := writeJSON(filepath.Join(jsonDir, "index.json"), index, indent); err != nil { + return err + } + if err := writeAliasIndex(jsonDir, entries, indent); err != nil { + return err + } + return writeEntriesByID(filepath.Join(jsonDir, idDirectory), entries, indent) +} + +func generateEntries(_ context.Context, archive *txtar.Archive) (map[string][]osv.Entry, []osv.Entry, error) { + now := time.Now() + jsonVulns := map[string][]osv.Entry{} + var entries []osv.Entry + for _, f := range archive.Files { + if !strings.HasSuffix(f.Name, ".yaml") { + continue + } + r, err := readReport(bytes.NewReader(f.Data)) + if err != nil { + return nil, nil, err + } + name := strings.TrimSuffix(filepath.Base(f.Name), filepath.Ext(f.Name)) + linkName := fmt.Sprintf("%s%s", dbURL, name) + entry, modulePaths := generateOSVEntry(name, linkName, now, *r) + for _, modulePath := range modulePaths { + jsonVulns[modulePath] = append(jsonVulns[modulePath], entry) + } + entries = append(entries, entry) + } + return jsonVulns, entries, nil +} + +func writeVulns(outPath string, vulns []osv.Entry, indent bool) error { + if err := os.MkdirAll(filepath.Dir(outPath), 0755); err != nil { + return fmt.Errorf("failed to create directory %q: %s", filepath.Dir(outPath), err) + } + return writeJSON(outPath+".json", vulns, indent) +} + +func writeEntriesByID(idDir string, entries []osv.Entry, indent bool) error { + // Write a directory containing entries by ID. + if err := os.MkdirAll(idDir, 0755); err != nil { + return fmt.Errorf("failed to create directory %q: %v", idDir, err) + } + var idIndex []string + for _, e := range entries { + outPath := filepath.Join(idDir, e.ID+".json") + if err := writeJSON(outPath, e, indent); err != nil { + return err + } + idIndex = append(idIndex, e.ID) + } + // Write an index.json in the ID directory with a list of all the IDs. + return writeJSON(filepath.Join(idDir, "index.json"), idIndex, indent) +} + +// Write a JSON file containing a map from alias to GO IDs. +func writeAliasIndex(dir string, entries []osv.Entry, indent bool) error { + aliasToGoIDs := map[string][]string{} + for _, e := range entries { + for _, a := range e.Aliases { + aliasToGoIDs[a] = append(aliasToGoIDs[a], e.ID) + } + } + return writeJSON(filepath.Join(dir, "aliases.json"), aliasToGoIDs, indent) +} + +func writeJSON(filename string, value any, indent bool) (err error) { + j, err := jsonMarshal(value, indent) + if err != nil { + return err + } + return os.WriteFile(filename, j, 0644) +} + +func jsonMarshal(v any, indent bool) ([]byte, error) { + if indent { + return json.MarshalIndent(v, "", " ") + } + return json.Marshal(v) +} + +// generateOSVEntry create an osv.Entry for a report. In addition to the report, it +// takes the ID for the vuln and a URL that will point to the entry in the vuln DB. +// It returns the osv.Entry and a list of module paths that the vuln affects. +func generateOSVEntry(id, url string, lastModified time.Time, r Report) (osv.Entry, []string) { + entry := osv.Entry{ + ID: id, + Published: r.Published, + Modified: lastModified, + Withdrawn: r.Withdrawn, + Details: r.Description, + } + + moduleMap := make(map[string]bool) + for _, m := range r.Modules { + switch m.Module { + case stdModule: + moduleMap[stdFileName] = true + case cmdModule: + moduleMap[toolchainFileName] = true + default: + moduleMap[m.Module] = true + } + entry.Affected = append(entry.Affected, generateAffected(m, url)) + } + for _, ref := range r.References { + entry.References = append(entry.References, osv.Reference{ + Type: string(ref.Type), + URL: ref.URL, + }) + } + + var modulePaths []string + for module := range moduleMap { + modulePaths = append(modulePaths, module) + } + // TODO: handle missing fields - Aliases + + return entry, modulePaths +} + +func generateAffectedRanges(versions []VersionRange) osv.Affects { + a := osv.AffectsRange{Type: osv.TypeSemver} + if len(versions) == 0 || versions[0].Introduced == "" { + a.Events = append(a.Events, osv.RangeEvent{Introduced: "0"}) + } + for _, v := range versions { + if v.Introduced != "" { + a.Events = append(a.Events, osv.RangeEvent{Introduced: v.Introduced.Canonical()}) + } + if v.Fixed != "" { + a.Events = append(a.Events, osv.RangeEvent{Fixed: v.Fixed.Canonical()}) + } + } + return osv.Affects{a} +} + +func generateImports(m *Module) (imps []osv.EcosystemSpecificImport) { + for _, p := range m.Packages { + syms := append([]string{}, p.Symbols...) + syms = append(syms, p.DerivedSymbols...) + sort.Strings(syms) + imps = append(imps, osv.EcosystemSpecificImport{ + Path: p.Package, + GOOS: p.GOOS, + GOARCH: p.GOARCH, + Symbols: syms, + }) + } + return imps +} +func generateAffected(m *Module, url string) osv.Affected { + name := m.Module + switch name { + case stdModule: + name = "stdlib" + case cmdModule: + name = "toolchain" + } + return osv.Affected{ + Package: osv.Package{ + Name: name, + Ecosystem: osv.GoEcosystem, + }, + Ranges: generateAffectedRanges(m.Versions), + DatabaseSpecific: osv.DatabaseSpecific{URL: url}, + EcosystemSpecific: osv.EcosystemSpecific{ + Imports: generateImports(m), + }, + } +} diff --git a/gopls/internal/vulncheck/vulntest/db_test.go b/gopls/internal/vulncheck/vulntest/db_test.go new file mode 100644 index 00000000000..7d939421c94 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/db_test.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package vulntest + +import ( + "context" + "encoding/json" + "testing" +) + +func TestNewDatabase(t *testing.T) { + ctx := context.Background() + in := []byte(` +-- GO-2020-0001.yaml -- +modules: + - module: github.com/gin-gonic/gin + versions: + - fixed: 1.6.0 + packages: + - package: github.com/gin-gonic/gin + symbols: + - defaultLogFormatter +description: | + Something. +published: 2021-04-14T20:04:52Z +references: + - fix: https://github.com/gin-gonic/gin/pull/2237 +`) + + db, err := NewDatabase(ctx, in) + if err != nil { + t.Fatal(err) + } + defer db.Clean() + + cli, err := NewClient(db) + if err != nil { + t.Fatal(err) + } + got, err := cli.GetByID(ctx, "GO-2020-0001") + if err != nil { + t.Fatal(err) + } + if got.ID != "GO-2020-0001" { + m, _ := json.Marshal(got) + t.Errorf("got %s\nwant GO-2020-0001 entry", m) + } + gotAll, err := cli.GetByModule(ctx, "github.com/gin-gonic/gin") + if err != nil { + t.Fatal(err) + } + if len(gotAll) != 1 || gotAll[0].ID != "GO-2020-0001" { + m, _ := json.Marshal(got) + t.Errorf("got %s\nwant GO-2020-0001 entry", m) + } +} diff --git a/gopls/internal/vulncheck/vulntest/report.go b/gopls/internal/vulncheck/vulntest/report.go new file mode 100644 index 00000000000..e5595e8ba06 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/report.go @@ -0,0 +1,176 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package vulntest + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "golang.org/x/mod/semver" + "gopkg.in/yaml.v3" +) + +// +// The following was selectively copied from golang.org/x/vulndb/internal/report +// + +// readReport reads a Report in YAML format. +func readReport(in io.Reader) (*Report, error) { + d := yaml.NewDecoder(in) + // Require that all fields in the file are in the struct. + // This corresponds to v2's UnmarshalStrict. + d.KnownFields(true) + var r Report + if err := d.Decode(&r); err != nil { + return nil, fmt.Errorf("yaml.Decode: %v", err) + } + return &r, nil +} + +// Report represents a vulnerability report in the vulndb. +// Remember to update doc/format.md when this structure changes. +type Report struct { + Modules []*Module `yaml:",omitempty"` + + // Description is the CVE description from an existing CVE. If we are + // assigning a CVE ID ourselves, use CVEMetadata.Description instead. + Description string `yaml:",omitempty"` + Published time.Time `yaml:",omitempty"` + Withdrawn *time.Time `yaml:",omitempty"` + + References []*Reference `yaml:",omitempty"` +} + +// Write writes r to filename in YAML format. +func (r *Report) Write(filename string) (err error) { + f, err := os.Create(filename) + if err != nil { + return err + } + err = r.encode(f) + err2 := f.Close() + if err == nil { + err = err2 + } + return err +} + +// ToString encodes r to a YAML string. +func (r *Report) ToString() (string, error) { + var b strings.Builder + if err := r.encode(&b); err != nil { + return "", err + } + return b.String(), nil +} + +func (r *Report) encode(w io.Writer) error { + e := yaml.NewEncoder(w) + defer e.Close() + e.SetIndent(4) + return e.Encode(r) +} + +type VersionRange struct { + Introduced Version `yaml:"introduced,omitempty"` + Fixed Version `yaml:"fixed,omitempty"` +} + +type Module struct { + Module string `yaml:",omitempty"` + Versions []VersionRange `yaml:",omitempty"` + Packages []*Package `yaml:",omitempty"` +} + +type Package struct { + Package string `yaml:",omitempty"` + GOOS []string `yaml:"goos,omitempty"` + GOARCH []string `yaml:"goarch,omitempty"` + // Symbols originally identified as vulnerable. + Symbols []string `yaml:",omitempty"` + // Additional vulnerable symbols, computed from Symbols via static analysis + // or other technique. + DerivedSymbols []string `yaml:"derived_symbols,omitempty"` +} + +// Version is an SemVer 2.0.0 semantic version with no leading "v" prefix, +// as used by OSV. +type Version string + +// V returns the version with a "v" prefix. +func (v Version) V() string { + return "v" + string(v) +} + +// IsValid reports whether v is a valid semantic version string. +func (v Version) IsValid() bool { + return semver.IsValid(v.V()) +} + +// Before reports whether v < v2. +func (v Version) Before(v2 Version) bool { + return semver.Compare(v.V(), v2.V()) < 0 +} + +// Canonical returns the canonical formatting of the version. +func (v Version) Canonical() string { + return strings.TrimPrefix(semver.Canonical(v.V()), "v") +} + +// Reference type is a reference (link) type. +type ReferenceType string + +const ( + ReferenceTypeAdvisory = ReferenceType("ADVISORY") + ReferenceTypeArticle = ReferenceType("ARTICLE") + ReferenceTypeReport = ReferenceType("REPORT") + ReferenceTypeFix = ReferenceType("FIX") + ReferenceTypePackage = ReferenceType("PACKAGE") + ReferenceTypeEvidence = ReferenceType("EVIDENCE") + ReferenceTypeWeb = ReferenceType("WEB") +) + +// ReferenceTypes is the set of reference types defined in OSV. +var ReferenceTypes = []ReferenceType{ + ReferenceTypeAdvisory, + ReferenceTypeArticle, + ReferenceTypeReport, + ReferenceTypeFix, + ReferenceTypePackage, + ReferenceTypeEvidence, + ReferenceTypeWeb, +} + +// A Reference is a link to some external resource. +// +// For ease of typing, References are represented in the YAML as a +// single-element mapping of type to URL. +type Reference struct { + Type ReferenceType `json:"type,omitempty"` + URL string `json:"url,omitempty"` +} + +func (r *Reference) MarshalYAML() (interface{}, error) { + return map[string]string{ + strings.ToLower(string(r.Type)): r.URL, + }, nil +} + +func (r *Reference) UnmarshalYAML(n *yaml.Node) (err error) { + if n.Kind != yaml.MappingNode || len(n.Content) != 2 || n.Content[0].Kind != yaml.ScalarNode || n.Content[1].Kind != yaml.ScalarNode { + return &yaml.TypeError{Errors: []string{ + fmt.Sprintf("line %d: report.Reference must contain a mapping with one value", n.Line), + }} + } + r.Type = ReferenceType(strings.ToUpper(n.Content[0].Value)) + r.URL = n.Content[1].Value + return nil +} diff --git a/gopls/internal/vulncheck/vulntest/report_test.go b/gopls/internal/vulncheck/vulntest/report_test.go new file mode 100644 index 00000000000..c42dae805fa --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/report_test.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package vulntest + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func readAll(t *testing.T, filename string) io.Reader { + d, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + return bytes.NewReader(d) +} + +func TestRoundTrip(t *testing.T) { + // A report shouldn't change after being read and then written. + in := filepath.Join("testdata", "report.yaml") + r, err := readReport(readAll(t, in)) + if err != nil { + t.Fatal(err) + } + out := filepath.Join(t.TempDir(), "report.yaml") + if err := r.Write(out); err != nil { + t.Fatal(err) + } + + want, err := os.ReadFile(in) + if err != nil { + t.Fatal(err) + } + got, err := os.ReadFile(out) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("mismatch (-want, +got):\n%s", diff) + } +} diff --git a/gopls/internal/vulncheck/vulntest/stdlib.go b/gopls/internal/vulncheck/vulntest/stdlib.go new file mode 100644 index 00000000000..9bf4d4ef0d4 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/stdlib.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package vulntest + +import ( + "strings" + + "golang.org/x/mod/module" +) + +// maybeStdlib reports whether the given import path could be part of the Go +// standard library, by reporting whether the first component lacks a '.'. +func maybeStdlib(path string) bool { + if err := module.CheckImportPath(path); err != nil { + return false + } + if i := strings.IndexByte(path, '/'); i != -1 { + path = path[:i] + } + return !strings.Contains(path, ".") +} diff --git a/gopls/internal/vulncheck/vulntest/stdlib_test.go b/gopls/internal/vulncheck/vulntest/stdlib_test.go new file mode 100644 index 00000000000..8f893f3ec42 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/stdlib_test.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package vulntest + +import "testing" + +func TestMaybeStdlib(t *testing.T) { + for _, test := range []struct { + in string + want bool + }{ + {"", false}, + {"math/crypto", true}, + {"github.com/pkg/errors", false}, + {"Path is unknown", false}, + } { + got := maybeStdlib(test.in) + if got != test.want { + t.Errorf("%q: got %t, want %t", test.in, got, test.want) + } + } +} diff --git a/gopls/internal/vulncheck/vulntest/testdata/report.yaml b/gopls/internal/vulncheck/vulntest/testdata/report.yaml new file mode 100644 index 00000000000..48384b543b2 --- /dev/null +++ b/gopls/internal/vulncheck/vulntest/testdata/report.yaml @@ -0,0 +1,15 @@ +modules: + - module: github.com/gin-gonic/gin + versions: + - fixed: 1.6.0 + packages: + - package: github.com/gin-gonic/gin + symbols: + - defaultLogFormatter +description: | + The default Formatter for the Logger middleware (LoggerConfig.Formatter), + which is included in the Default engine, allows attackers to inject arbitrary + log entries by manipulating the request path. +references: + - fix: https://github.com/gin-gonic/gin/pull/1234 + - fix: https://github.com/gin-gonic/gin/commit/abcdefg diff --git a/gopls/main.go b/gopls/main.go index f73eabf5767..bdbe3615429 100644 --- a/gopls/main.go +++ b/gopls/main.go @@ -11,13 +11,15 @@ // for the most up-to-date documentation. package main // import "golang.org/x/tools/gopls" +//go:generate go run doc/generate.go + import ( "context" "golang.org/x/tools/internal/analysisinternal" "os" "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/cmd" + "golang.org/x/tools/gopls/internal/lsp/cmd" "golang.org/x/tools/internal/tool" ) diff --git a/gopls/release/release.go b/gopls/release/release.go index 173909122b3..dab95822eb6 100644 --- a/gopls/release/release.go +++ b/gopls/release/release.go @@ -15,25 +15,21 @@ import ( "flag" "fmt" "go/types" - exec "golang.org/x/sys/execabs" "io/ioutil" "log" "os" - "os/user" "path/filepath" "strconv" "strings" + exec "golang.org/x/sys/execabs" + "golang.org/x/mod/modfile" "golang.org/x/mod/semver" "golang.org/x/tools/go/packages" ) -var ( - versionFlag = flag.String("version", "", "version to tag") - remoteFlag = flag.String("remote", "", "remote to which to push the tag") - releaseFlag = flag.Bool("release", false, "release is true if you intend to tag and push a release") -) +var versionFlag = flag.String("version", "", "version to tag") func main() { flag.Parse() @@ -50,13 +46,6 @@ func main() { if semver.Build(*versionFlag) != "" { log.Fatalf("unexpected build suffix: %s", *versionFlag) } - if *releaseFlag && *remoteFlag == "" { - log.Fatalf("must provide -remote flag if releasing") - } - user, err := user.Current() - if err != nil { - log.Fatal(err) - } // Validate that the user is running the program from the gopls module. wd, err := os.Getwd() if err != nil { @@ -65,77 +54,28 @@ func main() { if filepath.Base(wd) != "gopls" { log.Fatalf("must run from the gopls module") } - // Confirm that they are running on a branch with a name following the - // format of "gopls-release-branch..". - if err := validateBranchName(*versionFlag); err != nil { - log.Fatal(err) - } // Confirm that they have updated the hardcoded version. - if err := validateHardcodedVersion(wd, *versionFlag); err != nil { + if err := validateHardcodedVersion(*versionFlag); err != nil { log.Fatal(err) } // Confirm that the versions in the go.mod file are correct. if err := validateGoModFile(wd); err != nil { log.Fatal(err) } - earlyExitMsg := "Validated that the release is ready. Exiting without tagging and publishing." - if !*releaseFlag { - fmt.Println(earlyExitMsg) - os.Exit(0) - } - fmt.Println(`Proceeding to tagging and publishing the release... -Please enter Y if you wish to proceed or anything else if you wish to exit.`) - // Accept and process user input. - var input string - fmt.Scanln(&input) - switch input { - case "Y": - fmt.Println("Proceeding to tagging and publishing the release.") - default: - fmt.Println(earlyExitMsg) - os.Exit(0) - } - // To tag the release: - // $ git -c user.email=username@google.com tag -a -m ā€œā€ gopls/v..- - goplsVersion := fmt.Sprintf("gopls/%s", *versionFlag) - cmd := exec.Command("git", "-c", fmt.Sprintf("user.email=%s@google.com", user.Username), "tag", "-a", "-m", fmt.Sprintf("%q", goplsVersion), goplsVersion) - if err := cmd.Run(); err != nil { - log.Fatal(err) - } - // Push the tag to the remote: - // $ git push gopls/v..-pre.1 - cmd = exec.Command("git", "push", *remoteFlag, goplsVersion) - if err := cmd.Run(); err != nil { - log.Fatal(err) - } -} - -// validateBranchName reports whether the user's current branch name is of the -// form "gopls-release-branch..". It reports an error if not. -func validateBranchName(version string) error { - cmd := exec.Command("git", "branch", "--show-current") - stdout, err := cmd.Output() - if err != nil { - return err - } - branch := strings.TrimSpace(string(stdout)) - expectedBranch := fmt.Sprintf("gopls-release-branch.%s", strings.TrimPrefix(semver.MajorMinor(version), "v")) - if branch != expectedBranch { - return fmt.Errorf("expected release branch %s, got %s", expectedBranch, branch) - } - return nil + fmt.Println("Validated that the release is ready.") + os.Exit(0) } // validateHardcodedVersion reports whether the version hardcoded in the gopls // binary is equivalent to the version being published. It reports an error if // not. -func validateHardcodedVersion(wd string, version string) error { +func validateHardcodedVersion(version string) error { + const debugPkg = "golang.org/x/tools/gopls/internal/lsp/debug" pkgs, err := packages.Load(&packages.Config{ - Dir: filepath.Dir(wd), Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes, - }, "golang.org/x/tools/internal/lsp/debug") + }, debugPkg) if err != nil { return err } @@ -143,6 +83,9 @@ func validateHardcodedVersion(wd string, version string) error { return fmt.Errorf("expected 1 package, got %v", len(pkgs)) } pkg := pkgs[0] + if len(pkg.Errors) > 0 { + return fmt.Errorf("failed to load %q: first error: %w", debugPkg, pkg.Errors[0]) + } obj := pkg.Types.Scope().Lookup("Version") c, ok := obj.(*types.Const) if !ok { @@ -164,8 +107,8 @@ func validateHardcodedVersion(wd string, version string) error { return nil } -func validateGoModFile(wd string) error { - filename := filepath.Join(wd, "go.mod") +func validateGoModFile(goplsDir string) error { + filename := filepath.Join(goplsDir, "go.mod") data, err := ioutil.ReadFile(filename) if err != nil { return err diff --git a/gopls/test/debug/debug_test.go b/gopls/test/debug/debug_test.go index 4d680eebbbe..72e5d6513c0 100644 --- a/gopls/test/debug/debug_test.go +++ b/gopls/test/debug/debug_test.go @@ -6,14 +6,13 @@ package debug_test // Provide 'static type checking' of the templates. This guards against changes is various // gopls datastructures causing template execution to fail. The checking is done by -// the github.com/jba/templatecheck pacakge. Before that is run, the test checks that +// the github.com/jba/templatecheck package. Before that is run, the test checks that // its list of templates and their arguments corresponds to the arguments in // calls to render(). The test assumes that all uses of templates are done through render(). import ( "go/ast" "html/template" - "log" "runtime" "sort" "strings" @@ -21,18 +20,14 @@ import ( "github.com/jba/templatecheck" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/gopls/internal/lsp/cache" + "golang.org/x/tools/gopls/internal/lsp/debug" ) -type tdata struct { +var templates = map[string]struct { tmpl *template.Template data interface{} // a value of the needed type -} - -var templates = map[string]tdata{ +}{ "MainTmpl": {debug.MainTmpl, &debug.Instance{}}, "DebugTmpl": {debug.DebugTmpl, nil}, "RPCTmpl": {debug.RPCTmpl, &debug.Rpcs{}}, @@ -42,45 +37,9 @@ var templates = map[string]tdata{ "ViewTmpl": {debug.ViewTmpl, &cache.View{}}, "ClientTmpl": {debug.ClientTmpl, &debug.Client{}}, "ServerTmpl": {debug.ServerTmpl, &debug.Server{}}, - //"FileTmpl": {FileTmpl, source.Overlay{}}, // need to construct a source.Overlay in init - "InfoTmpl": {debug.InfoTmpl, "something"}, - "MemoryTmpl": {debug.MemoryTmpl, runtime.MemStats{}}, -} - -// construct a source.Overlay for fileTmpl -type fakeOverlay struct{} - -func (fakeOverlay) Version() int32 { - return 0 -} -func (fakeOverlay) Session() string { - return "" -} -func (fakeOverlay) VersionedFileIdentity() source.VersionedFileIdentity { - return source.VersionedFileIdentity{} -} -func (fakeOverlay) FileIdentity() source.FileIdentity { - return source.FileIdentity{} -} -func (fakeOverlay) Kind() source.FileKind { - return 0 -} -func (fakeOverlay) Read() ([]byte, error) { - return nil, nil -} -func (fakeOverlay) Saved() bool { - return true -} -func (fakeOverlay) URI() span.URI { - return "" -} - -var _ source.Overlay = fakeOverlay{} - -func init() { - log.SetFlags(log.Lshortfile) - var v fakeOverlay - templates["FileTmpl"] = tdata{debug.FileTmpl, v} + "FileTmpl": {debug.FileTmpl, &cache.Overlay{}}, + "InfoTmpl": {debug.InfoTmpl, "something"}, + "MemoryTmpl": {debug.MemoryTmpl, runtime.MemStats{}}, } func TestTemplates(t *testing.T) { @@ -90,7 +49,7 @@ func TestTemplates(t *testing.T) { cfg := &packages.Config{ Mode: packages.NeedTypesInfo | packages.LoadAllSyntax, // figure out what's necessary PJW } - pkgs, err := packages.Load(cfg, "golang.org/x/tools/internal/lsp/debug") + pkgs, err := packages.Load(cfg, "golang.org/x/tools/gopls/internal/lsp/debug") if err != nil { t.Fatal(err) } @@ -169,6 +128,7 @@ func callsOf(p *packages.Package, tree *ast.File, name string) []*ast.CallExpr { ast.Inspect(tree, f) return ans } + func treeOf(p *packages.Package, fname string) *ast.File { for _, tree := range p.Syntax { loc := tree.Package diff --git a/gopls/test/gopls_test.go b/gopls/test/gopls_test.go deleted file mode 100644 index 6282224abb5..00000000000 --- a/gopls/test/gopls_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gopls_test - -import ( - "os" - "testing" - - "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/internal/lsp/bug" - cmdtest "golang.org/x/tools/internal/lsp/cmd/test" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/testenv" -) - -func TestMain(m *testing.M) { - bug.PanicOnBugs = true - testenv.ExitIfSmallMachine() - os.Exit(m.Run()) -} - -func TestCommandLine(t *testing.T) { - cmdtest.TestCommandLine(t, "../../internal/lsp/testdata", commandLineOptions) -} - -func commandLineOptions(options *source.Options) { - options.Staticcheck = true - options.GoDiff = false - tests.DefaultOptions(options) - hooks.Options(options) -} diff --git a/gopls/test/json_test.go b/gopls/test/json_test.go index 5ea5b343450..993af3095a1 100644 --- a/gopls/test/json_test.go +++ b/gopls/test/json_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/protocol" ) // verify that type errors in Initialize lsp messages don't cause @@ -99,7 +99,7 @@ func allDeltas(t *testing.T, v [][]int, repls ...string) { } func tryChange(start, end int, repl string) error { - var p, q protocol.InitializeParams + var p, q protocol.ParamInitialize mod := input[:start] + repl + input[end:] excerpt := func() (string, string) { a := start - 5 diff --git a/internal/analysisinternal/analysis.go b/internal/analysisinternal/analysis.go index 3f1e573342f..d15f0eb7abf 100644 --- a/internal/analysisinternal/analysis.go +++ b/internal/analysisinternal/analysis.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package analysisinternal exposes internal-only fields from go/analysis. +// Package analysisinternal provides gopls' internal analyses with a +// number of helper functions that operate on typed syntax trees. package analysisinternal import ( @@ -12,18 +13,12 @@ import ( "go/token" "go/types" "strconv" - - "golang.org/x/tools/internal/lsp/fuzzy" ) -// Flag to gate diagnostics for fuzz tests in 1.18. +// DiagnoseFuzzTests controls whether the 'tests' analyzer diagnoses fuzz tests +// in Go 1.18+. var DiagnoseFuzzTests bool = false -var ( - GetTypeErrors func(p interface{}) []types.Error - SetTypeErrors func(p interface{}, errors []types.Error) -) - func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. offset, end := fset.PositionFor(start, false).Offset, start @@ -80,6 +75,9 @@ func IsZeroValue(expr ast.Expr) bool { } } +// TypeExpr returns syntax for the specified type. References to +// named types from packages other than pkg are qualified by an appropriate +// package name, as defined by the import environment of file. func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { switch t := typ.(type) { case *types.Basic: @@ -208,14 +206,6 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { } } -type TypeErrorPass string - -const ( - NoNewVars TypeErrorPass = "nonewvars" - NoResultValues TypeErrorPass = "noresultvalues" - UndeclaredName TypeErrorPass = "undeclaredname" -) - // StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. // Some examples: // @@ -309,19 +299,21 @@ func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { }) } -// FindMatchingIdents finds all identifiers in 'node' that match any of the given types. +// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. // 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within // the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that // is unrecognized. -func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident { - matches := map[types.Type][]*ast.Ident{} +func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { + // Initialize matches to contain the variable types we are searching for. + matches := make(map[types.Type][]string) for _, typ := range typs { if typ == nil { - continue + continue // TODO(adonovan): is this reachable? } - matches[typ] = []*ast.Ident{} + matches[typ] = nil // create entry } + seen := map[types.Object]struct{}{} ast.Inspect(node, func(n ast.Node) bool { if n == nil { @@ -333,8 +325,7 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t // // x := fakeStruct{f0: x} // - assignment, ok := n.(*ast.AssignStmt) - if ok && pos > assignment.Pos() && pos <= assignment.End() { + if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { return false } if n.End() > pos { @@ -367,17 +358,17 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t return true } // The object must match one of the types that we are searching for. - if idents, ok := matches[obj.Type()]; ok { - matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name)) - } - // If the object type does not exactly match any of the target types, greedily - // find the first target type that the object type can satisfy. - for typ := range matches { - if obj.Type() == typ { - continue - } - if equivalentTypes(obj.Type(), typ) { - matches[typ] = append(matches[typ], ast.NewIdent(ident.Name)) + // TODO(adonovan): opt: use typeutil.Map? + if names, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(names, ident.Name) + } else { + // If the object type does not exactly match + // any of the target types, greedily find the first + // target type that the object type can satisfy. + for typ := range matches { + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ident.Name) + } } } return true @@ -386,7 +377,7 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t } func equivalentTypes(want, got types.Type) bool { - if want == got || types.Identical(want, got) { + if types.Identical(want, got) { return true } // Code segment to help check for untyped equality from (golang/go#32146). @@ -397,30 +388,3 @@ func equivalentTypes(want, got types.Type) bool { } return types.AssignableTo(want, got) } - -// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the -// given pattern. We return the identifier whose name is most similar to the pattern. -func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { - fuzz := fuzzy.NewMatcher(pattern) - var bestFuzz ast.Expr - highScore := float32(0) // minimum score is 0 (no match) - for _, ident := range idents { - // TODO: Improve scoring algorithm. - score := fuzz.Score(ident.Name) - if score > highScore { - highScore = score - bestFuzz = ident - } else if score == 0 { - // Order matters in the fuzzy matching algorithm. If we find no match - // when matching the target to the identifier, try matching the identifier - // to the target. - revFuzz := fuzzy.NewMatcher(ident.Name) - revScore := revFuzz.Score(pattern) - if revScore > highScore { - highScore = revScore - bestFuzz = ident - } - } - } - return bestFuzz -} diff --git a/internal/lsp/bug/bug.go b/internal/bug/bug.go similarity index 100% rename from internal/lsp/bug/bug.go rename to internal/bug/bug.go diff --git a/internal/lsp/bug/bug_test.go b/internal/bug/bug_test.go similarity index 100% rename from internal/lsp/bug/bug_test.go rename to internal/bug/bug_test.go diff --git a/internal/diff/diff.go b/internal/diff/diff.go new file mode 100644 index 00000000000..3b315054c9f --- /dev/null +++ b/internal/diff/diff.go @@ -0,0 +1,162 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff computes differences between text files or strings. +package diff + +import ( + "fmt" + "sort" + "strings" +) + +// An Edit describes the replacement of a portion of a text file. +type Edit struct { + Start, End int // byte offsets of the region to replace + New string // the replacement +} + +func (e Edit) String() string { + return fmt.Sprintf("{Start:%d,End:%d,New:%s}", e.Start, e.End, e.New) +} + +// Apply applies a sequence of edits to the src buffer and returns the +// result. Edits are applied in order of start offset; edits with the +// same start offset are applied in they order they were provided. +// +// Apply returns an error if any edit is out of bounds, +// or if any pair of edits is overlapping. +func Apply(src string, edits []Edit) (string, error) { + edits, size, err := validate(src, edits) + if err != nil { + return "", err + } + + // Apply edits. + out := make([]byte, 0, size) + lastEnd := 0 + for _, edit := range edits { + if lastEnd < edit.Start { + out = append(out, src[lastEnd:edit.Start]...) + } + out = append(out, edit.New...) + lastEnd = edit.End + } + out = append(out, src[lastEnd:]...) + + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. + size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See ApplyEdits for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all edits begin and end at the start of a line? + // TODO(adonovan): opt: is this fast path necessary? + // (Also, it complicates the result ownership.) + for _, edit := range edits { + if edit.Start >= len(src) || // insertion at EOF + edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start + edit.End > 0 && src[edit.End-1] != '\n' { // not at line start + goto expand + } + } + return edits, nil // aligned + +expand: + expanded := make([]Edit, 0, len(edits)) // a guess + prev := edits[0] + // TODO(adonovan): opt: start from the first misaligned edit. + // TODO(adonovan): opt: avoid quadratic cost of string += string. + for _, edit := range edits[1:] { + between := src[prev.End:edit.Start] + if !strings.Contains(between, "\n") { + // overlapping lines: combine with previous edit. + prev.New += between + edit.New + prev.End = edit.End + } else { + // non-overlapping lines: flush previous edit. + expanded = append(expanded, expandEdit(prev, src)) + prev = edit + } + } + return append(expanded, expandEdit(prev, src)), nil // flush final edit +} + +// expandEdit returns edit expanded to complete whole lines. +func expandEdit(edit Edit, src string) Edit { + // Expand start left to start of line. + // (delta is the zero-based column number of of start.) + start := edit.Start + if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 { + edit.Start -= delta + edit.New = src[start-delta:start] + edit.New + } + + // Expand end right to end of line. + end := edit.End + if nl := strings.IndexByte(src[end:], '\n'); nl < 0 { + edit.End = len(src) // extend to EOF + } else { + edit.End = end + nl + 1 // extend beyond \n + } + edit.New += src[end:edit.End] + + return edit +} diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go new file mode 100644 index 00000000000..b6881c1f2f0 --- /dev/null +++ b/internal/diff/diff_test.go @@ -0,0 +1,199 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff_test + +import ( + "bytes" + "math/rand" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "testing" + "unicode/utf8" + + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/diff/difftest" + "golang.org/x/tools/internal/testenv" +) + +func TestApply(t *testing.T) { + for _, tc := range difftest.TestCases { + t.Run(tc.Name, func(t *testing.T) { + got, err := diff.Apply(tc.In, tc.Edits) + if err != nil { + t.Fatalf("Apply(Edits) failed: %v", err) + } + if got != tc.Out { + t.Errorf("Apply(Edits): got %q, want %q", got, tc.Out) + } + if tc.LineEdits != nil { + got, err := diff.Apply(tc.In, tc.LineEdits) + if err != nil { + t.Fatalf("Apply(LineEdits) failed: %v", err) + } + if got != tc.Out { + t.Errorf("Apply(LineEdits): got %q, want %q", got, tc.Out) + } + } + }) + } +} + +func TestNEdits(t *testing.T) { + for _, tc := range difftest.TestCases { + edits := diff.Strings(tc.In, tc.Out) + got, err := diff.Apply(tc.In, edits) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + if got != tc.Out { + t.Fatalf("%s: got %q wanted %q", tc.Name, got, tc.Out) + } + if len(edits) < len(tc.Edits) { // should find subline edits + t.Errorf("got %v, expected %v for %#v", edits, tc.Edits, tc) + } + } +} + +func TestNRandom(t *testing.T) { + rand.Seed(1) + for i := 0; i < 1000; i++ { + a := randstr("abω", 16) + b := randstr("abωc", 16) + edits := diff.Strings(a, b) + got, err := diff.Apply(a, edits) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + if got != b { + t.Fatalf("%d: got %q, wanted %q, starting with %q", i, got, b, a) + } + } +} + +// $ go test -fuzz=FuzzRoundTrip ./internal/diff +func FuzzRoundTrip(f *testing.F) { + f.Fuzz(func(t *testing.T, a, b string) { + if !utf8.ValidString(a) || !utf8.ValidString(b) { + return // inputs must be text + } + edits := diff.Strings(a, b) + got, err := diff.Apply(a, edits) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + if got != b { + t.Fatalf("applying diff(%q, %q) gives %q; edits=%v", a, b, got, edits) + } + }) +} + +func TestLineEdits(t *testing.T) { + for _, tc := range difftest.TestCases { + t.Run(tc.Name, func(t *testing.T) { + // if line edits not specified, it is the same as edits + edits := tc.LineEdits + if edits == nil { + edits = tc.Edits + } + got, err := diff.LineEdits(tc.In, tc.Edits) + if err != nil { + t.Fatalf("LineEdits: %v", err) + } + if !reflect.DeepEqual(got, edits) { + t.Errorf("LineEdits got\n%q, want\n%q\n%#v", got, edits, tc) + } + }) + } +} + +func TestToUnified(t *testing.T) { + testenv.NeedsTool(t, "patch") + for _, tc := range difftest.TestCases { + t.Run(tc.Name, func(t *testing.T) { + unified, err := diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.Edits) + if err != nil { + t.Fatal(err) + } + if unified == "" { + return + } + orig := filepath.Join(t.TempDir(), "original") + err = os.WriteFile(orig, []byte(tc.In), 0644) + if err != nil { + t.Fatal(err) + } + temp := filepath.Join(t.TempDir(), "patched") + err = os.WriteFile(temp, []byte(tc.In), 0644) + if err != nil { + t.Fatal(err) + } + cmd := exec.Command("patch", "-p0", "-u", "-s", "-o", temp, orig) + cmd.Stdin = strings.NewReader(unified) + cmd.Stdout = new(bytes.Buffer) + cmd.Stderr = new(bytes.Buffer) + if err = cmd.Run(); err != nil { + t.Fatalf("%v: %q (%q) (%q)", err, cmd.String(), + cmd.Stderr, cmd.Stdout) + } + got, err := os.ReadFile(temp) + if err != nil { + t.Fatal(err) + } + if string(got) != tc.Out { + t.Errorf("applying unified failed: got\n%q, wanted\n%q unified\n%q", + got, tc.Out, unified) + } + + }) + } +} + +func TestRegressionOld001(t *testing.T) { + a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n" + + b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n" + diffs := diff.Strings(a, b) + got, err := diff.Apply(a, diffs) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + if got != b { + i := 0 + for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ { + } + t.Errorf("oops %vd\n%q\n%q", diffs, got, b) + t.Errorf("\n%q\n%q", got[i:], b[i:]) + } +} + +func TestRegressionOld002(t *testing.T) { + a := "n\"\n)\n" + b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n" + diffs := diff.Strings(a, b) + got, err := diff.Apply(a, diffs) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + if got != b { + i := 0 + for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ { + } + t.Errorf("oops %vd\n%q\n%q", diffs, got, b) + t.Errorf("\n%q\n%q", got[i:], b[i:]) + } +} + +// return a random string of length n made of characters from s +func randstr(s string, n int) string { + src := []rune(s) + x := make([]rune, n) + for i := 0; i < n; i++ { + x[i] = src[rand.Intn(len(src))] + } + return string(x) +} diff --git a/internal/diff/difftest/difftest.go b/internal/diff/difftest/difftest.go new file mode 100644 index 00000000000..4a251111b6f --- /dev/null +++ b/internal/diff/difftest/difftest.go @@ -0,0 +1,289 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package difftest supplies a set of tests that will operate on any +// implementation of a diff algorithm as exposed by +// "golang.org/x/tools/internal/diff" +package difftest + +// There are two kinds of tests, semantic tests, and 'golden data' tests. +// The semantic tests check that the computed diffs transform the input to +// the output, and that 'patch' accepts the computed unified diffs. +// The other tests just check that Edits and LineEdits haven't changed +// unexpectedly. These fields may need to be changed when the diff algorithm +// changes. + +import ( + "testing" + + "golang.org/x/tools/internal/diff" +) + +const ( + FileA = "from" + FileB = "to" + UnifiedPrefix = "--- " + FileA + "\n+++ " + FileB + "\n" +) + +var TestCases = []struct { + Name, In, Out, Unified string + Edits, LineEdits []diff.Edit + NoDiff bool +}{{ + Name: "empty", + In: "", + Out: "", +}, { + Name: "no_diff", + In: "gargantuan\n", + Out: "gargantuan\n", +}, { + Name: "replace_all", + In: "fruit\n", + Out: "cheese\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-fruit ++cheese +`[1:], + Edits: []diff.Edit{{Start: 0, End: 5, New: "cheese"}}, + LineEdits: []diff.Edit{{Start: 0, End: 6, New: "cheese\n"}}, +}, { + Name: "insert_rune", + In: "gord\n", + Out: "gourd\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-gord ++gourd +`[1:], + Edits: []diff.Edit{{Start: 2, End: 2, New: "u"}}, + LineEdits: []diff.Edit{{Start: 0, End: 5, New: "gourd\n"}}, +}, { + Name: "delete_rune", + In: "groat\n", + Out: "goat\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-groat ++goat +`[1:], + Edits: []diff.Edit{{Start: 1, End: 2, New: ""}}, + LineEdits: []diff.Edit{{Start: 0, End: 6, New: "goat\n"}}, +}, { + Name: "replace_rune", + In: "loud\n", + Out: "lord\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-loud ++lord +`[1:], + Edits: []diff.Edit{{Start: 2, End: 3, New: "r"}}, + LineEdits: []diff.Edit{{Start: 0, End: 5, New: "lord\n"}}, +}, { + Name: "replace_partials", + In: "blanket\n", + Out: "bunker\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-blanket ++bunker +`[1:], + Edits: []diff.Edit{ + {Start: 1, End: 3, New: "u"}, + {Start: 6, End: 7, New: "r"}, + }, + LineEdits: []diff.Edit{{Start: 0, End: 8, New: "bunker\n"}}, +}, { + Name: "insert_line", + In: "1: one\n3: three\n", + Out: "1: one\n2: two\n3: three\n", + Unified: UnifiedPrefix + ` +@@ -1,2 +1,3 @@ + 1: one ++2: two + 3: three +`[1:], + Edits: []diff.Edit{{Start: 7, End: 7, New: "2: two\n"}}, +}, { + Name: "replace_no_newline", + In: "A", + Out: "B", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-A +\ No newline at end of file ++B +\ No newline at end of file +`[1:], + Edits: []diff.Edit{{Start: 0, End: 1, New: "B"}}, +}, { + Name: "append_empty", + In: "", // GNU diff -u special case: -0,0 + Out: "AB\nC", + Unified: UnifiedPrefix + ` +@@ -0,0 +1,2 @@ ++AB ++C +\ No newline at end of file +`[1:], + Edits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}}, + LineEdits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}}, +}, + // TODO(adonovan): fix this test: GNU diff -u prints "+1,2", Unifies prints "+1,3". + // { + // Name: "add_start", + // In: "A", + // Out: "B\nCA", + // Unified: UnifiedPrefix + ` + // @@ -1 +1,2 @@ + // -A + // \ No newline at end of file + // +B + // +CA + // \ No newline at end of file + // `[1:], + // Edits: []diff.TextEdit{{Span: newSpan(0, 0), NewText: "B\nC"}}, + // LineEdits: []diff.TextEdit{{Span: newSpan(0, 0), NewText: "B\nC"}}, + // }, + { + Name: "add_end", + In: "A", + Out: "AB", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-A +\ No newline at end of file ++AB +\ No newline at end of file +`[1:], + Edits: []diff.Edit{{Start: 1, End: 1, New: "B"}}, + LineEdits: []diff.Edit{{Start: 0, End: 1, New: "AB"}}, + }, { + Name: "add_empty", + In: "", + Out: "AB\nC", + Unified: UnifiedPrefix + ` +@@ -0,0 +1,2 @@ ++AB ++C +\ No newline at end of file +`[1:], + Edits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}}, + LineEdits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}}, + }, { + Name: "add_newline", + In: "A", + Out: "A\n", + Unified: UnifiedPrefix + ` +@@ -1 +1 @@ +-A +\ No newline at end of file ++A +`[1:], + Edits: []diff.Edit{{Start: 1, End: 1, New: "\n"}}, + LineEdits: []diff.Edit{{Start: 0, End: 1, New: "A\n"}}, + }, { + Name: "delete_front", + In: "A\nB\nC\nA\nB\nB\nA\n", + Out: "C\nB\nA\nB\nA\nC\n", + Unified: UnifiedPrefix + ` +@@ -1,7 +1,6 @@ +-A +-B + C ++B + A + B +-B + A ++C +`[1:], + NoDiff: true, // unified diff is different but valid + Edits: []diff.Edit{ + {Start: 0, End: 4, New: ""}, + {Start: 6, End: 6, New: "B\n"}, + {Start: 10, End: 12, New: ""}, + {Start: 14, End: 14, New: "C\n"}, + }, + LineEdits: []diff.Edit{ + {Start: 0, End: 6, New: "C\n"}, + {Start: 6, End: 8, New: "B\nA\n"}, + {Start: 10, End: 14, New: "A\n"}, + {Start: 14, End: 14, New: "C\n"}, + }, + }, { + Name: "replace_last_line", + In: "A\nB\n", + Out: "A\nC\n\n", + Unified: UnifiedPrefix + ` +@@ -1,2 +1,3 @@ + A +-B ++C ++ +`[1:], + Edits: []diff.Edit{{Start: 2, End: 3, New: "C\n"}}, + LineEdits: []diff.Edit{{Start: 2, End: 4, New: "C\n\n"}}, + }, + { + Name: "multiple_replace", + In: "A\nB\nC\nD\nE\nF\nG\n", + Out: "A\nH\nI\nJ\nE\nF\nK\n", + Unified: UnifiedPrefix + ` +@@ -1,7 +1,7 @@ + A +-B +-C +-D ++H ++I ++J + E + F +-G ++K +`[1:], + Edits: []diff.Edit{ + {Start: 2, End: 8, New: "H\nI\nJ\n"}, + {Start: 12, End: 14, New: "K\n"}, + }, + NoDiff: true, // diff algorithm produces different delete/insert pattern + }, + { + Name: "extra_newline", + In: "\nA\n", + Out: "A\n", + Edits: []diff.Edit{{Start: 0, End: 1, New: ""}}, + Unified: UnifiedPrefix + `@@ -1,2 +1 @@ +- + A +`, + }, +} + +func DiffTest(t *testing.T, compute func(before, after string) []diff.Edit) { + for _, test := range TestCases { + t.Run(test.Name, func(t *testing.T) { + edits := compute(test.In, test.Out) + got, err := diff.Apply(test.In, edits) + if err != nil { + t.Fatalf("Apply failed: %v", err) + } + unified, err := diff.ToUnified(FileA, FileB, test.In, edits) + if err != nil { + t.Fatalf("ToUnified: %v", err) + } + if got != test.Out { + t.Errorf("Apply: got patched:\n%v\nfrom diff:\n%v\nexpected:\n%v", + got, unified, test.Out) + } + if !test.NoDiff && unified != test.Unified { + t.Errorf("Unified: got diff:\n%q\nexpected:\n%q diffs:%v", + unified, test.Unified, edits) + } + }) + } +} diff --git a/internal/lsp/diff/difftest/difftest_test.go b/internal/diff/difftest/difftest_test.go similarity index 92% rename from internal/lsp/diff/difftest/difftest_test.go rename to internal/diff/difftest/difftest_test.go index fd7ecf95997..a990e522438 100644 --- a/internal/lsp/diff/difftest/difftest_test.go +++ b/internal/diff/difftest/difftest_test.go @@ -4,7 +4,7 @@ // Package difftest supplies a set of tests that will operate on any // implementation of a diff algorithm as exposed by -// "golang.org/x/tools/internal/lsp/diff" +// "golang.org/x/tools/internal/diff" package difftest_test import ( @@ -15,7 +15,7 @@ import ( "strings" "testing" - "golang.org/x/tools/internal/lsp/diff/difftest" + "golang.org/x/tools/internal/diff/difftest" "golang.org/x/tools/internal/testenv" ) @@ -23,7 +23,6 @@ func TestVerifyUnified(t *testing.T) { testenv.NeedsTool(t, "diff") for _, test := range difftest.TestCases { t.Run(test.Name, func(t *testing.T) { - t.Helper() if test.NoDiff { t.Skip("diff tool produces expected different results") } @@ -35,7 +34,7 @@ func TestVerifyUnified(t *testing.T) { diff = difftest.UnifiedPrefix + diff } if diff != test.Unified { - t.Errorf("unified:\n%q\ndiff -u:\n%q", test.Unified, diff) + t.Errorf("unified:\n%s\ndiff -u:\n%s", test.Unified, diff) } }) } diff --git a/internal/diff/export_test.go b/internal/diff/export_test.go new file mode 100644 index 00000000000..eedf0dd77ba --- /dev/null +++ b/internal/diff/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +// This file exports some private declarations to tests. + +var LineEdits = lineEdits diff --git a/internal/diff/lcs/common.go b/internal/diff/lcs/common.go new file mode 100644 index 00000000000..c3e82dd2683 --- /dev/null +++ b/internal/diff/lcs/common.go @@ -0,0 +1,179 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "sort" +) + +// lcs is a longest common sequence +type lcs []diag + +// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i l[j].Len + }) + return l +} + +// validate that the elements of the lcs do not overlap +// (can only happen when the two-sided algorithm ends early) +// expects the lcs to be sorted +func (l lcs) valid() bool { + for i := 1; i < len(l); i++ { + if l[i-1].X+l[i-1].Len > l[i].X { + return false + } + if l[i-1].Y+l[i-1].Len > l[i].Y { + return false + } + } + return true +} + +// repair overlapping lcs +// only called if two-sided stops early +func (l lcs) fix() lcs { + // from the set of diagonals in l, find a maximal non-conflicting set + // this problem may be NP-complete, but we use a greedy heuristic, + // which is quadratic, but with a better data structure, could be D log D. + // indepedent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs + // which has to have monotone x and y + if len(l) == 0 { + return nil + } + sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len }) + tmp := make(lcs, 0, len(l)) + tmp = append(tmp, l[0]) + for i := 1; i < len(l); i++ { + var dir direction + nxt := l[i] + for _, in := range tmp { + if dir, nxt = overlap(in, nxt); dir == empty || dir == bad { + break + } + } + if nxt.Len > 0 && dir != bad { + tmp = append(tmp, nxt) + } + } + tmp.sort() + if false && !tmp.valid() { // debug checking + log.Fatalf("here %d", len(tmp)) + } + return tmp +} + +type direction int + +const ( + empty direction = iota // diag is empty (so not in lcs) + leftdown // proposed acceptably to the left and below + rightup // proposed diag is acceptably to the right and above + bad // proposed diag is inconsistent with the lcs so far +) + +// overlap trims the proposed diag prop so it doesn't overlap with +// the existing diag that has already been added to the lcs. +func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of peop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prepend is only called to extend diagonals +// the backward direction. +func (lcs lcs) prepend(x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// append appends a diagonal, or extends the existing one. +// by adding the edge (x,y)-(x+1.y+1). append is only called +// to extend diagonals in the forward direction. +func (lcs lcs) append(x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. + if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} diff --git a/internal/diff/lcs/common_test.go b/internal/diff/lcs/common_test.go new file mode 100644 index 00000000000..f19245e404c --- /dev/null +++ b/internal/diff/lcs/common_test.go @@ -0,0 +1,140 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "math/rand" + "strings" + "testing" +) + +type Btest struct { + a, b string + lcs []string +} + +var Btests = []Btest{ + {"aaabab", "abaab", []string{"abab", "aaab"}}, + {"aabbba", "baaba", []string{"aaba"}}, + {"cabbx", "cbabx", []string{"cabx", "cbbx"}}, + {"c", "cb", []string{"c"}}, + {"aaba", "bbb", []string{"b"}}, + {"bbaabb", "b", []string{"b"}}, + {"baaabb", "bbaba", []string{"bbb", "baa", "bab"}}, + {"baaabb", "abbab", []string{"abb", "bab", "aab"}}, + {"baaba", "aaabba", []string{"aaba"}}, + {"ca", "cba", []string{"ca"}}, + {"ccbcbc", "abba", []string{"bb"}}, + {"ccbcbc", "aabba", []string{"bb"}}, + {"ccb", "cba", []string{"cb"}}, + {"caef", "axe", []string{"ae"}}, + {"bbaabb", "baabb", []string{"baabb"}}, + // Example from Myers: + {"abcabba", "cbabac", []string{"caba", "baba", "cbba"}}, + {"3456aaa", "aaa", []string{"aaa"}}, + {"aaa", "aaa123", []string{"aaa"}}, + {"aabaa", "aacaa", []string{"aaaa"}}, + {"1a", "a", []string{"a"}}, + {"abab", "bb", []string{"bb"}}, + {"123", "ab", []string{""}}, + {"a", "b", []string{""}}, + {"abc", "123", []string{""}}, + {"aa", "aa", []string{"aa"}}, + {"abcde", "12345", []string{""}}, + {"aaa3456", "aaa", []string{"aaa"}}, + {"abcde", "12345a", []string{"a"}}, + {"ab", "123", []string{""}}, + {"1a2", "a", []string{"a"}}, + // for two-sided + {"babaab", "cccaba", []string{"aba"}}, + {"aabbab", "cbcabc", []string{"bab"}}, + {"abaabb", "bcacab", []string{"baab"}}, + {"abaabb", "abaaaa", []string{"abaa"}}, + {"bababb", "baaabb", []string{"baabb"}}, + {"abbbaa", "cabacc", []string{"aba"}}, + {"aabbaa", "aacaba", []string{"aaaa", "aaba"}}, +} + +func init() { + log.SetFlags(log.Lshortfile) +} + +func check(t *testing.T, str string, lcs lcs, want []string) { + t.Helper() + if !lcs.valid() { + t.Errorf("bad lcs %v", lcs) + } + var got strings.Builder + for _, dd := range lcs { + got.WriteString(str[dd.X : dd.X+dd.Len]) + } + ans := got.String() + for _, w := range want { + if ans == w { + return + } + } + t.Fatalf("str=%q lcs=%v want=%q got=%q", str, lcs, want, ans) +} + +func checkDiffs(t *testing.T, before string, diffs []Diff, after string) { + t.Helper() + var ans strings.Builder + sofar := 0 // index of position in before + for _, d := range diffs { + if sofar < d.Start { + ans.WriteString(before[sofar:d.Start]) + } + ans.WriteString(after[d.ReplStart:d.ReplEnd]) + sofar = d.End + } + ans.WriteString(before[sofar:]) + if ans.String() != after { + t.Fatalf("diff %v took %q to %q, not to %q", diffs, before, ans.String(), after) + } +} + +func lcslen(l lcs) int { + ans := 0 + for _, d := range l { + ans += int(d.Len) + } + return ans +} + +// return a random string of length n made of characters from s +func randstr(s string, n int) string { + src := []rune(s) + x := make([]rune, n) + for i := 0; i < n; i++ { + x[i] = src[rand.Intn(len(src))] + } + return string(x) +} + +func TestLcsFix(t *testing.T) { + tests := []struct{ before, after lcs }{ + {lcs{diag{0, 0, 3}, diag{2, 2, 5}, diag{3, 4, 5}, diag{8, 9, 4}}, lcs{diag{0, 0, 2}, diag{2, 2, 1}, diag{3, 4, 5}, diag{8, 9, 4}}}, + {lcs{diag{1, 1, 6}, diag{6, 12, 3}}, lcs{diag{1, 1, 5}, diag{6, 12, 3}}}, + {lcs{diag{0, 0, 4}, diag{3, 5, 4}}, lcs{diag{0, 0, 3}, diag{3, 5, 4}}}, + {lcs{diag{0, 20, 1}, diag{0, 0, 3}, diag{1, 20, 4}}, lcs{diag{0, 0, 3}, diag{3, 22, 2}}}, + {lcs{diag{0, 0, 4}, diag{1, 1, 2}}, lcs{diag{0, 0, 4}}}, + {lcs{diag{0, 0, 4}}, lcs{diag{0, 0, 4}}}, + {lcs{}, lcs{}}, + {lcs{diag{0, 0, 4}, diag{1, 1, 6}, diag{3, 3, 2}}, lcs{diag{0, 0, 1}, diag{1, 1, 6}}}, + } + for n, x := range tests { + got := x.before.fix() + if len(got) != len(x.after) { + t.Errorf("got %v, expected %v, for %v", got, x.after, x.before) + } + olen := lcslen(x.after) + glen := lcslen(got) + if olen != glen { + t.Errorf("%d: lens(%d,%d) differ, %v, %v, %v", n, glen, olen, got, x.after, x.before) + } + } +} diff --git a/internal/diff/lcs/doc.go b/internal/diff/lcs/doc.go new file mode 100644 index 00000000000..dc779f38a01 --- /dev/null +++ b/internal/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a +horizontal edge from v[i][j] to v[i+1][j] whenever both are in +the graph, and a vertical edge from v[i][j] to f[i][j+1] similarly. +When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1]. + +A path between in the graph between (0,0) and (N,M) determines a sequence +of edits converting A into B: each horizontal edge corresponds to removing +an element of A, and each vertical edge corresponds to inserting an +element of B. + +A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph +is of length D if it has D non-diagonal edges. The algorithms generate +forward paths (in which at least one of x,y increases at each edge), +or backward paths (in which at least one of x,y decreases at each edge), +or a combination. (Note that the orientation is the traditional mathematical one, +with the origin in the lower-left corner.) + +Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.) + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + c | | | | | | | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a a b b a a + + +The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at +the end of a maximal path of length D. (Because x-y=k it suffices to remember +only the x coordinate of the vertex.) + +The forward algorithm: Find the longest diagonal starting at (0,0) and +label its end with D=0,k=0. From that vertex take a vertical step and +then follow the longest diagonal (up and to the right), and label that vertex +with D=1,k=-1. From the D=0,k=0 point take a horizontal step and the follow +the longest diagonal (up and to the right) and label that vertex +D=1,k=1. In the same way, having labelled all the D vertices, +from a vertex labelled D,k find two vertices +tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same +diagonal, in which case take the one with the larger x. + +Eventually the path gets to (N,M), and the diagonals on it are the LCS. + +Here is the edit graph with the ends of D-paths labelled. (So, for instance, +0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first +step is to go up the longest diagonal from (0,0).) +A:"aabbaa", B:"aacaba" + āŠ™ ------- āŠ™ ------- āŠ™ -------(3/3,6)------- āŠ™ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ -------(2/3,5)------- āŠ™ ------- āŠ™ ------- āŠ™ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ -------(3/5,4)------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ -------(1/2,3)-------(2/3,3)------- āŠ™ ------- āŠ™ ------- āŠ™ + c | | | | | | | + āŠ™ ------- āŠ™ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ ------- āŠ™ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ -------- āŠ™ --------(:0/5,5)-------- āŠ™ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + āŠ™ -------- āŠ™ -------- āŠ™ --------(:1/3,4)-------- āŠ™ -------- āŠ™ -------- āŠ™ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- āŠ™ --------(:2/3,3)--------(:1/4,3)-------- āŠ™ -------- āŠ™ + c | | | | | | | + āŠ™ -------- āŠ™ -------- āŠ™ --------(:3/3,2)--------(:2/4,2)-------- āŠ™ -------- āŠ™ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- āŠ™ -------- āŠ™ -------- āŠ™ --------(:3/4,1)-------- āŠ™ -------- āŠ™ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- āŠ™ -------- āŠ™ -------- āŠ™ --------(:4/4,0)-------- āŠ™ -------- āŠ™ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. +A:"aabbaa", B:"aacaba" + āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + āŠ™ --------- āŠ™ --------- āŠ™ --------- (2/3,5) --------- āŠ™ --------- (:0/5,5)--------- āŠ™ + b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | | + āŠ™ --------- āŠ™ --------- āŠ™ --------- (:1/3,4)--------- āŠ™ --------- āŠ™ --------- āŠ™ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + āŠ™ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- āŠ™ --------- āŠ™ + c | | | | | | | + āŠ™ --------- āŠ™ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- āŠ™ --------- āŠ™ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ --------- āŠ™ + a a b b a a + +The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion +is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same +diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward +2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path. +Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the +computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed +from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path. + +If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a +backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two +computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS +is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two +to form a best-effort LCS. In the worst case the forward partial LCS may have to +be recomputed. +*/ + +/* Eugene Myers paper is titled +"An O(ND) Difference Algorithm and Its Variations" +and can be found at +http://www.xmailserver.org/diff2.pdf + +(There is a generic implementation of the algorithm the the repository with git hash +b9ad7e4ade3a686d608e44475390ad428e60e7fc) +*/ diff --git a/internal/diff/lcs/git.sh b/internal/diff/lcs/git.sh new file mode 100644 index 00000000000..6856f843958 --- /dev/null +++ b/internal/diff/lcs/git.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Creates a zip file containing all numbered versions +# of the commit history of a large source file, for use +# as input data for the tests of the diff algorithm. +# +# Run script from root of the x/tools repo. + +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. +# file=internal/lsp/source/completion/completion.go +# file=internal/lsp/source/diagnostics.go +file=internal/lsp/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/internal/diff/lcs/labels.go b/internal/diff/lcs/labels.go new file mode 100644 index 00000000000..0689f1ed700 --- /dev/null +++ b/internal/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/internal/diff/lcs/old.go b/internal/diff/lcs/old.go new file mode 100644 index 00000000000..7af11fc896c --- /dev/null +++ b/internal/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 30 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. +// The algorithm is one of {forward, backward, twosided}. +func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forwward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. +func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. + e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. + if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/internal/diff/lcs/old_test.go b/internal/diff/lcs/old_test.go new file mode 100644 index 00000000000..0c894316fa5 --- /dev/null +++ b/internal/diff/lcs/old_test.go @@ -0,0 +1,251 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" + "io/ioutil" + "log" + "math/rand" + "strings" + "testing" +) + +func TestAlgosOld(t *testing.T) { + for i, algo := range []func(*editGraph) lcs{forward, backward, twosided} { + t.Run(strings.Fields("forward backward twosided")[i], func(t *testing.T) { + for _, tx := range Btests { + lim := len(tx.a) + len(tx.b) + + diffs, lcs := compute(stringSeqs{tx.a, tx.b}, algo, lim) + check(t, tx.a, lcs, tx.lcs) + checkDiffs(t, tx.a, diffs, tx.b) + + diffs, lcs = compute(stringSeqs{tx.b, tx.a}, algo, lim) + check(t, tx.b, lcs, tx.lcs) + checkDiffs(t, tx.b, diffs, tx.a) + } + }) + } +} + +func TestIntOld(t *testing.T) { + // need to avoid any characters in btests + lfill, rfill := "AAAAAAAAAAAA", "BBBBBBBBBBBB" + for _, tx := range Btests { + if len(tx.a) < 2 || len(tx.b) < 2 { + continue + } + left := tx.a + lfill + right := tx.b + rfill + lim := len(tx.a) + len(tx.b) + diffs, lcs := compute(stringSeqs{left, right}, twosided, lim) + check(t, left, lcs, tx.lcs) + checkDiffs(t, left, diffs, right) + diffs, lcs = compute(stringSeqs{right, left}, twosided, lim) + check(t, right, lcs, tx.lcs) + checkDiffs(t, right, diffs, left) + + left = lfill + tx.a + right = rfill + tx.b + diffs, lcs = compute(stringSeqs{left, right}, twosided, lim) + check(t, left, lcs, tx.lcs) + checkDiffs(t, left, diffs, right) + diffs, lcs = compute(stringSeqs{right, left}, twosided, lim) + check(t, right, lcs, tx.lcs) + checkDiffs(t, right, diffs, left) + } +} + +func TestSpecialOld(t *testing.T) { // exercises lcs.fix + a := "golang.org/x/tools/intern" + b := "github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/intern" + diffs, lcs := compute(stringSeqs{a, b}, twosided, 4) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } +} + +func TestRegressionOld001(t *testing.T) { + a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n" + + b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n" + for i := 1; i < len(b); i++ { + diffs, lcs := compute(stringSeqs{a, b}, twosided, i) // 14 from gopls + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRegressionOld002(t *testing.T) { + a := "n\"\n)\n" + b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n" + for i := 1; i <= len(b); i++ { + diffs, lcs := compute(stringSeqs{a, b}, twosided, i) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRegressionOld003(t *testing.T) { + a := "golang.org/x/hello v1.0.0\nrequire golang.org/x/unused v1" + b := "golang.org/x/hello v1" + for i := 1; i <= len(a); i++ { + diffs, lcs := compute(stringSeqs{a, b}, twosided, i) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRandOld(t *testing.T) { + rand.Seed(1) + for i := 0; i < 1000; i++ { + // TODO(adonovan): use ASCII and bytesSeqs here? The use of + // non-ASCII isn't relevant to the property exercised by the test. + a := []rune(randstr("abω", 16)) + b := []rune(randstr("abωc", 16)) + seq := runesSeqs{a, b} + + const lim = 24 // large enough to get true lcs + _, forw := compute(seq, forward, lim) + _, back := compute(seq, backward, lim) + _, two := compute(seq, twosided, lim) + if lcslen(two) != lcslen(forw) || lcslen(forw) != lcslen(back) { + t.Logf("\n%v\n%v\n%v", forw, back, two) + t.Fatalf("%d forw:%d back:%d two:%d", i, lcslen(forw), lcslen(back), lcslen(two)) + } + if !two.valid() || !forw.valid() || !back.valid() { + t.Errorf("check failure") + } + } +} + +// TestDiffAPI tests the public API functions (Diff{Bytes,Strings,Runes}) +// to ensure at least miminal parity of the three representations. +func TestDiffAPI(t *testing.T) { + for _, test := range []struct { + a, b string + wantStrings, wantBytes, wantRunes string + }{ + {"abcXdef", "abcxdef", "[{3 4 3 4}]", "[{3 4 3 4}]", "[{3 4 3 4}]"}, // ASCII + {"abcωdef", "abcĪ©def", "[{3 5 3 5}]", "[{3 5 3 5}]", "[{3 4 3 4}]"}, // non-ASCII + } { + + gotStrings := fmt.Sprint(DiffStrings(test.a, test.b)) + if gotStrings != test.wantStrings { + t.Errorf("DiffStrings(%q, %q) = %v, want %v", + test.a, test.b, gotStrings, test.wantStrings) + } + gotBytes := fmt.Sprint(DiffBytes([]byte(test.a), []byte(test.b))) + if gotBytes != test.wantBytes { + t.Errorf("DiffBytes(%q, %q) = %v, want %v", + test.a, test.b, gotBytes, test.wantBytes) + } + gotRunes := fmt.Sprint(DiffRunes([]rune(test.a), []rune(test.b))) + if gotRunes != test.wantRunes { + t.Errorf("DiffRunes(%q, %q) = %v, want %v", + test.a, test.b, gotRunes, test.wantRunes) + } + } +} + +func BenchmarkTwoOld(b *testing.B) { + tests := genBench("abc", 96) + for i := 0; i < b.N; i++ { + for _, tt := range tests { + _, two := compute(stringSeqs{tt.before, tt.after}, twosided, 100) + if !two.valid() { + b.Error("check failed") + } + } + } +} + +func BenchmarkForwOld(b *testing.B) { + tests := genBench("abc", 96) + for i := 0; i < b.N; i++ { + for _, tt := range tests { + _, two := compute(stringSeqs{tt.before, tt.after}, forward, 100) + if !two.valid() { + b.Error("check failed") + } + } + } +} + +func genBench(set string, n int) []struct{ before, after string } { + // before and after for benchmarks. 24 strings of length n with + // before and after differing at least once, and about 5% + rand.Seed(3) + var ans []struct{ before, after string } + for i := 0; i < 24; i++ { + // maybe b should have an approximately known number of diffs + a := randstr(set, n) + cnt := 0 + bb := make([]rune, 0, n) + for _, r := range a { + if rand.Float64() < .05 { + cnt++ + r = 'N' + } + bb = append(bb, r) + } + if cnt == 0 { + // avoid == shortcut + bb[n/2] = 'N' + } + ans = append(ans, struct{ before, after string }{a, string(bb)}) + } + return ans +} + +// This benchmark represents a common case for a diff command: +// large file with a single relatively small diff in the middle. +// (It's not clear whether this is representative of gopls workloads +// or whether it is important to gopls diff performance.) +// +// TODO(adonovan) opt: it could be much faster. For example, +// comparing a file against itself is about 10x faster than with the +// small deletion in the middle. Strangely, comparing a file against +// itself minus the last byte is faster still; I don't know why. +// There is much low-hanging fruit here for further improvement. +func BenchmarkLargeFileSmallDiff(b *testing.B) { + data, err := ioutil.ReadFile("old.go") // large file + if err != nil { + log.Fatal(err) + } + + n := len(data) + + src := string(data) + dst := src[:n*49/100] + src[n*51/100:] // remove 2% from the middle + b.Run("string", func(b *testing.B) { + for i := 0; i < b.N; i++ { + compute(stringSeqs{src, dst}, twosided, len(src)+len(dst)) + } + }) + + srcBytes := []byte(src) + dstBytes := []byte(dst) + b.Run("bytes", func(b *testing.B) { + for i := 0; i < b.N; i++ { + compute(bytesSeqs{srcBytes, dstBytes}, twosided, len(srcBytes)+len(dstBytes)) + } + }) + + srcRunes := []rune(src) + dstRunes := []rune(dst) + b.Run("runes", func(b *testing.B) { + for i := 0; i < b.N; i++ { + compute(runesSeqs{srcRunes, dstRunes}, twosided, len(srcRunes)+len(dstRunes)) + } + }) +} diff --git a/internal/diff/lcs/sequence.go b/internal/diff/lcs/sequence.go new file mode 100644 index 00000000000..2d72d263043 --- /dev/null +++ b/internal/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. + +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/internal/diff/myers/diff.go b/internal/diff/myers/diff.go new file mode 100644 index 00000000000..7c2d4356b42 --- /dev/null +++ b/internal/diff/myers/diff.go @@ -0,0 +1,215 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package myers implements the Myers diff algorithm. +package myers + +import ( + "strings" + + "golang.org/x/tools/internal/diff" +) + +// Sources: +// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/ +// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2 + +func ComputeEdits(before, after string) []diff.Edit { + beforeLines := splitLines(before) + ops := operations(beforeLines, splitLines(after)) + + // Build a table mapping line number to offset. + lineOffsets := make([]int, 0, len(beforeLines)+1) + total := 0 + for i := range beforeLines { + lineOffsets = append(lineOffsets, total) + total += len(beforeLines[i]) + } + lineOffsets = append(lineOffsets, total) // EOF + + edits := make([]diff.Edit, 0, len(ops)) + for _, op := range ops { + start, end := lineOffsets[op.I1], lineOffsets[op.I2] + switch op.Kind { + case diff.Delete: + // Delete: before[I1:I2] is deleted. + edits = append(edits, diff.Edit{Start: start, End: end}) + case diff.Insert: + // Insert: after[J1:J2] is inserted at before[I1:I1]. + if content := strings.Join(op.Content, ""); content != "" { + edits = append(edits, diff.Edit{Start: start, End: end, New: content}) + } + } + } + return edits +} + +type operation struct { + Kind diff.OpKind + Content []string // content from b + I1, I2 int // indices of the line in a + J1 int // indices of the line in b, J2 implied by len(Content) +} + +// operations returns the list of operations to convert a into b, consolidating +// operations for multiple lines and not including equal lines. +func operations(a, b []string) []*operation { + if len(a) == 0 && len(b) == 0 { + return nil + } + + trace, offset := shortestEditSequence(a, b) + snakes := backtrack(trace, len(a), len(b), offset) + + M, N := len(a), len(b) + + var i int + solution := make([]*operation, len(a)+len(b)) + + add := func(op *operation, i2, j2 int) { + if op == nil { + return + } + op.I2 = i2 + if op.Kind == diff.Insert { + op.Content = b[op.J1:j2] + } + solution[i] = op + i++ + } + x, y := 0, 0 + for _, snake := range snakes { + if len(snake) < 2 { + continue + } + var op *operation + // delete (horizontal) + for snake[0]-snake[1] > x-y { + if op == nil { + op = &operation{ + Kind: diff.Delete, + I1: x, + J1: y, + } + } + x++ + if x == M { + break + } + } + add(op, x, y) + op = nil + // insert (vertical) + for snake[0]-snake[1] < x-y { + if op == nil { + op = &operation{ + Kind: diff.Insert, + I1: x, + J1: y, + } + } + y++ + } + add(op, x, y) + op = nil + // equal (diagonal) + for x < snake[0] { + x++ + y++ + } + if x >= M && y >= N { + break + } + } + return solution[:i] +} + +// backtrack uses the trace for the edit sequence computation and returns the +// "snakes" that make up the solution. A "snake" is a single deletion or +// insertion followed by zero or diagonals. +func backtrack(trace [][]int, x, y, offset int) [][]int { + snakes := make([][]int, len(trace)) + d := len(trace) - 1 + for ; x > 0 && y > 0 && d > 0; d-- { + V := trace[d] + if len(V) == 0 { + continue + } + snakes[d] = []int{x, y} + + k := x - y + + var kPrev int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + kPrev = k + 1 + } else { + kPrev = k - 1 + } + + x = V[kPrev+offset] + y = x - kPrev + } + if x < 0 || y < 0 { + return snakes + } + snakes[d] = []int{x, y} + return snakes +} + +// shortestEditSequence returns the shortest edit sequence that converts a into b. +func shortestEditSequence(a, b []string) ([][]int, int) { + M, N := len(a), len(b) + V := make([]int, 2*(N+M)+1) + offset := N + M + trace := make([][]int, N+M+1) + + // Iterate through the maximum possible length of the SES (N+M). + for d := 0; d <= N+M; d++ { + copyV := make([]int, len(V)) + // k lines are represented by the equation y = x - k. We move in + // increments of 2 because end points for even d are on even k lines. + for k := -d; k <= d; k += 2 { + // At each point, we either go down or to the right. We go down if + // k == -d, and we go to the right if k == d. We also prioritize + // the maximum x value, because we prefer deletions to insertions. + var x int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + x = V[k+1+offset] // down + } else { + x = V[k-1+offset] + 1 // right + } + + y := x - k + + // Diagonal moves while we have equal contents. + for x < M && y < N && a[x] == b[y] { + x++ + y++ + } + + V[k+offset] = x + + // Return if we've exceeded the maximum values. + if x == M && y == N { + // Makes sure to save the state of the array before returning. + copy(copyV, V) + trace[d] = copyV + return trace, offset + } + } + + // Save the state of the array. + copy(copyV, V) + trace[d] = copyV + } + return nil, 0 +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} diff --git a/internal/diff/myers/diff_test.go b/internal/diff/myers/diff_test.go new file mode 100644 index 00000000000..f244455586b --- /dev/null +++ b/internal/diff/myers/diff_test.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package myers_test + +import ( + "testing" + + "golang.org/x/tools/internal/diff/difftest" + "golang.org/x/tools/internal/diff/myers" +) + +func TestDiff(t *testing.T) { + difftest.DiffTest(t, myers.ComputeEdits) +} diff --git a/internal/diff/ndiff.go b/internal/diff/ndiff.go new file mode 100644 index 00000000000..050b08ded46 --- /dev/null +++ b/internal/diff/ndiff.go @@ -0,0 +1,109 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "golang.org/x/tools/internal/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if stringIsASCII(before) && stringIsASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. +func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if bytesIsASCII(before) && bytesIsASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. + res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// stringIsASCII reports whether s contains only ASCII. +// TODO(adonovan): combine when x/tools allows generics. +func stringIsASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +func bytesIsASCII(s []byte) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/internal/diff/unified.go b/internal/diff/unified.go new file mode 100644 index 00000000000..fa376f17872 --- /dev/null +++ b/internal/diff/unified.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits) + if err != nil { + // Can't happen: edits are consistent. + log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // From is the name of the original file. + From string + // To is the name of the modified file. + To string + // Hunks is the set of edit hunks needed to transform the file content. + Hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type hunk struct { + // The line in the original source where the hunk starts. + FromLine int + // The line in the original source where the hunk finishes. + ToLine int + // The set of line based edits to apply. + Lines []line +} + +// Line represents a single line operation to apply as part of a Hunk. +type line struct { + // Kind is the type of line this represents, deletion, insertion or copy. + Kind OpKind + // Content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + Content string +} + +// OpKind is used to denote the type of operation a line represents. +// TODO(adonovan): hide this once the myers package no longer references it. +type OpKind int + +const ( + // Delete is the operation kind for a line that is present in the input + // but not in the output. + Delete OpKind = iota + // Insert is the operation kind for a line that is new in the output. + Insert + // Equal is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + Equal +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k OpKind) String() string { + switch k { + case Delete: + return "delete" + case Insert: + return "insert" + case Equal: + return "equal" + default: + panic("unknown operation kind") + } +} + +const ( + edge = 3 + gap = edge * 2 +) + +// toUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func toUnified(fromName, toName string, content string, edits []Edit) (unified, error) { + u := unified{ + From: fromName, + To: toName, + } + if len(edits) == 0 { + return u, nil + } + var err error + edits, err = lineEdits(content, edits) // expand to whole lines + if err != nil { + return u, err + } + lines := splitLines(content) + var h *hunk + last := 0 + toLine := 0 + for _, edit := range edits { + // Compute the zero-based line numbers of the edit start and end. + // TODO(adonovan): opt: compute incrementally, avoid O(n^2). + start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + toLine += start - last + h = &hunk{ + FromLine: start + 1, + ToLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-edge, start) + h.FromLine -= delta + h.ToLine -= delta + } + last = start + for i := start; i < end; i++ { + h.Lines = append(h.Lines, line{Kind: Delete, Content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.Lines = append(h.Lines, line{Kind: Insert, Content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.Lines = append(h.Lines, line{Kind: Equal, Content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.Hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.From) + fmt.Fprintf(b, "+++ %s\n", u.To) + for _, hunk := range u.Hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fromCount++ + case Insert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.FromLine, fromCount) + } else if hunk.FromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.FromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.ToLine, toCount) + } else { + fmt.Fprintf(b, " +%d", hunk.ToLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fmt.Fprintf(b, "-%s", l.Content) + case Insert: + fmt.Fprintf(b, "+%s", l.Content) + default: + fmt.Fprintf(b, " %s", l.Content) + } + if !strings.HasSuffix(l.Content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/internal/lsp/debug/tag/tag.go b/internal/event/tag/tag.go similarity index 100% rename from internal/lsp/debug/tag/tag.go rename to internal/event/tag/tag.go diff --git a/go/analysis/internal/facts/facts.go b/internal/facts/facts.go similarity index 91% rename from go/analysis/internal/facts/facts.go rename to internal/facts/facts.go index 006abab84ef..81df45161a8 100644 --- a/go/analysis/internal/facts/facts.go +++ b/internal/facts/facts.go @@ -152,6 +152,23 @@ type gobFact struct { Fact analysis.Fact // type and value of user-defined Fact } +// A Decoder decodes the facts from the direct imports of the package +// provided to NewEncoder. A single decoder may be used to decode +// multiple fact sets (e.g. each for a different set of fact types) +// for the same package. Each call to Decode returns an independent +// fact set. +type Decoder struct { + pkg *types.Package + packages map[string]*types.Package +} + +// NewDecoder returns a fact decoder for the specified package. +func NewDecoder(pkg *types.Package) *Decoder { + // Compute the import map for this package. + // See the package doc comment. + return &Decoder{pkg, importMap(pkg.Imports())} +} + // Decode decodes all the facts relevant to the analysis of package pkg. // The read function reads serialized fact data from an external source // for one of of pkg's direct imports. The empty file is a valid @@ -159,28 +176,24 @@ type gobFact struct { // // It is the caller's responsibility to call gob.Register on all // necessary fact types. -func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) (*Set, error) { - // Compute the import map for this package. - // See the package doc comment. - packages := importMap(pkg.Imports()) - +func (d *Decoder) Decode(read func(*types.Package) ([]byte, error)) (*Set, error) { // Read facts from imported packages. // Facts may describe indirectly imported packages, or their objects. m := make(map[key]analysis.Fact) // one big bucket - for _, imp := range pkg.Imports() { + for _, imp := range d.pkg.Imports() { logf := func(format string, args ...interface{}) { if debug { prefix := fmt.Sprintf("in %s, importing %s: ", - pkg.Path(), imp.Path()) + d.pkg.Path(), imp.Path()) log.Print(prefix, fmt.Sprintf(format, args...)) } } // Read the gob-encoded facts. - data, err := read(imp.Path()) + data, err := read(imp) if err != nil { return nil, fmt.Errorf("in %s, can't import facts for package %q: %v", - pkg.Path(), imp.Path(), err) + d.pkg.Path(), imp.Path(), err) } if len(data) == 0 { continue // no facts @@ -195,7 +208,7 @@ func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) ( // Parse each one into a key and a Fact. for _, f := range gobFacts { - factPkg := packages[f.PkgPath] + factPkg := d.packages[f.PkgPath] if factPkg == nil { // Fact relates to a dependency that was // unused in this translation unit. Skip. @@ -222,7 +235,7 @@ func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) ( } } - return &Set{pkg: pkg, m: m}, nil + return &Set{pkg: d.pkg, m: m}, nil } // Encode encodes a set of facts to a memory buffer. diff --git a/internal/facts/facts_test.go b/internal/facts/facts_test.go new file mode 100644 index 00000000000..ad875153954 --- /dev/null +++ b/internal/facts/facts_test.go @@ -0,0 +1,564 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package facts_test + +import ( + "encoding/gob" + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "reflect" + "strings" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/facts" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/internal/typeparams" +) + +type myFact struct { + S string +} + +func (f *myFact) String() string { return fmt.Sprintf("myFact(%s)", f.S) } +func (f *myFact) AFact() {} + +func init() { + gob.Register(new(myFact)) +} + +func TestEncodeDecode(t *testing.T) { + tests := []struct { + name string + typeparams bool // requires typeparams to be enabled + files map[string]string + plookups []pkgLookups // see testEncodeDecode for details + }{ + { + name: "loading-order", + // c -> b -> a, a2 + // c does not directly depend on a, but it indirectly uses a.T. + // + // Package a2 is never loaded directly so it is incomplete. + // + // We use only types in this example because we rely on + // types.Eval to resolve the lookup expressions, and it only + // works for types. This is a definite gap in the typechecker API. + files: map[string]string{ + "a/a.go": `package a; type A int; type T int`, + "a2/a.go": `package a2; type A2 int; type Unneeded int`, + "b/b.go": `package b; import ("a"; "a2"); type B chan a2.A2; type F func() a.T`, + "c/c.go": `package c; import "b"; type C []b.B`, + }, + // In the following table, we analyze packages (a, b, c) in order, + // look up various objects accessible within each package, + // and see if they have a fact. The "analysis" exports a fact + // for every object at package level. + // + // Note: Loop iterations are not independent test cases; + // order matters, as we populate factmap. + plookups: []pkgLookups{ + {"a", []lookup{ + {"A", "myFact(a.A)"}, + }}, + {"b", []lookup{ + {"a.A", "myFact(a.A)"}, + {"a.T", "myFact(a.T)"}, + {"B", "myFact(b.B)"}, + {"F", "myFact(b.F)"}, + {"F(nil)()", "myFact(a.T)"}, // (result type of b.F) + }}, + {"c", []lookup{ + {"b.B", "myFact(b.B)"}, + {"b.F", "myFact(b.F)"}, + {"b.F(nil)()", "myFact(a.T)"}, + {"C", "myFact(c.C)"}, + {"C{}[0]", "myFact(b.B)"}, + {"<-(C{}[0])", "no fact"}, // object but no fact (we never "analyze" a2) + }}, + }, + }, + { + name: "underlying", + // c->b->a + // c does not import a directly or use any of its types, but it does use + // the types within a indirectly. c.q has the type a.a so package a should + // be included by importMap. + files: map[string]string{ + "a/a.go": `package a; type a int; type T *a`, + "b/b.go": `package b; import "a"; type B a.T`, + "c/c.go": `package c; import "b"; type C b.B; var q = *C(nil)`, + }, + plookups: []pkgLookups{ + {"a", []lookup{ + {"a", "myFact(a.a)"}, + {"T", "myFact(a.T)"}, + }}, + {"b", []lookup{ + {"B", "myFact(b.B)"}, + {"B(nil)", "myFact(b.B)"}, + {"*(B(nil))", "myFact(a.a)"}, + }}, + {"c", []lookup{ + {"C", "myFact(c.C)"}, + {"C(nil)", "myFact(c.C)"}, + {"*C(nil)", "myFact(a.a)"}, + {"q", "myFact(a.a)"}, + }}, + }, + }, + { + name: "methods", + // c->b->a + // c does not import a directly or use any of its types, but it does use + // the types within a indirectly via a method. + files: map[string]string{ + "a/a.go": `package a; type T int`, + "b/b.go": `package b; import "a"; type B struct{}; func (_ B) M() a.T { return 0 }`, + "c/c.go": `package c; import "b"; var C b.B`, + }, + plookups: []pkgLookups{ + {"a", []lookup{ + {"T", "myFact(a.T)"}, + }}, + {"b", []lookup{ + {"B{}", "myFact(b.B)"}, + {"B{}.M()", "myFact(a.T)"}, + }}, + {"c", []lookup{ + {"C", "myFact(b.B)"}, + {"C.M()", "myFact(a.T)"}, + }}, + }, + }, + { + name: "globals", + files: map[string]string{ + "a/a.go": `package a; + type T1 int + type T2 int + type T3 int + type T4 int + type T5 int + type K int; type V string + `, + "b/b.go": `package b + import "a" + var ( + G1 []a.T1 + G2 [7]a.T2 + G3 chan a.T3 + G4 *a.T4 + G5 struct{ F a.T5 } + G6 map[a.K]a.V + ) + `, + "c/c.go": `package c; import "b"; + var ( + v1 = b.G1 + v2 = b.G2 + v3 = b.G3 + v4 = b.G4 + v5 = b.G5 + v6 = b.G6 + ) + `, + }, + plookups: []pkgLookups{ + {"a", []lookup{}}, + {"b", []lookup{}}, + {"c", []lookup{ + {"v1[0]", "myFact(a.T1)"}, + {"v2[0]", "myFact(a.T2)"}, + {"<-v3", "myFact(a.T3)"}, + {"*v4", "myFact(a.T4)"}, + {"v5.F", "myFact(a.T5)"}, + {"v6[0]", "myFact(a.V)"}, + }}, + }, + }, + { + name: "typeparams", + typeparams: true, + files: map[string]string{ + "a/a.go": `package a + type T1 int + type T2 int + type T3 interface{Foo()} + type T4 int + type T5 int + type T6 interface{Foo()} + `, + "b/b.go": `package b + import "a" + type N1[T a.T1|int8] func() T + type N2[T any] struct{ F T } + type N3[T a.T3] func() T + type N4[T a.T4|int8] func() T + type N5[T interface{Bar() a.T5} ] func() T + + type t5 struct{}; func (t5) Bar() a.T5 { return 0 } + + var G1 N1[a.T1] + var G2 func() N2[a.T2] + var G3 N3[a.T3] + var G4 N4[a.T4] + var G5 N5[t5] + + func F6[T a.T6]() T { var x T; return x } + `, + "c/c.go": `package c; import "b"; + var ( + v1 = b.G1 + v2 = b.G2 + v3 = b.G3 + v4 = b.G4 + v5 = b.G5 + v6 = b.F6[t6] + ) + + type t6 struct{}; func (t6) Foo() {} + `, + }, + plookups: []pkgLookups{ + {"a", []lookup{}}, + {"b", []lookup{}}, + {"c", []lookup{ + {"v1", "myFact(b.N1)"}, + {"v1()", "myFact(a.T1)"}, + {"v2()", "myFact(b.N2)"}, + {"v2().F", "myFact(a.T2)"}, + {"v3", "myFact(b.N3)"}, + {"v4", "myFact(b.N4)"}, + {"v4()", "myFact(a.T4)"}, + {"v5", "myFact(b.N5)"}, + {"v5()", "myFact(b.t5)"}, + {"v6()", "myFact(c.t6)"}, + }}, + }, + }, + } + + for i := range tests { + test := tests[i] + t.Run(test.name, func(t *testing.T) { + t.Parallel() + if test.typeparams && !typeparams.Enabled { + t.Skip("type parameters are not enabled") + } + testEncodeDecode(t, test.files, test.plookups) + }) + } +} + +type lookup struct { + objexpr string + want string +} + +type pkgLookups struct { + path string + lookups []lookup +} + +// testEncodeDecode tests fact encoding and decoding and simulates how package facts +// are passed during analysis. It operates on a group of Go file contents. Then +// for each in tests it does the following: +// 1. loads and type checks the package, +// 2. calls (*facts.Decoder).Decode to load the facts exported by its imports, +// 3. exports a myFact Fact for all of package level objects, +// 4. For each lookup for the current package: +// 4.a) lookup the types.Object for an Go source expression in the curent package +// (or confirms one is not expected want=="no object"), +// 4.b) finds a Fact for the object (or confirms one is not expected want=="no fact"), +// 4.c) compares the content of the Fact to want. +// 5. encodes the Facts of the package. +// +// Note: tests are not independent test cases; order matters (as does a package being +// skipped). It changes what Facts can be imported. +// +// Failures are reported on t. +func testEncodeDecode(t *testing.T, files map[string]string, tests []pkgLookups) { + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // factmap represents the passing of encoded facts from one + // package to another. In practice one would use the file system. + factmap := make(map[string][]byte) + read := func(imp *types.Package) ([]byte, error) { return factmap[imp.Path()], nil } + + // Analyze packages in order, look up various objects accessible within + // each package, and see if they have a fact. The "analysis" exports a + // fact for every object at package level. + // + // Note: Loop iterations are not independent test cases; + // order matters, as we populate factmap. + for _, test := range tests { + // load package + pkg, err := load(t, dir, test.path) + if err != nil { + t.Fatal(err) + } + + // decode + facts, err := facts.NewDecoder(pkg).Decode(read) + if err != nil { + t.Fatalf("Decode failed: %v", err) + } + t.Logf("decode %s facts = %v", pkg.Path(), facts) // show all facts + + // export + // (one fact for each package-level object) + for _, name := range pkg.Scope().Names() { + obj := pkg.Scope().Lookup(name) + fact := &myFact{obj.Pkg().Name() + "." + obj.Name()} + facts.ExportObjectFact(obj, fact) + } + t.Logf("exported %s facts = %v", pkg.Path(), facts) // show all facts + + // import + // (after export, because an analyzer may import its own facts) + for _, lookup := range test.lookups { + fact := new(myFact) + var got string + if obj := find(pkg, lookup.objexpr); obj == nil { + got = "no object" + } else if facts.ImportObjectFact(obj, fact) { + got = fact.String() + } else { + got = "no fact" + } + if got != lookup.want { + t.Errorf("in %s, ImportObjectFact(%s, %T) = %s, want %s", + pkg.Path(), lookup.objexpr, fact, got, lookup.want) + } + } + + // encode + factmap[pkg.Path()] = facts.Encode() + } +} + +func find(p *types.Package, expr string) types.Object { + // types.Eval only allows us to compute a TypeName object for an expression. + // TODO(adonovan): support other expressions that denote an object: + // - an identifier (or qualified ident) for a func, const, or var + // - new(T).f for a field or method + // I've added CheckExpr in https://go-review.googlesource.com/c/go/+/144677. + // If that becomes available, use it. + + // Choose an arbitrary position within the (single-file) package + // so that we are within the scope of its import declarations. + somepos := p.Scope().Lookup(p.Scope().Names()[0]).Pos() + tv, err := types.Eval(token.NewFileSet(), p, somepos, expr) + if err != nil { + return nil + } + if n, ok := tv.Type.(*types.Named); ok { + return n.Obj() + } + return nil +} + +func load(t *testing.T, dir string, path string) (*types.Package, error) { + cfg := &packages.Config{ + Mode: packages.LoadSyntax, + Dir: dir, + Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"), + } + testenv.NeedsGoPackagesEnv(t, cfg.Env) + pkgs, err := packages.Load(cfg, path) + if err != nil { + return nil, err + } + if packages.PrintErrors(pkgs) > 0 { + return nil, fmt.Errorf("packages had errors") + } + if len(pkgs) == 0 { + return nil, fmt.Errorf("no package matched %s", path) + } + return pkgs[0].Types, nil +} + +type otherFact struct { + S string +} + +func (f *otherFact) String() string { return fmt.Sprintf("otherFact(%s)", f.S) } +func (f *otherFact) AFact() {} + +func TestFactFilter(t *testing.T) { + files := map[string]string{ + "a/a.go": `package a; type A int`, + } + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + pkg, err := load(t, dir, "a") + if err != nil { + t.Fatal(err) + } + + obj := pkg.Scope().Lookup("A") + s, err := facts.NewDecoder(pkg).Decode(func(*types.Package) ([]byte, error) { return nil, nil }) + if err != nil { + t.Fatal(err) + } + s.ExportObjectFact(obj, &myFact{"good object fact"}) + s.ExportPackageFact(&myFact{"good package fact"}) + s.ExportObjectFact(obj, &otherFact{"bad object fact"}) + s.ExportPackageFact(&otherFact{"bad package fact"}) + + filter := map[reflect.Type]bool{ + reflect.TypeOf(&myFact{}): true, + } + + pkgFacts := s.AllPackageFacts(filter) + wantPkgFacts := `[{package a ("a") myFact(good package fact)}]` + if got := fmt.Sprintf("%v", pkgFacts); got != wantPkgFacts { + t.Errorf("AllPackageFacts: got %v, want %v", got, wantPkgFacts) + } + + objFacts := s.AllObjectFacts(filter) + wantObjFacts := "[{type a.A int myFact(good object fact)}]" + if got := fmt.Sprintf("%v", objFacts); got != wantObjFacts { + t.Errorf("AllObjectFacts: got %v, want %v", got, wantObjFacts) + } +} + +// TestMalformed checks that facts can be encoded and decoded *despite* +// types.Config.Check returning an error. Importing facts is expected to +// happen when Analyzers have RunDespiteErrors set to true. So this +// needs to robust, e.g. no infinite loops. +func TestMalformed(t *testing.T) { + if !typeparams.Enabled { + t.Skip("type parameters are not enabled") + } + var findPkg func(*types.Package, string) *types.Package + findPkg = func(p *types.Package, name string) *types.Package { + if p.Name() == name { + return p + } + for _, o := range p.Imports() { + if f := findPkg(o, name); f != nil { + return f + } + } + return nil + } + + type pkgTest struct { + content string + err string // if non-empty, expected substring of err.Error() from conf.Check(). + wants map[string]string // package path to expected name + } + tests := []struct { + name string + pkgs []pkgTest + }{ + { + name: "initialization-cycle", + pkgs: []pkgTest{ + { + content: `package a; type N[T any] struct { F *N[N[T]] }`, + err: "instantiation cycle:", + wants: map[string]string{"a": "myFact(a.[N])", "b": "no package", "c": "no package"}, + }, + { + content: `package b; import "a"; type B a.N[int]`, + wants: map[string]string{"a": "myFact(a.[N])", "b": "myFact(b.[B])", "c": "no package"}, + }, + { + content: `package c; import "b"; var C b.B`, + wants: map[string]string{"a": "myFact(a.[N])", "b": "myFact(b.[B])", "c": "myFact(c.[C])"}, + }, + }, + }, + } + + for i := range tests { + test := tests[i] + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + // setup for test wide variables. + packages := make(map[string]*types.Package) + conf := types.Config{ + Importer: closure(packages), + Error: func(err error) {}, // do not stop on first type checking error + } + fset := token.NewFileSet() + factmap := make(map[string][]byte) + read := func(imp *types.Package) ([]byte, error) { return factmap[imp.Path()], nil } + + // Processes the pkgs in order. For package, export a package fact, + // and use this fact to verify which package facts are reachable via Decode. + // We allow for packages to have type checking errors. + for i, pkgTest := range test.pkgs { + // parse + f, err := parser.ParseFile(fset, fmt.Sprintf("%d.go", i), pkgTest.content, 0) + if err != nil { + t.Fatal(err) + } + + // typecheck + pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) + var got string + if err != nil { + got = err.Error() + } + if !strings.Contains(got, pkgTest.err) { + t.Fatalf("%s: type checking error %q did not match pattern %q", pkg.Path(), err.Error(), pkgTest.err) + } + packages[pkg.Path()] = pkg + + // decode facts + facts, err := facts.NewDecoder(pkg).Decode(read) + if err != nil { + t.Fatalf("Decode failed: %v", err) + } + + // export facts + fact := &myFact{fmt.Sprintf("%s.%s", pkg.Name(), pkg.Scope().Names())} + facts.ExportPackageFact(fact) + + // import facts + for other, want := range pkgTest.wants { + fact := new(myFact) + var got string + if found := findPkg(pkg, other); found == nil { + got = "no package" + } else if facts.ImportPackageFact(found, fact) { + got = fact.String() + } else { + got = "no fact" + } + if got != want { + t.Errorf("in %s, ImportPackageFact(%s, %T) = %s, want %s", + pkg.Path(), other, fact, got, want) + } + } + + // encode facts + factmap[pkg.Path()] = facts.Encode() + } + }) + } +} + +type closure map[string]*types.Package + +func (c closure) Import(path string) (*types.Package, error) { return c[path], nil } diff --git a/internal/facts/imports.go b/internal/facts/imports.go new file mode 100644 index 00000000000..7b21668660c --- /dev/null +++ b/internal/facts/imports.go @@ -0,0 +1,130 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package facts + +import ( + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// importMap computes the import map for a package by traversing the +// entire exported API each of its imports. +// +// This is a workaround for the fact that we cannot access the map used +// internally by the types.Importer returned by go/importer. The entries +// in this map are the packages and objects that may be relevant to the +// current analysis unit. +// +// Packages in the map that are only indirectly imported may be +// incomplete (!pkg.Complete()). +// +// TODO(adonovan): opt: compute this information more efficiently +// by obtaining it from the internals of the gcexportdata decoder. +func importMap(imports []*types.Package) map[string]*types.Package { + objects := make(map[types.Object]bool) + typs := make(map[types.Type]bool) // Named and TypeParam + packages := make(map[string]*types.Package) + + var addObj func(obj types.Object) + var addType func(T types.Type) + + addObj = func(obj types.Object) { + if !objects[obj] { + objects[obj] = true + addType(obj.Type()) + if pkg := obj.Pkg(); pkg != nil { + packages[pkg.Path()] = pkg + } + } + } + + addType = func(T types.Type) { + switch T := T.(type) { + case *types.Basic: + // nop + case *types.Named: + // Remove infinite expansions of *types.Named by always looking at the origin. + // Some named types with type parameters [that will not type check] have + // infinite expansions: + // type N[T any] struct { F *N[N[T]] } + // importMap() is called on such types when Analyzer.RunDespiteErrors is true. + T = typeparams.NamedTypeOrigin(T).(*types.Named) + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Underlying()) + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + if tparams := typeparams.ForNamed(T); tparams != nil { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + if targs := typeparams.NamedTypeArgs(T); targs != nil { + for i := 0; i < targs.Len(); i++ { + addType(targs.At(i)) + } + } + } + case *types.Pointer: + addType(T.Elem()) + case *types.Slice: + addType(T.Elem()) + case *types.Array: + addType(T.Elem()) + case *types.Chan: + addType(T.Elem()) + case *types.Map: + addType(T.Key()) + addType(T.Elem()) + case *types.Signature: + addType(T.Params()) + addType(T.Results()) + if tparams := typeparams.ForSignature(T); tparams != nil { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + addObj(T.Field(i)) + } + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + addObj(T.At(i)) + } + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + for i := 0; i < T.NumEmbeddeds(); i++ { + addType(T.EmbeddedType(i)) // walk Embedded for implicits + } + case *typeparams.Union: + for i := 0; i < T.Len(); i++ { + addType(T.Term(i).Type()) + } + case *typeparams.TypeParam: + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Constraint()) + } + } + } + + for _, imp := range imports { + packages[imp.Path()] = imp + + scope := imp.Scope() + for _, name := range scope.Names() { + addObj(scope.Lookup(name)) + } + } + + return packages +} diff --git a/internal/fastwalk/fastwalk_darwin.go b/internal/fastwalk/fastwalk_darwin.go new file mode 100644 index 00000000000..0ca55e0d56f --- /dev/null +++ b/internal/fastwalk/fastwalk_darwin.go @@ -0,0 +1,119 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && cgo +// +build darwin,cgo + +package fastwalk + +/* +#include + +// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent** +// result pointer which triggers CGO's "Go pointer to Go pointer" check unless +// we allocat the result dirent* with malloc. +// +// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the +// directory, or a positive error number to indicate failure. +static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) { + struct dirent *result; + int ret = readdir_r(fd, entry, &result); + if (ret == 0 && result == NULL) { + ret = -1; // EOF + } + return ret; +} +*/ +import "C" + +import ( + "os" + "syscall" + "unsafe" +) + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := openDir(dirName) + if err != nil { + return &os.PathError{Op: "opendir", Path: dirName, Err: err} + } + defer C.closedir(fd) + + skipFiles := false + var dirent syscall.Dirent + for { + ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent)))) + if ret != 0 { + if ret == -1 { + break // EOF + } + if ret == int(syscall.EINTR) { + continue + } + return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)} + } + if dirent.Ino == 0 { + continue + } + typ := dtToType(dirent.Type) + if skipFiles && typ.IsRegular() { + continue + } + name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] + name = name[:dirent.Namlen] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + if err := fn(dirName, string(name), typ); err != nil { + if err != ErrSkipFiles { + return err + } + skipFiles = true + } + } + + return nil +} + +func dtToType(typ uint8) os.FileMode { + switch typ { + case syscall.DT_BLK: + return os.ModeDevice + case syscall.DT_CHR: + return os.ModeDevice | os.ModeCharDevice + case syscall.DT_DIR: + return os.ModeDir + case syscall.DT_FIFO: + return os.ModeNamedPipe + case syscall.DT_LNK: + return os.ModeSymlink + case syscall.DT_REG: + return 0 + case syscall.DT_SOCK: + return os.ModeSocket + } + return ^os.FileMode(0) +} + +// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR +// needs to be closed with closedir(3). +func openDir(path string) (*C.DIR, error) { + name, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + for { + fd, err := C.opendir((*C.char)(unsafe.Pointer(name))) + if err != syscall.EINTR { + return fd, err + } + } +} diff --git a/internal/fastwalk/fastwalk_dirent_ino.go b/internal/fastwalk/fastwalk_dirent_ino.go index ea02b9ebfe8..d3922890b0b 100644 --- a/internal/fastwalk/fastwalk_dirent_ino.go +++ b/internal/fastwalk/fastwalk_dirent_ino.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (linux || darwin) && !appengine -// +build linux darwin +//go:build (linux || (darwin && !cgo)) && !appengine +// +build linux darwin,!cgo // +build !appengine package fastwalk @@ -11,5 +11,5 @@ package fastwalk import "syscall" func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Ino) + return dirent.Ino } diff --git a/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/internal/fastwalk/fastwalk_dirent_namlen_bsd.go index d5c9c321ed2..38a4db6af3a 100644 --- a/internal/fastwalk/fastwalk_dirent_namlen_bsd.go +++ b/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || freebsd || openbsd || netbsd -// +build darwin freebsd openbsd netbsd +//go:build (darwin && !cgo) || freebsd || openbsd || netbsd +// +build darwin,!cgo freebsd openbsd netbsd package fastwalk diff --git a/internal/fastwalk/fastwalk_unix.go b/internal/fastwalk/fastwalk_unix.go index 58bd87841e1..f12f1a734cc 100644 --- a/internal/fastwalk/fastwalk_unix.go +++ b/internal/fastwalk/fastwalk_unix.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (linux || darwin || freebsd || openbsd || netbsd) && !appengine -// +build linux darwin freebsd openbsd netbsd +//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine +// +build linux freebsd openbsd netbsd darwin,!cgo // +build !appengine package fastwalk diff --git a/internal/lsp/fuzzy/input.go b/internal/fuzzy/input.go similarity index 100% rename from internal/lsp/fuzzy/input.go rename to internal/fuzzy/input.go diff --git a/internal/lsp/fuzzy/input_test.go b/internal/fuzzy/input_test.go similarity index 98% rename from internal/lsp/fuzzy/input_test.go rename to internal/fuzzy/input_test.go index 0228347e4f0..64f66e363b8 100644 --- a/internal/lsp/fuzzy/input_test.go +++ b/internal/fuzzy/input_test.go @@ -9,7 +9,7 @@ import ( "sort" "testing" - "golang.org/x/tools/internal/lsp/fuzzy" + "golang.org/x/tools/internal/fuzzy" ) var rolesTests = []struct { diff --git a/internal/lsp/fuzzy/matcher.go b/internal/fuzzy/matcher.go similarity index 93% rename from internal/lsp/fuzzy/matcher.go rename to internal/fuzzy/matcher.go index 265cdcf1604..c0efd30dd9a 100644 --- a/internal/lsp/fuzzy/matcher.go +++ b/internal/fuzzy/matcher.go @@ -405,3 +405,30 @@ func (m *Matcher) poorMatch() bool { } return false } + +// BestMatch returns the name most similar to the +// pattern, using fuzzy matching, or the empty string. +func BestMatch(pattern string, names []string) string { + fuzz := NewMatcher(pattern) + best := "" + highScore := float32(0) // minimum score is 0 (no match) + for _, name := range names { + // TODO: Improve scoring algorithm. + score := fuzz.Score(name) + if score > highScore { + highScore = score + best = name + } else if score == 0 { + // Order matters in the fuzzy matching algorithm. If we find no match + // when matching the target to the identifier, try matching the identifier + // to the target. + revFuzz := NewMatcher(name) + revScore := revFuzz.Score(pattern) + if revScore > highScore { + highScore = revScore + best = name + } + } + } + return best +} diff --git a/internal/lsp/fuzzy/matcher_test.go b/internal/fuzzy/matcher_test.go similarity index 99% rename from internal/lsp/fuzzy/matcher_test.go rename to internal/fuzzy/matcher_test.go index 132ab5c800a..528224bd98d 100644 --- a/internal/lsp/fuzzy/matcher_test.go +++ b/internal/fuzzy/matcher_test.go @@ -13,7 +13,7 @@ import ( "math" "testing" - "golang.org/x/tools/internal/lsp/fuzzy" + "golang.org/x/tools/internal/fuzzy" ) type comparator struct { diff --git a/internal/lsp/fuzzy/symbol.go b/internal/fuzzy/symbol.go similarity index 100% rename from internal/lsp/fuzzy/symbol.go rename to internal/fuzzy/symbol.go diff --git a/internal/lsp/fuzzy/symbol_test.go b/internal/fuzzy/symbol_test.go similarity index 97% rename from internal/lsp/fuzzy/symbol_test.go rename to internal/fuzzy/symbol_test.go index cb28160dedb..df74bbe0d37 100644 --- a/internal/lsp/fuzzy/symbol_test.go +++ b/internal/fuzzy/symbol_test.go @@ -7,7 +7,7 @@ package fuzzy_test import ( "testing" - . "golang.org/x/tools/internal/lsp/fuzzy" + . "golang.org/x/tools/internal/fuzzy" ) func TestSymbolMatchIndex(t *testing.T) { diff --git a/go/internal/gcimporter/bexport.go b/internal/gcimporter/bexport.go similarity index 99% rename from go/internal/gcimporter/bexport.go rename to internal/gcimporter/bexport.go index 196cb3f9b41..30582ed6d3d 100644 --- a/go/internal/gcimporter/bexport.go +++ b/internal/gcimporter/bexport.go @@ -12,7 +12,6 @@ import ( "bytes" "encoding/binary" "fmt" - "go/ast" "go/constant" "go/token" "go/types" @@ -145,7 +144,7 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) objcount := 0 scope := pkg.Scope() for _, name := range scope.Names() { - if !ast.IsExported(name) { + if !token.IsExported(name) { continue } if trace { @@ -482,7 +481,7 @@ func (p *exporter) method(m *types.Func) { p.pos(m) p.string(m.Name()) - if m.Name() != "_" && !ast.IsExported(m.Name()) { + if m.Name() != "_" && !token.IsExported(m.Name()) { p.pkg(m.Pkg(), false) } @@ -501,7 +500,7 @@ func (p *exporter) fieldName(f *types.Var) { // 3) field name doesn't match base type name (alias name) bname := basetypeName(f.Type()) if name == bname { - if ast.IsExported(name) { + if token.IsExported(name) { name = "" // 1) we don't need to know the field name or package } else { name = "?" // 2) use unexported name "?" to force package export @@ -514,7 +513,7 @@ func (p *exporter) fieldName(f *types.Var) { } p.string(name) - if name != "" && !ast.IsExported(name) { + if name != "" && !token.IsExported(name) { p.pkg(f.Pkg(), false) } } diff --git a/go/internal/gcimporter/bexport_test.go b/internal/gcimporter/bexport_test.go similarity index 99% rename from go/internal/gcimporter/bexport_test.go rename to internal/gcimporter/bexport_test.go index 3da5397eb50..b5e9ce10044 100644 --- a/go/internal/gcimporter/bexport_test.go +++ b/internal/gcimporter/bexport_test.go @@ -21,8 +21,8 @@ import ( "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/internal/gcimporter" "golang.org/x/tools/go/loader" + "golang.org/x/tools/internal/gcimporter" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typeparams/genericfeatures" ) @@ -109,7 +109,7 @@ type UnknownType undefined // Compare the packages' corresponding members. for _, name := range pkg.Scope().Names() { - if !ast.IsExported(name) { + if !token.IsExported(name) { continue } obj1 := pkg.Scope().Lookup(name) diff --git a/go/internal/gcimporter/bimport.go b/internal/gcimporter/bimport.go similarity index 100% rename from go/internal/gcimporter/bimport.go rename to internal/gcimporter/bimport.go diff --git a/go/internal/gcimporter/exportdata.go b/internal/gcimporter/exportdata.go similarity index 100% rename from go/internal/gcimporter/exportdata.go rename to internal/gcimporter/exportdata.go diff --git a/internal/gcimporter/gcimporter.go b/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000000..0372fb3a646 --- /dev/null +++ b/internal/gcimporter/gcimporter.go @@ -0,0 +1,265 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/internal/gcimporter/gcimporter_test.go b/internal/gcimporter/gcimporter_test.go new file mode 100644 index 00000000000..6ef704c53a5 --- /dev/null +++ b/internal/gcimporter/gcimporter_test.go @@ -0,0 +1,880 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go, +// adjusted to make it build with code from (std lib) internal/testenv copied. + +package gcimporter + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/constant" + goimporter "go/importer" + goparser "go/parser" + "go/token" + "go/types" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "golang.org/x/tools/internal/goroot" + "golang.org/x/tools/internal/testenv" +) + +func TestMain(m *testing.M) { + testenv.ExitIfSmallMachine() + os.Exit(m.Run()) +} + +// ---------------------------------------------------------------------------- + +func needsCompiler(t *testing.T, compiler string) { + if runtime.Compiler == compiler { + return + } + switch compiler { + case "gc": + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } +} + +// compile runs the compiler on filename, with dirname as the working directory, +// and writes the output file to outdirname. +// compile gives the resulting package a packagepath of p. +func compile(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string) string { + return compilePkg(t, dirname, filename, outdirname, packagefiles, "p") +} + +func compilePkg(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string, pkg string) string { + testenv.NeedsGoBuild(t) + + // filename must end with ".go" + basename := strings.TrimSuffix(filepath.Base(filename), ".go") + ok := filename != basename + if !ok { + t.Fatalf("filename doesn't end in .go: %s", filename) + } + objname := basename + ".o" + outname := filepath.Join(outdirname, objname) + + importcfgfile := os.DevNull + if len(packagefiles) > 0 { + importcfgfile = filepath.Join(outdirname, basename) + ".importcfg" + importcfg := new(bytes.Buffer) + fmt.Fprintf(importcfg, "# import config") + for k, v := range packagefiles { + fmt.Fprintf(importcfg, "\npackagefile %s=%s\n", k, v) + } + if err := os.WriteFile(importcfgfile, importcfg.Bytes(), 0655); err != nil { + t.Fatal(err) + } + } + + importreldir := strings.ReplaceAll(outdirname, string(os.PathSeparator), "/") + cmd := exec.Command("go", "tool", "compile", "-p", pkg, "-D", importreldir, "-importcfg", importcfgfile, "-o", outname, filename) + cmd.Dir = dirname + out, err := cmd.CombinedOutput() + if err != nil { + t.Logf("%s", out) + t.Fatalf("go tool compile %s failed: %s", filename, err) + } + return outname +} + +func testPath(t *testing.T, path, srcDir string) *types.Package { + t0 := time.Now() + pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) + if err != nil { + t.Errorf("testPath(%s): %s", path, err) + return nil + } + t.Logf("testPath(%s): %v", path, time.Since(t0)) + return pkg +} + +func mktmpdir(t *testing.T) string { + tmpdir, err := ioutil.TempDir("", "gcimporter_test") + if err != nil { + t.Fatal("mktmpdir:", err) + } + if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil { + os.RemoveAll(tmpdir) + t.Fatal("mktmpdir:", err) + } + return tmpdir +} + +const testfile = "exports.go" + +func TestImportTestdata(t *testing.T) { + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + + packageFiles := map[string]string{} + for _, pkg := range []string{"go/ast", "go/token"} { + export, _ := FindPkg(pkg, "testdata") + if export == "" { + t.Fatalf("no export data found for %s", pkg) + } + packageFiles[pkg] = export + } + + compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"), packageFiles) + + // filename should end with ".go" + filename := testfile[:len(testfile)-3] + if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil { + // The package's Imports list must include all packages + // explicitly imported by testfile, plus all packages + // referenced indirectly via exported objects in testfile. + // With the textual export format (when run against Go1.6), + // the list may also include additional packages that are + // not strictly required for import processing alone (they + // are exported to err "on the safe side"). + // For now, we just test the presence of a few packages + // that we know are there for sure. + got := fmt.Sprint(pkg.Imports()) + wants := []string{"go/ast", "go/token"} + if unifiedIR { + wants = []string{"go/ast"} + } + for _, want := range wants { + if !strings.Contains(got, want) { + t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want) + } + } + } +} + +func TestImportTypeparamTests(t *testing.T) { + if testing.Short() { + t.Skipf("in short mode, skipping test that requires export data for all of std") + } + + testenv.NeedsGo1Point(t, 18) // requires generics + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + + // Check go files in test/typeparam, except those that fail for a known + // reason. + rootDir := filepath.Join(runtime.GOROOT(), "test", "typeparam") + list, err := os.ReadDir(rootDir) + if err != nil { + t.Fatal(err) + } + + var skip map[string]string + if !unifiedIR { + // The Go 1.18 frontend still fails several cases. + skip = map[string]string{ + "equal.go": "inconsistent embedded sorting", // TODO(rfindley): investigate this. + "nested.go": "fails to compile", // TODO(rfindley): investigate this. + "issue47631.go": "can not handle local type declarations", + "issue55101.go": "fails to compile", + } + } + + for _, entry := range list { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") { + // For now, only consider standalone go files. + continue + } + + t.Run(entry.Name(), func(t *testing.T) { + if reason, ok := skip[entry.Name()]; ok { + t.Skip(reason) + } + + filename := filepath.Join(rootDir, entry.Name()) + src, err := os.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + if !bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// compile")) { + // We're bypassing the logic of run.go here, so be conservative about + // the files we consider in an attempt to make this test more robust to + // changes in test/typeparams. + t.Skipf("not detected as a run test") + } + + // Compile and import, and compare the resulting package with the package + // that was type-checked directly. + pkgFiles, err := goroot.PkgfileMap() + if err != nil { + t.Fatal(err) + } + compile(t, rootDir, entry.Name(), filepath.Join(tmpdir, "testdata"), pkgFiles) + pkgName := strings.TrimSuffix(entry.Name(), ".go") + imported := importPkg(t, "./testdata/"+pkgName, tmpdir) + checked := checkFile(t, filename, src) + + seen := make(map[string]bool) + for _, name := range imported.Scope().Names() { + if !token.IsExported(name) { + continue // ignore synthetic names like .inittask and .dict.* + } + seen[name] = true + + importedObj := imported.Scope().Lookup(name) + got := types.ObjectString(importedObj, types.RelativeTo(imported)) + + checkedObj := checked.Scope().Lookup(name) + if checkedObj == nil { + t.Fatalf("imported object %q was not type-checked", name) + } + want := types.ObjectString(checkedObj, types.RelativeTo(checked)) + + if got != want { + t.Errorf("imported %q as %q, want %q", name, got, want) + } + } + + for _, name := range checked.Scope().Names() { + if !token.IsExported(name) || seen[name] { + continue + } + t.Errorf("did not import object %q", name) + } + }) + } +} + +func checkFile(t *testing.T, filename string, src []byte) *types.Package { + fset := token.NewFileSet() + f, err := goparser.ParseFile(fset, filename, src, 0) + if err != nil { + t.Fatal(err) + } + config := types.Config{ + Importer: goimporter.Default(), + } + pkg, err := config.Check("", fset, []*ast.File{f}, nil) + if err != nil { + t.Fatal(err) + } + return pkg +} + +func TestVersionHandling(t *testing.T) { + if debug { + t.Skip("TestVersionHandling panics in debug mode") + } + + // This package only handles gc export data. + needsCompiler(t, "gc") + + const dir = "./testdata/versions" + list, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + corruptdir := filepath.Join(tmpdir, "testdata", "versions") + if err := os.Mkdir(corruptdir, 0700); err != nil { + t.Fatal(err) + } + + for _, f := range list { + name := f.Name() + if !strings.HasSuffix(name, ".a") { + continue // not a package file + } + if strings.Contains(name, "corrupted") { + continue // don't process a leftover corrupted file + } + pkgpath := "./" + name[:len(name)-2] + + if testing.Verbose() { + t.Logf("importing %s", name) + } + + // test that export data can be imported + _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil) + if err != nil { + // ok to fail if it fails with a newer version error for select files + if strings.Contains(err.Error(), "newer version") { + switch name { + case "test_go1.11_999b.a", "test_go1.11_999i.a": + continue + } + // fall through + } + t.Errorf("import %q failed: %v", pkgpath, err) + continue + } + + // create file with corrupted export data + // 1) read file + data, err := ioutil.ReadFile(filepath.Join(dir, name)) + if err != nil { + t.Fatal(err) + } + // 2) find export data + i := bytes.Index(data, []byte("\n$$B\n")) + 5 + j := bytes.Index(data[i:], []byte("\n$$\n")) + i + if i < 0 || j < 0 || i > j { + t.Fatalf("export data section not found (i = %d, j = %d)", i, j) + } + // 3) corrupt the data (increment every 7th byte) + for k := j - 13; k >= i; k -= 7 { + data[k]++ + } + // 4) write the file + pkgpath += "_corrupted" + filename := filepath.Join(corruptdir, pkgpath) + ".a" + ioutil.WriteFile(filename, data, 0666) + + // test that importing the corrupted file results in an error + _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil) + if err == nil { + t.Errorf("import corrupted %q succeeded", pkgpath) + } else if msg := err.Error(); !strings.Contains(msg, "version skew") { + t.Errorf("import %q error incorrect (%s)", pkgpath, msg) + } + } +} + +func TestImportStdLib(t *testing.T) { + if testing.Short() { + t.Skip("the imports can be expensive, and this test is especially slow when the build cache is empty") + } + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + // Get list of packages in stdlib. Filter out test-only packages with {{if .GoFiles}} check. + var stderr bytes.Buffer + cmd := exec.Command("go", "list", "-f", "{{if .GoFiles}}{{.ImportPath}}{{end}}", "std") + cmd.Stderr = &stderr + out, err := cmd.Output() + if err != nil { + t.Fatalf("failed to run go list to determine stdlib packages: %v\nstderr:\n%v", err, stderr.String()) + } + pkgs := strings.Fields(string(out)) + + var nimports int + for _, pkg := range pkgs { + t.Run(pkg, func(t *testing.T) { + if testPath(t, pkg, filepath.Join(testenv.GOROOT(t), "src", path.Dir(pkg))) != nil { + nimports++ + } + }) + } + const minPkgs = 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more. + if len(pkgs) < minPkgs { + t.Fatalf("too few packages (%d) were imported", nimports) + } + + t.Logf("tested %d imports", nimports) +} + +var importedObjectTests = []struct { + name string + want string +}{ + // non-interfaces + {"crypto.Hash", "type Hash uint"}, + {"go/ast.ObjKind", "type ObjKind int"}, + {"go/types.Qualifier", "type Qualifier func(*Package) string"}, + {"go/types.Comparable", "func Comparable(T Type) bool"}, + {"math.Pi", "const Pi untyped float"}, + {"math.Sin", "func Sin(x float64) float64"}, + {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"}, + {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"}, + + // interfaces + {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"}, + {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"}, + {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"}, + {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"}, + {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"}, + {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"}, + {"go/types.Type", "type Type interface{String() string; Underlying() Type}"}, +} + +// TODO(rsc): Delete this init func after x/tools no longer needs to test successfully with Go 1.17. +func init() { + if build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] <= "go1.17" { + for i := range importedObjectTests { + if importedObjectTests[i].name == "context.Context" { + // Expand any to interface{}. + importedObjectTests[i].want = "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}" + } + } + } +} + +func TestImportedTypes(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + for _, test := range importedObjectTests { + obj := importObject(t, test.name) + if obj == nil { + continue // error reported elsewhere + } + got := types.ObjectString(obj, types.RelativeTo(obj.Pkg())) + + // TODO(rsc): Delete this block once go.dev/cl/368254 lands. + if got != test.want && test.want == strings.ReplaceAll(got, "interface{}", "any") { + got = test.want + } + + if got != test.want { + t.Errorf("%s: got %q; want %q", test.name, got, test.want) + } + + if named, _ := obj.Type().(*types.Named); named != nil { + verifyInterfaceMethodRecvs(t, named, 0) + } + } +} + +func TestImportedConsts(t *testing.T) { + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + tests := []struct { + name string + want constant.Kind + }{ + {"math.Pi", constant.Float}, + {"math.MaxFloat64", constant.Float}, + {"math.MaxInt64", constant.Int}, + } + + for _, test := range tests { + obj := importObject(t, test.name) + if got := obj.(*types.Const).Val().Kind(); got != test.want { + t.Errorf("%s: imported as constant.Kind(%v), want constant.Kind(%v)", test.name, got, test.want) + } + } +} + +// importObject imports the object specified by a name of the form +// ., e.g. go/types.Type. +// +// If any errors occur they are reported via t and the resulting object will +// be nil. +func importObject(t *testing.T, name string) types.Object { + s := strings.Split(name, ".") + if len(s) != 2 { + t.Fatal("inconsistent test data") + } + importPath := s[0] + objName := s[1] + + pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil) + if err != nil { + t.Error(err) + return nil + } + + obj := pkg.Scope().Lookup(objName) + if obj == nil { + t.Errorf("%s: object not found", name) + return nil + } + return obj +} + +// verifyInterfaceMethodRecvs verifies that method receiver types +// are named if the methods belong to a named interface type. +func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) { + // avoid endless recursion in case of an embedding bug that lead to a cycle + if level > 10 { + t.Errorf("%s: embeds itself", named) + return + } + + iface, _ := named.Underlying().(*types.Interface) + if iface == nil { + return // not an interface + } + + // check explicitly declared methods + for i := 0; i < iface.NumExplicitMethods(); i++ { + m := iface.ExplicitMethod(i) + recv := m.Type().(*types.Signature).Recv() + if recv == nil { + t.Errorf("%s: missing receiver type", m) + continue + } + if recv.Type() != named { + t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named) + } + } + + // check embedded interfaces (if they are named, too) + for i := 0; i < iface.NumEmbeddeds(); i++ { + // embedding of interfaces cannot have cycles; recursion will terminate + if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil { + verifyInterfaceMethodRecvs(t, etype, level+1) + } + } +} + +func TestIssue5815(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + pkg := importPkg(t, "strings", ".") + + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + if obj.Pkg() == nil { + t.Errorf("no pkg for %s", obj) + } + if tname, _ := obj.(*types.TypeName); tname != nil { + named := tname.Type().(*types.Named) + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m.Pkg() == nil { + t.Errorf("no pkg for %s", m) + } + } + } + } +} + +// Smoke test to ensure that imported methods get the correct package. +func TestCorrectMethodPackage(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + imports := make(map[string]*types.Package) + _, err := Import(imports, "net/http", ".", nil) + if err != nil { + t.Fatal(err) + } + + mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type() + mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex + sel := mset.Lookup(nil, "Lock") + lock := sel.Obj().(*types.Func) + if got, want := lock.Pkg().Path(), "sync"; got != want { + t.Errorf("got package path %q; want %q", got, want) + } +} + +func TestIssue13566(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + testoutdir := filepath.Join(tmpdir, "testdata") + + // b.go needs to be compiled from the output directory so that the compiler can + // find the compiled package a. We pass the full path to compile() so that we + // don't have to copy the file to that directory. + bpath, err := filepath.Abs(filepath.Join("testdata", "b.go")) + if err != nil { + t.Fatal(err) + } + + jsonExport, _ := FindPkg("encoding/json", "testdata") + if jsonExport == "" { + t.Fatalf("no export data found for encoding/json") + } + + compilePkg(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport}, apkg(testoutdir)) + compile(t, testoutdir, bpath, testoutdir, map[string]string{apkg(testoutdir): filepath.Join(testoutdir, "a.o")}) + + // import must succeed (test for issue at hand) + pkg := importPkg(t, "./testdata/b", tmpdir) + + // make sure all indirectly imported packages have names + for _, imp := range pkg.Imports() { + if imp.Name() == "" { + t.Errorf("no name for %s package", imp.Path()) + } + } +} + +func TestIssue13898(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + + // import go/internal/gcimporter which imports go/types partially + imports := make(map[string]*types.Package) + _, err := Import(imports, "go/internal/gcimporter", ".", nil) + if err != nil { + t.Fatal(err) + } + + // look for go/types package + var goTypesPkg *types.Package + for path, pkg := range imports { + if path == "go/types" { + goTypesPkg = pkg + break + } + } + if goTypesPkg == nil { + t.Fatal("go/types not found") + } + + // look for go/types.Object type + obj := lookupObj(t, goTypesPkg.Scope(), "Object") + typ, ok := obj.Type().(*types.Named) + if !ok { + t.Fatalf("go/types.Object type is %v; wanted named type", typ) + } + + // lookup go/types.Object.Pkg method + m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg") + if m == nil { + t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect) + } + + // the method must belong to go/types + if m.Pkg().Path() != "go/types" { + t.Fatalf("found %v; want go/types", m.Pkg()) + } +} + +func TestIssue15517(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + + compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"), nil) + + // Multiple imports of p must succeed without redeclaration errors. + // We use an import path that's not cleaned up so that the eventual + // file path for the package is different from the package path; this + // will expose the error if it is present. + // + // (Issue: Both the textual and the binary importer used the file path + // of the package to be imported as key into the shared packages map. + // However, the binary importer then used the package path to identify + // the imported package to mark it as complete; effectively marking the + // wrong package as complete. By using an "unclean" package path, the + // file and package path are different, exposing the problem if present. + // The same issue occurs with vendoring.) + imports := make(map[string]*types.Package) + for i := 0; i < 3; i++ { + if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestIssue15920(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + compileAndImportPkg(t, "issue15920") +} + +func TestIssue20046(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + // "./issue20046".V.M must exist + pkg := compileAndImportPkg(t, "issue20046") + obj := lookupObj(t, pkg.Scope(), "V") + if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil { + t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect) + } +} + +func TestIssue25301(t *testing.T) { + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + compileAndImportPkg(t, "issue25301") +} + +func TestIssue51836(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // requires generics + + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + testoutdir := filepath.Join(tmpdir, "testdata") + + dir := filepath.Join("testdata", "issue51836") + // Following the pattern of TestIssue13898, aa.go needs to be compiled from + // the output directory. We pass the full path to compile() so that we don't + // have to copy the file to that directory. + bpath, err := filepath.Abs(filepath.Join(dir, "aa.go")) + if err != nil { + t.Fatal(err) + } + compilePkg(t, dir, "a.go", testoutdir, nil, apkg(testoutdir)) + compile(t, testoutdir, bpath, testoutdir, map[string]string{apkg(testoutdir): filepath.Join(testoutdir, "a.o")}) + + // import must succeed (test for issue at hand) + _ = importPkg(t, "./testdata/aa", tmpdir) +} + +func TestIssue57015(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // requires generics + + // This package only handles gc export data. + needsCompiler(t, "gc") + + // On windows, we have to set the -D option for the compiler to avoid having a drive + // letter and an illegal ':' in the import path - just skip it (see also issue #3483). + if runtime.GOOS == "windows" { + t.Skip("avoid dealing with relative paths/drive letters on windows") + } + + compileAndImportPkg(t, "issue57015") +} + +// This is a regression test for a failure to export a package +// containing a specific type error. +// +// Though the issue and test are specific, they may be representatives +// of class of exporter bugs on ill-typed code that we have yet to +// flush out. +// +// TODO(adonovan): systematize our search for similar problems using +// fuzz testing, and drive this test from a table of test cases +// discovered by fuzzing. +func TestIssue57729(t *testing.T) { + // The lack of a receiver causes Recv.Type=Invalid. + // (The type checker then treats Foo as a package-level + // function, inserting it into the package scope.) + // The exporter needs to apply the same treatment. + const src = `package p; func () Foo() {}` + + // Parse the ill-typed input. + fset := token.NewFileSet() + f, err := goparser.ParseFile(fset, "p.go", src, 0) + if err != nil { + t.Fatalf("parse: %v", err) + } + + // Type check it, expecting errors. + config := &types.Config{ + Error: func(err error) { t.Log(err) }, // don't abort at first error + } + pkg1, _ := config.Check("p", fset, []*ast.File{f}, nil) + + // Export it. + // (Shallowness isn't important here.) + data, err := IExportShallow(fset, pkg1) + if err != nil { + t.Fatalf("export: %v", err) // any failure to export is a bug + } + + // Re-import it. + imports := make(map[string]*types.Package) + insert := func(pkg1 *types.Package, name string) { panic("unexpected insert") } + pkg2, err := IImportShallow(fset, imports, data, "p", insert) + if err != nil { + t.Fatalf("import: %v", err) // any failure of IExport+IImport is a bug. + } + + // Check that Lookup("Foo") still returns something. + // We can't assert the type hasn't change: it has, + // from a method of Invalid to a standalone function. + hasObj1 := pkg1.Scope().Lookup("Foo") != nil + hasObj2 := pkg2.Scope().Lookup("Foo") != nil + if hasObj1 != hasObj2 { + t.Errorf("export+import changed Lookup('Foo')!=nil: was %t, became %t", hasObj1, hasObj2) + } +} + +// apkg returns the package "a" prefixed by (as a package) testoutdir +func apkg(testoutdir string) string { + apkg := testoutdir + "/a" + if os.PathSeparator != '/' { + apkg = strings.ReplaceAll(apkg, string(os.PathSeparator), "/") + } + return apkg +} + +func importPkg(t *testing.T, path, srcDir string) *types.Package { + pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) + if err != nil { + t.Fatal(err) + } + return pkg +} + +func compileAndImportPkg(t *testing.T, name string) *types.Package { + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"), nil) + return importPkg(t, "./testdata/"+name, tmpdir) +} + +func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object { + if obj := scope.Lookup(name); obj != nil { + return obj + } + t.Fatalf("%s not found", name) + return nil +} diff --git a/go/internal/gcimporter/iexport.go b/internal/gcimporter/iexport.go similarity index 78% rename from go/internal/gcimporter/iexport.go rename to internal/gcimporter/iexport.go index 9a4ff329e12..ba53cdcdd10 100644 --- a/go/internal/gcimporter/iexport.go +++ b/internal/gcimporter/iexport.go @@ -12,7 +12,6 @@ import ( "bytes" "encoding/binary" "fmt" - "go/ast" "go/constant" "go/token" "go/types" @@ -23,9 +22,45 @@ import ( "strconv" "strings" + "golang.org/x/tools/internal/tokeninternal" "golang.org/x/tools/internal/typeparams" ) +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be +// decoded by the same version of IIExportShallow. If you plan to save +// export data in the file system, be sure to include a cryptographic +// digest of the executable in the key to avoid version skew. +func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow +// in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { + const bundle = false + pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// InsertType is the type of a function that creates a types.TypeName +// object for a named type and inserts it into the scope of the +// specified Package. +type InsertType = func(pkg *types.Package, name string) + // Current bundled export format version. Increase with each format change. // 0: initial implementation const bundleVersion = 0 @@ -36,15 +71,17 @@ const bundleVersion = 0 // The package path of the top-level package will not be recorded, // so that calls to IImportData can override with a provided package path. func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg}) + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) } // IExportBundle writes an indexed export bundle for pkgs to out. func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - return iexportCommon(out, fset, true, iexportVersion, pkgs) + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) } -func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) { +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { if !debug { defer func() { if e := recover(); e != nil { @@ -61,6 +98,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, p := iexporter{ fset: fset, version: version, + shallow: shallow, allPkgs: map[*types.Package]bool{}, stringIndex: map[string]uint64{}, declIndex: map[types.Object]uint64{}, @@ -82,7 +120,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, for _, pkg := range pkgs { scope := pkg.Scope() for _, name := range scope.Names() { - if ast.IsExported(name) { + if token.IsExported(name) { p.pushDecl(scope.Lookup(name)) } } @@ -101,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, p.doDecl(p.declTodo.popHead()) } + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + // Append indices to data0 section. dataLen := uint64(p.data0.Len()) w := p.newWriter() @@ -126,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, } hdr.uint64(uint64(p.version)) hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } hdr.uint64(dataLen) // Flush output. io.Copy(out, &hdr) io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } io.Copy(out, &p.data0) return nil } +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. +func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. + sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + + lines := tokeninternal.GetLines(file) // byte offset of each line start + w.uint64(uint64(len(lines))) + + // Rather than record the entire array of line start offsets, + // we save only a sparse list of (index, offset) pairs for + // the start of each line that contains a needed position. + var sparse [][2]int // (index, offset) pairs +outer: + for i, lineStart := range lines { + lineEnd := size + if i < len(lines)-1 { + lineEnd = uint64(lines[i+1]) + } + // Does this line contains a needed offset? + if needed[0] < lineEnd { + sparse = append(sparse, [2]int{i, lineStart}) + for needed[0] < lineEnd { + needed = needed[1:] + if len(needed) == 0 { + break outer + } + } + } + } + + // Delta-encode the columns. + w.uint64(uint64(len(sparse))) + var prev [2]int + for _, pair := range sparse { + w.uint64(uint64(pair[0] - prev[0])) + w.uint64(uint64(pair[1] - prev[1])) + prev = pair + } +} + // writeIndex writes out an object index. mainIndex indicates whether // we're writing out the main index, which is also read by // non-compiler tools and includes a complete package description @@ -205,7 +313,8 @@ type iexporter struct { out *bytes.Buffer version int - localpkg *types.Package + shallow bool // don't put types from other packages in the index + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -217,6 +326,12 @@ type iexporter struct { strings intWriter stringIndex map[string]uint64 + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + data0 intWriter declIndex map[types.Object]uint64 tparamNames map[types.Object]string // typeparam->exported name @@ -225,6 +340,11 @@ type iexporter struct { indent int // for tracing support } +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + func (p *iexporter) trace(format string, args ...interface{}) { if !trace { // Call sites should also be guarded, but having this check here allows @@ -248,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 { return off } +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. +func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { + index, ok := p.fileInfo[file] + if !ok { + index = uint64(len(p.fileInfo)) + p.fileInfos = append(p.fileInfos, &filePositions{file: file}) + if p.fileInfo == nil { + p.fileInfo = make(map[*token.File]uint64) + } + p.fileInfo[file] = index + } + // Record each needed offset. + info := p.fileInfos[index] + offset := uint64(file.Offset(pos)) + info.needed = append(info.needed, offset) + + return index, offset +} + // pushDecl adds n to the declaration work queue, if not already present. func (p *iexporter) pushDecl(obj types.Object) { // Package unsafe is known to the compiler and predeclared. @@ -256,6 +395,11 @@ func (p *iexporter) pushDecl(obj types.Object) { panic("cannot export package unsafe") } + // Shallow export data: don't index decls from other packages. + if p.shallow && obj.Pkg() != p.localpkg { + return + } + if _, ok := p.declIndex[obj]; ok { return } @@ -303,7 +447,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.Func: sig, _ := obj.Type().(*types.Signature) if sig.Recv() != nil { - panic(internalErrorf("unexpected method: %v", sig)) + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. + if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } } // Function. @@ -415,13 +565,30 @@ func (w *exportWriter) tag(tag byte) { } func (w *exportWriter) pos(pos token.Pos) { - if w.p.version >= iexportVersionPosCol { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { w.posV1(pos) } else { w.posV0(pos) } } +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + func (w *exportWriter) posV1(pos token.Pos) { if w.p.fset == nil { w.int64(0) @@ -497,7 +664,7 @@ func (w *exportWriter) pkg(pkg *types.Package) { w.string(w.exportPath(pkg)) } -func (w *exportWriter) qualifiedIdent(obj types.Object) { +func (w *exportWriter) qualifiedType(obj *types.TypeName) { name := w.p.exportName(obj) // Ensure any referenced declarations are written out too. @@ -556,11 +723,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { return } w.startType(definedType) - w.qualifiedIdent(t.Obj()) + w.qualifiedType(t.Obj()) case *typeparams.TypeParam: w.startType(typeParamType) - w.qualifiedIdent(t.Obj()) + w.qualifiedType(t.Obj()) case *types.Pointer: w.startType(pointerType) @@ -602,14 +769,17 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Struct: w.startType(structType) - w.setPkg(pkg, true) - n := t.NumFields() + if n > 0 { + w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects + } else { + w.setPkg(pkg, true) + } w.uint64(uint64(n)) for i := 0; i < n; i++ { f := t.Field(i) w.pos(f.Pos()) - w.string(f.Name()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg w.typ(f.Type(), pkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) diff --git a/go/internal/gcimporter/iexport_common_test.go b/internal/gcimporter/iexport_common_test.go similarity index 100% rename from go/internal/gcimporter/iexport_common_test.go rename to internal/gcimporter/iexport_common_test.go diff --git a/go/internal/gcimporter/iexport_go118_test.go b/internal/gcimporter/iexport_go118_test.go similarity index 97% rename from go/internal/gcimporter/iexport_go118_test.go rename to internal/gcimporter/iexport_go118_test.go index 5dfa2580f6b..c60a9b5ee86 100644 --- a/go/internal/gcimporter/iexport_go118_test.go +++ b/internal/gcimporter/iexport_go118_test.go @@ -21,7 +21,8 @@ import ( "strings" "testing" - "golang.org/x/tools/go/internal/gcimporter" + "golang.org/x/tools/internal/gcimporter" + "golang.org/x/tools/internal/testenv" ) // TODO(rfindley): migrate this to testdata, as has been done in the standard library. @@ -96,6 +97,8 @@ func testExportSrc(t *testing.T, src []byte) { } func TestImportTypeparamTests(t *testing.T) { + testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache + // Check go files in test/typeparam. rootDir := filepath.Join(runtime.GOROOT(), "test", "typeparam") list, err := os.ReadDir(rootDir) diff --git a/go/internal/gcimporter/iexport_test.go b/internal/gcimporter/iexport_test.go similarity index 85% rename from go/internal/gcimporter/iexport_test.go rename to internal/gcimporter/iexport_test.go index f0e83e519fe..93183f9dc6f 100644 --- a/go/internal/gcimporter/iexport_test.go +++ b/internal/gcimporter/iexport_test.go @@ -30,8 +30,9 @@ import ( "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/internal/gcimporter" + "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/go/loader" + "golang.org/x/tools/internal/gcimporter" "golang.org/x/tools/internal/typeparams/genericfeatures" ) @@ -58,7 +59,8 @@ func readExportFile(filename string) ([]byte, error) { func iexport(fset *token.FileSet, version int, pkg *types.Package) ([]byte, error) { var buf bytes.Buffer - if err := gcimporter.IExportCommon(&buf, fset, false, version, []*types.Package{pkg}); err != nil { + const bundle, shallow = false, false + if err := gcimporter.IExportCommon(&buf, fset, bundle, shallow, version, []*types.Package{pkg}); err != nil { return nil, err } return buf.Bytes(), nil @@ -196,7 +198,7 @@ func testPkg(t *testing.T, fset *token.FileSet, version int, pkg *types.Package, // Compare the packages' corresponding members. for _, name := range pkg.Scope().Names() { - if !ast.IsExported(name) { + if !token.IsExported(name) { continue } obj1 := pkg.Scope().Lookup(name) @@ -403,3 +405,50 @@ func valueToRat(x constant.Value) *big.Rat { } return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) } + +// This is a regression test for a bug in iexport of types.Struct: +// unexported fields were losing their implicit package qualifier. +func TestUnexportedStructFields(t *testing.T) { + fset := token.NewFileSet() + export := make(map[string][]byte) + + // process parses and type-checks a single-file + // package and saves its export data. + process := func(path, content string) { + syntax, err := parser.ParseFile(fset, path+"/x.go", content, 0) + if err != nil { + t.Fatal(err) + } + packages := make(map[string]*types.Package) // keys are package paths + cfg := &types.Config{ + Importer: importerFunc(func(path string) (*types.Package, error) { + data, ok := export[path] + if !ok { + return nil, fmt.Errorf("missing export data for %s", path) + } + return gcexportdata.Read(bytes.NewReader(data), fset, packages, path) + }), + } + pkg := types.NewPackage(path, syntax.Name.Name) + check := types.NewChecker(cfg, fset, pkg, nil) + if err := check.Files([]*ast.File{syntax}); err != nil { + t.Fatal(err) + } + var out bytes.Buffer + if err := gcexportdata.Write(&out, fset, pkg); err != nil { + t.Fatal(err) + } + export[path] = out.Bytes() + } + + // Historically this led to a spurious error: + // "cannot convert a.M (variable of type a.MyTime) to type time.Time" + // because the private fields of Time and MyTime were not identical. + process("time", `package time; type Time struct { x, y int }`) + process("a", `package a; import "time"; type MyTime time.Time; var M MyTime`) + process("b", `package b; import ("a"; "time"); var _ = time.Time(a.M)`) +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/go/internal/gcimporter/iimport.go b/internal/gcimporter/iimport.go similarity index 84% rename from go/internal/gcimporter/iimport.go rename to internal/gcimporter/iimport.go index 28b91b86567..448f903e86a 100644 --- a/go/internal/gcimporter/iimport.go +++ b/internal/gcimporter/iimport.go @@ -17,6 +17,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" "strings" @@ -50,6 +51,8 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 ) type ident struct { @@ -82,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path) + pkgs, err := iimportCommon(fset, imports, data, false, path, nil) if err != nil { return 0, nil, err } @@ -91,11 +94,11 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "") + return iimportCommon(fset, imports, data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { - const currentVersion = 1 +func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { defer func() { @@ -134,19 +137,34 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if insert != nil { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } dLen := int64(r.uint64()) whence, _ := r.Seek(0, io.SeekCurrent) stringData := data[whence : whence+sLen] - declData := data[whence+sLen : whence+sLen+dLen] - r.Seek(sLen+dLen, io.SeekCurrent) + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) p := iimporter{ version: int(version), ipath: path, + insert: insert, stringData: stringData, stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), pkgCache: make(map[uint64]*types.Package), declData: declData, @@ -184,11 +202,18 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if pkg.Name() != pkgName { errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) } + if i == 0 && !bundle { + p.localpkg = pkg + } p.pkgCache[pkgPathOff] = pkg + // Read index for package. nameIndex := make(map[string]uint64) - for nSyms := r.uint64(); nSyms > 0; nSyms-- { + nSyms := r.uint64() + // In shallow mode we don't expect an index for other packages. + assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } @@ -264,8 +289,14 @@ type iimporter struct { version int ipath string + localpkg *types.Package + insert func(pkg *types.Package, name string) // "shallow" mode only + stringData []byte stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i pkgCache map[uint64]*types.Package declData []byte @@ -307,6 +338,13 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { + // In "shallow" mode, call back to the application to + // find the object and insert it into the package scope. + if p.insert != nil { + assert(pkg != p.localpkg) + p.insert(pkg, name) // "can't fail" + return + } errorf("%v.%v not in index", pkg, name) } @@ -331,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string { return s } +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. + // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + func (p *iimporter) pkgAt(off uint64) *types.Package { if pkg, ok := p.pkgCache[off]; ok { return pkg @@ -512,7 +599,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -561,8 +650,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -581,7 +670,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -591,47 +681,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - // Ensure that the imported Kind is Float, else this constant may run into - // bitsize limits on overlarge integers. Eventually we can instead adopt - // the approach of CL 288632, but that CL relies on go/constant APIs that - // were introduced in go1.13. - // - // TODO(rFindley): sync the logic here with tip Go once we no longer - // support go1.12. - x = constant.ToFloat(x) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - return x + return constant.Make(&f) } func (r *importReader) ident() string { @@ -645,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { + if r.p.insert != nil { // shallow mode + return r.posv2() + } if r.p.version >= iexportVersionPosCol { r.posv1() } else { @@ -681,6 +750,15 @@ func (r *importReader) posv1() { } } +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + func (r *importReader) typ() types.Type { return r.p.typAt(r.uint64(), nil) } diff --git a/go/internal/gcimporter/israce_test.go b/internal/gcimporter/israce_test.go similarity index 100% rename from go/internal/gcimporter/israce_test.go rename to internal/gcimporter/israce_test.go diff --git a/go/internal/gcimporter/newInterface10.go b/internal/gcimporter/newInterface10.go similarity index 100% rename from go/internal/gcimporter/newInterface10.go rename to internal/gcimporter/newInterface10.go diff --git a/go/internal/gcimporter/newInterface11.go b/internal/gcimporter/newInterface11.go similarity index 100% rename from go/internal/gcimporter/newInterface11.go rename to internal/gcimporter/newInterface11.go diff --git a/internal/gcimporter/shallow_test.go b/internal/gcimporter/shallow_test.go new file mode 100644 index 00000000000..429c34b3dd7 --- /dev/null +++ b/internal/gcimporter/shallow_test.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter_test + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "strings" + "testing" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/gcimporter" + "golang.org/x/tools/internal/testenv" +) + +// TestStd type-checks the standard library using shallow export data. +func TestShallowStd(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode; too slow (https://golang.org/issue/14113)") + } + testenv.NeedsTool(t, "go") + + // Load import graph of the standard library. + // (No parsing or type-checking.) + cfg := &packages.Config{ + Mode: packages.NeedImports | + packages.NeedName | + packages.NeedFiles | // see https://github.com/golang/go/issues/56632 + packages.NeedCompiledGoFiles, + Tests: false, + } + pkgs, err := packages.Load(cfg, "std") + if err != nil { + t.Fatalf("load: %v", err) + } + if len(pkgs) < 200 { + t.Fatalf("too few packages: %d", len(pkgs)) + } + + // Type check the packages in parallel postorder. + done := make(map[*packages.Package]chan struct{}) + packages.Visit(pkgs, nil, func(p *packages.Package) { + done[p] = make(chan struct{}) + }) + packages.Visit(pkgs, nil, + func(pkg *packages.Package) { + go func() { + // Wait for all deps to be done. + for _, imp := range pkg.Imports { + <-done[imp] + } + typecheck(t, pkg) + close(done[pkg]) + }() + }) + for _, root := range pkgs { + <-done[root] + } +} + +// typecheck reads, parses, and type-checks a package. +// It squirrels the export data in the the ppkg.ExportFile field. +func typecheck(t *testing.T, ppkg *packages.Package) { + if ppkg.PkgPath == "unsafe" { + return // unsafe is special + } + + // Create a local FileSet just for this package. + fset := token.NewFileSet() + + // Parse files in parallel. + syntax := make([]*ast.File, len(ppkg.CompiledGoFiles)) + var group errgroup.Group + for i, filename := range ppkg.CompiledGoFiles { + i, filename := i, filename + group.Go(func() error { + f, err := parser.ParseFile(fset, filename, nil, parser.SkipObjectResolution) + if err != nil { + return err // e.g. missing file + } + syntax[i] = f + return nil + }) + } + if err := group.Wait(); err != nil { + t.Fatal(err) + } + // Inv: all files were successfully parsed. + + // Build map of dependencies by package path. + // (We don't compute this mapping for the entire + // packages graph because it is not globally consistent.) + depsByPkgPath := make(map[string]*packages.Package) + { + var visit func(*packages.Package) + visit = func(pkg *packages.Package) { + if depsByPkgPath[pkg.PkgPath] == nil { + depsByPkgPath[pkg.PkgPath] = pkg + for path := range pkg.Imports { + visit(pkg.Imports[path]) + } + } + } + visit(ppkg) + } + + // importer state + var ( + insert func(p *types.Package, name string) + importMap = make(map[string]*types.Package) // keys are PackagePaths + ) + loadFromExportData := func(imp *packages.Package) (*types.Package, error) { + data := []byte(imp.ExportFile) + return gcimporter.IImportShallow(fset, importMap, data, imp.PkgPath, insert) + } + insert = func(p *types.Package, name string) { + imp, ok := depsByPkgPath[p.Path()] + if !ok { + t.Fatalf("can't find dependency: %q", p.Path()) + } + imported, err := loadFromExportData(imp) + if err != nil { + t.Fatalf("unmarshal: %v", err) + } + if imported != p { + t.Fatalf("internal error: inconsistent packages") + } + if obj := imported.Scope().Lookup(name); obj == nil { + t.Fatalf("lookup %q.%s failed", imported.Path(), name) + } + } + + cfg := &types.Config{ + Error: func(e error) { + t.Error(e) + }, + Importer: importerFunc(func(importPath string) (*types.Package, error) { + if importPath == "unsafe" { + return types.Unsafe, nil // unsafe has no exportdata + } + imp, ok := ppkg.Imports[importPath] + if !ok { + return nil, fmt.Errorf("missing import %q", importPath) + } + return loadFromExportData(imp) + }), + } + + // Type-check the syntax trees. + tpkg, _ := cfg.Check(ppkg.PkgPath, fset, syntax, nil) + postTypeCheck(t, fset, tpkg) + + // Save the export data. + data, err := gcimporter.IExportShallow(fset, tpkg) + if err != nil { + t.Fatalf("internal error marshalling export data: %v", err) + } + ppkg.ExportFile = string(data) +} + +// postTypeCheck is called after a package is type checked. +// We use it to assert additional correctness properties, +// for example, that the apparent location of "fmt.Println" +// corresponds to its source location: in other words, +// export+import preserves high-fidelity positions. +func postTypeCheck(t *testing.T, fset *token.FileSet, pkg *types.Package) { + // We hard-code a few interesting test-case objects. + var obj types.Object + switch pkg.Path() { + case "fmt": + // func fmt.Println + obj = pkg.Scope().Lookup("Println") + case "net/http": + // method (*http.Request).ParseForm + req := pkg.Scope().Lookup("Request") + obj, _, _ = types.LookupFieldOrMethod(req.Type(), true, pkg, "ParseForm") + default: + return + } + if obj == nil { + t.Errorf("object not found in package %s", pkg.Path()) + return + } + + // Now check the source fidelity of the object's position. + posn := fset.Position(obj.Pos()) + data, err := os.ReadFile(posn.Filename) + if err != nil { + t.Errorf("can't read source file declaring %v: %v", obj, err) + return + } + + // Check line and column denote a source interval containing the object's identifier. + line := strings.Split(string(data), "\n")[posn.Line-1] + + if id := line[posn.Column-1 : posn.Column-1+len(obj.Name())]; id != obj.Name() { + t.Errorf("%+v: expected declaration of %v at this line, column; got %q", posn, obj, line) + } + + // Check offset. + if id := string(data[posn.Offset : posn.Offset+len(obj.Name())]); id != obj.Name() { + t.Errorf("%+v: expected declaration of %v at this offset; got %q", posn, obj, id) + } + + // Check commutativity of Position() and start+len(name) operations: + // Position(startPos+len(name)) == Position(startPos) + len(name). + // This important property is a consequence of the way in which the + // decoder fills the gaps in the sparse line-start offset table. + endPosn := fset.Position(obj.Pos() + token.Pos(len(obj.Name()))) + wantEndPosn := token.Position{ + Filename: posn.Filename, + Offset: posn.Offset + len(obj.Name()), + Line: posn.Line, + Column: posn.Column + len(obj.Name()), + } + if endPosn != wantEndPosn { + t.Errorf("%+v: expected end Position of %v here; was at %+v", wantEndPosn, obj, endPosn) + } +} diff --git a/internal/gcimporter/stdlib_test.go b/internal/gcimporter/stdlib_test.go new file mode 100644 index 00000000000..33ff7958118 --- /dev/null +++ b/internal/gcimporter/stdlib_test.go @@ -0,0 +1,94 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter_test + +import ( + "bytes" + "fmt" + "go/token" + "go/types" + "runtime" + "testing" + "unsafe" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/testenv" +) + +// TestStdlib ensures that all packages in std and x/tools can be +// type-checked using export data. Takes around 3s. +func TestStdlib(t *testing.T) { + testenv.NeedsGoPackages(t) + + // gcexportdata.Read rapidly consumes FileSet address space, + // so disable the test on 32-bit machines. + // (We could use a fresh FileSet per type-check, but that + // would require us to re-parse the source using it.) + if unsafe.Sizeof(token.NoPos) < 8 { + t.Skip("skipping test on 32-bit machine") + } + + // Load, parse and type-check the standard library. + // If we have the full source code for x/tools, also load and type-check that. + cfg := &packages.Config{Mode: packages.LoadAllSyntax} + patterns := []string{"std"} + minPkgs := 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more. + switch runtime.GOOS { + case "android", "ios": + // The go_.*_exec script for mobile builders only copies over the source tree + // for the package under test. + default: + patterns = append(patterns, "golang.org/x/tools/...") + minPkgs += 160 // At the time of writing, 'GOOS=plan9 go list ./... | wc -l' reports 188. + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + t.Fatalf("failed to load/parse/type-check: %v", err) + } + if packages.PrintErrors(pkgs) > 0 { + t.Fatal("there were errors during loading") + } + if len(pkgs) < minPkgs { + t.Errorf("too few packages (%d) were loaded", len(pkgs)) + } + + export := make(map[string][]byte) // keys are package IDs + + // Re-type check them all in post-order, using export data. + packages.Visit(pkgs, nil, func(pkg *packages.Package) { + packages := make(map[string]*types.Package) // keys are package paths + cfg := &types.Config{ + Error: func(e error) { + t.Errorf("type error: %v", e) + }, + Importer: importerFunc(func(importPath string) (*types.Package, error) { + // Resolve import path to (vendored?) package path. + imported := pkg.Imports[importPath] + + if imported.PkgPath == "unsafe" { + return types.Unsafe, nil // unsafe has no exportdata + } + + data, ok := export[imported.ID] + if !ok { + return nil, fmt.Errorf("missing export data for %s", importPath) + } + return gcexportdata.Read(bytes.NewReader(data), pkg.Fset, packages, imported.PkgPath) + }), + } + + // Re-typecheck the syntax and save the export data in the map. + newPkg := types.NewPackage(pkg.PkgPath, pkg.Name) + check := types.NewChecker(cfg, pkg.Fset, newPkg, nil) + check.Files(pkg.Syntax) + + var out bytes.Buffer + if err := gcexportdata.Write(&out, pkg.Fset, newPkg); err != nil { + t.Fatalf("internal error writing export data: %v", err) + } + export[pkg.ID] = out.Bytes() + }) +} diff --git a/go/internal/gcimporter/support_go117.go b/internal/gcimporter/support_go117.go similarity index 100% rename from go/internal/gcimporter/support_go117.go rename to internal/gcimporter/support_go117.go diff --git a/internal/gcimporter/support_go118.go b/internal/gcimporter/support_go118.go new file mode 100644 index 00000000000..edbe6ea7041 --- /dev/null +++ b/internal/gcimporter/support_go118.go @@ -0,0 +1,37 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "Ā·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/go/internal/gcimporter/testdata/a.go b/internal/gcimporter/testdata/a.go similarity index 100% rename from go/internal/gcimporter/testdata/a.go rename to internal/gcimporter/testdata/a.go diff --git a/internal/gcimporter/testdata/a/a.go b/internal/gcimporter/testdata/a/a.go new file mode 100644 index 00000000000..56e4292cda9 --- /dev/null +++ b/internal/gcimporter/testdata/a/a.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Input for TestIssue13566 + +package a + +import "encoding/json" + +type A struct { + a *A + json json.RawMessage +} diff --git a/go/internal/gcimporter/testdata/b.go b/internal/gcimporter/testdata/b.go similarity index 100% rename from go/internal/gcimporter/testdata/b.go rename to internal/gcimporter/testdata/b.go diff --git a/go/internal/gcimporter/testdata/exports.go b/internal/gcimporter/testdata/exports.go similarity index 100% rename from go/internal/gcimporter/testdata/exports.go rename to internal/gcimporter/testdata/exports.go diff --git a/go/internal/gcimporter/testdata/issue15920.go b/internal/gcimporter/testdata/issue15920.go similarity index 100% rename from go/internal/gcimporter/testdata/issue15920.go rename to internal/gcimporter/testdata/issue15920.go diff --git a/go/internal/gcimporter/testdata/issue20046.go b/internal/gcimporter/testdata/issue20046.go similarity index 100% rename from go/internal/gcimporter/testdata/issue20046.go rename to internal/gcimporter/testdata/issue20046.go diff --git a/go/internal/gcimporter/testdata/issue25301.go b/internal/gcimporter/testdata/issue25301.go similarity index 100% rename from go/internal/gcimporter/testdata/issue25301.go rename to internal/gcimporter/testdata/issue25301.go diff --git a/go/internal/gcimporter/testdata/issue51836/a.go b/internal/gcimporter/testdata/issue51836/a.go similarity index 100% rename from go/internal/gcimporter/testdata/issue51836/a.go rename to internal/gcimporter/testdata/issue51836/a.go diff --git a/internal/gcimporter/testdata/issue51836/a/a.go b/internal/gcimporter/testdata/issue51836/a/a.go new file mode 100644 index 00000000000..e9223c9aa82 --- /dev/null +++ b/internal/gcimporter/testdata/issue51836/a/a.go @@ -0,0 +1,8 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type T[K any] struct { +} diff --git a/go/internal/gcimporter/testdata/issue51836/aa.go b/internal/gcimporter/testdata/issue51836/aa.go similarity index 100% rename from go/internal/gcimporter/testdata/issue51836/aa.go rename to internal/gcimporter/testdata/issue51836/aa.go diff --git a/internal/gcimporter/testdata/issue57015.go b/internal/gcimporter/testdata/issue57015.go new file mode 100644 index 00000000000..b6be81191f9 --- /dev/null +++ b/internal/gcimporter/testdata/issue57015.go @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue57015 + +type E error + +type X[T any] struct {} + +func F() X[interface { + E +}] { + panic(0) +} + diff --git a/go/internal/gcimporter/testdata/p.go b/internal/gcimporter/testdata/p.go similarity index 100% rename from go/internal/gcimporter/testdata/p.go rename to internal/gcimporter/testdata/p.go diff --git a/go/internal/gcimporter/testdata/versions/test.go b/internal/gcimporter/testdata/versions/test.go similarity index 100% rename from go/internal/gcimporter/testdata/versions/test.go rename to internal/gcimporter/testdata/versions/test.go diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_0i.a b/internal/gcimporter/testdata/versions/test_go1.11_0i.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.11_0i.a rename to internal/gcimporter/testdata/versions/test_go1.11_0i.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_6b.a b/internal/gcimporter/testdata/versions/test_go1.11_6b.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.11_6b.a rename to internal/gcimporter/testdata/versions/test_go1.11_6b.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_999b.a b/internal/gcimporter/testdata/versions/test_go1.11_999b.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.11_999b.a rename to internal/gcimporter/testdata/versions/test_go1.11_999b.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_999i.a b/internal/gcimporter/testdata/versions/test_go1.11_999i.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.11_999i.a rename to internal/gcimporter/testdata/versions/test_go1.11_999i.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.7_0.a b/internal/gcimporter/testdata/versions/test_go1.7_0.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.7_0.a rename to internal/gcimporter/testdata/versions/test_go1.7_0.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.7_1.a b/internal/gcimporter/testdata/versions/test_go1.7_1.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.7_1.a rename to internal/gcimporter/testdata/versions/test_go1.7_1.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.8_4.a b/internal/gcimporter/testdata/versions/test_go1.8_4.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.8_4.a rename to internal/gcimporter/testdata/versions/test_go1.8_4.a diff --git a/go/internal/gcimporter/testdata/versions/test_go1.8_5.a b/internal/gcimporter/testdata/versions/test_go1.8_5.a similarity index 100% rename from go/internal/gcimporter/testdata/versions/test_go1.8_5.a rename to internal/gcimporter/testdata/versions/test_go1.8_5.a diff --git a/internal/gcimporter/unified_no.go b/internal/gcimporter/unified_no.go new file mode 100644 index 00000000000..286bf445483 --- /dev/null +++ b/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/internal/gcimporter/unified_yes.go b/internal/gcimporter/unified_yes.go new file mode 100644 index 00000000000..b5d69ffbe68 --- /dev/null +++ b/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/internal/gcimporter/ureader_no.go b/internal/gcimporter/ureader_no.go new file mode 100644 index 00000000000..8eb20729c2a --- /dev/null +++ b/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/internal/gcimporter/ureader_yes.go b/internal/gcimporter/ureader_yes.go new file mode 100644 index 00000000000..b285a11ce25 --- /dev/null +++ b/internal/gcimporter/ureader_yes.go @@ -0,0 +1,738 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. +func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + for _, iface := range pr.ifaces { + iface.Complete() + } + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(flattenImports(imports)) + + return pkg +} + +// flattenImports returns the transitive closure of all imported +// packages rooted from pkgs. +func flattenImports(pkgs []*types.Package) []*types.Package { + var res []*types.Package + seen := make(map[*types.Package]struct{}) + for _, pkg := range pkgs { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + + // pkg.Imports() is already flattened. + for _, pkg := range pkg.Imports() { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + } + } + return res +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. + if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) +} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. + rhs := r.typ() + if underlying := rhs.Underlying(); underlying != nil { + setUnderlying(underlying) + } else { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } + setUnderlying(rhs.Underlying()) + }) + } + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + + var dict readerDict + + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/internal/gocommand/invoke.go b/internal/gocommand/invoke.go index 67256dc3974..d50551693f3 100644 --- a/internal/gocommand/invoke.go +++ b/internal/gocommand/invoke.go @@ -10,8 +10,10 @@ import ( "context" "fmt" "io" + "log" "os" "regexp" + "runtime" "strconv" "strings" "sync" @@ -232,6 +234,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { return runCmdContext(ctx, cmd) } +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { @@ -243,11 +251,24 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { resChan <- cmd.Wait() }() - select { - case err := <-resChan: - return err - case <-ctx.Done(): + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(1 * time.Minute): + HandleHangingGoCommand(cmd.Process) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } } + // Cancelled. Interrupt and see if it ends voluntarily. cmd.Process.Signal(os.Interrupt) select { @@ -255,11 +276,63 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { return err case <-time.After(time.Second): } + // Didn't shut down in response to interrupt. Kill it hard. - cmd.Process.Kill() + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { + // Don't panic here as this reliably fails on windows with EINVAL. + log.Printf("error killing the Go command: %v", err) + } + + // See above: don't wait indefinitely if we're debugging hanging Go commands. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill + HandleHangingGoCommand(cmd.Process) + } + } return <-resChan } +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. + +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { diff --git a/internal/gocommand/version.go b/internal/gocommand/version.go index 71304368020..307a76d474a 100644 --- a/internal/gocommand/version.go +++ b/internal/gocommand/version.go @@ -7,11 +7,19 @@ package gocommand import ( "context" "fmt" + "regexp" "strings" ) -// GoVersion checks the go version by running "go list" with modules off. -// It returns the X in Go 1.X. +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} @@ -38,7 +46,7 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { if len(stdout) < 3 { return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) } - // Split up "[go1.1 go1.15]" + // Split up "[go1.1 go1.15]" and return highest go1.X value. tags := strings.Fields(stdout[1 : len(stdout)-2]) for i := len(tags) - 1; i >= 0; i-- { var version int @@ -49,3 +57,25 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { } return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) } + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. +func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/internal/gocommand/version_test.go b/internal/gocommand/version_test.go new file mode 100644 index 00000000000..27016e4c074 --- /dev/null +++ b/internal/gocommand/version_test.go @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "strconv" + "testing" +) + +func TestParseGoVersionOutput(t *testing.T) { + tests := []struct { + args string + want string + }{ + {"go version go1.12 linux/amd64", "go1.12"}, + {"go version go1.18.1 darwin/amd64", "go1.18.1"}, + {"go version go1.19.rc1 windows/arm64", "go1.19.rc1"}, + {"go version devel d5de62df152baf4de6e9fe81933319b86fd95ae4 linux/386", "devel d5de62df152baf4de6e9fe81933319b86fd95ae4"}, + {"go version devel go1.20-1f068f0dc7 Tue Oct 18 20:58:37 2022 +0000 darwin/amd64", "devel go1.20-1f068f0dc7"}, + {"v1.19.1 foo/bar", ""}, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + if got := ParseGoVersionOutput(tt.args); got != tt.want { + t.Errorf("parseGoVersionOutput() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/goroot/importcfg.go b/internal/goroot/importcfg.go new file mode 100644 index 00000000000..f1cd28e2ec3 --- /dev/null +++ b/internal/goroot/importcfg.go @@ -0,0 +1,71 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goroot is a copy of package internal/goroot +// in the main GO repot. It provides a utility to produce +// an importcfg and import path to package file map mapping +// standard library packages to the locations of their export +// data files. +package goroot + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "sync" +) + +// Importcfg returns an importcfg file to be passed to the +// Go compiler that contains the cached paths for the .a files for the +// standard library. +func Importcfg() (string, error) { + var icfg bytes.Buffer + + m, err := PkgfileMap() + if err != nil { + return "", err + } + fmt.Fprintf(&icfg, "# import config") + for importPath, export := range m { + fmt.Fprintf(&icfg, "\npackagefile %s=%s", importPath, export) + } + s := icfg.String() + return s, nil +} + +var ( + stdlibPkgfileMap map[string]string + stdlibPkgfileErr error + once sync.Once +) + +// PkgfileMap returns a map of package paths to the location on disk +// of the .a file for the package. +// The caller must not modify the map. +func PkgfileMap() (map[string]string, error) { + once.Do(func() { + m := make(map[string]string) + output, err := exec.Command("go", "list", "-export", "-e", "-f", "{{.ImportPath}} {{.Export}}", "std", "cmd").Output() + if err != nil { + stdlibPkgfileErr = err + } + for _, line := range strings.Split(string(output), "\n") { + if line == "" { + continue + } + sp := strings.SplitN(line, " ", 2) + if len(sp) != 2 { + err = fmt.Errorf("determining pkgfile map: invalid line in go list output: %q", line) + return + } + importPath, export := sp[0], sp[1] + if export != "" { + m[importPath] = export + } + } + stdlibPkgfileMap = m + }) + return stdlibPkgfileMap, stdlibPkgfileErr +} diff --git a/internal/imports/fix.go b/internal/imports/fix.go index d859617b774..642a5ac2d75 100644 --- a/internal/imports/fix.go +++ b/internal/imports/fix.go @@ -697,6 +697,9 @@ func candidateImportName(pkg *pkg) string { // GetAllCandidates calls wrapped for each package whose name starts with // searchPrefix, and can be imported from filename with the package name filePkg. +// +// Beware that the wrapped function may be called multiple times concurrently. +// TODO(adonovan): encapsulate the concurrency. func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { @@ -796,7 +799,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} +var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. @@ -807,6 +810,11 @@ type ProcessEnv struct { ModFlag string ModFile string + // SkipPathInScan returns true if the path should be skipped from scans of + // the RootCurrentModule root type. The function argument is a clean, + // absolute path. + SkipPathInScan func(string) bool + // Env overrides the OS environment, and can be used to specify // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because // exec.Command will not honor it. @@ -861,7 +869,7 @@ func (e *ProcessEnv) init() error { } foundAllRequired := true - for _, k := range RequiredGoEnvVars { + for _, k := range requiredGoEnvVars { if _, ok := e.Env[k]; !ok { foundAllRequired = false break @@ -877,7 +885,7 @@ func (e *ProcessEnv) init() error { } goEnv := map[string]string{} - stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...) + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...) if err != nil { return err } @@ -906,7 +914,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 { + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { e.resolver = newGopathResolver(e) return e.resolver, nil } @@ -1367,9 +1375,9 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error return err } var roots []gopathwalk.Root - roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT}) + roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT}) for _, p := range filepath.SplitList(goenv["GOPATH"]) { - roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH}) + roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH}) } // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. roots = filterRoots(roots, callback.rootFound) diff --git a/internal/imports/mkstdlib.go b/internal/imports/mkstdlib.go index 47714bf0719..470b93f1df2 100644 --- a/internal/imports/mkstdlib.go +++ b/internal/imports/mkstdlib.go @@ -15,6 +15,7 @@ import ( "bytes" "fmt" "go/format" + "go/token" "io" "io/ioutil" "log" @@ -23,8 +24,9 @@ import ( "regexp" "runtime" "sort" + "strings" - exec "golang.org/x/sys/execabs" + "golang.org/x/tools/go/packages" ) func mustOpen(name string) io.Reader { @@ -41,47 +43,29 @@ func api(base string) string { var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) -var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true} - func main() { var buf bytes.Buffer outf := func(format string, args ...interface{}) { fmt.Fprintf(&buf, format, args...) } + outf(`// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +`) outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n") outf("package imports\n") outf("var stdlib = map[string][]string{\n") - f := io.MultiReader( - mustOpen(api("go1.txt")), - mustOpen(api("go1.1.txt")), - mustOpen(api("go1.2.txt")), - mustOpen(api("go1.3.txt")), - mustOpen(api("go1.4.txt")), - mustOpen(api("go1.5.txt")), - mustOpen(api("go1.6.txt")), - mustOpen(api("go1.7.txt")), - mustOpen(api("go1.8.txt")), - mustOpen(api("go1.9.txt")), - mustOpen(api("go1.10.txt")), - mustOpen(api("go1.11.txt")), - mustOpen(api("go1.12.txt")), - mustOpen(api("go1.13.txt")), - mustOpen(api("go1.14.txt")), - mustOpen(api("go1.15.txt")), - mustOpen(api("go1.16.txt")), - mustOpen(api("go1.17.txt")), - mustOpen(api("go1.18.txt")), - - // The API of the syscall/js package needs to be computed explicitly, - // because it's not included in the GOROOT/api/go1.*.txt files at this time. - syscallJSAPI(), - ) + f := readAPI() sc := bufio.NewScanner(f) + // The APIs of the syscall/js and unsafe packages need to be computed explicitly, + // because they're not included in the GOROOT/api/go1.*.txt files at this time. pkgs := map[string]map[string]bool{ - "unsafe": unsafeSyms, + "syscall/js": syms("syscall/js", "GOOS=js", "GOARCH=wasm"), + "unsafe": syms("unsafe"), } - paths := []string{"unsafe"} + paths := []string{"syscall/js", "unsafe"} for sc.Scan() { l := sc.Text() @@ -100,7 +84,7 @@ func main() { } sort.Strings(paths) for _, path := range paths { - outf("\t%q: []string{\n", path) + outf("\t%q: {\n", path) pkg := pkgs[path] var syms []string for sym := range pkg { @@ -123,17 +107,39 @@ func main() { } } -// syscallJSAPI returns the API of the syscall/js package. -// It's computed from the contents of $(go env GOROOT)/src/syscall/js. -func syscallJSAPI() io.Reader { - var exeSuffix string - if runtime.GOOS == "windows" { - exeSuffix = ".exe" +// readAPI opens an io.Reader that reads all stdlib API content. +func readAPI() io.Reader { + entries, err := os.ReadDir(filepath.Join(runtime.GOROOT(), "api")) + if err != nil { + log.Fatal(err) + } + var readers []io.Reader + for _, entry := range entries { + name := entry.Name() + if strings.HasPrefix(name, "go") && strings.HasSuffix(name, ".txt") { + readers = append(readers, mustOpen(api(name))) + } + } + return io.MultiReader(readers...) +} + +// syms computes the exported symbols in the specified package. +func syms(pkg string, extraEnv ...string) map[string]bool { + var env []string + if len(extraEnv) != 0 { + env = append(os.Environ(), extraEnv...) } - cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") - out, err := cmd.Output() + pkgs, err := packages.Load(&packages.Config{Mode: packages.NeedTypes, Env: env}, pkg) if err != nil { log.Fatalln(err) + } else if len(pkgs) != 1 { + log.Fatalf("got %d packages, want one package %q", len(pkgs), pkg) + } + syms := make(map[string]bool) + for _, name := range pkgs[0].Types.Scope().Names() { + if token.IsExported(name) { + syms[name] = true + } } - return bytes.NewReader(out) + return syms } diff --git a/internal/imports/mod.go b/internal/imports/mod.go index 2bcf41f5fa7..7d99d04ca8a 100644 --- a/internal/imports/mod.go +++ b/internal/imports/mod.go @@ -70,9 +70,17 @@ func (r *ModuleResolver) init() error { Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, } - vendorEnabled, mainModVendor, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) - if err != nil { - return err + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON + + // Module vendor directories are ignored in workspace mode: + // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md + if len(r.env.Env["GOWORK"]) == 0 { + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } } if mainModVendor != nil && vendorEnabled { @@ -121,22 +129,22 @@ func (r *ModuleResolver) init() error { }) r.roots = []gopathwalk.Root{ - {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT}, + {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}, } r.mainByDir = make(map[string]*gocommand.ModuleJSON) for _, main := range r.mains { - r.roots = append(r.roots, gopathwalk.Root{main.Dir, gopathwalk.RootCurrentModule}) + r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule}) r.mainByDir[main.Dir] = main } if vendorEnabled { - r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther}) } else { addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { // This is redundant with the cache, but we'll skip it cheaply enough. - r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) } else { - r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) } } // Walk dependent modules before scanning the full mod cache, direct deps first. @@ -150,7 +158,7 @@ func (r *ModuleResolver) init() error { addDep(mod) } } - r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) } r.scannedRoots = map[gopathwalk.Root]bool{} @@ -458,6 +466,16 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error // We assume cached directories are fully cached, including all their // children, and have not changed. We can skip them. skip := func(root gopathwalk.Root, dir string) bool { + if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule { + if root.Path == dir { + return false + } + + if r.env.SkipPathInScan(filepath.Clean(dir)) { + return true + } + } + info, ok := r.cacheLoad(dir) if !ok { return false diff --git a/internal/imports/mod_test.go b/internal/imports/mod_test.go index 5f71805fa77..cb7fd449938 100644 --- a/internal/imports/mod_test.go +++ b/internal/imports/mod_test.go @@ -29,7 +29,7 @@ import ( // Tests that we can find packages in the stdlib. func TestScanStdlib(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x `, "") @@ -42,7 +42,7 @@ module x // where the module is in scope -- here we have to figure out the import path // without any help from go list. func TestScanOutOfScopeNestedModule(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -68,7 +68,7 @@ package x`, "") // Tests that we don't find a nested module contained in a local replace target. // The code for this case is too annoying to write, so it's just ignored. func TestScanNestedModuleInLocalReplace(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -107,7 +107,7 @@ package z // Tests that path encoding is handled correctly. Adapted from mod_case.txt. func TestModCase(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -124,7 +124,7 @@ import _ "rsc.io/QUOTE/QUOTE" // Not obviously relevant to goimports. Adapted from mod_domain_root.txt anyway. func TestModDomainRoot(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -140,7 +140,7 @@ import _ "example.com" // Tests that scanning the module cache > 1 time is able to find the same module. func TestModMultipleScans(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -159,7 +159,7 @@ import _ "example.com" // Tests that scanning the module cache > 1 time is able to find the same module // in the module cache. func TestModMultipleScansWithSubdirs(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -178,7 +178,7 @@ import _ "rsc.io/quote" // Tests that scanning the module cache > 1 after changing a package in module cache to make it unimportable // is able to find the same module. func TestModCacheEditModFile(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -219,7 +219,7 @@ import _ "rsc.io/quote" // Tests that -mod=vendor works. Adapted from mod_vendor_build.txt. func TestModVendorBuild(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module m go 1.12 @@ -250,7 +250,7 @@ import _ "rsc.io/sampler" // Tests that -mod=vendor is auto-enabled only for go1.14 and higher. // Vaguely inspired by mod_vendor_auto.txt. func TestModVendorAuto(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module m go 1.14 @@ -276,7 +276,7 @@ import _ "rsc.io/sampler" // Tests that a module replace works. Adapted from mod_list.txt. We start with // go.mod2; the first part of the test is irrelevant. func TestModList(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x require rsc.io/quote v1.5.1 @@ -293,7 +293,7 @@ import _ "rsc.io/quote" // Tests that a local replace works. Adapted from mod_local_replace.txt. func TestModLocalReplace(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- x/y/go.mod -- module x/y require zz v1.0.0 @@ -317,7 +317,7 @@ package z // Tests that the package at the root of the main module can be found. // Adapted from the first part of mod_multirepo.txt. func TestModMultirepo1(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote @@ -333,7 +333,7 @@ package quote // of mod_multirepo.txt (We skip the case where it doesn't have a go.mod // entry -- we just don't work in that case.) func TestModMultirepo3(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote @@ -352,7 +352,7 @@ import _ "rsc.io/quote/v2" // Tests that a nested module is found in the module cache, even though // it's checked out. Adapted from the fourth part of mod_multirepo.txt. func TestModMultirepo4(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote require rsc.io/quote/v2 v2.0.1 @@ -376,7 +376,7 @@ import _ "rsc.io/quote/v2" // Tests a simple module dependency. Adapted from the first part of mod_replace.txt. func TestModReplace1(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -392,7 +392,7 @@ package main // Tests a local replace. Adapted from the second part of mod_replace.txt. func TestModReplace2(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -418,7 +418,7 @@ import "rsc.io/sampler" // Tests that a module can be replaced by a different module path. Adapted // from the third part of mod_replace.txt. func TestModReplace3(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -451,7 +451,7 @@ package quote // mod_replace_import.txt, with example.com/v changed to /vv because Go 1.11 // thinks /v is an invalid major version. func TestModReplaceImport(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module example.com/m @@ -556,7 +556,7 @@ package v func TestModWorkspace(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- go 1.18 @@ -592,7 +592,7 @@ package b func TestModWorkspaceReplace(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- use m @@ -651,7 +651,7 @@ func G() { func TestModWorkspaceReplaceOverride(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, `-- go.work -- + mt := setup(t, nil, `-- go.work -- use m use n replace example.com/dep => ./dep3 @@ -716,7 +716,7 @@ func G() { func TestModWorkspacePrune(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- go 1.18 @@ -884,8 +884,7 @@ package z // Tests that we handle GO111MODULE=on with no go.mod file. See #30855. func TestNoMainModule(t *testing.T) { - testenv.NeedsGo1Point(t, 12) - mt := setup(t, ` + mt := setup(t, map[string]string{"GO111MODULE": "on"}, ` -- x.go -- package x `, "") @@ -993,9 +992,10 @@ type modTest struct { // setup builds a test environment from a txtar and supporting modules // in testdata/mod, along the lines of TestScript in cmd/go. -func setup(t *testing.T, main, wd string) *modTest { +// +// extraEnv is applied on top of the default test env. +func setup(t *testing.T, extraEnv map[string]string, main, wd string) *modTest { t.Helper() - testenv.NeedsGo1Point(t, 11) testenv.NeedsTool(t, "go") proxyOnce.Do(func() { @@ -1023,13 +1023,16 @@ func setup(t *testing.T, main, wd string) *modTest { Env: map[string]string{ "GOPATH": filepath.Join(dir, "gopath"), "GOMODCACHE": "", - "GO111MODULE": "on", + "GO111MODULE": "auto", "GOSUMDB": "off", "GOPROXY": proxydir.ToURL(proxyDir), }, WorkingDir: filepath.Join(mainDir, wd), GocmdRunner: &gocommand.Runner{}, } + for k, v := range extraEnv { + env.Env[k] = v + } if *testDebug { env.Logf = log.Printf } @@ -1168,7 +1171,7 @@ func removeDir(dir string) { // Tests that findModFile can find the mod files from a path in the module cache. func TestFindModFileModCache(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -1189,7 +1192,6 @@ import _ "rsc.io/quote" // Tests that crud in the module cache is ignored. func TestInvalidModCache(t *testing.T) { - testenv.NeedsGo1Point(t, 11) dir, err := ioutil.TempDir("", t.Name()) if err != nil { t.Fatal(err) @@ -1220,7 +1222,7 @@ func TestInvalidModCache(t *testing.T) { } func TestGetCandidatesRanking(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module example.com @@ -1286,7 +1288,6 @@ import ( } func BenchmarkScanModCache(b *testing.B) { - testenv.NeedsGo1Point(b, 11) env := &ProcessEnv{ GocmdRunner: &gocommand.Runner{}, Logf: log.Printf, diff --git a/internal/imports/sortimports.go b/internal/imports/sortimports.go index 85144db1dfa..1a0a7ebd9e4 100644 --- a/internal/imports/sortimports.go +++ b/internal/imports/sortimports.go @@ -52,6 +52,7 @@ func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. + // Ignore line directives. if len(d.Specs) > 0 { lastSpec := d.Specs[len(d.Specs)-1] lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line diff --git a/internal/imports/zstdlib.go b/internal/imports/zstdlib.go index 437fbb78dbd..31a75949cdc 100644 --- a/internal/imports/zstdlib.go +++ b/internal/imports/zstdlib.go @@ -1,11 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // Code generated by mkstdlib.go. DO NOT EDIT. package imports var stdlib = map[string][]string{ - "archive/tar": []string{ + "archive/tar": { "ErrFieldTooLong", "ErrHeader", + "ErrInsecurePath", "ErrWriteAfterClose", "ErrWriteTooLong", "FileInfoHeader", @@ -34,13 +39,14 @@ var stdlib = map[string][]string{ "TypeXHeader", "Writer", }, - "archive/zip": []string{ + "archive/zip": { "Compressor", "Decompressor", "Deflate", "ErrAlgorithm", "ErrChecksum", "ErrFormat", + "ErrInsecurePath", "File", "FileHeader", "FileInfoHeader", @@ -54,7 +60,7 @@ var stdlib = map[string][]string{ "Store", "Writer", }, - "bufio": []string{ + "bufio": { "ErrAdvanceTooFar", "ErrBadReadCount", "ErrBufferFull", @@ -81,14 +87,17 @@ var stdlib = map[string][]string{ "SplitFunc", "Writer", }, - "bytes": []string{ + "bytes": { "Buffer", + "Clone", "Compare", "Contains", "ContainsAny", "ContainsRune", "Count", "Cut", + "CutPrefix", + "CutSuffix", "Equal", "EqualFold", "ErrTooLarge", @@ -138,11 +147,11 @@ var stdlib = map[string][]string{ "TrimSpace", "TrimSuffix", }, - "compress/bzip2": []string{ + "compress/bzip2": { "NewReader", "StructuralError", }, - "compress/flate": []string{ + "compress/flate": { "BestCompression", "BestSpeed", "CorruptInputError", @@ -160,7 +169,7 @@ var stdlib = map[string][]string{ "WriteError", "Writer", }, - "compress/gzip": []string{ + "compress/gzip": { "BestCompression", "BestSpeed", "DefaultCompression", @@ -175,7 +184,7 @@ var stdlib = map[string][]string{ "Reader", "Writer", }, - "compress/lzw": []string{ + "compress/lzw": { "LSB", "MSB", "NewReader", @@ -184,7 +193,7 @@ var stdlib = map[string][]string{ "Reader", "Writer", }, - "compress/zlib": []string{ + "compress/zlib": { "BestCompression", "BestSpeed", "DefaultCompression", @@ -201,7 +210,7 @@ var stdlib = map[string][]string{ "Resetter", "Writer", }, - "container/heap": []string{ + "container/heap": { "Fix", "Init", "Interface", @@ -209,28 +218,31 @@ var stdlib = map[string][]string{ "Push", "Remove", }, - "container/list": []string{ + "container/list": { "Element", "List", "New", }, - "container/ring": []string{ + "container/ring": { "New", "Ring", }, - "context": []string{ + "context": { "Background", + "CancelCauseFunc", "CancelFunc", "Canceled", + "Cause", "Context", "DeadlineExceeded", "TODO", "WithCancel", + "WithCancelCause", "WithDeadline", "WithTimeout", "WithValue", }, - "crypto": []string{ + "crypto": { "BLAKE2b_256", "BLAKE2b_384", "BLAKE2b_512", @@ -259,12 +271,12 @@ var stdlib = map[string][]string{ "Signer", "SignerOpts", }, - "crypto/aes": []string{ + "crypto/aes": { "BlockSize", "KeySizeError", "NewCipher", }, - "crypto/cipher": []string{ + "crypto/cipher": { "AEAD", "Block", "BlockMode", @@ -281,13 +293,13 @@ var stdlib = map[string][]string{ "StreamReader", "StreamWriter", }, - "crypto/des": []string{ + "crypto/des": { "BlockSize", "KeySizeError", "NewCipher", "NewTripleDESCipher", }, - "crypto/dsa": []string{ + "crypto/dsa": { "ErrInvalidPublicKey", "GenerateKey", "GenerateParameters", @@ -302,7 +314,16 @@ var stdlib = map[string][]string{ "Sign", "Verify", }, - "crypto/ecdsa": []string{ + "crypto/ecdh": { + "Curve", + "P256", + "P384", + "P521", + "PrivateKey", + "PublicKey", + "X25519", + }, + "crypto/ecdsa": { "GenerateKey", "PrivateKey", "PublicKey", @@ -311,9 +332,10 @@ var stdlib = map[string][]string{ "Verify", "VerifyASN1", }, - "crypto/ed25519": []string{ + "crypto/ed25519": { "GenerateKey", "NewKeyFromSeed", + "Options", "PrivateKey", "PrivateKeySize", "PublicKey", @@ -322,8 +344,9 @@ var stdlib = map[string][]string{ "Sign", "SignatureSize", "Verify", + "VerifyWithOptions", }, - "crypto/elliptic": []string{ + "crypto/elliptic": { "Curve", "CurveParams", "GenerateKey", @@ -336,28 +359,28 @@ var stdlib = map[string][]string{ "Unmarshal", "UnmarshalCompressed", }, - "crypto/hmac": []string{ + "crypto/hmac": { "Equal", "New", }, - "crypto/md5": []string{ + "crypto/md5": { "BlockSize", "New", "Size", "Sum", }, - "crypto/rand": []string{ + "crypto/rand": { "Int", "Prime", "Read", "Reader", }, - "crypto/rc4": []string{ + "crypto/rc4": { "Cipher", "KeySizeError", "NewCipher", }, - "crypto/rsa": []string{ + "crypto/rsa": { "CRTValue", "DecryptOAEP", "DecryptPKCS1v15", @@ -382,13 +405,13 @@ var stdlib = map[string][]string{ "VerifyPKCS1v15", "VerifyPSS", }, - "crypto/sha1": []string{ + "crypto/sha1": { "BlockSize", "New", "Size", "Sum", }, - "crypto/sha256": []string{ + "crypto/sha256": { "BlockSize", "New", "New224", @@ -397,7 +420,7 @@ var stdlib = map[string][]string{ "Sum224", "Sum256", }, - "crypto/sha512": []string{ + "crypto/sha512": { "BlockSize", "New", "New384", @@ -412,17 +435,19 @@ var stdlib = map[string][]string{ "Sum512_224", "Sum512_256", }, - "crypto/subtle": []string{ + "crypto/subtle": { "ConstantTimeByteEq", "ConstantTimeCompare", "ConstantTimeCopy", "ConstantTimeEq", "ConstantTimeLessOrEq", "ConstantTimeSelect", + "XORBytes", }, - "crypto/tls": []string{ + "crypto/tls": { "Certificate", "CertificateRequestInfo", + "CertificateVerificationError", "CipherSuite", "CipherSuiteName", "CipherSuites", @@ -506,7 +531,7 @@ var stdlib = map[string][]string{ "X25519", "X509KeyPair", }, - "crypto/x509": []string{ + "crypto/x509": { "CANotAuthorizedForExtKeyUsage", "CANotAuthorizedForThisName", "CertPool", @@ -588,6 +613,7 @@ var stdlib = map[string][]string{ "ParsePKCS1PublicKey", "ParsePKCS8PrivateKey", "ParsePKIXPublicKey", + "ParseRevocationList", "PublicKeyAlgorithm", "PureEd25519", "RSA", @@ -599,6 +625,7 @@ var stdlib = map[string][]string{ "SHA384WithRSAPSS", "SHA512WithRSA", "SHA512WithRSAPSS", + "SetFallbackRoots", "SignatureAlgorithm", "SystemCertPool", "SystemRootsError", @@ -611,7 +638,7 @@ var stdlib = map[string][]string{ "UnknownSignatureAlgorithm", "VerifyOptions", }, - "crypto/x509/pkix": []string{ + "crypto/x509/pkix": { "AlgorithmIdentifier", "AttributeTypeAndValue", "AttributeTypeAndValueSET", @@ -623,7 +650,7 @@ var stdlib = map[string][]string{ "RevokedCertificate", "TBSCertificateList", }, - "database/sql": []string{ + "database/sql": { "ColumnType", "Conn", "DB", @@ -664,7 +691,7 @@ var stdlib = map[string][]string{ "Tx", "TxOptions", }, - "database/sql/driver": []string{ + "database/sql/driver": { "Bool", "ColumnConverter", "Conn", @@ -712,12 +739,12 @@ var stdlib = map[string][]string{ "ValueConverter", "Valuer", }, - "debug/buildinfo": []string{ + "debug/buildinfo": { "BuildInfo", "Read", "ReadFile", }, - "debug/dwarf": []string{ + "debug/dwarf": { "AddrType", "ArrayType", "Attr", @@ -968,7 +995,7 @@ var stdlib = map[string][]string{ "UnsupportedType", "VoidType", }, - "debug/elf": []string{ + "debug/elf": { "ARM_MAGIC_TRAMP_NUMBER", "COMPRESS_HIOS", "COMPRESS_HIPROC", @@ -1238,6 +1265,7 @@ var stdlib = map[string][]string{ "EM_L10M", "EM_LANAI", "EM_LATTICEMICO32", + "EM_LOONGARCH", "EM_M16C", "EM_M32", "EM_M32C", @@ -1820,6 +1848,96 @@ var stdlib = map[string][]string{ "R_ARM_XPC25", "R_INFO", "R_INFO32", + "R_LARCH", + "R_LARCH_32", + "R_LARCH_32_PCREL", + "R_LARCH_64", + "R_LARCH_ABS64_HI12", + "R_LARCH_ABS64_LO20", + "R_LARCH_ABS_HI20", + "R_LARCH_ABS_LO12", + "R_LARCH_ADD16", + "R_LARCH_ADD24", + "R_LARCH_ADD32", + "R_LARCH_ADD64", + "R_LARCH_ADD8", + "R_LARCH_B16", + "R_LARCH_B21", + "R_LARCH_B26", + "R_LARCH_COPY", + "R_LARCH_GNU_VTENTRY", + "R_LARCH_GNU_VTINHERIT", + "R_LARCH_GOT64_HI12", + "R_LARCH_GOT64_LO20", + "R_LARCH_GOT64_PC_HI12", + "R_LARCH_GOT64_PC_LO20", + "R_LARCH_GOT_HI20", + "R_LARCH_GOT_LO12", + "R_LARCH_GOT_PC_HI20", + "R_LARCH_GOT_PC_LO12", + "R_LARCH_IRELATIVE", + "R_LARCH_JUMP_SLOT", + "R_LARCH_MARK_LA", + "R_LARCH_MARK_PCREL", + "R_LARCH_NONE", + "R_LARCH_PCALA64_HI12", + "R_LARCH_PCALA64_LO20", + "R_LARCH_PCALA_HI20", + "R_LARCH_PCALA_LO12", + "R_LARCH_RELATIVE", + "R_LARCH_RELAX", + "R_LARCH_SOP_ADD", + "R_LARCH_SOP_AND", + "R_LARCH_SOP_ASSERT", + "R_LARCH_SOP_IF_ELSE", + "R_LARCH_SOP_NOT", + "R_LARCH_SOP_POP_32_S_0_10_10_16_S2", + "R_LARCH_SOP_POP_32_S_0_5_10_16_S2", + "R_LARCH_SOP_POP_32_S_10_12", + "R_LARCH_SOP_POP_32_S_10_16", + "R_LARCH_SOP_POP_32_S_10_16_S2", + "R_LARCH_SOP_POP_32_S_10_5", + "R_LARCH_SOP_POP_32_S_5_20", + "R_LARCH_SOP_POP_32_U", + "R_LARCH_SOP_POP_32_U_10_12", + "R_LARCH_SOP_PUSH_ABSOLUTE", + "R_LARCH_SOP_PUSH_DUP", + "R_LARCH_SOP_PUSH_GPREL", + "R_LARCH_SOP_PUSH_PCREL", + "R_LARCH_SOP_PUSH_PLT_PCREL", + "R_LARCH_SOP_PUSH_TLS_GD", + "R_LARCH_SOP_PUSH_TLS_GOT", + "R_LARCH_SOP_PUSH_TLS_TPREL", + "R_LARCH_SOP_SL", + "R_LARCH_SOP_SR", + "R_LARCH_SOP_SUB", + "R_LARCH_SUB16", + "R_LARCH_SUB24", + "R_LARCH_SUB32", + "R_LARCH_SUB64", + "R_LARCH_SUB8", + "R_LARCH_TLS_DTPMOD32", + "R_LARCH_TLS_DTPMOD64", + "R_LARCH_TLS_DTPREL32", + "R_LARCH_TLS_DTPREL64", + "R_LARCH_TLS_GD_HI20", + "R_LARCH_TLS_GD_PC_HI20", + "R_LARCH_TLS_IE64_HI12", + "R_LARCH_TLS_IE64_LO20", + "R_LARCH_TLS_IE64_PC_HI12", + "R_LARCH_TLS_IE64_PC_LO20", + "R_LARCH_TLS_IE_HI20", + "R_LARCH_TLS_IE_LO12", + "R_LARCH_TLS_IE_PC_HI20", + "R_LARCH_TLS_IE_PC_LO12", + "R_LARCH_TLS_LD_HI20", + "R_LARCH_TLS_LD_PC_HI20", + "R_LARCH_TLS_LE64_HI12", + "R_LARCH_TLS_LE64_LO20", + "R_LARCH_TLS_LE_HI20", + "R_LARCH_TLS_LE_LO12", + "R_LARCH_TLS_TPREL32", + "R_LARCH_TLS_TPREL64", "R_MIPS", "R_MIPS_16", "R_MIPS_26", @@ -1881,15 +1999,25 @@ var stdlib = map[string][]string{ "R_PPC64_ADDR16_HIGH", "R_PPC64_ADDR16_HIGHA", "R_PPC64_ADDR16_HIGHER", + "R_PPC64_ADDR16_HIGHER34", "R_PPC64_ADDR16_HIGHERA", + "R_PPC64_ADDR16_HIGHERA34", "R_PPC64_ADDR16_HIGHEST", + "R_PPC64_ADDR16_HIGHEST34", "R_PPC64_ADDR16_HIGHESTA", + "R_PPC64_ADDR16_HIGHESTA34", "R_PPC64_ADDR16_LO", "R_PPC64_ADDR16_LO_DS", "R_PPC64_ADDR24", "R_PPC64_ADDR32", "R_PPC64_ADDR64", "R_PPC64_ADDR64_LOCAL", + "R_PPC64_COPY", + "R_PPC64_D28", + "R_PPC64_D34", + "R_PPC64_D34_HA30", + "R_PPC64_D34_HI30", + "R_PPC64_D34_LO", "R_PPC64_DTPMOD64", "R_PPC64_DTPREL16", "R_PPC64_DTPREL16_DS", @@ -1903,8 +2031,12 @@ var stdlib = map[string][]string{ "R_PPC64_DTPREL16_HIGHESTA", "R_PPC64_DTPREL16_LO", "R_PPC64_DTPREL16_LO_DS", + "R_PPC64_DTPREL34", "R_PPC64_DTPREL64", "R_PPC64_ENTRY", + "R_PPC64_GLOB_DAT", + "R_PPC64_GNU_VTENTRY", + "R_PPC64_GNU_VTINHERIT", "R_PPC64_GOT16", "R_PPC64_GOT16_DS", "R_PPC64_GOT16_HA", @@ -1915,29 +2047,50 @@ var stdlib = map[string][]string{ "R_PPC64_GOT_DTPREL16_HA", "R_PPC64_GOT_DTPREL16_HI", "R_PPC64_GOT_DTPREL16_LO_DS", + "R_PPC64_GOT_DTPREL_PCREL34", + "R_PPC64_GOT_PCREL34", "R_PPC64_GOT_TLSGD16", "R_PPC64_GOT_TLSGD16_HA", "R_PPC64_GOT_TLSGD16_HI", "R_PPC64_GOT_TLSGD16_LO", + "R_PPC64_GOT_TLSGD_PCREL34", "R_PPC64_GOT_TLSLD16", "R_PPC64_GOT_TLSLD16_HA", "R_PPC64_GOT_TLSLD16_HI", "R_PPC64_GOT_TLSLD16_LO", + "R_PPC64_GOT_TLSLD_PCREL34", "R_PPC64_GOT_TPREL16_DS", "R_PPC64_GOT_TPREL16_HA", "R_PPC64_GOT_TPREL16_HI", "R_PPC64_GOT_TPREL16_LO_DS", + "R_PPC64_GOT_TPREL_PCREL34", "R_PPC64_IRELATIVE", "R_PPC64_JMP_IREL", "R_PPC64_JMP_SLOT", "R_PPC64_NONE", + "R_PPC64_PCREL28", + "R_PPC64_PCREL34", + "R_PPC64_PCREL_OPT", + "R_PPC64_PLT16_HA", + "R_PPC64_PLT16_HI", + "R_PPC64_PLT16_LO", "R_PPC64_PLT16_LO_DS", + "R_PPC64_PLT32", + "R_PPC64_PLT64", + "R_PPC64_PLTCALL", + "R_PPC64_PLTCALL_NOTOC", "R_PPC64_PLTGOT16", "R_PPC64_PLTGOT16_DS", "R_PPC64_PLTGOT16_HA", "R_PPC64_PLTGOT16_HI", "R_PPC64_PLTGOT16_LO", "R_PPC64_PLTGOT_LO_DS", + "R_PPC64_PLTREL32", + "R_PPC64_PLTREL64", + "R_PPC64_PLTSEQ", + "R_PPC64_PLTSEQ_NOTOC", + "R_PPC64_PLT_PCREL34", + "R_PPC64_PLT_PCREL34_NOTOC", "R_PPC64_REL14", "R_PPC64_REL14_BRNTAKEN", "R_PPC64_REL14_BRTAKEN", @@ -1945,13 +2098,28 @@ var stdlib = map[string][]string{ "R_PPC64_REL16DX_HA", "R_PPC64_REL16_HA", "R_PPC64_REL16_HI", + "R_PPC64_REL16_HIGH", + "R_PPC64_REL16_HIGHA", + "R_PPC64_REL16_HIGHER", + "R_PPC64_REL16_HIGHER34", + "R_PPC64_REL16_HIGHERA", + "R_PPC64_REL16_HIGHERA34", + "R_PPC64_REL16_HIGHEST", + "R_PPC64_REL16_HIGHEST34", + "R_PPC64_REL16_HIGHESTA", + "R_PPC64_REL16_HIGHESTA34", "R_PPC64_REL16_LO", "R_PPC64_REL24", "R_PPC64_REL24_NOTOC", + "R_PPC64_REL30", "R_PPC64_REL32", "R_PPC64_REL64", "R_PPC64_RELATIVE", + "R_PPC64_SECTOFF", "R_PPC64_SECTOFF_DS", + "R_PPC64_SECTOFF_HA", + "R_PPC64_SECTOFF_HI", + "R_PPC64_SECTOFF_LO", "R_PPC64_SECTOFF_LO_DS", "R_PPC64_TLS", "R_PPC64_TLSGD", @@ -1976,7 +2144,11 @@ var stdlib = map[string][]string{ "R_PPC64_TPREL16_HIGHESTA", "R_PPC64_TPREL16_LO", "R_PPC64_TPREL16_LO_DS", + "R_PPC64_TPREL34", "R_PPC64_TPREL64", + "R_PPC64_UADDR16", + "R_PPC64_UADDR32", + "R_PPC64_UADDR64", "R_PPC_ADDR14", "R_PPC_ADDR14_BRNTAKEN", "R_PPC_ADDR14_BRTAKEN", @@ -2315,7 +2487,7 @@ var stdlib = map[string][]string{ "Type", "Version", }, - "debug/gosym": []string{ + "debug/gosym": { "DecodingError", "Func", "LineTable", @@ -2327,7 +2499,7 @@ var stdlib = map[string][]string{ "UnknownFileError", "UnknownLineError", }, - "debug/macho": []string{ + "debug/macho": { "ARM64_RELOC_ADDEND", "ARM64_RELOC_BRANCH26", "ARM64_RELOC_GOT_LOAD_PAGE21", @@ -2457,13 +2629,20 @@ var stdlib = map[string][]string{ "X86_64_RELOC_TLV", "X86_64_RELOC_UNSIGNED", }, - "debug/pe": []string{ + "debug/pe": { "COFFSymbol", + "COFFSymbolAuxFormat5", "COFFSymbolSize", "DataDirectory", "File", "FileHeader", "FormatError", + "IMAGE_COMDAT_SELECT_ANY", + "IMAGE_COMDAT_SELECT_ASSOCIATIVE", + "IMAGE_COMDAT_SELECT_EXACT_MATCH", + "IMAGE_COMDAT_SELECT_LARGEST", + "IMAGE_COMDAT_SELECT_NODUPLICATES", + "IMAGE_COMDAT_SELECT_SAME_SIZE", "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", "IMAGE_DIRECTORY_ENTRY_BASERELOC", "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", @@ -2508,6 +2687,8 @@ var stdlib = map[string][]string{ "IMAGE_FILE_MACHINE_EBC", "IMAGE_FILE_MACHINE_I386", "IMAGE_FILE_MACHINE_IA64", + "IMAGE_FILE_MACHINE_LOONGARCH32", + "IMAGE_FILE_MACHINE_LOONGARCH64", "IMAGE_FILE_MACHINE_M32R", "IMAGE_FILE_MACHINE_MIPS16", "IMAGE_FILE_MACHINE_MIPSFPU", @@ -2515,6 +2696,9 @@ var stdlib = map[string][]string{ "IMAGE_FILE_MACHINE_POWERPC", "IMAGE_FILE_MACHINE_POWERPCFP", "IMAGE_FILE_MACHINE_R4000", + "IMAGE_FILE_MACHINE_RISCV128", + "IMAGE_FILE_MACHINE_RISCV32", + "IMAGE_FILE_MACHINE_RISCV64", "IMAGE_FILE_MACHINE_SH3", "IMAGE_FILE_MACHINE_SH3DSP", "IMAGE_FILE_MACHINE_SH4", @@ -2527,6 +2711,14 @@ var stdlib = map[string][]string{ "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", "IMAGE_FILE_SYSTEM", "IMAGE_FILE_UP_SYSTEM_ONLY", + "IMAGE_SCN_CNT_CODE", + "IMAGE_SCN_CNT_INITIALIZED_DATA", + "IMAGE_SCN_CNT_UNINITIALIZED_DATA", + "IMAGE_SCN_LNK_COMDAT", + "IMAGE_SCN_MEM_DISCARDABLE", + "IMAGE_SCN_MEM_EXECUTE", + "IMAGE_SCN_MEM_READ", + "IMAGE_SCN_MEM_WRITE", "IMAGE_SUBSYSTEM_EFI_APPLICATION", "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_ROM", @@ -2553,7 +2745,7 @@ var stdlib = map[string][]string{ "StringTable", "Symbol", }, - "debug/plan9obj": []string{ + "debug/plan9obj": { "ErrNoSymbols", "File", "FileHeader", @@ -2567,16 +2759,16 @@ var stdlib = map[string][]string{ "SectionHeader", "Sym", }, - "embed": []string{ + "embed": { "FS", }, - "encoding": []string{ + "encoding": { "BinaryMarshaler", "BinaryUnmarshaler", "TextMarshaler", "TextUnmarshaler", }, - "encoding/ascii85": []string{ + "encoding/ascii85": { "CorruptInputError", "Decode", "Encode", @@ -2584,7 +2776,7 @@ var stdlib = map[string][]string{ "NewDecoder", "NewEncoder", }, - "encoding/asn1": []string{ + "encoding/asn1": { "BitString", "ClassApplication", "ClassContextSpecific", @@ -2622,7 +2814,7 @@ var stdlib = map[string][]string{ "Unmarshal", "UnmarshalWithParams", }, - "encoding/base32": []string{ + "encoding/base32": { "CorruptInputError", "Encoding", "HexEncoding", @@ -2633,7 +2825,7 @@ var stdlib = map[string][]string{ "StdEncoding", "StdPadding", }, - "encoding/base64": []string{ + "encoding/base64": { "CorruptInputError", "Encoding", "NewDecoder", @@ -2646,7 +2838,10 @@ var stdlib = map[string][]string{ "StdPadding", "URLEncoding", }, - "encoding/binary": []string{ + "encoding/binary": { + "AppendByteOrder", + "AppendUvarint", + "AppendVarint", "BigEndian", "ByteOrder", "LittleEndian", @@ -2663,7 +2858,7 @@ var stdlib = map[string][]string{ "Varint", "Write", }, - "encoding/csv": []string{ + "encoding/csv": { "ErrBareQuote", "ErrFieldCount", "ErrQuote", @@ -2674,7 +2869,7 @@ var stdlib = map[string][]string{ "Reader", "Writer", }, - "encoding/gob": []string{ + "encoding/gob": { "CommonType", "Decoder", "Encoder", @@ -2685,7 +2880,7 @@ var stdlib = map[string][]string{ "Register", "RegisterName", }, - "encoding/hex": []string{ + "encoding/hex": { "Decode", "DecodeString", "DecodedLen", @@ -2699,7 +2894,7 @@ var stdlib = map[string][]string{ "NewDecoder", "NewEncoder", }, - "encoding/json": []string{ + "encoding/json": { "Compact", "Decoder", "Delim", @@ -2726,13 +2921,13 @@ var stdlib = map[string][]string{ "UnsupportedValueError", "Valid", }, - "encoding/pem": []string{ + "encoding/pem": { "Block", "Decode", "Encode", "EncodeToMemory", }, - "encoding/xml": []string{ + "encoding/xml": { "Attr", "CharData", "Comment", @@ -2766,13 +2961,14 @@ var stdlib = map[string][]string{ "UnmarshalerAttr", "UnsupportedTypeError", }, - "errors": []string{ + "errors": { "As", "Is", + "Join", "New", "Unwrap", }, - "expvar": []string{ + "expvar": { "Do", "Float", "Func", @@ -2789,7 +2985,7 @@ var stdlib = map[string][]string{ "String", "Var", }, - "flag": []string{ + "flag": { "Arg", "Args", "Bool", @@ -2822,6 +3018,7 @@ var stdlib = map[string][]string{ "Set", "String", "StringVar", + "TextVar", "Uint", "Uint64", "Uint64Var", @@ -2833,8 +3030,12 @@ var stdlib = map[string][]string{ "Visit", "VisitAll", }, - "fmt": []string{ + "fmt": { + "Append", + "Appendf", + "Appendln", "Errorf", + "FormatString", "Formatter", "Fprint", "Fprintf", @@ -2860,7 +3061,7 @@ var stdlib = map[string][]string{ "State", "Stringer", }, - "go/ast": []string{ + "go/ast": { "ArrayType", "AssignStmt", "Bad", @@ -2963,7 +3164,7 @@ var stdlib = map[string][]string{ "Visitor", "Walk", }, - "go/build": []string{ + "go/build": { "AllowBinary", "ArchChar", "Context", @@ -2980,7 +3181,7 @@ var stdlib = map[string][]string{ "Package", "ToolDir", }, - "go/build/constraint": []string{ + "go/build/constraint": { "AndExpr", "Expr", "IsGoBuild", @@ -2992,7 +3193,7 @@ var stdlib = map[string][]string{ "SyntaxError", "TagExpr", }, - "go/constant": []string{ + "go/constant": { "BinaryOp", "BitLen", "Bool", @@ -3033,7 +3234,7 @@ var stdlib = map[string][]string{ "Val", "Value", }, - "go/doc": []string{ + "go/doc": { "AllDecls", "AllMethods", "Example", @@ -3054,17 +3255,35 @@ var stdlib = map[string][]string{ "Type", "Value", }, - "go/format": []string{ + "go/doc/comment": { + "Block", + "Code", + "DefaultLookupPackage", + "Doc", + "DocLink", + "Heading", + "Italic", + "Link", + "LinkDef", + "List", + "ListItem", + "Paragraph", + "Parser", + "Plain", + "Printer", + "Text", + }, + "go/format": { "Node", "Source", }, - "go/importer": []string{ + "go/importer": { "Default", "For", "ForCompiler", "Lookup", }, - "go/parser": []string{ + "go/parser": { "AllErrors", "DeclarationErrors", "ImportsOnly", @@ -3079,7 +3298,7 @@ var stdlib = map[string][]string{ "SpuriousErrors", "Trace", }, - "go/printer": []string{ + "go/printer": { "CommentedNode", "Config", "Fprint", @@ -3089,7 +3308,7 @@ var stdlib = map[string][]string{ "TabIndent", "UseSpaces", }, - "go/scanner": []string{ + "go/scanner": { "Error", "ErrorHandler", "ErrorList", @@ -3098,7 +3317,7 @@ var stdlib = map[string][]string{ "ScanComments", "Scanner", }, - "go/token": []string{ + "go/token": { "ADD", "ADD_ASSIGN", "AND", @@ -3196,7 +3415,7 @@ var stdlib = map[string][]string{ "XOR", "XOR_ASSIGN", }, - "go/types": []string{ + "go/types": { "ArgumentError", "Array", "AssertableTo", @@ -3302,6 +3521,7 @@ var stdlib = map[string][]string{ "RecvOnly", "RelativeTo", "Rune", + "Satisfies", "Scope", "Selection", "SelectionKind", @@ -3347,17 +3567,17 @@ var stdlib = map[string][]string{ "WriteSignature", "WriteType", }, - "hash": []string{ + "hash": { "Hash", "Hash32", "Hash64", }, - "hash/adler32": []string{ + "hash/adler32": { "Checksum", "New", "Size", }, - "hash/crc32": []string{ + "hash/crc32": { "Castagnoli", "Checksum", "ChecksumIEEE", @@ -3371,7 +3591,7 @@ var stdlib = map[string][]string{ "Table", "Update", }, - "hash/crc64": []string{ + "hash/crc64": { "Checksum", "ECMA", "ISO", @@ -3381,7 +3601,7 @@ var stdlib = map[string][]string{ "Table", "Update", }, - "hash/fnv": []string{ + "hash/fnv": { "New128", "New128a", "New32", @@ -3389,16 +3609,18 @@ var stdlib = map[string][]string{ "New64", "New64a", }, - "hash/maphash": []string{ + "hash/maphash": { + "Bytes", "Hash", "MakeSeed", "Seed", + "String", }, - "html": []string{ + "html": { "EscapeString", "UnescapeString", }, - "html/template": []string{ + "html/template": { "CSS", "ErrAmbigContext", "ErrBadHTML", @@ -3436,7 +3658,7 @@ var stdlib = map[string][]string{ "URL", "URLQueryEscaper", }, - "image": []string{ + "image": { "Alpha", "Alpha16", "Black", @@ -3489,7 +3711,7 @@ var stdlib = map[string][]string{ "ZP", "ZR", }, - "image/color": []string{ + "image/color": { "Alpha", "Alpha16", "Alpha16Model", @@ -3525,11 +3747,11 @@ var stdlib = map[string][]string{ "YCbCrModel", "YCbCrToRGB", }, - "image/color/palette": []string{ + "image/color/palette": { "Plan9", "WebSafe", }, - "image/draw": []string{ + "image/draw": { "Draw", "DrawMask", "Drawer", @@ -3541,7 +3763,7 @@ var stdlib = map[string][]string{ "RGBA64Image", "Src", }, - "image/gif": []string{ + "image/gif": { "Decode", "DecodeAll", "DecodeConfig", @@ -3553,7 +3775,7 @@ var stdlib = map[string][]string{ "GIF", "Options", }, - "image/jpeg": []string{ + "image/jpeg": { "Decode", "DecodeConfig", "DefaultQuality", @@ -3563,7 +3785,7 @@ var stdlib = map[string][]string{ "Reader", "UnsupportedError", }, - "image/png": []string{ + "image/png": { "BestCompression", "BestSpeed", "CompressionLevel", @@ -3578,11 +3800,11 @@ var stdlib = map[string][]string{ "NoCompression", "UnsupportedError", }, - "index/suffixarray": []string{ + "index/suffixarray": { "Index", "New", }, - "io": []string{ + "io": { "ByteReader", "ByteScanner", "ByteWriter", @@ -3601,8 +3823,10 @@ var stdlib = map[string][]string{ "LimitedReader", "MultiReader", "MultiWriter", + "NewOffsetWriter", "NewSectionReader", "NopCloser", + "OffsetWriter", "Pipe", "PipeReader", "PipeWriter", @@ -3634,7 +3858,7 @@ var stdlib = map[string][]string{ "WriterAt", "WriterTo", }, - "io/fs": []string{ + "io/fs": { "DirEntry", "ErrClosed", "ErrExist", @@ -3669,6 +3893,7 @@ var stdlib = map[string][]string{ "ReadDirFile", "ReadFile", "ReadFileFS", + "SkipAll", "SkipDir", "Stat", "StatFS", @@ -3678,7 +3903,7 @@ var stdlib = map[string][]string{ "WalkDir", "WalkDirFunc", }, - "io/ioutil": []string{ + "io/ioutil": { "Discard", "NopCloser", "ReadAll", @@ -3688,7 +3913,7 @@ var stdlib = map[string][]string{ "TempFile", "WriteFile", }, - "log": []string{ + "log": { "Default", "Fatal", "Fatalf", @@ -3717,7 +3942,7 @@ var stdlib = map[string][]string{ "SetPrefix", "Writer", }, - "log/syslog": []string{ + "log/syslog": { "Dial", "LOG_ALERT", "LOG_AUTH", @@ -3752,7 +3977,7 @@ var stdlib = map[string][]string{ "Priority", "Writer", }, - "math": []string{ + "math": { "Abs", "Acos", "Acosh", @@ -3851,7 +4076,7 @@ var stdlib = map[string][]string{ "Y1", "Yn", }, - "math/big": []string{ + "math/big": { "Above", "Accuracy", "AwayFromZero", @@ -3878,7 +4103,7 @@ var stdlib = map[string][]string{ "ToZero", "Word", }, - "math/bits": []string{ + "math/bits": { "Add", "Add32", "Add64", @@ -3930,7 +4155,7 @@ var stdlib = map[string][]string{ "TrailingZeros8", "UintSize", }, - "math/cmplx": []string{ + "math/cmplx": { "Abs", "Acos", "Acosh", @@ -3959,7 +4184,7 @@ var stdlib = map[string][]string{ "Tan", "Tanh", }, - "math/rand": []string{ + "math/rand": { "ExpFloat64", "Float32", "Float64", @@ -3984,7 +4209,7 @@ var stdlib = map[string][]string{ "Uint64", "Zipf", }, - "mime": []string{ + "mime": { "AddExtensionType", "BEncoding", "ErrInvalidMediaParameter", @@ -3996,7 +4221,7 @@ var stdlib = map[string][]string{ "WordDecoder", "WordEncoder", }, - "mime/multipart": []string{ + "mime/multipart": { "ErrMessageTooLarge", "File", "FileHeader", @@ -4007,13 +4232,13 @@ var stdlib = map[string][]string{ "Reader", "Writer", }, - "mime/quotedprintable": []string{ + "mime/quotedprintable": { "NewReader", "NewWriter", "Reader", "Writer", }, - "net": []string{ + "net": { "Addr", "AddrError", "Buffers", @@ -4039,6 +4264,7 @@ var stdlib = map[string][]string{ "FlagLoopback", "FlagMulticast", "FlagPointToPoint", + "FlagRunning", "FlagUp", "Flags", "HardwareAddr", @@ -4115,7 +4341,7 @@ var stdlib = map[string][]string{ "UnixListener", "UnknownNetworkError", }, - "net/http": []string{ + "net/http": { "AllowQuerySemicolons", "CanonicalHeaderKey", "Client", @@ -4168,6 +4394,7 @@ var stdlib = map[string][]string{ "ListenAndServe", "ListenAndServeTLS", "LocalAddrContextKey", + "MaxBytesError", "MaxBytesHandler", "MaxBytesReader", "MethodConnect", @@ -4182,6 +4409,7 @@ var stdlib = map[string][]string{ "NewFileTransport", "NewRequest", "NewRequestWithContext", + "NewResponseController", "NewServeMux", "NoBody", "NotFound", @@ -4201,6 +4429,7 @@ var stdlib = map[string][]string{ "RedirectHandler", "Request", "Response", + "ResponseController", "ResponseWriter", "RoundTripper", "SameSite", @@ -4290,25 +4519,25 @@ var stdlib = map[string][]string{ "TrailerPrefix", "Transport", }, - "net/http/cgi": []string{ + "net/http/cgi": { "Handler", "Request", "RequestFromMap", "Serve", }, - "net/http/cookiejar": []string{ + "net/http/cookiejar": { "Jar", "New", "Options", "PublicSuffixList", }, - "net/http/fcgi": []string{ + "net/http/fcgi": { "ErrConnClosed", "ErrRequestAborted", "ProcessEnv", "Serve", }, - "net/http/httptest": []string{ + "net/http/httptest": { "DefaultRemoteAddr", "NewRecorder", "NewRequest", @@ -4318,7 +4547,7 @@ var stdlib = map[string][]string{ "ResponseRecorder", "Server", }, - "net/http/httptrace": []string{ + "net/http/httptrace": { "ClientTrace", "ContextClientTrace", "DNSDoneInfo", @@ -4327,7 +4556,7 @@ var stdlib = map[string][]string{ "WithClientTrace", "WroteRequestInfo", }, - "net/http/httputil": []string{ + "net/http/httputil": { "BufferPool", "ClientConn", "DumpRequest", @@ -4343,10 +4572,11 @@ var stdlib = map[string][]string{ "NewProxyClientConn", "NewServerConn", "NewSingleHostReverseProxy", + "ProxyRequest", "ReverseProxy", "ServerConn", }, - "net/http/pprof": []string{ + "net/http/pprof": { "Cmdline", "Handler", "Index", @@ -4354,7 +4584,7 @@ var stdlib = map[string][]string{ "Symbol", "Trace", }, - "net/mail": []string{ + "net/mail": { "Address", "AddressParser", "ErrHeaderNotPresent", @@ -4365,7 +4595,7 @@ var stdlib = map[string][]string{ "ParseDate", "ReadMessage", }, - "net/netip": []string{ + "net/netip": { "Addr", "AddrFrom16", "AddrFrom4", @@ -4374,6 +4604,8 @@ var stdlib = map[string][]string{ "AddrPortFrom", "IPv4Unspecified", "IPv6LinkLocalAllNodes", + "IPv6LinkLocalAllRouters", + "IPv6Loopback", "IPv6Unspecified", "MustParseAddr", "MustParseAddrPort", @@ -4384,7 +4616,7 @@ var stdlib = map[string][]string{ "Prefix", "PrefixFrom", }, - "net/rpc": []string{ + "net/rpc": { "Accept", "Call", "Client", @@ -4411,14 +4643,14 @@ var stdlib = map[string][]string{ "ServerCodec", "ServerError", }, - "net/rpc/jsonrpc": []string{ + "net/rpc/jsonrpc": { "Dial", "NewClient", "NewClientCodec", "NewServerCodec", "ServeConn", }, - "net/smtp": []string{ + "net/smtp": { "Auth", "CRAMMD5Auth", "Client", @@ -4428,7 +4660,7 @@ var stdlib = map[string][]string{ "SendMail", "ServerInfo", }, - "net/textproto": []string{ + "net/textproto": { "CanonicalMIMEHeaderKey", "Conn", "Dial", @@ -4444,10 +4676,11 @@ var stdlib = map[string][]string{ "TrimString", "Writer", }, - "net/url": []string{ + "net/url": { "Error", "EscapeError", "InvalidHostError", + "JoinPath", "Parse", "ParseQuery", "ParseRequestURI", @@ -4461,7 +4694,7 @@ var stdlib = map[string][]string{ "Userinfo", "Values", }, - "os": []string{ + "os": { "Args", "Chdir", "Chmod", @@ -4577,16 +4810,18 @@ var stdlib = map[string][]string{ "UserHomeDir", "WriteFile", }, - "os/exec": []string{ + "os/exec": { "Cmd", "Command", "CommandContext", + "ErrDot", "ErrNotFound", + "ErrWaitDelay", "Error", "ExitError", "LookPath", }, - "os/signal": []string{ + "os/signal": { "Ignore", "Ignored", "Notify", @@ -4594,7 +4829,7 @@ var stdlib = map[string][]string{ "Reset", "Stop", }, - "os/user": []string{ + "os/user": { "Current", "Group", "Lookup", @@ -4607,7 +4842,7 @@ var stdlib = map[string][]string{ "UnknownUserIdError", "User", }, - "path": []string{ + "path": { "Base", "Clean", "Dir", @@ -4618,7 +4853,7 @@ var stdlib = map[string][]string{ "Match", "Split", }, - "path/filepath": []string{ + "path/filepath": { "Abs", "Base", "Clean", @@ -4630,11 +4865,13 @@ var stdlib = map[string][]string{ "Glob", "HasPrefix", "IsAbs", + "IsLocal", "Join", "ListSeparator", "Match", "Rel", "Separator", + "SkipAll", "SkipDir", "Split", "SplitList", @@ -4644,12 +4881,12 @@ var stdlib = map[string][]string{ "WalkDir", "WalkFunc", }, - "plugin": []string{ + "plugin": { "Open", "Plugin", "Symbol", }, - "reflect": []string{ + "reflect": { "Append", "AppendSlice", "Array", @@ -4724,7 +4961,7 @@ var stdlib = map[string][]string{ "VisibleFields", "Zero", }, - "regexp": []string{ + "regexp": { "Compile", "CompilePOSIX", "Match", @@ -4735,7 +4972,7 @@ var stdlib = map[string][]string{ "QuoteMeta", "Regexp", }, - "regexp/syntax": []string{ + "regexp/syntax": { "ClassNL", "Compile", "DotNL", @@ -4756,9 +4993,11 @@ var stdlib = map[string][]string{ "ErrInvalidRepeatOp", "ErrInvalidRepeatSize", "ErrInvalidUTF8", + "ErrLarge", "ErrMissingBracket", "ErrMissingParen", "ErrMissingRepeatArgument", + "ErrNestingDepth", "ErrTrailingBackslash", "ErrUnexpectedParen", "Error", @@ -4813,7 +5052,7 @@ var stdlib = map[string][]string{ "UnicodeGroups", "WasDollar", }, - "runtime": []string{ + "runtime": { "BlockProfile", "BlockProfileRecord", "Breakpoint", @@ -4861,11 +5100,19 @@ var stdlib = map[string][]string{ "UnlockOSThread", "Version", }, - "runtime/cgo": []string{ + "runtime/cgo": { "Handle", + "Incomplete", "NewHandle", }, - "runtime/debug": []string{ + "runtime/coverage": { + "ClearCounters", + "WriteCounters", + "WriteCountersDir", + "WriteMeta", + "WriteMetaDir", + }, + "runtime/debug": { "BuildInfo", "BuildSetting", "FreeOSMemory", @@ -4878,12 +5125,13 @@ var stdlib = map[string][]string{ "SetGCPercent", "SetMaxStack", "SetMaxThreads", + "SetMemoryLimit", "SetPanicOnFault", "SetTraceback", "Stack", "WriteHeapDump", }, - "runtime/metrics": []string{ + "runtime/metrics": { "All", "Description", "Float64Histogram", @@ -4896,7 +5144,7 @@ var stdlib = map[string][]string{ "Value", "ValueKind", }, - "runtime/pprof": []string{ + "runtime/pprof": { "Do", "ForLabels", "Label", @@ -4912,7 +5160,7 @@ var stdlib = map[string][]string{ "WithLabels", "WriteHeapProfile", }, - "runtime/trace": []string{ + "runtime/trace": { "IsEnabled", "Log", "Logf", @@ -4924,7 +5172,8 @@ var stdlib = map[string][]string{ "Task", "WithRegion", }, - "sort": []string{ + "sort": { + "Find", "Float64Slice", "Float64s", "Float64sAreSorted", @@ -4947,7 +5196,7 @@ var stdlib = map[string][]string{ "Strings", "StringsAreSorted", }, - "strconv": []string{ + "strconv": { "AppendBool", "AppendFloat", "AppendInt", @@ -4987,7 +5236,7 @@ var stdlib = map[string][]string{ "Unquote", "UnquoteChar", }, - "strings": []string{ + "strings": { "Builder", "Clone", "Compare", @@ -4996,6 +5245,8 @@ var stdlib = map[string][]string{ "ContainsRune", "Count", "Cut", + "CutPrefix", + "CutSuffix", "EqualFold", "Fields", "FieldsFunc", @@ -5041,7 +5292,7 @@ var stdlib = map[string][]string{ "TrimSpace", "TrimSuffix", }, - "sync": []string{ + "sync": { "Cond", "Locker", "Map", @@ -5052,24 +5303,28 @@ var stdlib = map[string][]string{ "RWMutex", "WaitGroup", }, - "sync/atomic": []string{ + "sync/atomic": { "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr", + "Bool", "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapPointer", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", + "Int32", + "Int64", "LoadInt32", "LoadInt64", "LoadPointer", "LoadUint32", "LoadUint64", "LoadUintptr", + "Pointer", "StoreInt32", "StoreInt64", "StorePointer", @@ -5082,9 +5337,12 @@ var stdlib = map[string][]string{ "SwapUint32", "SwapUint64", "SwapUintptr", + "Uint32", + "Uint64", + "Uintptr", "Value", }, - "syscall": []string{ + "syscall": { "AF_ALG", "AF_APPLETALK", "AF_ARP", @@ -5158,6 +5416,7 @@ var stdlib = map[string][]string{ "AF_TIPC", "AF_UNIX", "AF_UNSPEC", + "AF_UTUN", "AF_VENDOR00", "AF_VENDOR01", "AF_VENDOR02", @@ -5496,20 +5755,25 @@ var stdlib = map[string][]string{ "CLOCAL", "CLONE_CHILD_CLEARTID", "CLONE_CHILD_SETTID", + "CLONE_CLEAR_SIGHAND", "CLONE_CSIGNAL", "CLONE_DETACHED", "CLONE_FILES", "CLONE_FS", + "CLONE_INTO_CGROUP", "CLONE_IO", + "CLONE_NEWCGROUP", "CLONE_NEWIPC", "CLONE_NEWNET", "CLONE_NEWNS", "CLONE_NEWPID", + "CLONE_NEWTIME", "CLONE_NEWUSER", "CLONE_NEWUTS", "CLONE_PARENT", "CLONE_PARENT_SETTID", "CLONE_PID", + "CLONE_PIDFD", "CLONE_PTRACE", "CLONE_SETTLS", "CLONE_SIGHAND", @@ -6052,6 +6316,7 @@ var stdlib = map[string][]string{ "EPROTONOSUPPORT", "EPROTOTYPE", "EPWROFF", + "EQFULL", "ERANGE", "EREMCHG", "EREMOTE", @@ -6478,6 +6743,7 @@ var stdlib = map[string][]string{ "F_DUPFD", "F_DUPFD_CLOEXEC", "F_EXLCK", + "F_FINDSIGS", "F_FLUSH_DATA", "F_FREEZE_FS", "F_FSCTL", @@ -6488,6 +6754,7 @@ var stdlib = map[string][]string{ "F_FSPRIV", "F_FSVOID", "F_FULLFSYNC", + "F_GETCODEDIR", "F_GETFD", "F_GETFL", "F_GETLEASE", @@ -6501,6 +6768,7 @@ var stdlib = map[string][]string{ "F_GETPATH_MTMINFO", "F_GETPIPE_SZ", "F_GETPROTECTIONCLASS", + "F_GETPROTECTIONLEVEL", "F_GETSIG", "F_GLOBAL_NOCACHE", "F_LOCK", @@ -6533,6 +6801,7 @@ var stdlib = map[string][]string{ "F_SETLK64", "F_SETLKW", "F_SETLKW64", + "F_SETLKWTIMEOUT", "F_SETLK_REMOTE", "F_SETNOSIGPIPE", "F_SETOWN", @@ -6542,9 +6811,11 @@ var stdlib = map[string][]string{ "F_SETSIG", "F_SETSIZE", "F_SHLCK", + "F_SINGLE_WRITER", "F_TEST", "F_THAW_FS", "F_TLOCK", + "F_TRANSCODEKEY", "F_ULOCK", "F_UNLCK", "F_UNLCKSYS", @@ -7740,12 +8011,20 @@ var stdlib = map[string][]string{ "NOFLSH", "NOTE_ABSOLUTE", "NOTE_ATTRIB", + "NOTE_BACKGROUND", "NOTE_CHILD", + "NOTE_CRITICAL", "NOTE_DELETE", "NOTE_EOF", "NOTE_EXEC", "NOTE_EXIT", "NOTE_EXITSTATUS", + "NOTE_EXIT_CSERROR", + "NOTE_EXIT_DECRYPTFAIL", + "NOTE_EXIT_DETAIL", + "NOTE_EXIT_DETAIL_MASK", + "NOTE_EXIT_MEMORY", + "NOTE_EXIT_REPARENTED", "NOTE_EXTEND", "NOTE_FFAND", "NOTE_FFCOPY", @@ -7754,6 +8033,7 @@ var stdlib = map[string][]string{ "NOTE_FFNOP", "NOTE_FFOR", "NOTE_FORK", + "NOTE_LEEWAY", "NOTE_LINK", "NOTE_LOWAT", "NOTE_NONE", @@ -7832,6 +8112,7 @@ var stdlib = map[string][]string{ "O_CREAT", "O_DIRECT", "O_DIRECTORY", + "O_DP_GETRAWENCRYPTED", "O_DSYNC", "O_EVTONLY", "O_EXCL", @@ -8121,6 +8402,7 @@ var stdlib = map[string][]string{ "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", + "RLIMIT_CPU_USAGE_MONITOR", "RLIMIT_DATA", "RLIMIT_FSIZE", "RLIMIT_NOFILE", @@ -8233,9 +8515,11 @@ var stdlib = map[string][]string{ "RTF_PROTO1", "RTF_PROTO2", "RTF_PROTO3", + "RTF_PROXY", "RTF_REINSTATE", "RTF_REJECT", "RTF_RNH_LOCKED", + "RTF_ROUTER", "RTF_SOURCE", "RTF_SRC", "RTF_STATIC", @@ -8754,6 +9038,7 @@ var stdlib = map[string][]string{ "SO_NO_OFFLOAD", "SO_NP_EXTENSIONS", "SO_NREAD", + "SO_NUMRCVPKT", "SO_NWRITE", "SO_OOBINLINE", "SO_OVERFLOWED", @@ -8923,6 +9208,7 @@ var stdlib = map[string][]string{ "SYS_CREAT", "SYS_CREATE_MODULE", "SYS_CSOPS", + "SYS_CSOPS_AUDITTOKEN", "SYS_DELETE", "SYS_DELETE_MODULE", "SYS_DUP", @@ -9109,6 +9395,7 @@ var stdlib = map[string][]string{ "SYS_JAIL_GET", "SYS_JAIL_REMOVE", "SYS_JAIL_SET", + "SYS_KAS_INFO", "SYS_KDEBUG_TRACE", "SYS_KENV", "SYS_KEVENT", @@ -9136,6 +9423,7 @@ var stdlib = map[string][]string{ "SYS_LCHMOD", "SYS_LCHOWN", "SYS_LCHOWN32", + "SYS_LEDGER", "SYS_LGETFH", "SYS_LGETXATTR", "SYS_LINK", @@ -9232,6 +9520,7 @@ var stdlib = map[string][]string{ "SYS_OPENAT", "SYS_OPENBSD_POLL", "SYS_OPEN_BY_HANDLE_AT", + "SYS_OPEN_DPROTECTED_NP", "SYS_OPEN_EXTENDED", "SYS_OPEN_NOCANCEL", "SYS_OVADVISE", @@ -9864,6 +10153,7 @@ var stdlib = map[string][]string{ "TCP_CONNECTIONTIMEOUT", "TCP_CORK", "TCP_DEFER_ACCEPT", + "TCP_ENABLE_ECN", "TCP_INFO", "TCP_KEEPALIVE", "TCP_KEEPCNT", @@ -9886,11 +10176,13 @@ var stdlib = map[string][]string{ "TCP_NODELAY", "TCP_NOOPT", "TCP_NOPUSH", + "TCP_NOTSENT_LOWAT", "TCP_NSTATES", "TCP_QUICKACK", "TCP_RXT_CONNDROPTIME", "TCP_RXT_FINDROP", "TCP_SACK_ENABLE", + "TCP_SENDMOREACKS", "TCP_SYNCNT", "TCP_VENDOR", "TCP_WINDOW_CLAMP", @@ -10234,7 +10526,7 @@ var stdlib = map[string][]string{ "XP1_UNI_RECV", "XP1_UNI_SEND", }, - "syscall/js": []string{ + "syscall/js": { "CopyBytesToGo", "CopyBytesToJS", "Error", @@ -10256,7 +10548,7 @@ var stdlib = map[string][]string{ "ValueError", "ValueOf", }, - "testing": []string{ + "testing": { "AllocsPerRun", "B", "Benchmark", @@ -10284,12 +10576,12 @@ var stdlib = map[string][]string{ "TB", "Verbose", }, - "testing/fstest": []string{ + "testing/fstest": { "MapFS", "MapFile", "TestFS", }, - "testing/iotest": []string{ + "testing/iotest": { "DataErrReader", "ErrReader", "ErrTimeout", @@ -10301,7 +10593,7 @@ var stdlib = map[string][]string{ "TimeoutReader", "TruncateWriter", }, - "testing/quick": []string{ + "testing/quick": { "Check", "CheckEqual", "CheckEqualError", @@ -10311,7 +10603,7 @@ var stdlib = map[string][]string{ "SetupError", "Value", }, - "text/scanner": []string{ + "text/scanner": { "Char", "Comment", "EOF", @@ -10334,7 +10626,7 @@ var stdlib = map[string][]string{ "String", "TokenString", }, - "text/tabwriter": []string{ + "text/tabwriter": { "AlignRight", "Debug", "DiscardEmptyColumns", @@ -10345,7 +10637,7 @@ var stdlib = map[string][]string{ "TabIndent", "Writer", }, - "text/template": []string{ + "text/template": { "ExecError", "FuncMap", "HTMLEscape", @@ -10363,7 +10655,7 @@ var stdlib = map[string][]string{ "Template", "URLQueryEscaper", }, - "text/template/parse": []string{ + "text/template/parse": { "ActionNode", "BoolNode", "BranchNode", @@ -10419,13 +10711,15 @@ var stdlib = map[string][]string{ "VariableNode", "WithNode", }, - "time": []string{ + "time": { "ANSIC", "After", "AfterFunc", "April", "August", "Date", + "DateOnly", + "DateTime", "December", "Duration", "February", @@ -10480,6 +10774,7 @@ var stdlib = map[string][]string{ "Tick", "Ticker", "Time", + "TimeOnly", "Timer", "Tuesday", "UTC", @@ -10491,7 +10786,7 @@ var stdlib = map[string][]string{ "Wednesday", "Weekday", }, - "unicode": []string{ + "unicode": { "ASCII_Hex_Digit", "Adlam", "Ahom", @@ -10777,14 +11072,15 @@ var stdlib = map[string][]string{ "Zp", "Zs", }, - "unicode/utf16": []string{ + "unicode/utf16": { + "AppendRune", "Decode", "DecodeRune", "Encode", "EncodeRune", "IsSurrogate", }, - "unicode/utf8": []string{ + "unicode/utf8": { "AppendRune", "DecodeLastRune", "DecodeLastRuneInString", @@ -10805,11 +11101,15 @@ var stdlib = map[string][]string{ "ValidRune", "ValidString", }, - "unsafe": []string{ + "unsafe": { + "Add", "Alignof", - "ArbitraryType", "Offsetof", "Pointer", "Sizeof", + "Slice", + "SliceData", + "String", + "StringData", }, } diff --git a/internal/jsonrpc2/conn.go b/internal/jsonrpc2/conn.go index ca7752d664a..529cfa5ded3 100644 --- a/internal/jsonrpc2/conn.go +++ b/internal/jsonrpc2/conn.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/debug/tag" + "golang.org/x/tools/internal/event/tag" ) // Conn is the common interface to jsonrpc clients and servers. diff --git a/internal/jsonrpc2/serve.go b/internal/jsonrpc2/serve.go index 4181bf10c33..cfbcbcb021c 100644 --- a/internal/jsonrpc2/serve.go +++ b/internal/jsonrpc2/serve.go @@ -8,6 +8,7 @@ import ( "context" "errors" "io" + "math" "net" "os" "time" @@ -100,7 +101,7 @@ func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeou }() // Max duration: ~290 years; surely that's long enough. - const forever = 1<<63 - 1 + const forever = math.MaxInt64 if idleTimeout <= 0 { idleTimeout = forever } diff --git a/internal/jsonrpc2/servertest/servertest.go b/internal/jsonrpc2/servertest/servertest.go index 392e084a9ad..37f8475bee2 100644 --- a/internal/jsonrpc2/servertest/servertest.go +++ b/internal/jsonrpc2/servertest/servertest.go @@ -50,7 +50,7 @@ func NewTCPServer(ctx context.Context, server jsonrpc2.StreamServer, framer json // Connect dials the test server and returns a jsonrpc2 Connection that is // ready for use. -func (s *TCPServer) Connect(ctx context.Context) jsonrpc2.Conn { +func (s *TCPServer) Connect(_ context.Context) jsonrpc2.Conn { netConn, err := net.Dial("tcp", s.Addr) if err != nil { panic(fmt.Sprintf("servertest: failed to connect to test instance: %v", err)) @@ -68,7 +68,7 @@ type PipeServer struct { } // NewPipeServer returns a test server that can be connected to via io.Pipes. -func NewPipeServer(ctx context.Context, server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer { +func NewPipeServer(server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer { if framer == nil { framer = jsonrpc2.NewRawStream } diff --git a/internal/jsonrpc2/servertest/servertest_test.go b/internal/jsonrpc2/servertest/servertest_test.go index 38fa21a24d9..1780d4f9147 100644 --- a/internal/jsonrpc2/servertest/servertest_test.go +++ b/internal/jsonrpc2/servertest/servertest_test.go @@ -26,7 +26,7 @@ func TestTestServer(t *testing.T) { server := jsonrpc2.HandlerServer(fakeHandler) tcpTS := NewTCPServer(ctx, server, nil) defer tcpTS.Close() - pipeTS := NewPipeServer(ctx, server, nil) + pipeTS := NewPipeServer(server, nil) defer pipeTS.Close() tests := []struct { diff --git a/internal/jsonrpc2_v2/conn.go b/internal/jsonrpc2_v2/conn.go index edcf0939f2a..04d1445cc92 100644 --- a/internal/jsonrpc2_v2/conn.go +++ b/internal/jsonrpc2_v2/conn.go @@ -7,13 +7,17 @@ package jsonrpc2 import ( "context" "encoding/json" + "errors" "fmt" "io" + "sync" "sync/atomic" + "time" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/debug/tag" + "golang.org/x/tools/internal/event/tag" ) // Binder builds a connection configuration. @@ -23,10 +27,21 @@ import ( type Binder interface { // Bind returns the ConnectionOptions to use when establishing the passed-in // Connection. - // The connection is not ready to use when Bind is called. - Bind(context.Context, *Connection) (ConnectionOptions, error) + // + // The connection is not ready to use when Bind is called, + // but Bind may close it without reading or writing to it. + Bind(context.Context, *Connection) ConnectionOptions } +// A BinderFunc implements the Binder interface for a standalone Bind function. +type BinderFunc func(context.Context, *Connection) ConnectionOptions + +func (f BinderFunc) Bind(ctx context.Context, c *Connection) ConnectionOptions { + return f(ctx, c) +} + +var _ Binder = BinderFunc(nil) + // ConnectionOptions holds the options for new connections. type ConnectionOptions struct { // Framer allows control over the message framing and encoding. @@ -38,6 +53,10 @@ type ConnectionOptions struct { // Handler is used as the queued message handler for inbound messages. // If nil, all responses will be ErrNotHandled. Handler Handler + // OnInternalError, if non-nil, is called with any internal errors that occur + // while serving the connection, such as protocol errors or invariant + // violations. (If nil, internal errors result in panics.) + OnInternalError func(error) } // Connection manages the jsonrpc2 protocol, connecting responses back to their @@ -45,102 +64,244 @@ type ConnectionOptions struct { // Connection is bidirectional; it does not have a designated server or client // end. type Connection struct { - seq int64 // must only be accessed using atomic operations - closer io.Closer - writerBox chan Writer - outgoingBox chan map[ID]chan<- *Response - incomingBox chan map[ID]*incoming - async *async + seq int64 // must only be accessed using atomic operations + + stateMu sync.Mutex + state inFlightState // accessed only in updateInFlight + done chan struct{} // closed (under stateMu) when state.closed is true and all goroutines have completed + + writer chan Writer // 1-buffered; stores the writer when not in use + + handler Handler + + onInternalError func(error) + onDone func() } -type AsyncCall struct { - id ID - response chan *Response // the channel a response will be delivered on - resultBox chan asyncResult - endSpan func() // close the tracing span when all processing for the message is complete +// inFlightState records the state of the incoming and outgoing calls on a +// Connection. +type inFlightState struct { + connClosing bool // true when the Connection's Close method has been called + reading bool // true while the readIncoming goroutine is running + readErr error // non-nil when the readIncoming goroutine exits (typically io.EOF) + writeErr error // non-nil if a call to the Writer has failed with a non-canceled Context + + // closer shuts down and cleans up the Reader and Writer state, ideally + // interrupting any Read or Write call that is currently blocked. It is closed + // when the state is idle and one of: connClosing is true, readErr is non-nil, + // or writeErr is non-nil. + // + // After the closer has been invoked, the closer field is set to nil + // and the closeErr field is simultaneously set to its result. + closer io.Closer + closeErr error // error returned from closer.Close + + outgoingCalls map[ID]*AsyncCall // calls only + outgoingNotifications int // # of notifications awaiting "write" + + // incoming stores the total number of incoming calls and notifications + // that have not yet written or processed a result. + incoming int + + incomingByID map[ID]*incomingRequest // calls only + + // handlerQueue stores the backlog of calls and notifications that were not + // already handled by a preempter. + // The queue does not include the request currently being handled (if any). + handlerQueue []*incomingRequest + handlerRunning bool +} + +// updateInFlight locks the state of the connection's in-flight requests, allows +// f to mutate that state, and closes the connection if it is idle and either +// is closing or has a read or write error. +func (c *Connection) updateInFlight(f func(*inFlightState)) { + c.stateMu.Lock() + defer c.stateMu.Unlock() + + s := &c.state + + f(s) + + select { + case <-c.done: + // The connection was already completely done at the start of this call to + // updateInFlight, so it must remain so. (The call to f should have noticed + // that and avoided making any updates that would cause the state to be + // non-idle.) + if !s.idle() { + panic("jsonrpc2_v2: updateInFlight transitioned to non-idle when already done") + } + return + default: + } + + if s.idle() && s.shuttingDown(ErrUnknown) != nil { + if s.closer != nil { + s.closeErr = s.closer.Close() + s.closer = nil // prevent duplicate Close calls + } + if s.reading { + // The readIncoming goroutine is still running. Our call to Close should + // cause it to exit soon, at which point it will make another call to + // updateInFlight, set s.reading to false, and mark the Connection done. + } else { + // The readIncoming goroutine has exited, or never started to begin with. + // Since everything else is idle, we're completely done. + if c.onDone != nil { + c.onDone() + } + close(c.done) + } + } } -type asyncResult struct { - result []byte - err error +// idle reports whether the connection is in a state with no pending calls or +// notifications. +// +// If idle returns true, the readIncoming goroutine may still be running, +// but no other goroutines are doing work on behalf of the connection. +func (s *inFlightState) idle() bool { + return len(s.outgoingCalls) == 0 && s.outgoingNotifications == 0 && s.incoming == 0 && !s.handlerRunning } -// incoming is used to track an incoming request as it is being handled -type incoming struct { - request *Request // the request being processed - baseCtx context.Context // a base context for the message processing - done func() // a function called when all processing for the message is complete - handleCtx context.Context // the context for handling the message, child of baseCtx - cancel func() // a function that cancels the handling context +// shuttingDown reports whether the connection is in a state that should +// disallow new (incoming and outgoing) calls. It returns either nil or +// an error that is or wraps the provided errClosing. +func (s *inFlightState) shuttingDown(errClosing error) error { + if s.connClosing { + // If Close has been called explicitly, it doesn't matter what state the + // Reader and Writer are in: we shouldn't be starting new work because the + // caller told us not to start new work. + return errClosing + } + if s.readErr != nil { + // If the read side of the connection is broken, we cannot read new call + // requests, and cannot read responses to our outgoing calls. + return fmt.Errorf("%w: %v", errClosing, s.readErr) + } + if s.writeErr != nil { + // If the write side of the connection is broken, we cannot write responses + // for incoming calls, and cannot write requests for outgoing calls. + return fmt.Errorf("%w: %v", errClosing, s.writeErr) + } + return nil +} + +// incomingRequest is used to track an incoming request as it is being handled +type incomingRequest struct { + *Request // the request being processed + ctx context.Context + cancel context.CancelFunc + endSpan func() // called (and set to nil) when the response is sent } // Bind returns the options unmodified. -func (o ConnectionOptions) Bind(context.Context, *Connection) (ConnectionOptions, error) { - return o, nil +func (o ConnectionOptions) Bind(context.Context, *Connection) ConnectionOptions { + return o } // newConnection creates a new connection and runs it. +// // This is used by the Dial and Serve functions to build the actual connection. -func newConnection(ctx context.Context, rwc io.ReadWriteCloser, binder Binder) (*Connection, error) { +// +// The connection is closed automatically (and its resources cleaned up) when +// the last request has completed after the underlying ReadWriteCloser breaks, +// but it may be stopped earlier by calling Close (for a clean shutdown). +func newConnection(bindCtx context.Context, rwc io.ReadWriteCloser, binder Binder, onDone func()) *Connection { + // TODO: Should we create a new event span here? + // This will propagate cancellation from ctx; should it? + ctx := notDone{bindCtx} + c := &Connection{ - closer: rwc, - writerBox: make(chan Writer, 1), - outgoingBox: make(chan map[ID]chan<- *Response, 1), - incomingBox: make(chan map[ID]*incoming, 1), - async: newAsync(), + state: inFlightState{closer: rwc}, + done: make(chan struct{}), + writer: make(chan Writer, 1), + onDone: onDone, } + // It's tempting to set a finalizer on c to verify that the state has gone + // idle when the connection becomes unreachable. Unfortunately, the Binder + // interface makes that unsafe: it allows the Handler to close over the + // Connection, which could create a reference cycle that would cause the + // Connection to become uncollectable. - options, err := binder.Bind(ctx, c) - if err != nil { - return nil, err - } - if options.Framer == nil { - options.Framer = HeaderFramer() - } - if options.Preempter == nil { - options.Preempter = defaultHandler{} - } - if options.Handler == nil { - options.Handler = defaultHandler{} - } - c.outgoingBox <- make(map[ID]chan<- *Response) - c.incomingBox <- make(map[ID]*incoming) - // the goroutines started here will continue until the underlying stream is closed - reader := options.Framer.Reader(rwc) - readToQueue := make(chan *incoming) - queueToDeliver := make(chan *incoming) - go c.readIncoming(ctx, reader, readToQueue) - go c.manageQueue(ctx, options.Preempter, readToQueue, queueToDeliver) - go c.deliverMessages(ctx, options.Handler, queueToDeliver) - - // releaseing the writer must be the last thing we do in case any requests - // are blocked waiting for the connection to be ready - c.writerBox <- options.Framer.Writer(rwc) - return c, nil + options := binder.Bind(bindCtx, c) + framer := options.Framer + if framer == nil { + framer = HeaderFramer() + } + c.handler = options.Handler + if c.handler == nil { + c.handler = defaultHandler{} + } + c.onInternalError = options.OnInternalError + + c.writer <- framer.Writer(rwc) + reader := framer.Reader(rwc) + + c.updateInFlight(func(s *inFlightState) { + select { + case <-c.done: + // Bind already closed the connection; don't start a goroutine to read it. + return + default: + } + + // The goroutine started here will continue until the underlying stream is closed. + // + // (If the Binder closed the Connection already, this should error out and + // return almost immediately.) + s.reading = true + go c.readIncoming(ctx, reader, options.Preempter) + }) + return c } // Notify invokes the target method but does not wait for a response. // The params will be marshaled to JSON before sending over the wire, and will // be handed to the method invoked. -func (c *Connection) Notify(ctx context.Context, method string, params interface{}) error { - notify, err := NewNotification(method, params) - if err != nil { - return fmt.Errorf("marshaling notify parameters: %v", err) - } +func (c *Connection) Notify(ctx context.Context, method string, params interface{}) (err error) { ctx, done := event.Start(ctx, method, tag.Method.Of(method), tag.RPCDirection.Of(tag.Outbound), ) - event.Metric(ctx, tag.Started.Of(1)) - err = c.write(ctx, notify) - switch { - case err != nil: - event.Label(ctx, tag.StatusCode.Of("ERROR")) - default: - event.Label(ctx, tag.StatusCode.Of("OK")) + attempted := false + + defer func() { + labelStatus(ctx, err) + done() + if attempted { + c.updateInFlight(func(s *inFlightState) { + s.outgoingNotifications-- + }) + } + }() + + c.updateInFlight(func(s *inFlightState) { + // If the connection is shutting down, allow outgoing notifications only if + // there is at least one call still in flight. The number of calls in flight + // cannot increase once shutdown begins, and allowing outgoing notifications + // may permit notifications that will cancel in-flight calls. + if len(s.outgoingCalls) == 0 && len(s.incomingByID) == 0 { + err = s.shuttingDown(ErrClientClosing) + if err != nil { + return + } + } + s.outgoingNotifications++ + attempted = true + }) + if err != nil { + return err } - done() - return err + + notify, err := NewNotification(method, params) + if err != nil { + return fmt.Errorf("marshaling notify parameters: %v", err) + } + + event.Metric(ctx, tag.Started.Of(1)) + return c.write(ctx, notify) } // Call invokes the target method and returns an object that can be used to await the response. @@ -149,339 +310,503 @@ func (c *Connection) Notify(ctx context.Context, method string, params interface // You do not have to wait for the response, it can just be ignored if not needed. // If sending the call failed, the response will be ready and have the error in it. func (c *Connection) Call(ctx context.Context, method string, params interface{}) *AsyncCall { - result := &AsyncCall{ - id: Int64ID(atomic.AddInt64(&c.seq, 1)), - resultBox: make(chan asyncResult, 1), - } - // generate a new request identifier - call, err := NewCall(result.id, method, params) - if err != nil { - //set the result to failed - result.resultBox <- asyncResult{err: fmt.Errorf("marshaling call parameters: %w", err)} - return result - } + // Generate a new request identifier. + id := Int64ID(atomic.AddInt64(&c.seq, 1)) ctx, endSpan := event.Start(ctx, method, tag.Method.Of(method), tag.RPCDirection.Of(tag.Outbound), - tag.RPCID.Of(fmt.Sprintf("%q", result.id)), + tag.RPCID.Of(fmt.Sprintf("%q", id)), ) - result.endSpan = endSpan + + ac := &AsyncCall{ + id: id, + ready: make(chan struct{}), + ctx: ctx, + endSpan: endSpan, + } + // When this method returns, either ac is retired, or the request has been + // written successfully and the call is awaiting a response (to be provided by + // the readIncoming goroutine). + + call, err := NewCall(ac.id, method, params) + if err != nil { + ac.retire(&Response{ID: id, Error: fmt.Errorf("marshaling call parameters: %w", err)}) + return ac + } + + c.updateInFlight(func(s *inFlightState) { + err = s.shuttingDown(ErrClientClosing) + if err != nil { + return + } + if s.outgoingCalls == nil { + s.outgoingCalls = make(map[ID]*AsyncCall) + } + s.outgoingCalls[ac.id] = ac + }) + if err != nil { + ac.retire(&Response{ID: id, Error: err}) + return ac + } + event.Metric(ctx, tag.Started.Of(1)) - // We have to add ourselves to the pending map before we send, otherwise we - // are racing the response. - // rchan is buffered in case the response arrives without a listener. - result.response = make(chan *Response, 1) - pending := <-c.outgoingBox - pending[result.id] = result.response - c.outgoingBox <- pending - // now we are ready to send if err := c.write(ctx, call); err != nil { - // sending failed, we will never get a response, so deliver a fake one - r, _ := NewResponse(result.id, nil, err) - c.incomingResponse(r) + // Sending failed. We will never get a response, so deliver a fake one if it + // wasn't already retired by the connection breaking. + c.updateInFlight(func(s *inFlightState) { + if s.outgoingCalls[ac.id] == ac { + delete(s.outgoingCalls, ac.id) + ac.retire(&Response{ID: id, Error: err}) + } else { + // ac was already retired by the readIncoming goroutine: + // perhaps our write raced with the Read side of the connection breaking. + } + }) } - return result + return ac +} + +type AsyncCall struct { + id ID + ready chan struct{} // closed after response has been set and span has been ended + response *Response + ctx context.Context // for event logging only + endSpan func() // close the tracing span when all processing for the message is complete } // ID used for this call. // This can be used to cancel the call if needed. -func (a *AsyncCall) ID() ID { return a.id } +func (ac *AsyncCall) ID() ID { return ac.id } // IsReady can be used to check if the result is already prepared. // This is guaranteed to return true on a result for which Await has already // returned, or a call that failed to send in the first place. -func (a *AsyncCall) IsReady() bool { +func (ac *AsyncCall) IsReady() bool { select { - case r := <-a.resultBox: - a.resultBox <- r + case <-ac.ready: return true default: return false } } -// Await the results of a Call. +// retire processes the response to the call. +func (ac *AsyncCall) retire(response *Response) { + select { + case <-ac.ready: + panic(fmt.Sprintf("jsonrpc2: retire called twice for ID %v", ac.id)) + default: + } + + ac.response = response + labelStatus(ac.ctx, response.Error) + ac.endSpan() + // Allow the trace context, which may retain a lot of reachable values, + // to be garbage-collected. + ac.ctx, ac.endSpan = nil, nil + + close(ac.ready) +} + +// Await waits for (and decodes) the results of a Call. // The response will be unmarshaled from JSON into the result. -func (a *AsyncCall) Await(ctx context.Context, result interface{}) error { - defer a.endSpan() - var r asyncResult +func (ac *AsyncCall) Await(ctx context.Context, result interface{}) error { select { - case response := <-a.response: - // response just arrived, prepare the result - switch { - case response.Error != nil: - r.err = response.Error - event.Label(ctx, tag.StatusCode.Of("ERROR")) - default: - r.result = response.Result - event.Label(ctx, tag.StatusCode.Of("OK")) - } - case r = <-a.resultBox: - // result already available case <-ctx.Done(): - event.Label(ctx, tag.StatusCode.Of("CANCELLED")) return ctx.Err() + case <-ac.ready: } - // refill the box for the next caller - a.resultBox <- r - // and unpack the result - if r.err != nil { - return r.err + if ac.response.Error != nil { + return ac.response.Error } - if result == nil || len(r.result) == 0 { + if result == nil { return nil } - return json.Unmarshal(r.result, result) + return json.Unmarshal(ac.response.Result, result) } // Respond delivers a response to an incoming Call. // // Respond must be called exactly once for any message for which a handler // returns ErrAsyncResponse. It must not be called for any other message. -func (c *Connection) Respond(id ID, result interface{}, rerr error) error { - pending := <-c.incomingBox - defer func() { c.incomingBox <- pending }() - entry, found := pending[id] - if !found { - return nil +func (c *Connection) Respond(id ID, result interface{}, err error) error { + var req *incomingRequest + c.updateInFlight(func(s *inFlightState) { + req = s.incomingByID[id] + }) + if req == nil { + return c.internalErrorf("Request not found for ID %v", id) + } + + if err == ErrAsyncResponse { + // Respond is supposed to supply the asynchronous response, so it would be + // confusing to call Respond with an error that promises to call Respond + // again. + err = c.internalErrorf("Respond called with ErrAsyncResponse for %q", req.Method) } - delete(pending, id) - return c.respond(entry, result, rerr) + return c.processResult("Respond", req, result, err) } -// Cancel is used to cancel an inbound message by ID, it does not cancel -// outgoing messages. -// This is only used inside a message handler that is layering a -// cancellation protocol on top of JSON RPC 2. -// It will not complain if the ID is not a currently active message, and it will -// not cause any messages that have not arrived yet with that ID to be +// Cancel cancels the Context passed to the Handle call for the inbound message +// with the given ID. +// +// Cancel will not complain if the ID is not a currently active message, and it +// will not cause any messages that have not arrived yet with that ID to be // cancelled. func (c *Connection) Cancel(id ID) { - pending := <-c.incomingBox - defer func() { c.incomingBox <- pending }() - if entry, found := pending[id]; found && entry.cancel != nil { - entry.cancel() - entry.cancel = nil + var req *incomingRequest + c.updateInFlight(func(s *inFlightState) { + req = s.incomingByID[id] + }) + if req != nil { + req.cancel() } } // Wait blocks until the connection is fully closed, but does not close it. func (c *Connection) Wait() error { - return c.async.wait() + var err error + <-c.done + c.updateInFlight(func(s *inFlightState) { + err = s.closeErr + }) + return err } -// Close can be used to close the underlying stream, and then wait for the connection to -// fully shut down. -// This does not cancel in flight requests, but waits for them to gracefully complete. +// Close stops accepting new requests, waits for in-flight requests and enqueued +// Handle calls to complete, and then closes the underlying stream. +// +// After the start of a Close, notification requests (that lack IDs and do not +// receive responses) will continue to be passed to the Preempter, but calls +// with IDs will receive immediate responses with ErrServerClosing, and no new +// requests (not even notifications!) will be enqueued to the Handler. func (c *Connection) Close() error { - // close the underlying stream - if err := c.closer.Close(); err != nil && !isClosingError(err) { - return err - } - // and then wait for it to cause the connection to close - if err := c.Wait(); err != nil && !isClosingError(err) { - return err - } - return nil + // Stop handling new requests, and interrupt the reader (by closing the + // connection) as soon as the active requests finish. + c.updateInFlight(func(s *inFlightState) { s.connClosing = true }) + + return c.Wait() } // readIncoming collects inbound messages from the reader and delivers them, either responding // to outgoing calls or feeding requests to the queue. -func (c *Connection) readIncoming(ctx context.Context, reader Reader, toQueue chan<- *incoming) { - defer close(toQueue) +func (c *Connection) readIncoming(ctx context.Context, reader Reader, preempter Preempter) { + var err error for { - // get the next message - // no lock is needed, this is the only reader - msg, n, err := reader.Read(ctx) + var ( + msg Message + n int64 + ) + msg, n, err = reader.Read(ctx) if err != nil { - // The stream failed, we cannot continue - c.async.setError(err) - return + break } + switch msg := msg.(type) { case *Request: - entry := &incoming{ - request: msg, - } - // add a span to the context for this request - labels := append(make([]label.Label, 0, 3), // make space for the id if present - tag.Method.Of(msg.Method), - tag.RPCDirection.Of(tag.Inbound), - ) - if msg.IsCall() { - labels = append(labels, tag.RPCID.Of(fmt.Sprintf("%q", msg.ID))) - } - entry.baseCtx, entry.done = event.Start(ctx, msg.Method, labels...) - event.Metric(entry.baseCtx, - tag.Started.Of(1), - tag.ReceivedBytes.Of(n)) - // in theory notifications cannot be cancelled, but we build them a cancel context anyway - entry.handleCtx, entry.cancel = context.WithCancel(entry.baseCtx) - // if the request is a call, add it to the incoming map so it can be - // cancelled by id - if msg.IsCall() { - pending := <-c.incomingBox - pending[msg.ID] = entry - c.incomingBox <- pending - } - // send the message to the incoming queue - toQueue <- entry + c.acceptRequest(ctx, msg, n, preempter) + case *Response: - // If method is not set, this should be a response, in which case we must - // have an id to send the response back to the caller. - c.incomingResponse(msg) + c.updateInFlight(func(s *inFlightState) { + if ac, ok := s.outgoingCalls[msg.ID]; ok { + delete(s.outgoingCalls, msg.ID) + ac.retire(msg) + } else { + // TODO: How should we report unexpected responses? + } + }) + + default: + c.internalErrorf("Read returned an unexpected message of type %T", msg) } } + + c.updateInFlight(func(s *inFlightState) { + s.reading = false + s.readErr = err + + // Retire any outgoing requests that were still in flight: with the Reader no + // longer being processed, they necessarily cannot receive a response. + for id, ac := range s.outgoingCalls { + ac.retire(&Response{ID: id, Error: err}) + } + s.outgoingCalls = nil + }) } -func (c *Connection) incomingResponse(msg *Response) { - pending := <-c.outgoingBox - response, ok := pending[msg.ID] - if ok { - delete(pending, msg.ID) +// acceptRequest either handles msg synchronously or enqueues it to be handled +// asynchronously. +func (c *Connection) acceptRequest(ctx context.Context, msg *Request, msgBytes int64, preempter Preempter) { + // Add a span to the context for this request. + labels := append(make([]label.Label, 0, 3), // Make space for the ID if present. + tag.Method.Of(msg.Method), + tag.RPCDirection.Of(tag.Inbound), + ) + if msg.IsCall() { + labels = append(labels, tag.RPCID.Of(fmt.Sprintf("%q", msg.ID))) } - c.outgoingBox <- pending - if response != nil { - response <- msg + ctx, endSpan := event.Start(ctx, msg.Method, labels...) + event.Metric(ctx, + tag.Started.Of(1), + tag.ReceivedBytes.Of(msgBytes)) + + // In theory notifications cannot be cancelled, but we build them a cancel + // context anyway. + ctx, cancel := context.WithCancel(ctx) + req := &incomingRequest{ + Request: msg, + ctx: ctx, + cancel: cancel, + endSpan: endSpan, } -} -// manageQueue reads incoming requests, attempts to process them with the preempter, or queue them -// up for normal handling. -func (c *Connection) manageQueue(ctx context.Context, preempter Preempter, fromRead <-chan *incoming, toDeliver chan<- *incoming) { - defer close(toDeliver) - q := []*incoming{} - ok := true - for { - var nextReq *incoming - if len(q) == 0 { - // no messages in the queue - // if we were closing, then we are done - if !ok { + // If the request is a call, add it to the incoming map so it can be + // cancelled (or responded) by ID. + var err error + c.updateInFlight(func(s *inFlightState) { + s.incoming++ + + if req.IsCall() { + if s.incomingByID[req.ID] != nil { + err = fmt.Errorf("%w: request ID %v already in use", ErrInvalidRequest, req.ID) + req.ID = ID{} // Don't misattribute this error to the existing request. return } - // not closing, but nothing in the queue, so just block waiting for a read - nextReq, ok = <-fromRead - } else { - // we have a non empty queue, so pick whichever of reading or delivering - // that we can make progress on - select { - case nextReq, ok = <-fromRead: - case toDeliver <- q[0]: - //TODO: this causes a lot of shuffling, should we use a growing ring buffer? compaction? - q = q[1:] + + if s.incomingByID == nil { + s.incomingByID = make(map[ID]*incomingRequest) } + s.incomingByID[req.ID] = req + + // When shutting down, reject all new Call requests, even if they could + // theoretically be handled by the preempter. The preempter could return + // ErrAsyncResponse, which would increase the amount of work in flight + // when we're trying to ensure that it strictly decreases. + err = s.shuttingDown(ErrServerClosing) } - if nextReq != nil { - // TODO: should we allow to limit the queue size? - var result interface{} - rerr := nextReq.handleCtx.Err() - if rerr == nil { - // only preempt if not already cancelled - result, rerr = preempter.Preempt(nextReq.handleCtx, nextReq.request) - } - switch { - case rerr == ErrNotHandled: - // message not handled, add it to the queue for the main handler - q = append(q, nextReq) - case rerr == ErrAsyncResponse: - // message handled but the response will come later - default: - // anything else means the message is fully handled - c.reply(nextReq, result, rerr) - } + }) + if err != nil { + c.processResult("acceptRequest", req, nil, err) + return + } + + if preempter != nil { + result, err := preempter.Preempt(req.ctx, req.Request) + + if req.IsCall() && errors.Is(err, ErrAsyncResponse) { + // This request will remain in flight until Respond is called for it. + return + } + + if !errors.Is(err, ErrNotHandled) { + c.processResult("Preempt", req, result, err) + return } } + + c.updateInFlight(func(s *inFlightState) { + // If the connection is shutting down, don't enqueue anything to the + // handler — not even notifications. That ensures that if the handler + // continues to make progress, it will eventually become idle and + // close the connection. + err = s.shuttingDown(ErrServerClosing) + if err != nil { + return + } + + // We enqueue requests that have not been preempted to an unbounded slice. + // Unfortunately, we cannot in general limit the size of the handler + // queue: we have to read every response that comes in on the wire + // (because it may be responding to a request issued by, say, an + // asynchronous handler), and in order to get to that response we have + // to read all of the requests that came in ahead of it. + s.handlerQueue = append(s.handlerQueue, req) + if !s.handlerRunning { + // We start the handleAsync goroutine when it has work to do, and let it + // exit when the queue empties. + // + // Otherwise, in order to synchronize the handler we would need some other + // goroutine (probably readIncoming?) to explicitly wait for handleAsync + // to finish, and that would complicate error reporting: either the error + // report from the goroutine would be blocked on the handler emptying its + // queue (which was tried, and introduced a deadlock detected by + // TestCloseCallRace), or the error would need to be reported separately + // from synchronizing completion. Allowing the handler goroutine to exit + // when idle seems simpler than trying to implement either of those + // alternatives correctly. + s.handlerRunning = true + go c.handleAsync() + } + }) + if err != nil { + c.processResult("acceptRequest", req, nil, err) + } } -func (c *Connection) deliverMessages(ctx context.Context, handler Handler, fromQueue <-chan *incoming) { - defer c.async.done() - for entry := range fromQueue { - // cancel any messages in the queue that we have a pending cancel for - var result interface{} - rerr := entry.handleCtx.Err() - if rerr == nil { - // only deliver if not already cancelled - result, rerr = handler.Handle(entry.handleCtx, entry.request) +// handleAsync invokes the handler on the requests in the handler queue +// sequentially until the queue is empty. +func (c *Connection) handleAsync() { + for { + var req *incomingRequest + c.updateInFlight(func(s *inFlightState) { + if len(s.handlerQueue) > 0 { + req, s.handlerQueue = s.handlerQueue[0], s.handlerQueue[1:] + } else { + s.handlerRunning = false + } + }) + if req == nil { + return } - switch { - case rerr == ErrNotHandled: - // message not handled, report it back to the caller as an error - c.reply(entry, nil, fmt.Errorf("%w: %q", ErrMethodNotFound, entry.request.Method)) - case rerr == ErrAsyncResponse: - // message handled but the response will come later - default: - c.reply(entry, result, rerr) + + // Only deliver to the Handler if not already canceled. + if err := req.ctx.Err(); err != nil { + c.updateInFlight(func(s *inFlightState) { + if s.writeErr != nil { + // Assume that req.ctx was canceled due to s.writeErr. + // TODO(#51365): use a Context API to plumb this through req.ctx. + err = fmt.Errorf("%w: %v", ErrServerClosing, s.writeErr) + } + }) + c.processResult("handleAsync", req, nil, err) + continue } + + result, err := c.handler.Handle(req.ctx, req.Request) + c.processResult(c.handler, req, result, err) } } -// reply is used to reply to an incoming request that has just been handled -func (c *Connection) reply(entry *incoming, result interface{}, rerr error) { - if entry.request.IsCall() { - // we have a call finishing, remove it from the incoming map - pending := <-c.incomingBox - defer func() { c.incomingBox <- pending }() - delete(pending, entry.request.ID) +// processResult processes the result of a request and, if appropriate, sends a response. +func (c *Connection) processResult(from interface{}, req *incomingRequest, result interface{}, err error) error { + switch err { + case ErrAsyncResponse: + if !req.IsCall() { + return c.internalErrorf("%#v returned ErrAsyncResponse for a %q Request without an ID", from, req.Method) + } + return nil // This request is still in flight, so don't record the result yet. + case ErrNotHandled, ErrMethodNotFound: + // Add detail describing the unhandled method. + err = fmt.Errorf("%w: %q", ErrMethodNotFound, req.Method) } - if err := c.respond(entry, result, rerr); err != nil { - // no way to propagate this error - //TODO: should we do more than just log it? - event.Error(entry.baseCtx, "jsonrpc2 message delivery failed", err) + + if req.endSpan == nil { + return c.internalErrorf("%#v produced a duplicate %q Response", from, req.Method) } -} -// respond sends a response. -// This is the code shared between reply and SendResponse. -func (c *Connection) respond(entry *incoming, result interface{}, rerr error) error { - var err error - if entry.request.IsCall() { - // send the response - if result == nil && rerr == nil { - // call with no response, send an error anyway - rerr = fmt.Errorf("%w: %q produced no response", ErrInternal, entry.request.Method) + if result != nil && err != nil { + c.internalErrorf("%#v returned a non-nil result with a non-nil error for %s:\n%v\n%#v", from, req.Method, err, result) + result = nil // Discard the spurious result and respond with err. + } + + if req.IsCall() { + if result == nil && err == nil { + err = c.internalErrorf("%#v returned a nil result and nil error for a %q Request that requires a Response", from, req.Method) } - var response *Response - response, err = NewResponse(entry.request.ID, result, rerr) - if err == nil { - // we write the response with the base context, in case the message was cancelled - err = c.write(entry.baseCtx, response) + + response, respErr := NewResponse(req.ID, result, err) + + // The caller could theoretically reuse the request's ID as soon as we've + // sent the response, so ensure that it is removed from the incoming map + // before sending. + c.updateInFlight(func(s *inFlightState) { + delete(s.incomingByID, req.ID) + }) + if respErr == nil { + writeErr := c.write(notDone{req.ctx}, response) + if err == nil { + err = writeErr + } + } else { + err = c.internalErrorf("%#v returned a malformed result for %q: %w", from, req.Method, respErr) } - } else { - switch { - case rerr != nil: - // notification failed - err = fmt.Errorf("%w: %q notification failed: %v", ErrInternal, entry.request.Method, rerr) - rerr = nil - case result != nil: - //notification produced a response, which is an error - err = fmt.Errorf("%w: %q produced unwanted response", ErrInternal, entry.request.Method) - default: - // normal notification finish + } else { // req is a notification + if result != nil { + err = c.internalErrorf("%#v returned a non-nil result for a %q Request without an ID", from, req.Method) + } else if err != nil { + err = fmt.Errorf("%w: %q notification failed: %v", ErrInternal, req.Method, err) + } + if err != nil { + // TODO: can/should we do anything with this error beyond writing it to the event log? + // (Is this the right label to attach to the log?) + event.Label(req.ctx, keys.Err.Of(err)) } } - switch { - case rerr != nil || err != nil: - event.Label(entry.baseCtx, tag.StatusCode.Of("ERROR")) - default: - event.Label(entry.baseCtx, tag.StatusCode.Of("OK")) - } - // and just to be clean, invoke and clear the cancel if needed - if entry.cancel != nil { - entry.cancel() - entry.cancel = nil - } - // mark the entire request processing as done - entry.done() - return err + + labelStatus(req.ctx, err) + + // Cancel the request and finalize the event span to free any associated resources. + req.cancel() + req.endSpan() + req.endSpan = nil + c.updateInFlight(func(s *inFlightState) { + if s.incoming == 0 { + panic("jsonrpc2_v2: processResult called when incoming count is already zero") + } + s.incoming-- + }) + return nil } // write is used by all things that write outgoing messages, including replies. // it makes sure that writes are atomic func (c *Connection) write(ctx context.Context, msg Message) error { - writer := <-c.writerBox - defer func() { c.writerBox <- writer }() + writer := <-c.writer + defer func() { c.writer <- writer }() n, err := writer.Write(ctx, msg) event.Metric(ctx, tag.SentBytes.Of(n)) + + if err != nil && ctx.Err() == nil { + // The call to Write failed, and since ctx.Err() is nil we can't attribute + // the failure (even indirectly) to Context cancellation. The writer appears + // to be broken, and future writes are likely to also fail. + // + // If the read side of the connection is also broken, we might not even be + // able to receive cancellation notifications. Since we can't reliably write + // the results of incoming calls and can't receive explicit cancellations, + // cancel the calls now. + c.updateInFlight(func(s *inFlightState) { + if s.writeErr == nil { + s.writeErr = err + for _, r := range s.incomingByID { + r.cancel() + } + } + }) + } + return err } + +// internalErrorf reports an internal error. By default it panics, but if +// c.onInternalError is non-nil it instead calls that and returns an error +// wrapping ErrInternal. +func (c *Connection) internalErrorf(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + if c.onInternalError == nil { + panic("jsonrpc2: " + err.Error()) + } + c.onInternalError(err) + + return fmt.Errorf("%w: %v", ErrInternal, err) +} + +// labelStatus labels the status of the event in ctx based on whether err is nil. +func labelStatus(ctx context.Context, err error) { + if err == nil { + event.Label(ctx, tag.StatusCode.Of("OK")) + } else { + event.Label(ctx, tag.StatusCode.Of("ERROR")) + } +} + +// notDone is a context.Context wrapper that returns a nil Done channel. +type notDone struct{ ctx context.Context } + +func (ic notDone) Value(key interface{}) interface{} { + return ic.ctx.Value(key) +} + +func (notDone) Done() <-chan struct{} { return nil } +func (notDone) Err() error { return nil } +func (notDone) Deadline() (time.Time, bool) { return time.Time{}, false } diff --git a/internal/jsonrpc2_v2/frame.go b/internal/jsonrpc2_v2/frame.go index b2b7dc1a172..e4248328132 100644 --- a/internal/jsonrpc2_v2/frame.go +++ b/internal/jsonrpc2_v2/frame.go @@ -120,6 +120,12 @@ func (r *headerReader) Read(ctx context.Context) (Message, int64, error) { line, err := r.in.ReadString('\n') total += int64(len(line)) if err != nil { + if err == io.EOF { + if total == 0 { + return nil, 0, io.EOF + } + err = io.ErrUnexpectedEOF + } return nil, total, fmt.Errorf("failed reading header line: %w", err) } line = strings.TrimSpace(line) diff --git a/internal/jsonrpc2_v2/jsonrpc2.go b/internal/jsonrpc2_v2/jsonrpc2.go index e685584427a..e9164b0bc95 100644 --- a/internal/jsonrpc2_v2/jsonrpc2.go +++ b/internal/jsonrpc2_v2/jsonrpc2.go @@ -47,6 +47,15 @@ type Preempter interface { Preempt(ctx context.Context, req *Request) (result interface{}, err error) } +// A PreempterFunc implements the Preempter interface for a standalone Preempt function. +type PreempterFunc func(ctx context.Context, req *Request) (interface{}, error) + +func (f PreempterFunc) Preempt(ctx context.Context, req *Request) (interface{}, error) { + return f(ctx, req) +} + +var _ Preempter = PreempterFunc(nil) + // Handler handles messages on a connection. type Handler interface { // Handle is invoked sequentially for each incoming request that has not @@ -75,12 +84,15 @@ func (defaultHandler) Handle(context.Context, *Request) (interface{}, error) { return nil, ErrNotHandled } +// A HandlerFunc implements the Handler interface for a standalone Handle function. type HandlerFunc func(ctx context.Context, req *Request) (interface{}, error) func (f HandlerFunc) Handle(ctx context.Context, req *Request) (interface{}, error) { return f(ctx, req) } +var _ Handler = HandlerFunc(nil) + // async is a small helper for operations with an asynchronous result that you // can wait for. type async struct { diff --git a/internal/jsonrpc2_v2/jsonrpc2_test.go b/internal/jsonrpc2_v2/jsonrpc2_test.go index 8e90c235f93..dd8d09c8870 100644 --- a/internal/jsonrpc2_v2/jsonrpc2_test.go +++ b/internal/jsonrpc2_v2/jsonrpc2_test.go @@ -11,7 +11,6 @@ import ( "path" "reflect" "testing" - "time" "golang.org/x/tools/internal/event/export/eventtest" jsonrpc2 "golang.org/x/tools/internal/jsonrpc2_v2" @@ -77,7 +76,7 @@ type binder struct { type handler struct { conn *jsonrpc2.Connection accumulator int - waitersBox chan map[string]chan struct{} + waiters chan map[string]chan struct{} calls map[string]*jsonrpc2.AsyncCall } @@ -137,10 +136,7 @@ func testConnection(t *testing.T, framer jsonrpc2.Framer) { if err != nil { t.Fatal(err) } - server, err := jsonrpc2.Serve(ctx, listener, binder{framer, nil}) - if err != nil { - t.Fatal(err) - } + server := jsonrpc2.NewServer(ctx, listener, binder{framer, nil}) defer func() { listener.Close() server.Wait() @@ -254,13 +250,13 @@ func verifyResults(t *testing.T, method string, results interface{}, expect inte } } -func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) (jsonrpc2.ConnectionOptions, error) { +func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) jsonrpc2.ConnectionOptions { h := &handler{ - conn: conn, - waitersBox: make(chan map[string]chan struct{}, 1), - calls: make(map[string]*jsonrpc2.AsyncCall), + conn: conn, + waiters: make(chan map[string]chan struct{}, 1), + calls: make(map[string]*jsonrpc2.AsyncCall), } - h.waitersBox <- make(map[string]chan struct{}) + h.waiters <- make(map[string]chan struct{}) if b.runTest != nil { go b.runTest(h) } @@ -268,12 +264,12 @@ func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) (jsonrpc2.C Framer: b.framer, Preempter: h, Handler: h, - }, nil + } } func (h *handler) waiter(name string) chan struct{} { - waiters := <-h.waitersBox - defer func() { h.waitersBox <- waiters }() + waiters := <-h.waiters + defer func() { h.waiters <- waiters }() waiter, found := waiters[name] if !found { waiter = make(chan struct{}) @@ -370,8 +366,6 @@ func (h *handler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface{ return true, nil case <-ctx.Done(): return nil, ctx.Err() - case <-time.After(time.Second): - return nil, fmt.Errorf("wait for %q timed out", name) } case "fork": var name string @@ -385,8 +379,6 @@ func (h *handler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface{ h.conn.Respond(req.ID, true, nil) case <-ctx.Done(): h.conn.Respond(req.ID, nil, ctx.Err()) - case <-time.After(time.Second): - h.conn.Respond(req.ID, nil, fmt.Errorf("wait for %q timed out", name)) } }() return nil, jsonrpc2.ErrAsyncResponse diff --git a/internal/jsonrpc2_v2/net.go b/internal/jsonrpc2_v2/net.go index f1e2b0c7b36..15d0aea3af0 100644 --- a/internal/jsonrpc2_v2/net.go +++ b/internal/jsonrpc2_v2/net.go @@ -9,7 +9,6 @@ import ( "io" "net" "os" - "time" ) // This file contains implementations of the transport primitives that use the standard network @@ -36,7 +35,7 @@ type netListener struct { } // Accept blocks waiting for an incoming connection to the listener. -func (l *netListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) { +func (l *netListener) Accept(context.Context) (io.ReadWriteCloser, error) { return l.net.Accept() } @@ -56,9 +55,7 @@ func (l *netListener) Close() error { // Dialer returns a dialer that can be used to connect to the listener. func (l *netListener) Dialer() Dialer { - return NetDialer(l.net.Addr().Network(), l.net.Addr().String(), net.Dialer{ - Timeout: 5 * time.Second, - }) + return NetDialer(l.net.Addr().Network(), l.net.Addr().String(), net.Dialer{}) } // NetDialer returns a Dialer using the supplied standard network dialer. @@ -98,15 +95,19 @@ type netPiper struct { } // Accept blocks waiting for an incoming connection to the listener. -func (l *netPiper) Accept(ctx context.Context) (io.ReadWriteCloser, error) { - // block until we have a listener, or are closed or cancelled +func (l *netPiper) Accept(context.Context) (io.ReadWriteCloser, error) { + // Block until the pipe is dialed or the listener is closed, + // preferring the latter if already closed at the start of Accept. + select { + case <-l.done: + return nil, errClosed + default: + } select { case rwc := <-l.dialed: return rwc, nil case <-l.done: - return nil, io.EOF - case <-ctx.Done(): - return nil, ctx.Err() + return nil, errClosed } } @@ -124,6 +125,14 @@ func (l *netPiper) Dialer() Dialer { func (l *netPiper) Dial(ctx context.Context) (io.ReadWriteCloser, error) { client, server := net.Pipe() - l.dialed <- server - return client, nil + + select { + case l.dialed <- server: + return client, nil + + case <-l.done: + client.Close() + server.Close() + return nil, errClosed + } } diff --git a/internal/jsonrpc2_v2/serve.go b/internal/jsonrpc2_v2/serve.go index 646267b5573..5e082735469 100644 --- a/internal/jsonrpc2_v2/serve.go +++ b/internal/jsonrpc2_v2/serve.go @@ -6,12 +6,11 @@ package jsonrpc2 import ( "context" - "errors" + "fmt" "io" "runtime" - "strings" "sync" - "syscall" + "sync/atomic" "time" ) @@ -42,35 +41,43 @@ type Server struct { listener Listener binder Binder async *async + + shutdownOnce sync.Once + closing int32 // atomic: set to nonzero when Shutdown is called } // Dial uses the dialer to make a new connection, wraps the returned // reader and writer using the framer to make a stream, and then builds // a connection on top of that stream using the binder. +// +// The returned Connection will operate independently using the Preempter and/or +// Handler provided by the Binder, and will release its own resources when the +// connection is broken, but the caller may Close it earlier to stop accepting +// (or sending) new requests. func Dial(ctx context.Context, dialer Dialer, binder Binder) (*Connection, error) { // dial a server rwc, err := dialer.Dial(ctx) if err != nil { return nil, err } - return newConnection(ctx, rwc, binder) + return newConnection(ctx, rwc, binder, nil), nil } -// Serve starts a new server listening for incoming connections and returns +// NewServer starts a new server listening for incoming connections and returns // it. // This returns a fully running and connected server, it does not block on // the listener. // You can call Wait to block on the server, or Shutdown to get the sever to // terminate gracefully. // To notice incoming connections, use an intercepting Binder. -func Serve(ctx context.Context, listener Listener, binder Binder) (*Server, error) { +func NewServer(ctx context.Context, listener Listener, binder Binder) *Server { server := &Server{ listener: listener, binder: binder, async: newAsync(), } go server.run(ctx) - return server, nil + return server } // Wait returns only when the server has shut down. @@ -78,173 +85,160 @@ func (s *Server) Wait() error { return s.async.wait() } +// Shutdown informs the server to stop accepting new connections. +func (s *Server) Shutdown() { + s.shutdownOnce.Do(func() { + atomic.StoreInt32(&s.closing, 1) + s.listener.Close() + }) +} + // run accepts incoming connections from the listener, // If IdleTimeout is non-zero, run exits after there are no clients for this // duration, otherwise it exits only on error. func (s *Server) run(ctx context.Context) { defer s.async.done() - var activeConns []*Connection + + var activeConns sync.WaitGroup for { - // we never close the accepted connection, we rely on the other end - // closing or the socket closing itself naturally rwc, err := s.listener.Accept(ctx) if err != nil { - if !isClosingError(err) { + // Only Shutdown closes the listener. If we get an error after Shutdown is + // called, assume that that was the cause and don't report the error; + // otherwise, report the error in case it is unexpected. + if atomic.LoadInt32(&s.closing) == 0 { s.async.setError(err) } - // we are done generating new connections for good + // We are done generating new connections for good. break } - // see if any connections were closed while we were waiting - activeConns = onlyActive(activeConns) - - // a new inbound connection, - conn, err := newConnection(ctx, rwc, s.binder) - if err != nil { - if !isClosingError(err) { - s.async.setError(err) - } - continue - } - activeConns = append(activeConns, conn) - } - - // wait for all active conns to finish - for _, c := range activeConns { - c.Wait() + // A new inbound connection. + activeConns.Add(1) + _ = newConnection(ctx, rwc, s.binder, activeConns.Done) // unregisters itself when done } + activeConns.Wait() } -func onlyActive(conns []*Connection) []*Connection { - i := 0 - for _, c := range conns { - if !c.async.isDone() { - conns[i] = c - i++ - } +// NewIdleListener wraps a listener with an idle timeout. +// +// When there are no active connections for at least the timeout duration, +// calls to Accept will fail with ErrIdleTimeout. +// +// A connection is considered inactive as soon as its Close method is called. +func NewIdleListener(timeout time.Duration, wrap Listener) Listener { + l := &idleListener{ + wrapped: wrap, + timeout: timeout, + active: make(chan int, 1), + timedOut: make(chan struct{}), + idleTimer: make(chan *time.Timer, 1), } - // trim the slice down - return conns[:i] + l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired) + return l } -// isClosingError reports if the error occurs normally during the process of -// closing a network connection. It uses imperfect heuristics that err on the -// side of false negatives, and should not be used for anything critical. -func isClosingError(err error) bool { - if err == nil { - return false - } - // Fully unwrap the error, so the following tests work. - for wrapped := err; wrapped != nil; wrapped = errors.Unwrap(err) { - err = wrapped - } - - // Was it based on an EOF error? - if err == io.EOF { - return true - } +type idleListener struct { + wrapped Listener + timeout time.Duration - // Was it based on a closed pipe? - if err == io.ErrClosedPipe { - return true - } + // Only one of these channels is receivable at any given time. + active chan int // count of active connections; closed when Close is called if not timed out + timedOut chan struct{} // closed when the idle timer expires + idleTimer chan *time.Timer // holds the timer only when idle +} - // Per https://github.com/golang/go/issues/4373, this error string should not - // change. This is not ideal, but since the worst that could happen here is - // some superfluous logging, it is acceptable. - if err.Error() == "use of closed network connection" { - return true - } +// Accept accepts an incoming connection. +// +// If an incoming connection is accepted concurrent to the listener being closed +// due to idleness, the new connection is immediately closed. +func (l *idleListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) { + rwc, err := l.wrapped.Accept(ctx) - if runtime.GOOS == "plan9" { - // Error reading from a closed connection. - if err == syscall.EINVAL { - return true + select { + case n, ok := <-l.active: + if err != nil { + if ok { + l.active <- n + } + return nil, err } - // Error trying to accept a new connection from a closed listener. - if strings.HasSuffix(err.Error(), " listen hungup") { - return true + if ok { + l.active <- n + 1 + } else { + // l.wrapped.Close Close has been called, but Accept returned a + // connection. This race can occur with concurrent Accept and Close calls + // with any net.Listener, and it is benign: since the listener was closed + // explicitly, it can't have also timed out. } - } - return false -} + return l.newConn(rwc), nil -// NewIdleListener wraps a listener with an idle timeout. -// When there are no active connections for at least the timeout duration a -// call to accept will fail with ErrIdleTimeout. -func NewIdleListener(timeout time.Duration, wrap Listener) Listener { - l := &idleListener{ - timeout: timeout, - wrapped: wrap, - newConns: make(chan *idleCloser), - closed: make(chan struct{}), - wasTimeout: make(chan struct{}), - } - go l.run() - return l -} + case <-l.timedOut: + if err == nil { + // Keeping the connection open would leave the listener simultaneously + // active and closed due to idleness, which would be contradictory and + // confusing. Close the connection and pretend that it never happened. + rwc.Close() + } else { + // In theory the timeout could have raced with an unrelated error return + // from Accept. However, ErrIdleTimeout is arguably still valid (since we + // would have closed due to the timeout independent of the error), and the + // harm from returning a spurious ErrIdleTimeout is negligible anyway. + } + return nil, ErrIdleTimeout -type idleListener struct { - wrapped Listener - timeout time.Duration - newConns chan *idleCloser - closed chan struct{} - wasTimeout chan struct{} - closeOnce sync.Once -} + case timer := <-l.idleTimer: + if err != nil { + // The idle timer doesn't run until it receives itself from the idleTimer + // channel, so it can't have called l.wrapped.Close yet and thus err can't + // be ErrIdleTimeout. Leave the idle timer as it was and return whatever + // error we got. + l.idleTimer <- timer + return nil, err + } -type idleCloser struct { - wrapped io.ReadWriteCloser - closed chan struct{} - closeOnce sync.Once -} + if !timer.Stop() { + // Failed to stop the timer — the timer goroutine is in the process of + // firing. Send the timer back to the timer goroutine so that it can + // safely close the timedOut channel, and then wait for the listener to + // actually be closed before we return ErrIdleTimeout. + l.idleTimer <- timer + rwc.Close() + <-l.timedOut + return nil, ErrIdleTimeout + } -func (c *idleCloser) Read(p []byte) (int, error) { - n, err := c.wrapped.Read(p) - if err != nil && isClosingError(err) { - c.closeOnce.Do(func() { close(c.closed) }) + l.active <- 1 + return l.newConn(rwc), nil } - return n, err } -func (c *idleCloser) Write(p []byte) (int, error) { - // we do not close on write failure, we rely on the wrapped writer to do that - // if it is appropriate, which we will detect in the next read. - return c.wrapped.Write(p) -} +func (l *idleListener) Close() error { + select { + case _, ok := <-l.active: + if ok { + close(l.active) + } -func (c *idleCloser) Close() error { - // we rely on closing the wrapped stream to signal to the next read that we - // are closed, rather than triggering the closed signal directly - return c.wrapped.Close() -} + case <-l.timedOut: + // Already closed by the timer; take care not to double-close if the caller + // only explicitly invokes this Close method once, since the io.Closer + // interface explicitly leaves doubled Close calls undefined. + return ErrIdleTimeout -func (l *idleListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) { - rwc, err := l.wrapped.Accept(ctx) - if err != nil { - if isClosingError(err) { - // underlying listener was closed - l.closeOnce.Do(func() { close(l.closed) }) - // was it closed because of the idle timeout? - select { - case <-l.wasTimeout: - err = ErrIdleTimeout - default: - } + case timer := <-l.idleTimer: + if !timer.Stop() { + // Couldn't stop the timer. It shouldn't take long to run, so just wait + // (so that the Listener is guaranteed to be closed before we return) + // and pretend that this call happened afterward. + // That way we won't leak any timers or goroutines when Close returns. + l.idleTimer <- timer + <-l.timedOut + return ErrIdleTimeout } - return nil, err + close(l.active) } - conn := &idleCloser{ - wrapped: rwc, - closed: make(chan struct{}), - } - l.newConns <- conn - return conn, err -} -func (l *idleListener) Close() error { - defer l.closeOnce.Do(func() { close(l.closed) }) return l.wrapped.Close() } @@ -252,31 +246,83 @@ func (l *idleListener) Dialer() Dialer { return l.wrapped.Dialer() } -func (l *idleListener) run() { - var conns []*idleCloser - for { - var firstClosed chan struct{} // left at nil if there are no active conns - var timeout <-chan time.Time // left at nil if there are active conns - if len(conns) > 0 { - firstClosed = conns[0].closed +func (l *idleListener) timerExpired() { + select { + case n, ok := <-l.active: + if ok { + panic(fmt.Sprintf("jsonrpc2: idleListener idle timer fired with %d connections still active", n)) } else { - timeout = time.After(l.timeout) + panic("jsonrpc2: Close finished with idle timer still running") } - select { - case <-l.closed: - // the main listener closed, no need to keep going + + case <-l.timedOut: + panic("jsonrpc2: idleListener idle timer fired more than once") + + case <-l.idleTimer: + // The timer for this very call! + } + + // Close the Listener with all channels still blocked to ensure that this call + // to l.wrapped.Close doesn't race with the one in l.Close. + defer close(l.timedOut) + l.wrapped.Close() +} + +func (l *idleListener) connClosed() { + select { + case n, ok := <-l.active: + if !ok { + // l is already closed, so it can't close due to idleness, + // and we don't need to track the number of active connections any more. return - case conn := <-l.newConns: - // a new conn arrived, add it to the list - conns = append(conns, conn) - case <-timeout: - // we timed out, only happens when there are no active conns - // close the underlying listener, and allow the normal closing process to happen - close(l.wasTimeout) - l.wrapped.Close() - case <-firstClosed: - // a conn closed, remove it from the active list - conns = conns[:copy(conns, conns[1:])] } + n-- + if n == 0 { + l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired) + } else { + l.active <- n + } + + case <-l.timedOut: + panic("jsonrpc2: idleListener idle timer fired before last active connection was closed") + + case <-l.idleTimer: + panic("jsonrpc2: idleListener idle timer active before last active connection was closed") } } + +type idleListenerConn struct { + wrapped io.ReadWriteCloser + l *idleListener + closeOnce sync.Once +} + +func (l *idleListener) newConn(rwc io.ReadWriteCloser) *idleListenerConn { + c := &idleListenerConn{ + wrapped: rwc, + l: l, + } + + // A caller that forgets to call Close may disrupt the idleListener's + // accounting, even though the file descriptor for the underlying connection + // may eventually be garbage-collected anyway. + // + // Set a (best-effort) finalizer to verify that a Close call always occurs. + // (We will clear the finalizer explicitly in Close.) + runtime.SetFinalizer(c, func(c *idleListenerConn) { + panic("jsonrpc2: IdleListener connection became unreachable without a call to Close") + }) + + return c +} + +func (c *idleListenerConn) Read(p []byte) (int, error) { return c.wrapped.Read(p) } +func (c *idleListenerConn) Write(p []byte) (int, error) { return c.wrapped.Write(p) } + +func (c *idleListenerConn) Close() error { + defer c.closeOnce.Do(func() { + c.l.connClosed() + runtime.SetFinalizer(c, nil) + }) + return c.wrapped.Close() +} diff --git a/internal/jsonrpc2_v2/serve_go116.go b/internal/jsonrpc2_v2/serve_go116.go new file mode 100644 index 00000000000..29549f1059d --- /dev/null +++ b/internal/jsonrpc2_v2/serve_go116.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +package jsonrpc2 + +import ( + "errors" + "net" +) + +var errClosed = net.ErrClosed + +func isErrClosed(err error) bool { + return errors.Is(err, errClosed) +} diff --git a/internal/jsonrpc2_v2/serve_pre116.go b/internal/jsonrpc2_v2/serve_pre116.go new file mode 100644 index 00000000000..a1801d8a200 --- /dev/null +++ b/internal/jsonrpc2_v2/serve_pre116.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.16 +// +build !go1.16 + +package jsonrpc2 + +import ( + "errors" + "strings" +) + +// errClosed is an error with the same string as net.ErrClosed, +// which was added in Go 1.16. +var errClosed = errors.New("use of closed network connection") + +// isErrClosed reports whether err ends in the same string as errClosed. +func isErrClosed(err error) bool { + // As of Go 1.16, this could be 'errors.Is(err, net.ErrClosing)', but + // unfortunately gopls still requires compatibility with + // (otherwise-unsupported) older Go versions. + // + // In the meantime, this error string has not changed on any supported Go + // version, and is not expected to change in the future. + // This is not ideal, but since the worst that could happen here is some + // superfluous logging, it is acceptable. + return strings.HasSuffix(err.Error(), "use of closed network connection") +} diff --git a/internal/jsonrpc2_v2/serve_test.go b/internal/jsonrpc2_v2/serve_test.go index 26cf6a58c4e..88ac66b7e66 100644 --- a/internal/jsonrpc2_v2/serve_test.go +++ b/internal/jsonrpc2_v2/serve_test.go @@ -7,6 +7,8 @@ package jsonrpc2_test import ( "context" "errors" + "fmt" + "runtime/debug" "testing" "time" @@ -16,48 +18,125 @@ import ( func TestIdleTimeout(t *testing.T) { stacktest.NoLeak(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{}) - if err != nil { - t.Fatal(err) - } - listener = jsonrpc2.NewIdleListener(100*time.Millisecond, listener) - defer listener.Close() - server, err := jsonrpc2.Serve(ctx, listener, jsonrpc2.ConnectionOptions{}) - if err != nil { - t.Fatal(err) - } + // Use a panicking time.AfterFunc instead of context.WithTimeout so that we + // get a goroutine dump on failure. We expect the test to take on the order of + // a few tens of milliseconds at most, so 10s should be several orders of + // magnitude of headroom. + timer := time.AfterFunc(10*time.Second, func() { + debug.SetTraceback("all") + panic("TestIdleTimeout deadlocked") + }) + defer timer.Stop() - connect := func() *jsonrpc2.Connection { - client, err := jsonrpc2.Dial(ctx, - listener.Dialer(), - jsonrpc2.ConnectionOptions{}) + ctx := context.Background() + + try := func(d time.Duration) (longEnough bool) { + listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{}) if err != nil { t.Fatal(err) } - return client - } - // Exercise some connection/disconnection patterns, and then assert that when - // our timer fires, the server exits. - conn1 := connect() - conn2 := connect() - if err := conn1.Close(); err != nil { - t.Fatalf("conn1.Close failed with error: %v", err) - } - if err := conn2.Close(); err != nil { - t.Fatalf("conn2.Close failed with error: %v", err) - } - conn3 := connect() - if err := conn3.Close(); err != nil { - t.Fatalf("conn3.Close failed with error: %v", err) - } - serverError := server.Wait() + idleStart := time.Now() + listener = jsonrpc2.NewIdleListener(d, listener) + defer listener.Close() - if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) { - t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout) + server := jsonrpc2.NewServer(ctx, listener, jsonrpc2.ConnectionOptions{}) + + // Exercise some connection/disconnection patterns, and then assert that when + // our timer fires, the server exits. + conn1, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{}) + if err != nil { + if since := time.Since(idleStart); since < d { + t.Fatalf("conn1 failed to connect after %v: %v", since, err) + } + t.Log("jsonrpc2.Dial:", err) + return false // Took to long to dial, so the failure could have been due to the idle timeout. + } + // On the server side, Accept can race with the connection timing out. + // Send a call and wait for the response to ensure that the connection was + // actually fully accepted. + ac := conn1.Call(ctx, "ping", nil) + if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) { + if since := time.Since(idleStart); since < d { + t.Fatalf("conn1 broken after %v: %v", since, err) + } + t.Log(`conn1.Call(ctx, "ping", nil):`, err) + conn1.Close() + return false + } + + // Since conn1 was successfully accepted and remains open, the server is + // definitely non-idle. Dialing another simultaneous connection should + // succeed. + conn2, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{}) + if err != nil { + conn1.Close() + t.Fatalf("conn2 failed to connect while non-idle after %v: %v", time.Since(idleStart), err) + return false + } + // Ensure that conn2 is also accepted on the server side before we close + // conn1. Otherwise, the connection can appear idle if the server processes + // the closure of conn1 and the idle timeout before it finally notices conn2 + // in the accept queue. + // (That failure mode may explain the failure noted in + // https://go.dev/issue/49387#issuecomment-1303979877.) + ac = conn2.Call(ctx, "ping", nil) + if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) { + t.Fatalf("conn2 broken while non-idle after %v: %v", time.Since(idleStart), err) + } + + if err := conn1.Close(); err != nil { + t.Fatalf("conn1.Close failed with error: %v", err) + } + idleStart = time.Now() + if err := conn2.Close(); err != nil { + t.Fatalf("conn2.Close failed with error: %v", err) + } + + conn3, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{}) + if err != nil { + if since := time.Since(idleStart); since < d { + t.Fatalf("conn3 failed to connect after %v: %v", since, err) + } + t.Log("jsonrpc2.Dial:", err) + return false // Took to long to dial, so the failure could have been due to the idle timeout. + } + + ac = conn3.Call(ctx, "ping", nil) + if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) { + if since := time.Since(idleStart); since < d { + t.Fatalf("conn3 broken after %v: %v", since, err) + } + t.Log(`conn3.Call(ctx, "ping", nil):`, err) + conn3.Close() + return false + } + + idleStart = time.Now() + if err := conn3.Close(); err != nil { + t.Fatalf("conn3.Close failed with error: %v", err) + } + + serverError := server.Wait() + + if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) { + t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout) + } + if since := time.Since(idleStart); since < d { + t.Errorf("server shut down after %v idle; want at least %v", since, d) + } + return true + } + + d := 1 * time.Millisecond + for { + t.Logf("testing with idle timout %v", d) + if !try(d) { + d *= 2 + continue + } + break } } @@ -78,8 +157,7 @@ func (fakeHandler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface func TestServe(t *testing.T) { stacktest.NoLeak(t) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() + ctx := context.Background() tests := []struct { name string @@ -116,13 +194,9 @@ func TestServe(t *testing.T) { } func newFake(t *testing.T, ctx context.Context, l jsonrpc2.Listener) (*jsonrpc2.Connection, func(), error) { - l = jsonrpc2.NewIdleListener(100*time.Millisecond, l) - server, err := jsonrpc2.Serve(ctx, l, jsonrpc2.ConnectionOptions{ + server := jsonrpc2.NewServer(ctx, l, jsonrpc2.ConnectionOptions{ Handler: fakeHandler{}, }) - if err != nil { - return nil, nil, err - } client, err := jsonrpc2.Dial(ctx, l.Dialer(), @@ -142,3 +216,129 @@ func newFake(t *testing.T, ctx context.Context, l jsonrpc2.Listener) (*jsonrpc2. server.Wait() }, nil } + +// TestIdleListenerAcceptCloseRace checks for the Accept/Close race fixed in CL 388597. +// +// (A bug in the idleListener implementation caused a successful Accept to block +// on sending to a background goroutine that could have already exited.) +func TestIdleListenerAcceptCloseRace(t *testing.T) { + ctx := context.Background() + + n := 10 + + // Each iteration of the loop appears to take around a millisecond, so to + // avoid spurious failures we'll set the watchdog for three orders of + // magnitude longer. When the bug was present, this reproduced the deadlock + // reliably on a Linux workstation when run with -count=100, which should be + // frequent enough to show up on the Go build dashboard if it regresses. + watchdog := time.Duration(n) * 1000 * time.Millisecond + timer := time.AfterFunc(watchdog, func() { + debug.SetTraceback("all") + panic(fmt.Sprintf("%s deadlocked after %v", t.Name(), watchdog)) + }) + defer timer.Stop() + + for ; n > 0; n-- { + listener, err := jsonrpc2.NetPipeListener(ctx) + if err != nil { + t.Fatal(err) + } + listener = jsonrpc2.NewIdleListener(24*time.Hour, listener) + + done := make(chan struct{}) + go func() { + conn, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{}) + listener.Close() + if err == nil { + conn.Close() + } + close(done) + }() + + // Accept may return a non-nil error if Close closes the underlying network + // connection before the wrapped Accept call unblocks. However, it must not + // deadlock! + c, err := listener.Accept(ctx) + if err == nil { + c.Close() + } + <-done + } +} + +// TestCloseCallRace checks for a race resulting in a deadlock when a Call on +// one side of the connection races with a Close (or otherwise broken +// connection) initiated from the other side. +// +// (The Call method was waiting for a result from the Read goroutine to +// determine which error value to return, but the Read goroutine was waiting for +// in-flight calls to complete before reporting that result.) +func TestCloseCallRace(t *testing.T) { + ctx := context.Background() + n := 10 + + watchdog := time.Duration(n) * 1000 * time.Millisecond + timer := time.AfterFunc(watchdog, func() { + debug.SetTraceback("all") + panic(fmt.Sprintf("%s deadlocked after %v", t.Name(), watchdog)) + }) + defer timer.Stop() + + for ; n > 0; n-- { + listener, err := jsonrpc2.NetPipeListener(ctx) + if err != nil { + t.Fatal(err) + } + + pokec := make(chan *jsonrpc2.AsyncCall, 1) + + s := jsonrpc2.NewServer(ctx, listener, jsonrpc2.BinderFunc(func(_ context.Context, srvConn *jsonrpc2.Connection) jsonrpc2.ConnectionOptions { + h := jsonrpc2.HandlerFunc(func(ctx context.Context, _ *jsonrpc2.Request) (interface{}, error) { + // Start a concurrent call from the server to the client. + // The point of this test is to ensure this doesn't deadlock + // if the client shuts down the connection concurrently. + // + // The racing Call may or may not receive a response: it should get a + // response if it is sent before the client closes the connection, and + // it should fail with some kind of "connection closed" error otherwise. + go func() { + pokec <- srvConn.Call(ctx, "poke", nil) + }() + + return &msg{"pong"}, nil + }) + return jsonrpc2.ConnectionOptions{Handler: h} + })) + + dialConn, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{}) + if err != nil { + listener.Close() + s.Wait() + t.Fatal(err) + } + + // Calling any method on the server should provoke it to asynchronously call + // us back. While it is starting that call, we will close the connection. + if err := dialConn.Call(ctx, "ping", nil).Await(ctx, nil); err != nil { + t.Error(err) + } + if err := dialConn.Close(); err != nil { + t.Error(err) + } + + // Ensure that the Call on the server side did not block forever when the + // connection closed. + pokeCall := <-pokec + if err := pokeCall.Await(ctx, nil); err == nil { + t.Errorf("unexpected nil error from server-initited call") + } else if errors.Is(err, jsonrpc2.ErrMethodNotFound) { + // The call completed before the Close reached the handler. + } else { + // The error was something else. + t.Logf("server-initiated call completed with expected error: %v", err) + } + + listener.Close() + s.Wait() + } +} diff --git a/internal/jsonrpc2_v2/wire.go b/internal/jsonrpc2_v2/wire.go index 4da129ae6e2..c8dc9ebf1bf 100644 --- a/internal/jsonrpc2_v2/wire.go +++ b/internal/jsonrpc2_v2/wire.go @@ -33,6 +33,10 @@ var ( ErrServerOverloaded = NewError(-32000, "JSON RPC overloaded") // ErrUnknown should be used for all non coded errors. ErrUnknown = NewError(-32001, "JSON RPC unknown error") + // ErrServerClosing is returned for calls that arrive while the server is closing. + ErrServerClosing = NewError(-32002, "JSON RPC server is closing") + // ErrClientClosing is a dummy error returned for calls initiated while the client is closing. + ErrClientClosing = NewError(-32003, "JSON RPC client is closing") ) const wireVersion = "2.0" @@ -72,3 +76,11 @@ func NewError(code int64, message string) error { func (err *wireError) Error() string { return err.Message } + +func (err *wireError) Is(other error) bool { + w, ok := other.(*wireError) + if !ok { + return false + } + return err.Code == w.Code +} diff --git a/internal/lockedfile/internal/filelock/filelock.go b/internal/lockedfile/internal/filelock/filelock.go new file mode 100644 index 00000000000..05f27c321a8 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "io/fs" + "os" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. + Stat() (fs.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. +// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. +func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). +func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by ErrNotSupported as well as some syscall errors. +func IsNotSupported(err error) bool { + return isNotSupported(underlyingError(err)) +} + +var ErrNotSupported = errors.New("operation not supported") + +// underlyingError returns the underlying error for known os error types. +func underlyingError(err error) error { + switch err := err.(type) { + case *fs.PathError: + return err.Err + case *os.LinkError: + return err.Err + case *os.SyscallError: + return err.Err + } + return err +} diff --git a/internal/lockedfile/internal/filelock/filelock_fcntl.go b/internal/lockedfile/internal/filelock/filelock_fcntl.go new file mode 100644 index 00000000000..30985191072 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || (solaris && !illumos) +// +build aix solaris,!illumos + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// Most platforms provide some alternative API, such as an 'flock' system call +// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and +// does not require per-inode bookkeeping in the application. + +package filelock + +import ( + "errors" + "io" + "io/fs" + "math/rand" + "sync" + "syscall" + "time" +) + +type lockType int16 + +const ( + readLock lockType = syscall.F_RDLCK + writeLock lockType = syscall.F_WRLCK +) + +type inode = uint64 // type of syscall.Stat_t.Ino + +type inodeLock struct { + owner File + queue []<-chan File +} + +var ( + mu sync.Mutex + inodes = map[File]inode{} + locks = map[inode]inodeLock{} +) + +func lock(f File, lt lockType) (err error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.Stat() + if err != nil { + return err + } + ino := fi.Sys().(*syscall.Stat_t).Ino + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + inodes[f] = ino + + var wait chan File + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else { + // Already owned: add a channel to wait on. + wait = make(chan File) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at + // the process, rather than thread, level. Consider processes P and Q, with + // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be + // reported as a deadlock on systems that consider only process granularity: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (This is erroneously reported as a deadlock.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + // + // These spurious errors were observed in practice on AIX and Solaris in + // cmd/go: see https://golang.org/issue/32817. + // + // We work around this bug by treating EDEADLK as always spurious. If there + // really is a lock-ordering bug between the interacting processes, it will + // become a livelock instead, but that's not appreciably worse than if we had + // a proper flock implementation (which generally does not even attempt to + // diagnose deadlocks). + // + // In the above example, that changes the trace to: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 spuriously fails to lock file B and goes to sleep. + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 wakes up and locks file B. + // P.2 unlocks file B. + // + // We know that the retry loop will not introduce a *spurious* livelock + // because, according to the POSIX specification, EDEADLK is only to be + // returned when ā€œthe lock is blocked by a lock from another processā€. + // If that process is blocked on some lock that we are holding, then the + // resulting livelock is due to a real deadlock (and would manifest as such + // when using, for example, the flock implementation of this package). + // If the other process is *not* blocked on some other lock that we are + // holding, then it will eventually release the requested lock. + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + err = setlkw(f.Fd(), lt) + if err != syscall.EDEADLK { + break + } + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions when we finally unblock. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } + + if err != nil { + unlock(f) + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + + return nil +} + +func unlock(f File) error { + var owner File + + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner != f { + panic("unlock called on a file that is not locked") + } + + err := setlkw(f.Fd(), syscall.F_UNLCK) + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. +func setlkw(fd uintptr, lt lockType) error { + for { + err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. + }) + if err != syscall.EINTR { + return err + } + } +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/internal/lockedfile/internal/filelock/filelock_other.go b/internal/lockedfile/internal/filelock/filelock_other.go new file mode 100644 index 00000000000..cde868f49b0 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_other.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(aix || darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd || solaris) && !plan9 && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!illumos,!linux,!netbsd,!openbsd,!solaris,!plan9,!windows + +package filelock + +import "io/fs" + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/internal/lockedfile/internal/filelock/filelock_plan9.go b/internal/lockedfile/internal/filelock/filelock_plan9.go new file mode 100644 index 00000000000..908afb6c8cb --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_plan9.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package filelock + +import "io/fs" + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/internal/lockedfile/internal/filelock/filelock_test.go b/internal/lockedfile/internal/filelock/filelock_test.go new file mode 100644 index 00000000000..224feda93e3 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_test.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js && !plan9 +// +build !js,!plan9 + +package filelock_test + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "golang.org/x/tools/internal/lockedfile/internal/filelock" +) + +func lock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Lock(f) + t.Logf("Lock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func rLock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.RLock(f) + t.Logf("RLock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func unlock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Unlock(f) + t.Logf("Unlock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func mustTempFile(t *testing.T) (f *os.File, remove func()) { + t.Helper() + + base := filepath.Base(t.Name()) + f, err := os.CreateTemp("", base) + if err != nil { + t.Fatalf(`os.CreateTemp("", %q) = %v`, base, err) + } + t.Logf("fd %d = %s", f.Fd(), f.Name()) + + return f, func() { + f.Close() + os.Remove(f.Name()) + } +} + +func mustOpen(t *testing.T, name string) *os.File { + t.Helper() + + f, err := os.OpenFile(name, os.O_RDWR, 0) + if err != nil { + t.Fatalf("os.Open(%q) = %v", name, err) + } + + t.Logf("fd %d = os.Open(%q)", f.Fd(), name) + return f +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) { + t.Helper() + + desc := fmt.Sprintf("%s(fd %d)", op, f.Fd()) + + done := make(chan struct{}) + go func() { + t.Helper() + switch op { + case "Lock": + lock(t, f) + case "RLock": + rLock(t, f) + default: + panic("invalid op: " + op) + } + close(done) + }() + + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + return nil + + case <-time.After(quiescent): + t.Logf("%s is blocked (as expected)", desc) + return func(t *testing.T) { + t.Helper() + select { + case <-time.After(probablyStillBlocked): + t.Fatalf("%s is unexpectedly still blocked", desc) + case <-done: + } + } + } +} + +func TestLockExcludesLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} + +func TestLockExcludesRLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + rLockOther := mustBlock(t, "RLock", other) + unlock(t, f) + rLockOther(t) + unlock(t, other) +} + +func TestRLockExcludesOnlyLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + rLock(t, f) + + f2 := mustOpen(t, f.Name()) + defer f2.Close() + + doUnlockTF := false + switch runtime.GOOS { + case "aix", "solaris": + // When using POSIX locks (as on Solaris), we can't safely read-lock the + // same inode through two different descriptors at the same time: when the + // first descriptor is closed, the second descriptor would still be open but + // silently unlocked. So a second RLock must block instead of proceeding. + lockF2 := mustBlock(t, "RLock", f2) + unlock(t, f) + lockF2(t) + default: + rLock(t, f2) + doUnlockTF = true + } + + other := mustOpen(t, f.Name()) + defer other.Close() + lockOther := mustBlock(t, "Lock", other) + + unlock(t, f2) + if doUnlockTF { + unlock(t, f) + } + lockOther(t) + unlock(t, other) +} + +func TestLockNotDroppedByExecCommand(t *testing.T) { + f, remove := mustTempFile(t) + defer remove() + + lock(t, f) + + other := mustOpen(t, f.Name()) + defer other.Close() + + // Some kinds of file locks are dropped when a duplicated or forked file + // descriptor is unlocked. Double-check that the approach used by os/exec does + // not accidentally drop locks. + cmd := exec.Command(os.Args[0], "-test.run=^$") + if err := cmd.Run(); err != nil { + t.Fatalf("exec failed: %v", err) + } + + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} diff --git a/internal/lockedfile/internal/filelock/filelock_unix.go b/internal/lockedfile/internal/filelock/filelock_unix.go new file mode 100644 index 00000000000..878a1e770d4 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_unix.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd +// +build darwin dragonfly freebsd illumos linux netbsd openbsd + +package filelock + +import ( + "io/fs" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/internal/lockedfile/internal/filelock/filelock_windows.go b/internal/lockedfile/internal/filelock/filelock_windows.go new file mode 100644 index 00000000000..3273a818272 --- /dev/null +++ b/internal/lockedfile/internal/filelock/filelock_windows.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package filelock + +import ( + "io/fs" + + "golang.org/x/sys/windows" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +func lock(f File, lt lockType) error { + // Per https://golang.org/issue/19098, ā€œPrograms currently expect the Fd + // method to return a handle that uses ordinary synchronous I/O.ā€ + // However, LockFileEx still requires an OVERLAPPED structure, + // which contains the file offset of the beginning of the lock range. + // We want to lock the entire file, so we leave the offset as zero. + ol := new(windows.Overlapped) + + err := windows.LockFileEx(windows.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + ol := new(windows.Overlapped) + err := windows.UnlockFileEx(windows.Handle(f.Fd()), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: err, + } + } + return nil +} + +func isNotSupported(err error) bool { + switch err { + case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported: + return true + default: + return false + } +} diff --git a/internal/lockedfile/lockedfile.go b/internal/lockedfile/lockedfile.go new file mode 100644 index 00000000000..82e1a89675e --- /dev/null +++ b/internal/lockedfile/lockedfile.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. +package lockedfile + +import ( + "fmt" + "io" + "io/fs" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. +// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. +func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. +func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. +// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &fs.PathError{ + Op: "close", + Path: f.Name(), + Err: fs.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. +func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return io.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm fs.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} + +// Transform invokes t with the result of reading the named file, with its lock +// still held. +// +// If t returns a nil error, Transform then writes the returned contents back to +// the file, making a best effort to preserve existing contents on error. +// +// t must not modify the slice passed to it. +func Transform(name string, t func([]byte) ([]byte, error)) (err error) { + f, err := Edit(name) + if err != nil { + return err + } + defer f.Close() + + old, err := io.ReadAll(f) + if err != nil { + return err + } + + new, err := t(old) + if err != nil { + return err + } + + if len(new) > len(old) { + // The overall file size is increasing, so write the tail first: if we're + // about to run out of space on the disk, we would rather detect that + // failure before we have overwritten the original contents. + if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil { + // Make a best effort to remove the incomplete tail. + f.Truncate(int64(len(old))) + return err + } + } + + // We're about to overwrite the old contents. In case of failure, make a best + // effort to roll back before we close the file. + defer func() { + if err != nil { + if _, err := f.WriteAt(old, 0); err == nil { + f.Truncate(int64(len(old))) + } + } + }() + + if len(new) >= len(old) { + if _, err := f.WriteAt(new[:len(old)], 0); err != nil { + return err + } + } else { + if _, err := f.WriteAt(new, 0); err != nil { + return err + } + // The overall file size is decreasing, so shrink the file to its final size + // after writing. We do this after writing (instead of before) so that if + // the write fails, enough filesystem space will likely still be reserved + // to contain the previous contents. + if err := f.Truncate(int64(len(new))); err != nil { + return err + } + } + + return nil +} diff --git a/internal/lockedfile/lockedfile_filelock.go b/internal/lockedfile/lockedfile_filelock.go new file mode 100644 index 00000000000..7c71672c811 --- /dev/null +++ b/internal/lockedfile/lockedfile_filelock.go @@ -0,0 +1,66 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 +// +build !plan9 + +package lockedfile + +import ( + "io/fs" + "os" + + "golang.org/x/tools/internal/lockedfile/internal/filelock" +) + +func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. + + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says ā€œif possible, truncate file when + // openedā€, but doesn't define ā€œpossibleā€ (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as ā€œpossibleā€ + // and ignore errors for the rest. + if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. + err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/internal/lockedfile/lockedfile_plan9.go b/internal/lockedfile/lockedfile_plan9.go new file mode 100644 index 00000000000..40871e610cd --- /dev/null +++ b/internal/lockedfile/lockedfile_plan9.go @@ -0,0 +1,95 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package lockedfile + +import ( + "io/fs" + "math/rand" + "os" + "strings" + "time" +) + +// Opening an exclusive-use file returns an error. +// The expected error strings are: +// +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) +var lockedErrStrings = [...]string{ + "file is locked", + "exclusive lock", + "exclusive use file already open", +} + +// Even though plan9 doesn't support the Lock/RLock/Unlock functions to +// manipulate already-open files, IsLocked is still meaningful: os.OpenFile +// itself may return errors that indicate that a file with the ModeExclusive bit +// set is already open. +func isLocked(err error) bool { + s := err.Error() + + for _, frag := range lockedErrStrings { + if strings.Contains(s, frag) { + return true + } + } + + return false +} + +func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { + // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. + // + // Per http://man.cat-v.org/plan_9/5/stat: ā€œExclusive use files may be open + // for I/O by only one fid at a time across all clients of the server. If a + // second open is attempted, it draws an error.ā€ + // + // So we can try to open a locked file, but if it fails we're on our own to + // figure out when it becomes available. We'll use exponential backoff with + // some jitter and an arbitrary limit of 500ms. + + // If the file was unpacked or created by some other program, it might not + // have the ModeExclusive bit set. Set it before we call OpenFile, so that we + // can be confident that a successful OpenFile implies exclusive use. + if fi, err := os.Stat(name); err == nil { + if fi.Mode()&fs.ModeExclusive == 0 { + if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil { + return nil, err + } + } + } else if !os.IsNotExist(err) { + return nil, err + } + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive) + if err == nil { + return f, nil + } + + if !isLocked(err) { + return nil, err + } + + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } +} + +func closeFile(f *os.File) error { + return f.Close() +} diff --git a/internal/lockedfile/lockedfile_test.go b/internal/lockedfile/lockedfile_test.go new file mode 100644 index 00000000000..572178d0d32 --- /dev/null +++ b/internal/lockedfile/lockedfile_test.go @@ -0,0 +1,270 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// js does not support inter-process file locking. +// +//go:build !js +// +build !js + +package lockedfile_test + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "golang.org/x/tools/internal/lockedfile" +) + +func mustTempDir(t *testing.T) (dir string, remove func()) { + t.Helper() + + dir, err := os.MkdirTemp("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + return dir, func() { os.RemoveAll(dir) } +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) { + t.Helper() + + done := make(chan struct{}) + go func() { + f() + close(done) + }() + + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + return nil + + case <-time.After(quiescent): + return func(t *testing.T) { + t.Helper() + select { + case <-time.After(probablyStillBlocked): + t.Fatalf("%s is unexpectedly still blocked after %v", desc, probablyStillBlocked) + case <-done: + } + } + } +} + +func TestMutexExcludes(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "lock") + + mu := lockedfile.MutexAt(path) + t.Logf("mu := MutexAt(_)") + + unlock, err := mu.Lock() + if err != nil { + t.Fatalf("mu.Lock: %v", err) + } + t.Logf("unlock, _ := mu.Lock()") + + mu2 := lockedfile.MutexAt(mu.Path) + t.Logf("mu2 := MutexAt(mu.Path)") + + wait := mustBlock(t, "mu2.Lock()", func() { + unlock2, err := mu2.Lock() + if err != nil { + t.Errorf("mu2.Lock: %v", err) + return + } + t.Logf("unlock2, _ := mu2.Lock()") + t.Logf("unlock2()") + unlock2() + }) + + t.Logf("unlock()") + unlock() + wait(t) +} + +func TestReadWaitsForLock(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "timestamp.txt") + + f, err := lockedfile.Create(path) + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + + const ( + part1 = "part 1\n" + part2 = "part 2\n" + ) + _, err = f.WriteString(part1) + if err != nil { + t.Fatalf("WriteString: %v", err) + } + t.Logf("WriteString(%q) = ", part1) + + wait := mustBlock(t, "Read", func() { + b, err := lockedfile.Read(path) + if err != nil { + t.Errorf("Read: %v", err) + return + } + + const want = part1 + part2 + got := string(b) + if got == want { + t.Logf("Read(_) = %q", got) + } else { + t.Errorf("Read(_) = %q, _; want %q", got, want) + } + }) + + _, err = f.WriteString(part2) + if err != nil { + t.Errorf("WriteString: %v", err) + } else { + t.Logf("WriteString(%q) = ", part2) + } + f.Close() + + wait(t) +} + +func TestCanLockExistingFile(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + path := filepath.Join(dir, "existing.txt") + + if err := os.WriteFile(path, []byte("ok"), 0777); err != nil { + t.Fatalf("os.WriteFile: %v", err) + } + + f, err := lockedfile.Edit(path) + if err != nil { + t.Fatalf("first Edit: %v", err) + } + + wait := mustBlock(t, "Edit", func() { + other, err := lockedfile.Edit(path) + if err != nil { + t.Errorf("second Edit: %v", err) + } + other.Close() + }) + + f.Close() + wait(t) +} + +// TestSpuriousEDEADLK verifies that the spurious EDEADLK reported in +// https://golang.org/issue/32817 no longer occurs. +func TestSpuriousEDEADLK(t *testing.T) { + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (Spurious EDEADLK occurs here.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + + dirVar := t.Name() + "DIR" + + if dir := os.Getenv(dirVar); dir != "" { + // Q.3 locks file B. + b, err := lockedfile.Edit(filepath.Join(dir, "B")) + if err != nil { + t.Fatal(err) + } + defer b.Close() + + if err := os.WriteFile(filepath.Join(dir, "locked"), []byte("ok"), 0666); err != nil { + t.Fatal(err) + } + + // Q.3 blocks on file A. + a, err := lockedfile.Edit(filepath.Join(dir, "A")) + // Q.3 unblocks and locks file A. + if err != nil { + t.Fatal(err) + } + defer a.Close() + + // Q.3 unlocks files A and B. + return + } + + dir, remove := mustTempDir(t) + defer remove() + + // P.1 locks file A. + a, err := lockedfile.Edit(filepath.Join(dir, "A")) + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(os.Args[0], "-test.run="+t.Name()) + cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir)) + + qDone := make(chan struct{}) + waitQ := mustBlock(t, "Edit A and B in subprocess", func() { + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("%v:\n%s", err, out) + } + close(qDone) + }) + + // Wait until process Q has either failed or locked file B. + // Otherwise, P.2 might not block on file B as intended. +locked: + for { + if _, err := os.Stat(filepath.Join(dir, "locked")); !os.IsNotExist(err) { + break locked + } + select { + case <-qDone: + break locked + case <-time.After(1 * time.Millisecond): + } + } + + waitP2 := mustBlock(t, "Edit B", func() { + // P.2 blocks on file B. (Spurious EDEADLK occurs here.) + b, err := lockedfile.Edit(filepath.Join(dir, "B")) + // P.2 unblocks and locks file B. + if err != nil { + t.Error(err) + return + } + // P.2 unlocks file B. + b.Close() + }) + + // P.1 unlocks file A. + a.Close() + + waitQ(t) + waitP2(t) +} diff --git a/internal/lockedfile/mutex.go b/internal/lockedfile/mutex.go new file mode 100644 index 00000000000..180a36c6201 --- /dev/null +++ b/internal/lockedfile/mutex.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lockedfile + +import ( + "fmt" + "os" + "sync" +) + +// A Mutex provides mutual exclusion within and across processes by locking a +// well-known file. Such a file generally guards some other part of the +// filesystem: for example, a Mutex file in a directory might guard access to +// the entire tree rooted in that directory. +// +// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex +// can fail to lock (e.g. if there is a permission error in the filesystem). +// +// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but +// must not be copied after first use. The Path field must be set before first +// use and must not be change thereafter. +type Mutex struct { + Path string // The path to the well-known lock file. Must be non-empty. + mu sync.Mutex // A redundant mutex. The race detector doesn't know about file locking, so in tests we may need to lock something that it understands. +} + +// MutexAt returns a new Mutex with Path set to the given non-empty path. +func MutexAt(path string) *Mutex { + if path == "" { + panic("lockedfile.MutexAt: path must be non-empty") + } + return &Mutex{Path: path} +} + +func (mu *Mutex) String() string { + return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path) +} + +// Lock attempts to lock the Mutex. +// +// If successful, Lock returns a non-nil unlock function: it is provided as a +// return-value instead of a separate method to remind the caller to check the +// accompanying error. (See https://golang.org/issue/20803.) +func (mu *Mutex) Lock() (unlock func(), err error) { + if mu.Path == "" { + panic("lockedfile.Mutex: missing Path during Lock") + } + + // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the + // file at mu.Path is write-only, the call to OpenFile will fail with a + // permission error. That's actually what we want: if we add an RLock method + // in the future, it should call OpenFile with O_RDONLY and will require the + // files must be readable, so we should not let the caller make any + // assumptions about Mutex working with write-only files. + f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + mu.mu.Lock() + + return func() { + mu.mu.Unlock() + f.Close() + }, nil +} diff --git a/internal/lockedfile/transform_test.go b/internal/lockedfile/transform_test.go new file mode 100644 index 00000000000..cebbf4101cb --- /dev/null +++ b/internal/lockedfile/transform_test.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// js does not support inter-process file locking. +// +//go:build !js +// +build !js + +package lockedfile_test + +import ( + "bytes" + "encoding/binary" + "math/rand" + "path/filepath" + "testing" + "time" + + "golang.org/x/tools/internal/lockedfile" +) + +func isPowerOf2(x int) bool { + return x > 0 && x&(x-1) == 0 +} + +func roundDownToPowerOf2(x int) int { + if x <= 0 { + panic("nonpositive x") + } + bit := 1 + for x != bit { + x = x &^ bit + bit <<= 1 + } + return x +} + +func TestTransform(t *testing.T) { + dir, remove := mustTempDir(t) + defer remove() + path := filepath.Join(dir, "blob.bin") + + const maxChunkWords = 8 << 10 + buf := make([]byte, 2*maxChunkWords*8) + for i := uint64(0); i < 2*maxChunkWords; i++ { + binary.LittleEndian.PutUint64(buf[i*8:], i) + } + if err := lockedfile.Write(path, bytes.NewReader(buf[:8]), 0666); err != nil { + t.Fatal(err) + } + + var attempts int64 = 128 + if !testing.Short() { + attempts *= 16 + } + const parallel = 32 + + var sem = make(chan bool, parallel) + + for n := attempts; n > 0; n-- { + sem <- true + go func() { + defer func() { <-sem }() + + time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) + chunkWords := roundDownToPowerOf2(rand.Intn(maxChunkWords) + 1) + offset := rand.Intn(chunkWords) + + err := lockedfile.Transform(path, func(data []byte) (chunk []byte, err error) { + chunk = buf[offset*8 : (offset+chunkWords)*8] + + if len(data)&^7 != len(data) { + t.Errorf("read %d bytes, but each write is an integer multiple of 8 bytes", len(data)) + return chunk, nil + } + + words := len(data) / 8 + if !isPowerOf2(words) { + t.Errorf("read %d 8-byte words, but each write is a power-of-2 number of words", words) + return chunk, nil + } + + u := binary.LittleEndian.Uint64(data) + for i := 1; i < words; i++ { + next := binary.LittleEndian.Uint64(data[i*8:]) + if next != u+1 { + t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i) + return chunk, nil + } + u = next + } + + return chunk, nil + }) + + if err != nil { + t.Errorf("unexpected error from Transform: %v", err) + } + }() + } + + for n := parallel; n > 0; n-- { + sem <- true + } +} diff --git a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go b/internal/lsp/analysis/fillstruct/testdata/src/a/a.go deleted file mode 100644 index 68560092105..00000000000 --- a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fillstruct - -import ( - data "b" - "go/ast" - "go/token" - "unsafe" -) - -type emptyStruct struct{} - -var _ = emptyStruct{} - -type basicStruct struct { - foo int -} - -var _ = basicStruct{} // want "" - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{} // want "" - -var _ = twoArgStruct{ // want "" - bar: "bar", -} - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{} // want "" - -var _ = data.B{} // want "" - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{} // want "" - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{} // want "" - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{} // want "" - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{} // want "" - -type Foo struct { - A int -} - -type Bar struct { - X *Foo - Y *Foo -} - -var _ = Bar{} // want "" - -type importedStruct struct { - m map[*ast.CompositeLit]ast.Field - s []ast.BadExpr - a [3]token.Token - c chan ast.EmptyStmt - fn func(ast_decl ast.DeclStmt) ast.Ellipsis - st ast.CompositeLit -} - -var _ = importedStruct{} // want "" - -type pointerBuiltinStruct struct { - b *bool - s *string - i *int -} - -var _ = pointerBuiltinStruct{} // want "" - -var _ = []ast.BasicLit{ - {}, // want "" -} - -var _ = []ast.BasicLit{{}, // want "" -} - -type unsafeStruct struct { - foo unsafe.Pointer -} - -var _ = unsafeStruct{} // want "" diff --git a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go deleted file mode 100644 index 90290613d87..00000000000 --- a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fillstruct - -type emptyStruct[A any] struct{} - -var _ = emptyStruct[int]{} - -type basicStruct[T any] struct { - foo T -} - -var _ = basicStruct[int]{} - -type fooType[T any] T - -type twoArgStruct[F, B any] struct { - foo fooType[F] - bar fooType[B] -} - -var _ = twoArgStruct[string, int]{} - -var _ = twoArgStruct[int, string]{ - bar: "bar", -} - -type nestedStruct struct { - bar string - basic basicStruct[int] -} - -var _ = nestedStruct{} - -func _[T any]() { - type S struct{ t T } - x := S{} - _ = x -} diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues.go b/internal/lsp/analysis/noresultvalues/noresultvalues.go deleted file mode 100644 index b9f21f3135e..00000000000 --- a/internal/lsp/analysis/noresultvalues/noresultvalues.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package noresultvalues defines an Analyzer that applies suggested fixes -// to errors of the type "no result values expected". -package noresultvalues - -import ( - "bytes" - "go/ast" - "go/format" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" -) - -const Doc = `suggested fixes for unexpected return values - -This checker provides suggested fixes for type errors of the -type "no result values expected" or "too many return values". -For example: - func z() { return nil } -will turn into - func z() { return } -` - -var Analyzer = &analysis.Analyzer{ - Name: string(analysisinternal.NoResultValues), - Doc: Doc, - Requires: []*analysis.Analyzer{inspect.Analyzer}, - Run: run, - RunDespiteErrors: true, -} - -func run(pass *analysis.Pass) (interface{}, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - errors := analysisinternal.GetTypeErrors(pass) - - nodeFilter := []ast.Node{(*ast.ReturnStmt)(nil)} - inspect.Preorder(nodeFilter, func(n ast.Node) { - retStmt, _ := n.(*ast.ReturnStmt) - - var file *ast.File - for _, f := range pass.Files { - if f.Pos() <= retStmt.Pos() && retStmt.Pos() < f.End() { - file = f - break - } - } - if file == nil { - return - } - - for _, err := range errors { - if !FixesError(err.Msg) { - continue - } - if retStmt.Pos() >= err.Pos || err.Pos >= retStmt.End() { - continue - } - var buf bytes.Buffer - if err := format.Node(&buf, pass.Fset, file); err != nil { - continue - } - pass.Report(analysis.Diagnostic{ - Pos: err.Pos, - End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos), - Message: err.Msg, - SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Delete return values", - TextEdits: []analysis.TextEdit{{ - Pos: retStmt.Pos(), - End: retStmt.End(), - NewText: []byte("return"), - }}, - }}, - }) - } - }) - return nil, nil -} - -func FixesError(msg string) bool { - return msg == "no result values expected" || - strings.HasPrefix(msg, "too many return values") && strings.Contains(msg, "want ()") -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go deleted file mode 100644 index 81c732001af..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func x() int { - var z int - z = y // want "undeclared name: y" - - if z == m { // want "undeclared name: m" - z = 1 - } - - if z == 1 { - z = 1 - } else if z == n+1 { // want "undeclared name: n" - z = 1 - } - - switch z { - case 10: - z = 1 - case a: // want "undeclared name: a" - z = 1 - } - return z -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go deleted file mode 100644 index ecf00ecfc20..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func channels(s string) { - undefinedChannels(c()) // want "undeclared name: undefinedChannels" -} - -func c() (<-chan string, chan string) { - return make(<-chan string), make(chan string) -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go deleted file mode 100644 index ab7b2ba5c18..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func consecutiveParams() { - var s string - undefinedConsecutiveParams(s, s) // want "undeclared name: undefinedConsecutiveParams" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go deleted file mode 100644 index 341a9d2a453..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func errorParam() { - var err error - undefinedErrorParam(err) // want "undeclared name: undefinedErrorParam" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go deleted file mode 100644 index ab82463d00e..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -type T struct{} - -func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) // want "undeclared name: undefinedLiterals" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go deleted file mode 100644 index 9a543821ee6..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -import "time" - -func operation() { - undefinedOperation(10 * time.Second) // want "undeclared name: undefinedOperation" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go deleted file mode 100644 index 9ed09a27f24..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func selector() { - m := map[int]bool{} - undefinedSelector(m[1]) // want "undeclared name: undefinedSelector" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go deleted file mode 100644 index d741c68f68d..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func slice() { - undefinedSlice([]int{1, 2}) // want "undeclared name: undefinedSlice" -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go deleted file mode 100644 index 3148e8f4d4c..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func tuple() { - undefinedTuple(b()) // want "undeclared name: undefinedTuple" -} - -func b() (string, error) { - return "", nil -} diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go deleted file mode 100644 index 98f77a43cd1..00000000000 --- a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package undeclared - -func uniqueArguments() { - var s string - var i int - undefinedUniqueArguments(s, i, s) // want "undeclared name: undefinedUniqueArguments" -} diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go deleted file mode 100644 index e882fb46f07..00000000000 --- a/internal/lsp/cache/analysis.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "fmt" - "go/ast" - "go/types" - "reflect" - "sort" - "sync" - - "golang.org/x/sync/errgroup" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" -) - -func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) { - var roots []*actionHandle - for _, a := range analyzers { - if !a.IsEnabled(s.view) { - continue - } - ah, err := s.actionHandle(ctx, PackageID(id), a.Analyzer) - if err != nil { - return nil, err - } - roots = append(roots, ah) - } - - // Check if the context has been canceled before running the analyses. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - var results []*source.Diagnostic - for _, ah := range roots { - diagnostics, _, err := ah.analyze(ctx, s) - if err != nil { - // Keep going if a single analyzer failed. - event.Error(ctx, fmt.Sprintf("analyzer %q failed", ah.analyzer.Name), err) - continue - } - results = append(results, diagnostics...) - } - return results, nil -} - -type actionHandleKey string - -// An action represents one unit of analysis work: the application of -// one analysis to one package. Actions form a DAG, both within a -// package (as different analyzers are applied, either in sequence or -// parallel), and across packages (as dependencies are analyzed). -type actionHandle struct { - handle *memoize.Handle - - analyzer *analysis.Analyzer - pkg *pkg -} - -type actionData struct { - diagnostics []*source.Diagnostic - result interface{} - objectFacts map[objectFactKey]analysis.Fact - packageFacts map[packageFactKey]analysis.Fact - err error -} - -type objectFactKey struct { - obj types.Object - typ reflect.Type -} - -type packageFactKey struct { - pkg *types.Package - typ reflect.Type -} - -func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) { - ph, err := s.buildPackageHandle(ctx, id, source.ParseFull) - if err != nil { - return nil, err - } - act := s.getActionHandle(id, ph.mode, a) - if act != nil { - return act, nil - } - if len(ph.key) == 0 { - return nil, fmt.Errorf("actionHandle: no key for package %s", id) - } - pkg, err := ph.check(ctx, s) - if err != nil { - return nil, err - } - act = &actionHandle{ - analyzer: a, - pkg: pkg, - } - var deps []*actionHandle - // Add a dependency on each required analyzers. - for _, req := range a.Requires { - reqActionHandle, err := s.actionHandle(ctx, id, req) - if err != nil { - return nil, err - } - deps = append(deps, reqActionHandle) - } - - // TODO(golang/go#35089): Re-enable this when we doesn't use ParseExported - // mode for dependencies. In the meantime, disable analysis for dependencies, - // since we don't get anything useful out of it. - if false { - // An analysis that consumes/produces facts - // must run on the package's dependencies too. - if len(a.FactTypes) > 0 { - importIDs := make([]string, 0, len(ph.m.Deps)) - for _, importID := range ph.m.Deps { - importIDs = append(importIDs, string(importID)) - } - sort.Strings(importIDs) // for determinism - for _, importID := range importIDs { - depActionHandle, err := s.actionHandle(ctx, PackageID(importID), a) - if err != nil { - return nil, err - } - deps = append(deps, depActionHandle) - } - } - } - - h := s.generation.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - // Analyze dependencies first. - results, err := execAll(ctx, snapshot, deps) - if err != nil { - return &actionData{ - err: err, - } - } - return runAnalysis(ctx, snapshot, a, pkg, results) - }, nil) - act.handle = h - - act = s.addActionHandle(act) - return act, nil -} - -func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) { - d, err := act.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return nil, nil, err - } - data, ok := d.(*actionData) - if !ok { - return nil, nil, fmt.Errorf("unexpected type for %s:%s", act.pkg.ID(), act.analyzer.Name) - } - if data == nil { - return nil, nil, fmt.Errorf("unexpected nil analysis for %s:%s", act.pkg.ID(), act.analyzer.Name) - } - return data.diagnostics, data.result, data.err -} - -func buildActionKey(a *analysis.Analyzer, ph *packageHandle) actionHandleKey { - return actionHandleKey(hashContents([]byte(fmt.Sprintf("%p %s", a, string(ph.key))))) -} - -func (act *actionHandle) String() string { - return fmt.Sprintf("%s@%s", act.analyzer, act.pkg.PkgPath()) -} - -func execAll(ctx context.Context, snapshot *snapshot, actions []*actionHandle) (map[*actionHandle]*actionData, error) { - var mu sync.Mutex - results := make(map[*actionHandle]*actionData) - - g, ctx := errgroup.WithContext(ctx) - for _, act := range actions { - act := act - g.Go(func() error { - v, err := act.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return err - } - data, ok := v.(*actionData) - if !ok { - return fmt.Errorf("unexpected type for %s: %T", act, v) - } - - mu.Lock() - defer mu.Unlock() - results[act] = data - - return nil - }) - } - return results, g.Wait() -} - -func runAnalysis(ctx context.Context, snapshot *snapshot, analyzer *analysis.Analyzer, pkg *pkg, deps map[*actionHandle]*actionData) (data *actionData) { - data = &actionData{ - objectFacts: make(map[objectFactKey]analysis.Fact), - packageFacts: make(map[packageFactKey]analysis.Fact), - } - defer func() { - if r := recover(); r != nil { - data.err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pkg.PkgPath(), r) - } - }() - - // Plumb the output values of the dependencies - // into the inputs of this action. Also facts. - inputs := make(map[*analysis.Analyzer]interface{}) - - for depHandle, depData := range deps { - if depHandle.pkg == pkg { - // Same package, different analysis (horizontal edge): - // in-memory outputs of prerequisite analyzers - // become inputs to this analysis pass. - inputs[depHandle.analyzer] = depData.result - } else if depHandle.analyzer == analyzer { // (always true) - // Same analysis, different package (vertical edge): - // serialized facts produced by prerequisite analysis - // become available to this analysis pass. - for key, fact := range depData.objectFacts { - // Filter out facts related to objects - // that are irrelevant downstream - // (equivalently: not in the compiler export data). - if !exportedFrom(key.obj, depHandle.pkg.types) { - continue - } - data.objectFacts[key] = fact - } - for key, fact := range depData.packageFacts { - // TODO: filter out facts that belong to - // packages not mentioned in the export data - // to prevent side channels. - - data.packageFacts[key] = fact - } - } - } - - var syntax []*ast.File - for _, cgf := range pkg.compiledGoFiles { - syntax = append(syntax, cgf.File) - } - - var diagnostics []*analysis.Diagnostic - - // Run the analysis. - pass := &analysis.Pass{ - Analyzer: analyzer, - Fset: snapshot.FileSet(), - Files: syntax, - Pkg: pkg.GetTypes(), - TypesInfo: pkg.GetTypesInfo(), - TypesSizes: pkg.GetTypesSizes(), - ResultOf: inputs, - Report: func(d analysis.Diagnostic) { - // Prefix the diagnostic category with the analyzer's name. - if d.Category == "" { - d.Category = analyzer.Name - } else { - d.Category = analyzer.Name + "." + d.Category - } - diagnostics = append(diagnostics, &d) - }, - ImportObjectFact: func(obj types.Object, ptr analysis.Fact) bool { - if obj == nil { - panic("nil object") - } - key := objectFactKey{obj, factType(ptr)} - - if v, ok := data.objectFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false - }, - ExportObjectFact: func(obj types.Object, fact analysis.Fact) { - if obj.Pkg() != pkg.types { - panic(fmt.Sprintf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", - analyzer, pkg.ID(), obj, fact)) - } - key := objectFactKey{obj, factType(fact)} - data.objectFacts[key] = fact // clobber any existing entry - }, - ImportPackageFact: func(pkg *types.Package, ptr analysis.Fact) bool { - if pkg == nil { - panic("nil package") - } - key := packageFactKey{pkg, factType(ptr)} - if v, ok := data.packageFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false - }, - ExportPackageFact: func(fact analysis.Fact) { - key := packageFactKey{pkg.types, factType(fact)} - data.packageFacts[key] = fact // clobber any existing entry - }, - AllObjectFacts: func() []analysis.ObjectFact { - facts := make([]analysis.ObjectFact, 0, len(data.objectFacts)) - for k := range data.objectFacts { - facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: data.objectFacts[k]}) - } - return facts - }, - AllPackageFacts: func() []analysis.PackageFact { - facts := make([]analysis.PackageFact, 0, len(data.packageFacts)) - for k := range data.packageFacts { - facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: data.packageFacts[k]}) - } - return facts - }, - } - analysisinternal.SetTypeErrors(pass, pkg.typeErrors) - - if pkg.IsIllTyped() { - data.err = fmt.Errorf("analysis skipped due to errors in package") - return data - } - data.result, data.err = pass.Analyzer.Run(pass) - if data.err != nil { - return data - } - - if got, want := reflect.TypeOf(data.result), pass.Analyzer.ResultType; got != want { - data.err = fmt.Errorf( - "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", - pass.Pkg.Path(), pass.Analyzer, got, want) - return data - } - - // disallow calls after Run - pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) { - panic(fmt.Sprintf("%s:%s: Pass.ExportObjectFact(%s, %T) called after Run", analyzer.Name, pkg.PkgPath(), obj, fact)) - } - pass.ExportPackageFact = func(fact analysis.Fact) { - panic(fmt.Sprintf("%s:%s: Pass.ExportPackageFact(%T) called after Run", analyzer.Name, pkg.PkgPath(), fact)) - } - - for _, diag := range diagnostics { - srcDiags, err := analysisDiagnosticDiagnostics(snapshot, pkg, analyzer, diag) - if err != nil { - event.Error(ctx, "unable to compute analysis error position", err, tag.Category.Of(diag.Category), tag.Package.Of(pkg.ID())) - continue - } - if ctx.Err() != nil { - data.err = ctx.Err() - return data - } - data.diagnostics = append(data.diagnostics, srcDiags...) - } - return data -} - -// exportedFrom reports whether obj may be visible to a package that imports pkg. -// This includes not just the exported members of pkg, but also unexported -// constants, types, fields, and methods, perhaps belonging to other packages, -// that find there way into the API. -// This is an overapproximation of the more accurate approach used by -// gc export data, which walks the type graph, but it's much simpler. -// -// TODO(adonovan): do more accurate filtering by walking the type graph. -func exportedFrom(obj types.Object, pkg *types.Package) bool { - switch obj := obj.(type) { - case *types.Func: - return obj.Exported() && obj.Pkg() == pkg || - obj.Type().(*types.Signature).Recv() != nil - case *types.Var: - return obj.Exported() && obj.Pkg() == pkg || - obj.IsField() - case *types.TypeName, *types.Const: - return true - } - return false // Nil, Builtin, Label, or PkgName -} - -func factType(fact analysis.Fact) reflect.Type { - t := reflect.TypeOf(fact) - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("invalid Fact type: got %T, want pointer", fact)) - } - return t -} - -func (s *snapshot) DiagnosePackage(ctx context.Context, spkg source.Package) (map[span.URI][]*source.Diagnostic, error) { - pkg := spkg.(*pkg) - // Apply type error analyzers. They augment type error diagnostics with their own fixes. - var analyzers []*source.Analyzer - for _, a := range s.View().Options().TypeErrorAnalyzers { - analyzers = append(analyzers, a) - } - var errorAnalyzerDiag []*source.Diagnostic - if pkg.HasTypeErrors() { - var err error - errorAnalyzerDiag, err = s.Analyze(ctx, pkg.ID(), analyzers) - if err != nil { - // Keep going: analysis failures should not block diagnostics. - event.Error(ctx, "type error analysis failed", err, tag.Package.Of(pkg.ID())) - } - } - diags := map[span.URI][]*source.Diagnostic{} - for _, diag := range pkg.diagnostics { - for _, eaDiag := range errorAnalyzerDiag { - if eaDiag.URI == diag.URI && eaDiag.Range == diag.Range && eaDiag.Message == diag.Message { - // Type error analyzers just add fixes and tags. Make a copy, - // since we don't own either, and overwrite. - // The analyzer itself can't do this merge because - // analysis.Diagnostic doesn't have all the fields, and Analyze - // can't because it doesn't have the type error, notably its code. - clone := *diag - clone.SuggestedFixes = eaDiag.SuggestedFixes - clone.Tags = eaDiag.Tags - clone.Analyzer = eaDiag.Analyzer - diag = &clone - } - } - diags[diag.URI] = append(diags[diag.URI], diag) - } - return diags, nil -} diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go deleted file mode 100644 index ac670b573e5..00000000000 --- a/internal/lsp/cache/cache.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "crypto/sha256" - "fmt" - "go/ast" - "go/token" - "go/types" - "html/template" - "io/ioutil" - "os" - "reflect" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" -) - -func New(options func(*source.Options)) *Cache { - index := atomic.AddInt64(&cacheIndex, 1) - c := &Cache{ - id: strconv.FormatInt(index, 10), - fset: token.NewFileSet(), - options: options, - fileContent: map[span.URI]*fileHandle{}, - } - return c -} - -type Cache struct { - id string - fset *token.FileSet - options func(*source.Options) - - store memoize.Store - - fileMu sync.Mutex - fileContent map[span.URI]*fileHandle -} - -type fileHandle struct { - modTime time.Time - uri span.URI - bytes []byte - hash string - err error - - // size is the file length as reported by Stat, for the purpose of - // invalidation. Probably we could just use len(bytes), but this is done - // defensively in case the definition of file size in the file system - // differs. - size int64 -} - -func (h *fileHandle) Saved() bool { - return true -} - -func (c *Cache) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { - return c.getFile(ctx, uri) -} - -func (c *Cache) getFile(ctx context.Context, uri span.URI) (*fileHandle, error) { - fi, statErr := os.Stat(uri.Filename()) - if statErr != nil { - return &fileHandle{ - err: statErr, - uri: uri, - }, nil - } - - c.fileMu.Lock() - fh, ok := c.fileContent[uri] - c.fileMu.Unlock() - - // Check mtime and file size to infer whether the file has changed. This is - // an imperfect heuristic. Notably on some real systems (such as WSL) the - // filesystem clock resolution can be large -- 1/64s was observed. Therefore - // it's quite possible for multiple file modifications to occur within a - // single logical 'tick'. This can leave the cache in an incorrect state, but - // unfortunately we can't afford to pay the price of reading the actual file - // content here. Or to be more precise, reading would be a risky change and - // we don't know if we can afford it. - // - // We check file size in an attempt to reduce the probability of false cache - // hits. - if ok && fh.modTime.Equal(fi.ModTime()) && fh.size == fi.Size() { - return fh, nil - } - - fh, err := readFile(ctx, uri, fi) - if err != nil { - return nil, err - } - c.fileMu.Lock() - c.fileContent[uri] = fh - c.fileMu.Unlock() - return fh, nil -} - -// ioLimit limits the number of parallel file reads per process. -var ioLimit = make(chan struct{}, 128) - -func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, error) { - select { - case ioLimit <- struct{}{}: - case <-ctx.Done(): - return nil, ctx.Err() - } - defer func() { <-ioLimit }() - - ctx, done := event.Start(ctx, "cache.readFile", tag.File.Of(uri.Filename())) - _ = ctx - defer done() - - data, err := ioutil.ReadFile(uri.Filename()) - if err != nil { - return &fileHandle{ - modTime: fi.ModTime(), - size: fi.Size(), - err: err, - }, nil - } - return &fileHandle{ - modTime: fi.ModTime(), - size: fi.Size(), - uri: uri, - bytes: data, - hash: hashContents(data), - }, nil -} - -func (c *Cache) NewSession(ctx context.Context) *Session { - index := atomic.AddInt64(&sessionIndex, 1) - options := source.DefaultOptions().Clone() - if c.options != nil { - c.options(options) - } - s := &Session{ - cache: c, - id: strconv.FormatInt(index, 10), - options: options, - overlays: make(map[span.URI]*overlay), - gocmdRunner: &gocommand.Runner{}, - } - event.Log(ctx, "New session", KeyCreateSession.Of(s)) - return s -} - -func (c *Cache) FileSet() *token.FileSet { - return c.fset -} - -func (h *fileHandle) URI() span.URI { - return h.uri -} - -func (h *fileHandle) Hash() string { - return h.hash -} - -func (h *fileHandle) FileIdentity() source.FileIdentity { - return source.FileIdentity{ - URI: h.uri, - Hash: h.hash, - } -} - -func (h *fileHandle) Read() ([]byte, error) { - return h.bytes, h.err -} - -func hashContents(contents []byte) string { - return fmt.Sprintf("%x", sha256.Sum256(contents)) -} - -var cacheIndex, sessionIndex, viewIndex int64 - -func (c *Cache) ID() string { return c.id } -func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() } - -type packageStat struct { - id PackageID - mode source.ParseMode - file int64 - ast int64 - types int64 - typesInfo int64 - total int64 -} - -func (c *Cache) PackageStats(withNames bool) template.HTML { - var packageStats []packageStat - c.store.DebugOnlyIterate(func(k, v interface{}) { - switch k.(type) { - case packageHandleKey: - v := v.(*packageData) - if v.pkg == nil { - break - } - var typsCost, typInfoCost int64 - if v.pkg.types != nil { - typsCost = typesCost(v.pkg.types.Scope()) - } - if v.pkg.typesInfo != nil { - typInfoCost = typesInfoCost(v.pkg.typesInfo) - } - stat := packageStat{ - id: v.pkg.m.ID, - mode: v.pkg.mode, - types: typsCost, - typesInfo: typInfoCost, - } - for _, f := range v.pkg.compiledGoFiles { - stat.file += int64(len(f.Src)) - stat.ast += astCost(f.File) - } - stat.total = stat.file + stat.ast + stat.types + stat.typesInfo - packageStats = append(packageStats, stat) - } - }) - var totalCost int64 - for _, stat := range packageStats { - totalCost += stat.total - } - sort.Slice(packageStats, func(i, j int) bool { - return packageStats[i].total > packageStats[j].total - }) - html := "
    \n" - human := func(n int64) string { - return fmt.Sprintf("%.2f", float64(n)/(1024*1024)) - } - var printedCost int64 - for _, stat := range packageStats { - name := stat.id - if !withNames { - name = "-" - } - html += fmt.Sprintf("\n", name, stat.mode, - human(stat.total), human(stat.file), human(stat.ast), human(stat.types), human(stat.typesInfo)) - printedCost += stat.total - if float64(printedCost) > float64(totalCost)*.9 { - break - } - } - html += "
    Nametotal = file + ast + types + types info
    %v (%v)%v = %v + %v + %v + %v
    \n" - return template.HTML(html) -} - -func astCost(f *ast.File) int64 { - if f == nil { - return 0 - } - var count int64 - ast.Inspect(f, func(_ ast.Node) bool { - count += 32 // nodes are pretty small. - return true - }) - return count -} - -func typesCost(scope *types.Scope) int64 { - cost := 64 + int64(scope.Len())*128 // types.object looks pretty big - for i := 0; i < scope.NumChildren(); i++ { - cost += typesCost(scope.Child(i)) - } - return cost -} - -func typesInfoCost(info *types.Info) int64 { - // Most of these refer to existing objects, with the exception of InitOrder, Selections, and Types. - cost := 24*len(info.Defs) + - 32*len(info.Implicits) + - 256*len(info.InitOrder) + // these are big, but there aren't many of them. - 32*len(info.Scopes) + - 128*len(info.Selections) + // wild guess - 128*len(info.Types) + // wild guess - 32*len(info.Uses) - return int64(cost) -} diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go deleted file mode 100644 index b8a3655a9d4..00000000000 --- a/internal/lsp/cache/check.go +++ /dev/null @@ -1,862 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "bytes" - "context" - "errors" - "fmt" - "go/ast" - "go/types" - "path" - "path/filepath" - "regexp" - "sort" - "strings" - "sync" - - "golang.org/x/mod/module" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typeparams" - "golang.org/x/tools/internal/typesinternal" -) - -type packageHandleKey string - -type packageHandle struct { - handle *memoize.Handle - - goFiles, compiledGoFiles []*parseGoHandle - - // mode is the mode the files were parsed in. - mode source.ParseMode - - // m is the metadata associated with the package. - m *KnownMetadata - - // key is the hashed key for the package. - key packageHandleKey -} - -func (ph *packageHandle) packageKey() packageKey { - return packageKey{ - id: ph.m.ID, - mode: ph.mode, - } -} - -func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) { - for _, pgh := range ph.goFiles { - f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader) - if err != nil { - continue - } - seen := map[string]struct{}{} - for _, impSpec := range f.File.Imports { - imp := strings.Trim(impSpec.Path.Value, `"`) - if _, ok := seen[imp]; !ok { - seen[imp] = struct{}{} - result = append(result, imp) - } - } - } - - sort.Strings(result) - return result -} - -// packageData contains the data produced by type-checking a package. -type packageData struct { - pkg *pkg - err error -} - -// buildPackageHandle returns a packageHandle for a given package and mode. -// It assumes that the given ID already has metadata available, so it does not -// attempt to reload missing or invalid metadata. The caller must reload -// metadata if needed. -func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) { - if ph := s.getPackage(id, mode); ph != nil { - return ph, nil - } - - // Build the packageHandle for this ID and its dependencies. - ph, deps, err := s.buildKey(ctx, id, mode) - if err != nil { - return nil, err - } - - // Do not close over the packageHandle or the snapshot in the Bind function. - // This creates a cycle, which causes the finalizers to never run on the handles. - // The possible cycles are: - // - // packageHandle.h.function -> packageHandle - // packageHandle.h.function -> snapshot -> packageHandle - // - - m := ph.m - key := ph.key - - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - - // Begin loading the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, dep := range deps { - wg.Add(1) - go func(dep *packageHandle) { - dep.check(ctx, snapshot) - wg.Done() - }(dep) - } - - data := &packageData{} - data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps) - // Make sure that the workers above have finished before we return, - // especially in case of cancellation. - wg.Wait() - - return data - }, nil) - ph.handle = h - - // Cache the handle in the snapshot. If a package handle has already - // been cached, addPackage will return the cached value. This is fine, - // since the original package handle above will have no references and be - // garbage collected. - ph = s.addPackageHandle(ph) - - return ph, nil -} - -// buildKey computes the key for a given packageHandle. -func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) { - m := s.getMetadata(id) - if m == nil { - return nil, nil, fmt.Errorf("no metadata for %s", id) - } - goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode) - if err != nil { - return nil, nil, err - } - compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode) - if err != nil { - return nil, nil, err - } - ph := &packageHandle{ - m: m, - goFiles: goFiles, - compiledGoFiles: compiledGoFiles, - mode: mode, - } - // Make sure all of the depList are sorted. - depList := append([]PackageID{}, m.Deps...) - sort.Slice(depList, func(i, j int) bool { - return depList[i] < depList[j] - }) - - deps := make(map[PackagePath]*packageHandle) - - // Begin computing the key by getting the depKeys for all dependencies. - var depKeys []packageHandleKey - for _, depID := range depList { - depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID)) - // Don't use invalid metadata for dependencies if the top-level - // metadata is valid. We only load top-level packages, so if the - // top-level is valid, all of its dependencies should be as well. - if err != nil || m.Valid && !depHandle.m.Valid { - if err != nil { - event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id)) - } else { - event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id)) - } - - if ctx.Err() != nil { - return nil, nil, ctx.Err() - } - // One bad dependency should not prevent us from checking the entire package. - // Add a special key to mark a bad dependency. - depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID))) - continue - } - deps[depHandle.m.PkgPath] = depHandle - depKeys = append(depKeys, depHandle.key) - } - experimentalKey := s.View().Options().ExperimentalPackageCacheKey - ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) - return ph, deps, nil -} - -func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { - s.mu.Lock() - defer s.mu.Unlock() - _, ws := s.workspacePackages[id] - if !ws { - return source.ParseExported - } - if s.view.Options().MemoryMode == source.ModeNormal { - return source.ParseFull - } - if s.isActiveLocked(id, nil) { - return source.ParseFull - } - return source.ParseExported -} - -func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey { - b := bytes.NewBuffer(nil) - b.WriteString(string(id)) - if m.Module != nil { - b.WriteString(m.Module.GoVersion) // go version affects type check errors. - } - if !experimentalKey { - // cfg was used to produce the other hashed inputs (package ID, parsed Go - // files, and deps). It should not otherwise affect the inputs to the type - // checker, so this experiment omits it. This should increase cache hits on - // the daemon as cfg contains the environment and working directory. - b.WriteString(hashConfig(m.Config)) - } - b.WriteByte(byte(mode)) - for _, dep := range deps { - b.WriteString(string(dep)) - } - for _, cgf := range pghs { - b.WriteString(cgf.file.FileIdentity().String()) - } - return packageHandleKey(hashContents(b.Bytes())) -} - -// hashEnv returns a hash of the snapshot's configuration. -func hashEnv(s *snapshot) string { - s.view.optionsMu.Lock() - env := s.view.options.EnvSlice() - s.view.optionsMu.Unlock() - - b := &bytes.Buffer{} - for _, e := range env { - b.WriteString(e) - } - return hashContents(b.Bytes()) -} - -// hashConfig returns the hash for the *packages.Config. -func hashConfig(config *packages.Config) string { - b := bytes.NewBuffer(nil) - - // Dir, Mode, Env, BuildFlags are the parts of the config that can change. - b.WriteString(config.Dir) - b.WriteString(string(rune(config.Mode))) - - for _, e := range config.Env { - b.WriteString(e) - } - for _, f := range config.BuildFlags { - b.WriteString(f) - } - return hashContents(b.Bytes()) -} - -func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) { - return ph.check(ctx, s.(*snapshot)) -} - -func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { - v, err := ph.handle.Get(ctx, s.generation, s) - if err != nil { - return nil, err - } - data := v.(*packageData) - return data.pkg, data.err -} - -func (ph *packageHandle) CompiledGoFiles() []span.URI { - return ph.m.CompiledGoFiles -} - -func (ph *packageHandle) ID() string { - return string(ph.m.ID) -} - -func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) { - v := ph.handle.Cached(g) - if v == nil { - return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath) - } - data := v.(*packageData) - return data.pkg, data.err -} - -func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) { - pghs := make([]*parseGoHandle, 0, len(files)) - for _, uri := range files { - fh, err := s.GetFile(ctx, uri) - if err != nil { - return nil, err - } - pghs = append(pghs, s.parseGoHandle(ctx, fh, mode)) - } - return pghs, nil -} - -func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) { - var filter *unexportedFilter - if mode == source.ParseExported { - filter = &unexportedFilter{uses: map[string]bool{}} - } - pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter) - if err != nil { - return nil, err - } - - if mode == source.ParseExported { - // The AST filtering is a little buggy and may remove things it - // shouldn't. If we only got undeclared name errors, try one more - // time keeping those names. - missing, unexpected := filter.ProcessErrors(pkg.typeErrors) - if len(unexpected) == 0 && len(missing) != 0 { - event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID))) - pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter) - if err != nil { - return nil, err - } - missing, unexpected = filter.ProcessErrors(pkg.typeErrors) - } - if len(unexpected) != 0 || len(missing) != 0 { - event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID))) - pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil) - if err != nil { - return nil, err - } - } - } - // If this is a replaced module in the workspace, the version is - // meaningless, and we don't want clients to access it. - if m.Module != nil { - version := m.Module.Version - if source.IsWorkspaceModuleVersion(version) { - version = "" - } - pkg.version = &module.Version{ - Path: m.Module.Path, - Version: version, - } - } - - // We don't care about a package's errors unless we have parsed it in full. - if mode != source.ParseFull { - return pkg, nil - } - - for _, e := range m.Errors { - diags, err := goPackagesErrorDiagnostics(snapshot, pkg, e) - if err != nil { - event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(pkg.ID())) - continue - } - pkg.diagnostics = append(pkg.diagnostics, diags...) - } - - // Our heuristic for whether to show type checking errors is: - // + If any file was 'fixed', don't show type checking errors as we - // can't guarantee that they reference accurate locations in the source. - // + If there is a parse error _in the current file_, suppress type - // errors in that file. - // + Otherwise, show type errors even in the presence of parse errors in - // other package files. go/types attempts to suppress follow-on errors - // due to bad syntax, so on balance type checking errors still provide - // a decent signal/noise ratio as long as the file in question parses. - - // Track URIs with parse errors so that we can suppress type errors for these - // files. - unparseable := map[span.URI]bool{} - for _, e := range pkg.parseErrors { - diags, err := parseErrorDiagnostics(snapshot, pkg, e) - if err != nil { - event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(pkg.ID())) - continue - } - for _, diag := range diags { - unparseable[diag.URI] = true - pkg.diagnostics = append(pkg.diagnostics, diag) - } - } - - if pkg.hasFixedFiles { - return pkg, nil - } - - unexpanded := pkg.typeErrors - pkg.typeErrors = nil - for _, e := range expandErrors(unexpanded, snapshot.View().Options().RelatedInformationSupported) { - diags, err := typeErrorDiagnostics(snapshot, pkg, e) - if err != nil { - event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(pkg.ID())) - continue - } - pkg.typeErrors = append(pkg.typeErrors, e.primary) - for _, diag := range diags { - // If the file didn't parse cleanly, it is highly likely that type - // checking errors will be confusing or redundant. But otherwise, type - // checking usually provides a good enough signal to include. - if !unparseable[diag.URI] { - pkg.diagnostics = append(pkg.diagnostics, diag) - } - } - } - - depsErrors, err := snapshot.depsErrors(ctx, pkg) - if err != nil { - return nil, err - } - pkg.diagnostics = append(pkg.diagnostics, depsErrors...) - - return pkg, nil -} - -var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) - -func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) { - ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID))) - defer done() - - pkg := &pkg{ - m: m, - mode: mode, - imports: make(map[PackagePath]*pkg), - types: types.NewPackage(string(m.PkgPath), string(m.Name)), - typesInfo: &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - Scopes: make(map[ast.Node]*types.Scope), - }, - typesSizes: m.TypesSizes, - } - typeparams.InitInstanceInfo(pkg.typesInfo) - - for _, gf := range pkg.m.GoFiles { - // In the presence of line directives, we may need to report errors in - // non-compiled Go files, so we need to register them on the package. - // However, we only need to really parse them in ParseFull mode, when - // the user might actually be looking at the file. - fh, err := snapshot.GetFile(ctx, gf) - if err != nil { - return nil, err - } - goMode := source.ParseFull - if mode != source.ParseFull { - goMode = source.ParseHeader - } - pgf, err := snapshot.ParseGo(ctx, fh, goMode) - if err != nil { - return nil, err - } - pkg.goFiles = append(pkg.goFiles, pgf) - } - - if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil { - return nil, err - } - - // Use the default type information for the unsafe package. - if m.PkgPath == "unsafe" { - // Don't type check Unsafe: it's unnecessary, and doing so exposes a data - // race to Unsafe.completed. - pkg.types = types.Unsafe - return pkg, nil - } - - if len(m.CompiledGoFiles) == 0 { - // No files most likely means go/packages failed. Try to attach error - // messages to the file as much as possible. - var found bool - for _, e := range m.Errors { - srcDiags, err := goPackagesErrorDiagnostics(snapshot, pkg, e) - if err != nil { - continue - } - found = true - pkg.diagnostics = append(pkg.diagnostics, srcDiags...) - } - if found { - return pkg, nil - } - return nil, fmt.Errorf("no parsed files for package %s, expected: %v, errors: %v", pkg.m.PkgPath, pkg.compiledGoFiles, m.Errors) - } - - cfg := &types.Config{ - Error: func(e error) { - pkg.typeErrors = append(pkg.typeErrors, e.(types.Error)) - }, - Importer: importerFunc(func(pkgPath string) (*types.Package, error) { - // If the context was cancelled, we should abort. - if ctx.Err() != nil { - return nil, ctx.Err() - } - dep := resolveImportPath(pkgPath, pkg, deps) - if dep == nil { - return nil, snapshot.missingPkgError(ctx, pkgPath) - } - if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) { - return nil, fmt.Errorf("invalid use of internal package %s", pkgPath) - } - depPkg, err := dep.check(ctx, snapshot) - if err != nil { - return nil, err - } - pkg.imports[depPkg.m.PkgPath] = depPkg - return depPkg.types, nil - }), - } - if pkg.m.Module != nil && pkg.m.Module.GoVersion != "" { - goVersion := "go" + pkg.m.Module.GoVersion - // types.NewChecker panics if GoVersion is invalid. An unparsable mod - // file should probably stop us before we get here, but double check - // just in case. - if goVersionRx.MatchString(goVersion) { - typesinternal.SetGoVersion(cfg, goVersion) - } - } - - if mode != source.ParseFull { - cfg.DisableUnusedImportCheck = true - cfg.IgnoreFuncBodies = true - } - - // We want to type check cgo code if go/types supports it. - // We passed typecheckCgo to go/packages when we Loaded. - typesinternal.SetUsesCgo(cfg) - - check := types.NewChecker(cfg, snapshot.FileSet(), pkg.types, pkg.typesInfo) - - var files []*ast.File - for _, cgf := range pkg.compiledGoFiles { - files = append(files, cgf.File) - } - - // Type checking errors are handled via the config, so ignore them here. - _ = check.Files(files) - - // If the context was cancelled, we may have returned a ton of transient - // errors to the type checker. Swallow them. - if ctx.Err() != nil { - return nil, ctx.Err() - } - return pkg, nil -} - -func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error { - for _, cgf := range pkg.m.CompiledGoFiles { - fh, err := snapshot.GetFile(ctx, cgf) - if err != nil { - return err - } - - var pgf *source.ParsedGoFile - var fixed bool - // Only parse Full through the cache -- we need to own Exported ASTs - // to prune them. - if mode == source.ParseFull { - pgf, fixed, err = snapshot.parseGo(ctx, fh, mode) - } else { - d := parseGo(ctx, snapshot.FileSet(), fh, mode) - pgf, fixed, err = d.parsed, d.fixed, d.err - } - if err != nil { - return err - } - pkg.compiledGoFiles = append(pkg.compiledGoFiles, pgf) - if pgf.ParseErr != nil { - pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr) - } - // If we have fixed parse errors in any of the files, we should hide type - // errors, as they may be completely nonsensical. - pkg.hasFixedFiles = pkg.hasFixedFiles || fixed - } - if mode != source.ParseExported { - return nil - } - if astFilter != nil { - var files []*ast.File - for _, cgf := range pkg.compiledGoFiles { - files = append(files, cgf.File) - } - astFilter.Filter(files) - } else { - for _, cgf := range pkg.compiledGoFiles { - trimAST(cgf.File) - } - } - return nil -} - -func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnostic, error) { - // Select packages that can't be found, and were imported in non-workspace packages. - // Workspace packages already show their own errors. - var relevantErrors []*packagesinternal.PackageError - for _, depsError := range pkg.m.depsErrors { - // Up to Go 1.15, the missing package was included in the stack, which - // was presumably a bug. We want the next one up. - directImporterIdx := len(depsError.ImportStack) - 1 - if s.view.goversion < 15 { - directImporterIdx = len(depsError.ImportStack) - 2 - } - if directImporterIdx < 0 { - continue - } - - directImporter := depsError.ImportStack[directImporterIdx] - if s.isWorkspacePackage(PackageID(directImporter)) { - continue - } - relevantErrors = append(relevantErrors, depsError) - } - - // Don't build the import index for nothing. - if len(relevantErrors) == 0 { - return nil, nil - } - - // Build an index of all imports in the package. - type fileImport struct { - cgf *source.ParsedGoFile - imp *ast.ImportSpec - } - allImports := map[string][]fileImport{} - for _, cgf := range pkg.compiledGoFiles { - for _, group := range astutil.Imports(s.FileSet(), cgf.File) { - for _, imp := range group { - if imp.Path == nil { - continue - } - path := strings.Trim(imp.Path.Value, `"`) - allImports[path] = append(allImports[path], fileImport{cgf, imp}) - } - } - } - - // Apply a diagnostic to any import involved in the error, stopping once - // we reach the workspace. - var errors []*source.Diagnostic - for _, depErr := range relevantErrors { - for i := len(depErr.ImportStack) - 1; i >= 0; i-- { - item := depErr.ImportStack[i] - if s.isWorkspacePackage(PackageID(item)) { - break - } - - for _, imp := range allImports[item] { - rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range() - if err != nil { - return nil, err - } - fixes, err := goGetQuickFixes(s, imp.cgf.URI, item) - if err != nil { - return nil, err - } - errors = append(errors, &source.Diagnostic{ - URI: imp.cgf.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.TypeError, - Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), - SuggestedFixes: fixes, - }) - } - } - } - - if len(pkg.compiledGoFiles) == 0 { - return errors, nil - } - mod := s.GoModForFile(pkg.compiledGoFiles[0].URI) - if mod == "" { - return errors, nil - } - fh, err := s.GetFile(ctx, mod) - if err != nil { - return nil, err - } - pm, err := s.ParseMod(ctx, fh) - if err != nil { - return nil, err - } - - // Add a diagnostic to the module that contained the lowest-level import of - // the missing package. - for _, depErr := range relevantErrors { - for i := len(depErr.ImportStack) - 1; i >= 0; i-- { - item := depErr.ImportStack[i] - m := s.getMetadata(PackageID(item)) - if m == nil || m.Module == nil { - continue - } - modVer := module.Version{Path: m.Module.Path, Version: m.Module.Version} - reference := findModuleReference(pm.File, modVer) - if reference == nil { - continue - } - rng, err := rangeFromPositions(pm.Mapper, reference.Start, reference.End) - if err != nil { - return nil, err - } - fixes, err := goGetQuickFixes(s, pm.URI, item) - if err != nil { - return nil, err - } - errors = append(errors, &source.Diagnostic{ - URI: pm.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.TypeError, - Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), - SuggestedFixes: fixes, - }) - break - } - } - return errors, nil -} - -// missingPkgError returns an error message for a missing package that varies -// based on the user's workspace mode. -func (s *snapshot) missingPkgError(ctx context.Context, pkgPath string) error { - var b strings.Builder - if s.workspaceMode()&moduleMode == 0 { - gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath)) - - b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg)) - - for _, gopath := range filepath.SplitList(s.view.gopath) { - gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath)) - b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg)) - } - } else { - b.WriteString(fmt.Sprintf("no required module provides package %q", pkgPath)) - if err := s.getInitializationError(ctx); err != nil { - b.WriteString(fmt.Sprintf("(workspace configuration error: %s)", err.MainError)) - } - } - return errors.New(b.String()) -} - -type extendedError struct { - primary types.Error - secondaries []types.Error -} - -func (e extendedError) Error() string { - return e.primary.Error() -} - -// expandErrors duplicates "secondary" errors by mapping them to their main -// error. Some errors returned by the type checker are followed by secondary -// errors which give more information about the error. These are errors in -// their own right, and they are marked by starting with \t. For instance, when -// there is a multiply-defined function, the secondary error points back to the -// definition first noticed. -// -// This function associates the secondary error with its primary error, which can -// then be used as RelatedInformation when the error becomes a diagnostic. -// -// If supportsRelatedInformation is false, the secondary is instead embedded as -// additional context in the primary error. -func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extendedError { - var result []extendedError - for i := 0; i < len(errs); { - original := extendedError{ - primary: errs[i], - } - for i++; i < len(errs); i++ { - spl := errs[i] - if len(spl.Msg) == 0 || spl.Msg[0] != '\t' { - break - } - spl.Msg = spl.Msg[1:] - original.secondaries = append(original.secondaries, spl) - } - - // Clone the error to all its related locations -- VS Code, at least, - // doesn't do it for us. - result = append(result, original) - for i, mainSecondary := range original.secondaries { - // Create the new primary error, with a tweaked message, in the - // secondary's location. We need to start from the secondary to - // capture its unexported location fields. - relocatedSecondary := mainSecondary - if supportsRelatedInformation { - relocatedSecondary.Msg = fmt.Sprintf("%v (see details)", original.primary.Msg) - } else { - relocatedSecondary.Msg = fmt.Sprintf("%v (this error: %v)", original.primary.Msg, mainSecondary.Msg) - } - relocatedSecondary.Soft = original.primary.Soft - - // Copy over the secondary errors, noting the location of the - // current error we're cloning. - clonedError := extendedError{primary: relocatedSecondary, secondaries: []types.Error{original.primary}} - for j, secondary := range original.secondaries { - if i == j { - secondary.Msg += " (this error)" - } - clonedError.secondaries = append(clonedError.secondaries, secondary) - } - result = append(result, clonedError) - } - - } - return result -} - -// resolveImportPath resolves an import path in pkg to a package from deps. -// It should produce the same results as resolveImportPath: -// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990. -func resolveImportPath(importPath string, pkg *pkg, deps map[PackagePath]*packageHandle) *packageHandle { - if dep := deps[PackagePath(importPath)]; dep != nil { - return dep - } - // We may be in GOPATH mode, in which case we need to check vendor dirs. - searchDir := path.Dir(pkg.PkgPath()) - for { - vdir := PackagePath(path.Join(searchDir, "vendor", importPath)) - if vdep := deps[vdir]; vdep != nil { - return vdep - } - - // Search until Dir doesn't take us anywhere new, e.g. "." or "/". - next := path.Dir(searchDir) - if searchDir == next { - break - } - searchDir = next - } - - // Vendor didn't work. Let's try minimal module compatibility mode. - // In MMC, the packagePath is the canonical (.../vN/...) path, which - // is hard to calculate. But the go command has already resolved the ID - // to the non-versioned path, and we can take advantage of that. - for _, dep := range deps { - if dep.ID() == importPath { - return dep - } - } - return nil -} - -// An importFunc is an implementation of the single-method -// types.Importer interface based on a function value. -type importerFunc func(path string) (*types.Package, error) - -func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go deleted file mode 100644 index 342f2bea5d7..00000000000 --- a/internal/lsp/cache/errors.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "fmt" - "go/scanner" - "go/token" - "go/types" - "regexp" - "strconv" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typesinternal" -) - -func goPackagesErrorDiagnostics(snapshot *snapshot, pkg *pkg, e packages.Error) ([]*source.Diagnostic, error) { - if msg, spn, ok := parseGoListImportCycleError(snapshot, e, pkg); ok { - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - return []*source.Diagnostic{{ - URI: spn.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: msg, - }}, nil - } - - var spn span.Span - if e.Pos == "" { - spn = parseGoListError(e.Msg, pkg.m.Config.Dir) - // We may not have been able to parse a valid span. Apply the errors to all files. - if _, err := spanToRange(pkg, spn); err != nil { - var diags []*source.Diagnostic - for _, cgf := range pkg.compiledGoFiles { - diags = append(diags, &source.Diagnostic{ - URI: cgf.URI, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: e.Msg, - }) - } - return diags, nil - } - } else { - spn = span.ParseInDir(e.Pos, pkg.m.Config.Dir) - } - - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - return []*source.Diagnostic{{ - URI: spn.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: e.Msg, - }}, nil -} - -func parseErrorDiagnostics(snapshot *snapshot, pkg *pkg, errList scanner.ErrorList) ([]*source.Diagnostic, error) { - // The first parser error is likely the root cause of the problem. - if errList.Len() <= 0 { - return nil, fmt.Errorf("no errors in %v", errList) - } - e := errList[0] - pgf, err := pkg.File(span.URIFromPath(e.Pos.Filename)) - if err != nil { - return nil, err - } - pos := pgf.Tok.Pos(e.Pos.Offset) - spn, err := span.NewRange(snapshot.FileSet(), pos, pos).Span() - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - return []*source.Diagnostic{{ - URI: spn.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ParseError, - Message: e.Msg, - }}, nil -} - -var importErrorRe = regexp.MustCompile(`could not import ([^\s]+)`) -var unsupportedFeatureRe = regexp.MustCompile(`.*require.* go(\d+\.\d+) or later`) - -func typeErrorDiagnostics(snapshot *snapshot, pkg *pkg, e extendedError) ([]*source.Diagnostic, error) { - code, spn, err := typeErrorData(snapshot.FileSet(), pkg, e.primary) - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - diag := &source.Diagnostic{ - URI: spn.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.TypeError, - Message: e.primary.Msg, - } - if code != 0 { - diag.Code = code.String() - diag.CodeHref = typesCodeHref(snapshot, code) - } - - for _, secondary := range e.secondaries { - _, secondarySpan, err := typeErrorData(snapshot.FileSet(), pkg, secondary) - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, secondarySpan) - if err != nil { - return nil, err - } - diag.Related = append(diag.Related, source.RelatedInformation{ - URI: secondarySpan.URI(), - Range: rng, - Message: secondary.Msg, - }) - } - - if match := importErrorRe.FindStringSubmatch(e.primary.Msg); match != nil { - diag.SuggestedFixes, err = goGetQuickFixes(snapshot, spn.URI(), match[1]) - if err != nil { - return nil, err - } - } - if match := unsupportedFeatureRe.FindStringSubmatch(e.primary.Msg); match != nil { - diag.SuggestedFixes, err = editGoDirectiveQuickFix(snapshot, spn.URI(), match[1]) - if err != nil { - return nil, err - } - } - return []*source.Diagnostic{diag}, nil -} - -func goGetQuickFixes(snapshot *snapshot, uri span.URI, pkg string) ([]source.SuggestedFix, error) { - // Go get only supports module mode for now. - if snapshot.workspaceMode()&moduleMode == 0 { - return nil, nil - } - title := fmt.Sprintf("go get package %v", pkg) - cmd, err := command.NewGoGetPackageCommand(title, command.GoGetPackageArgs{ - URI: protocol.URIFromSpanURI(uri), - AddRequire: true, - Pkg: pkg, - }) - if err != nil { - return nil, err - } - return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil -} - -func editGoDirectiveQuickFix(snapshot *snapshot, uri span.URI, version string) ([]source.SuggestedFix, error) { - // Go mod edit only supports module mode. - if snapshot.workspaceMode()&moduleMode == 0 { - return nil, nil - } - title := fmt.Sprintf("go mod edit -go=%s", version) - cmd, err := command.NewEditGoDirectiveCommand(title, command.EditGoDirectiveArgs{ - URI: protocol.URIFromSpanURI(uri), - Version: version, - }) - if err != nil { - return nil, err - } - return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil -} - -func analysisDiagnosticDiagnostics(snapshot *snapshot, pkg *pkg, a *analysis.Analyzer, e *analysis.Diagnostic) ([]*source.Diagnostic, error) { - var srcAnalyzer *source.Analyzer - // Find the analyzer that generated this diagnostic. - for _, sa := range source.EnabledAnalyzers(snapshot) { - if a == sa.Analyzer { - srcAnalyzer = sa - break - } - } - - spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span() - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - kinds := srcAnalyzer.ActionKind - if len(srcAnalyzer.ActionKind) == 0 { - kinds = append(kinds, protocol.QuickFix) - } - fixes, err := suggestedAnalysisFixes(snapshot, pkg, e, kinds) - if err != nil { - return nil, err - } - if srcAnalyzer.Fix != "" { - cmd, err := command.NewApplyFixCommand(e.Message, command.ApplyFixArgs{ - URI: protocol.URIFromSpanURI(spn.URI()), - Range: rng, - Fix: srcAnalyzer.Fix, - }) - if err != nil { - return nil, err - } - for _, kind := range kinds { - fixes = append(fixes, source.SuggestedFixFromCommand(cmd, kind)) - } - } - related, err := relatedInformation(pkg, snapshot.FileSet(), e) - if err != nil { - return nil, err - } - - severity := srcAnalyzer.Severity - if severity == 0 { - severity = protocol.SeverityWarning - } - diag := &source.Diagnostic{ - URI: spn.URI(), - Range: rng, - Severity: severity, - Source: source.AnalyzerErrorKind(e.Category), - Message: e.Message, - Related: related, - SuggestedFixes: fixes, - Analyzer: srcAnalyzer, - } - // If the fixes only delete code, assume that the diagnostic is reporting dead code. - if onlyDeletions(fixes) { - diag.Tags = []protocol.DiagnosticTag{protocol.Unnecessary} - } - return []*source.Diagnostic{diag}, nil -} - -// onlyDeletions returns true if all of the suggested fixes are deletions. -func onlyDeletions(fixes []source.SuggestedFix) bool { - for _, fix := range fixes { - if fix.Command != nil { - return false - } - for _, edits := range fix.Edits { - for _, edit := range edits { - if edit.NewText != "" { - return false - } - if protocol.ComparePosition(edit.Range.Start, edit.Range.End) == 0 { - return false - } - } - } - } - return len(fixes) > 0 -} - -func typesCodeHref(snapshot *snapshot, code typesinternal.ErrorCode) string { - target := snapshot.View().Options().LinkTarget - return source.BuildLink(target, "golang.org/x/tools/internal/typesinternal", code.String()) -} - -func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnostic, kinds []protocol.CodeActionKind) ([]source.SuggestedFix, error) { - var fixes []source.SuggestedFix - for _, fix := range diag.SuggestedFixes { - edits := make(map[span.URI][]protocol.TextEdit) - for _, e := range fix.TextEdits { - spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span() - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - edits[spn.URI()] = append(edits[spn.URI()], protocol.TextEdit{ - Range: rng, - NewText: string(e.NewText), - }) - } - for _, kind := range kinds { - fixes = append(fixes, source.SuggestedFix{ - Title: fix.Message, - Edits: edits, - ActionKind: kind, - }) - } - - } - return fixes, nil -} - -func relatedInformation(pkg *pkg, fset *token.FileSet, diag *analysis.Diagnostic) ([]source.RelatedInformation, error) { - var out []source.RelatedInformation - for _, related := range diag.Related { - spn, err := span.NewRange(fset, related.Pos, related.End).Span() - if err != nil { - return nil, err - } - rng, err := spanToRange(pkg, spn) - if err != nil { - return nil, err - } - out = append(out, source.RelatedInformation{ - URI: spn.URI(), - Range: rng, - Message: related.Message, - }) - } - return out, nil -} - -func typeErrorData(fset *token.FileSet, pkg *pkg, terr types.Error) (typesinternal.ErrorCode, span.Span, error) { - ecode, start, end, ok := typesinternal.ReadGo116ErrorData(terr) - if !ok { - start, end = terr.Pos, terr.Pos - ecode = 0 - } - posn := fset.Position(start) - pgf, err := pkg.File(span.URIFromPath(posn.Filename)) - if err != nil { - return 0, span.Span{}, err - } - if !end.IsValid() || end == start { - end = analysisinternal.TypeErrorEndPos(fset, pgf.Src, start) - } - spn, err := parsedGoSpan(pgf, start, end) - if err != nil { - return 0, span.Span{}, err - } - return ecode, spn, nil -} - -func parsedGoSpan(pgf *source.ParsedGoFile, start, end token.Pos) (span.Span, error) { - return span.FileSpan(pgf.Mapper.TokFile, pgf.Mapper.TokFile, start, end) -} - -// spanToRange converts a span.Span to a protocol.Range, -// assuming that the span belongs to the package whose diagnostics are being computed. -func spanToRange(pkg *pkg, spn span.Span) (protocol.Range, error) { - pgf, err := pkg.File(spn.URI()) - if err != nil { - return protocol.Range{}, err - } - return pgf.Mapper.Range(spn) -} - -// parseGoListError attempts to parse a standard `go list` error message -// by stripping off the trailing error message. -// -// It works only on errors whose message is prefixed by colon, -// followed by a space (": "). For example: -// -// attributes.go:13:1: expected 'package', found 'type' -func parseGoListError(input, wd string) span.Span { - input = strings.TrimSpace(input) - msgIndex := strings.Index(input, ": ") - if msgIndex < 0 { - return span.Parse(input) - } - return span.ParseInDir(input[:msgIndex], wd) -} - -func parseGoListImportCycleError(snapshot *snapshot, e packages.Error, pkg *pkg) (string, span.Span, bool) { - re := regexp.MustCompile(`(.*): import stack: \[(.+)\]`) - matches := re.FindStringSubmatch(strings.TrimSpace(e.Msg)) - if len(matches) < 3 { - return e.Msg, span.Span{}, false - } - msg := matches[1] - importList := strings.Split(matches[2], " ") - // Since the error is relative to the current package. The import that is causing - // the import cycle error is the second one in the list. - if len(importList) < 2 { - return msg, span.Span{}, false - } - // Imports have quotation marks around them. - circImp := strconv.Quote(importList[1]) - for _, cgf := range pkg.compiledGoFiles { - // Search file imports for the import that is causing the import cycle. - for _, imp := range cgf.File.Imports { - if imp.Path.Value == circImp { - spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span() - if err != nil { - return msg, span.Span{}, false - } - return msg, spn, true - } - } - } - return msg, span.Span{}, false -} diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go deleted file mode 100644 index f0f8724d375..00000000000 --- a/internal/lsp/cache/graph.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import "golang.org/x/tools/internal/span" - -// A metadataGraph holds information about a transtively closed import graph of -// Go packages, as obtained from go/packages. -// -// Currently a new metadata graph is created for each snapshot. -// TODO(rfindley): make this type immutable, so that it may be shared across -// snapshots. -type metadataGraph struct { - // ids maps file URIs to package IDs. A single file may belong to multiple - // packages due to tests packages. - ids map[span.URI][]PackageID - - // metadata maps package IDs to their associated metadata. - metadata map[PackageID]*KnownMetadata - - // importedBy maps package IDs to the list of packages that import them. - importedBy map[PackageID][]PackageID -} - -func NewMetadataGraph() *metadataGraph { - return &metadataGraph{ - ids: make(map[span.URI][]PackageID), - metadata: make(map[PackageID]*KnownMetadata), - importedBy: make(map[PackageID][]PackageID), - } -} diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go deleted file mode 100644 index 01a2468ef34..00000000000 --- a/internal/lsp/cache/imports.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "fmt" - "reflect" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/event/keys" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/source" -) - -type importsState struct { - ctx context.Context - - mu sync.Mutex - processEnv *imports.ProcessEnv - cleanupProcessEnv func() - cacheRefreshDuration time.Duration - cacheRefreshTimer *time.Timer - cachedModFileHash string - cachedBuildFlags []string -} - -func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error { - s.mu.Lock() - defer s.mu.Unlock() - - // Find the hash of the active mod file, if any. Using the unsaved content - // is slightly wasteful, since we'll drop caches a little too often, but - // the mod file shouldn't be changing while people are autocompleting. - var modFileHash string - // If we are using 'legacyWorkspace' mode, we can just read the modfile from - // the snapshot. Otherwise, we need to get the synthetic workspace mod file. - // - // TODO(rfindley): we should be able to just always use the synthetic - // workspace module, or alternatively use the go.work file. - if snapshot.workspace.moduleSource == legacyWorkspace { - for m := range snapshot.workspace.getActiveModFiles() { // range to access the only element - modFH, err := snapshot.GetFile(ctx, m) - if err != nil { - return err - } - modFileHash = modFH.FileIdentity().Hash - } - } else { - modFile, err := snapshot.workspace.modFile(ctx, snapshot) - if err != nil { - return err - } - modBytes, err := modFile.Format() - if err != nil { - return err - } - modFileHash = hashContents(modBytes) - } - - // view.goEnv is immutable -- changes make a new view. Options can change. - // We can't compare build flags directly because we may add -modfile. - snapshot.view.optionsMu.Lock() - localPrefix := snapshot.view.options.Local - currentBuildFlags := snapshot.view.options.BuildFlags - changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) || - snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) || - modFileHash != s.cachedModFileHash - snapshot.view.optionsMu.Unlock() - - // If anything relevant to imports has changed, clear caches and - // update the processEnv. Clearing caches blocks on any background - // scans. - if changed { - // As a special case, skip cleanup the first time -- we haven't fully - // initialized the environment yet and calling GetResolver will do - // unnecessary work and potentially mess up the go.mod file. - if s.cleanupProcessEnv != nil { - if resolver, err := s.processEnv.GetResolver(); err == nil { - if modResolver, ok := resolver.(*imports.ModuleResolver); ok { - modResolver.ClearForNewMod() - } - } - s.cleanupProcessEnv() - } - s.cachedModFileHash = modFileHash - s.cachedBuildFlags = currentBuildFlags - var err error - s.cleanupProcessEnv, err = s.populateProcessEnv(ctx, snapshot) - if err != nil { - return err - } - } - - // Run the user function. - opts := &imports.Options{ - // Defaults. - AllErrors: true, - Comments: true, - Fragment: true, - FormatOnly: false, - TabIndent: true, - TabWidth: 8, - Env: s.processEnv, - LocalPrefix: localPrefix, - } - - if err := fn(opts); err != nil { - return err - } - - if s.cacheRefreshTimer == nil { - // Don't refresh more than twice per minute. - delay := 30 * time.Second - // Don't spend more than a couple percent of the time refreshing. - if adaptive := 50 * s.cacheRefreshDuration; adaptive > delay { - delay = adaptive - } - s.cacheRefreshTimer = time.AfterFunc(delay, s.refreshProcessEnv) - } - - return nil -} - -// populateProcessEnv sets the dynamically configurable fields for the view's -// process environment. Assumes that the caller is holding the s.view.importsMu. -func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapshot) (cleanup func(), err error) { - pe := s.processEnv - - if snapshot.view.Options().VerboseOutput { - pe.Logf = func(format string, args ...interface{}) { - event.Log(ctx, fmt.Sprintf(format, args...)) - } - } else { - pe.Logf = nil - } - - // Take an extra reference to the snapshot so that its workspace directory - // (if any) isn't destroyed while we're using it. - release := snapshot.generation.Acquire() - _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ - WorkingDir: snapshot.view.rootURI.Filename(), - }) - if err != nil { - return nil, err - } - pe.WorkingDir = inv.WorkingDir - pe.BuildFlags = inv.BuildFlags - pe.WorkingDir = inv.WorkingDir - pe.ModFile = inv.ModFile - pe.ModFlag = inv.ModFlag - pe.Env = map[string]string{} - for _, kv := range inv.Env { - split := strings.SplitN(kv, "=", 2) - if len(split) != 2 { - continue - } - pe.Env[split[0]] = split[1] - } - - return func() { - cleanupInvocation() - release() - }, nil -} - -func (s *importsState) refreshProcessEnv() { - start := time.Now() - - s.mu.Lock() - env := s.processEnv - if resolver, err := s.processEnv.GetResolver(); err == nil { - resolver.ClearForNewScan() - } - s.mu.Unlock() - - event.Log(s.ctx, "background imports cache refresh starting") - if err := imports.PrimeCache(context.Background(), env); err == nil { - event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start))) - } else { - event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err)) - } - s.mu.Lock() - s.cacheRefreshDuration = time.Since(start) - s.cacheRefreshTimer = nil - s.mu.Unlock() -} - -func (s *importsState) destroy() { - s.mu.Lock() - if s.cleanupProcessEnv != nil { - s.cleanupProcessEnv() - } - s.mu.Unlock() -} diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go deleted file mode 100644 index 96c2a0733a5..00000000000 --- a/internal/lsp/cache/load.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "bytes" - "context" - "crypto/sha256" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/span" -) - -// load calls packages.Load for the given scopes, updating package metadata, -// import graph, and mapped files with the result. -// -// The resulting error may wrap the moduleErrorMap error type, representing -// errors associated with specific modules. -func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) (err error) { - var query []string - var containsDir bool // for logging - - // Keep track of module query -> module path so that we can later correlate query - // errors with errors. - moduleQueries := make(map[string]string) - for _, scope := range scopes { - if !s.shouldLoad(scope) { - continue - } - // Unless the context was canceled, set "shouldLoad" to false for all - // of the metadata we attempted to load. - defer func() { - if errors.Is(err, context.Canceled) { - return - } - s.clearShouldLoad(scope) - }() - switch scope := scope.(type) { - case PackagePath: - if source.IsCommandLineArguments(string(scope)) { - panic("attempted to load command-line-arguments") - } - // The only time we pass package paths is when we're doing a - // partial workspace load. In those cases, the paths came back from - // go list and should already be GOPATH-vendorized when appropriate. - query = append(query, string(scope)) - case fileURI: - uri := span.URI(scope) - // Don't try to load a file that doesn't exist. - fh := s.FindFile(uri) - if fh == nil || s.View().FileKind(fh) != source.Go { - continue - } - query = append(query, fmt.Sprintf("file=%s", uri.Filename())) - case moduleLoadScope: - switch scope { - case "std", "cmd": - query = append(query, string(scope)) - default: - modQuery := fmt.Sprintf("%s/...", scope) - query = append(query, modQuery) - moduleQueries[modQuery] = string(scope) - } - case viewLoadScope: - // If we are outside of GOPATH, a module, or some other known - // build system, don't load subdirectories. - if !s.ValidBuildConfiguration() { - query = append(query, "./") - } else { - query = append(query, "./...") - } - default: - panic(fmt.Sprintf("unknown scope type %T", scope)) - } - switch scope.(type) { - case viewLoadScope, moduleLoadScope: - containsDir = true - } - } - if len(query) == 0 { - return nil - } - sort.Strings(query) // for determinism - - if s.view.Options().VerboseWorkDoneProgress { - work := s.view.session.progress.Start(ctx, "Load", fmt.Sprintf("Loading query=%s", query), nil, nil) - defer func() { - work.End(ctx, "Done.") - }() - } - - ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query)) - defer done() - - flags := source.LoadWorkspace - if allowNetwork { - flags |= source.AllowNetwork - } - _, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{ - WorkingDir: s.view.rootURI.Filename(), - }) - if err != nil { - return err - } - - // Set a last resort deadline on packages.Load since it calls the go - // command, which may hang indefinitely if it has a bug. golang/go#42132 - // and golang/go#42255 have more context. - ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) - defer cancel() - - cfg := s.config(ctx, inv) - pkgs, err := packages.Load(cfg, query...) - cleanup() - - // If the context was canceled, return early. Otherwise, we might be - // type-checking an incomplete result. Check the context directly, - // because go/packages adds extra information to the error. - if ctx.Err() != nil { - return ctx.Err() - } - if err != nil { - event.Error(ctx, "go/packages.Load", err, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) - } else { - event.Log(ctx, "go/packages.Load", tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) - } - if len(pkgs) == 0 { - if err == nil { - err = fmt.Errorf("no packages returned") - } - return fmt.Errorf("%v: %w", err, source.PackagesLoadError) - } - - moduleErrs := make(map[string][]packages.Error) // module path -> errors - for _, pkg := range pkgs { - // The Go command returns synthetic list results for module queries that - // encountered module errors. - // - // For example, given a module path a.mod, we'll query for "a.mod/..." and - // the go command will return a package named "a.mod/..." holding this - // error. Save it for later interpretation. - // - // See golang/go#50862 for more details. - if mod := moduleQueries[pkg.PkgPath]; mod != "" { // a synthetic result for the unloadable module - if len(pkg.Errors) > 0 { - moduleErrs[mod] = pkg.Errors - } - continue - } - - if !containsDir || s.view.Options().VerboseOutput { - event.Log(ctx, "go/packages.Load", - tag.Snapshot.Of(s.ID()), - tag.Package.Of(pkg.ID), - tag.Files.Of(pkg.CompiledGoFiles)) - } - // Ignore packages with no sources, since we will never be able to - // correctly invalidate that metadata. - if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 { - continue - } - // Special case for the builtin package, as it has no dependencies. - if pkg.PkgPath == "builtin" { - if len(pkg.GoFiles) != 1 { - return fmt.Errorf("only expected 1 file for builtin, got %v", len(pkg.GoFiles)) - } - s.setBuiltin(pkg.GoFiles[0]) - continue - } - // Skip test main packages. - if isTestMain(pkg, s.view.gocache) { - continue - } - // Skip filtered packages. They may be added anyway if they're - // dependencies of non-filtered packages. - if s.view.allFilesExcluded(pkg) { - continue - } - // Set the metadata for this package. - s.mu.Lock() - m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, map[PackageID]struct{}{}) - s.mu.Unlock() - if err != nil { - return err - } - if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil { - return err - } - } - // Rebuild the import graph when the metadata is updated. - s.clearAndRebuildImportGraph() - - if len(moduleErrs) > 0 { - return &moduleErrorMap{moduleErrs} - } - - return nil -} - -type moduleErrorMap struct { - errs map[string][]packages.Error // module path -> errors -} - -func (m *moduleErrorMap) Error() string { - var paths []string // sort for stability - for path, errs := range m.errs { - if len(errs) > 0 { // should always be true, but be cautious - paths = append(paths, path) - } - } - sort.Strings(paths) - - var buf bytes.Buffer - fmt.Fprintf(&buf, "%d modules have errors:\n", len(paths)) - for _, path := range paths { - fmt.Fprintf(&buf, "\t%s", m.errs[path][0].Msg) - } - - return buf.String() -} - -// workspaceLayoutErrors returns a diagnostic for every open file, as well as -// an error message if there are no open files. -func (s *snapshot) workspaceLayoutError(ctx context.Context) *source.CriticalError { - if len(s.workspace.getKnownModFiles()) == 0 { - return nil - } - if s.view.userGo111Module == off { - return nil - } - if s.workspace.moduleSource != legacyWorkspace { - return nil - } - // If the user has one module per view, there is nothing to warn about. - if s.ValidBuildConfiguration() && len(s.workspace.getKnownModFiles()) == 1 { - return nil - } - - // Apply diagnostics about the workspace configuration to relevant open - // files. - openFiles := s.openFiles() - - // If the snapshot does not have a valid build configuration, it may be - // that the user has opened a directory that contains multiple modules. - // Check for that an warn about it. - if !s.ValidBuildConfiguration() { - msg := `gopls requires a module at the root of your workspace. -You can work with multiple modules by opening each one as a workspace folder. -Improvements to this workflow will be coming soon, and you can learn more here: -https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.` - return &source.CriticalError{ - MainError: fmt.Errorf(msg), - DiagList: s.applyCriticalErrorToFiles(ctx, msg, openFiles), - } - } - - // If the user has one active go.mod file, they may still be editing files - // in nested modules. Check the module of each open file and add warnings - // that the nested module must be opened as a workspace folder. - if len(s.workspace.getActiveModFiles()) == 1 { - // Get the active root go.mod file to compare against. - var rootModURI span.URI - for uri := range s.workspace.getActiveModFiles() { - rootModURI = uri - } - nestedModules := map[string][]source.VersionedFileHandle{} - for _, fh := range openFiles { - modURI := moduleForURI(s.workspace.knownModFiles, fh.URI()) - if modURI != rootModURI { - modDir := filepath.Dir(modURI.Filename()) - nestedModules[modDir] = append(nestedModules[modDir], fh) - } - } - // Add a diagnostic to each file in a nested module to mark it as - // "orphaned". Don't show a general diagnostic in the progress bar, - // because the user may still want to edit a file in a nested module. - var srcDiags []*source.Diagnostic - for modDir, uris := range nestedModules { - msg := fmt.Sprintf(`This file is in %s, which is a nested module in the %s module. -gopls currently requires one module per workspace folder. -Please open %s as a separate workspace folder. -You can learn more here: https://github.com/golang/tools/blob/master/gopls/doc/workspace.md. -`, modDir, filepath.Dir(rootModURI.Filename()), modDir) - srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...) - } - if len(srcDiags) != 0 { - return &source.CriticalError{ - MainError: fmt.Errorf(`You are working in a nested module. -Please open it as a separate workspace folder. Learn more: -https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`), - DiagList: srcDiags, - } - } - } - return nil -} - -func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.VersionedFileHandle) []*source.Diagnostic { - var srcDiags []*source.Diagnostic - for _, fh := range files { - // Place the diagnostics on the package or module declarations. - var rng protocol.Range - switch s.view.FileKind(fh) { - case source.Go: - if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil { - pkgDecl := span.NewRange(s.FileSet(), pgf.File.Package, pgf.File.Name.End()) - if spn, err := pkgDecl.Span(); err == nil { - rng, _ = pgf.Mapper.Range(spn) - } - } - case source.Mod: - if pmf, err := s.ParseMod(ctx, fh); err == nil { - if pmf.File.Module != nil && pmf.File.Module.Syntax != nil { - rng, _ = rangeFromPositions(pmf.Mapper, pmf.File.Module.Syntax.Start, pmf.File.Module.Syntax.End) - } - } - } - srcDiags = append(srcDiags, &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: msg, - }) - } - return srcDiags -} - -type workspaceDirKey string - -type workspaceDirData struct { - dir string - err error -} - -// getWorkspaceDir gets the URI for the workspace directory associated with -// this snapshot. The workspace directory is a temp directory containing the -// go.mod file computed from all active modules. -func (s *snapshot) getWorkspaceDir(ctx context.Context) (span.URI, error) { - s.mu.Lock() - h := s.workspaceDirHandle - s.mu.Unlock() - if h != nil { - return getWorkspaceDir(ctx, h, s.generation) - } - file, err := s.workspace.modFile(ctx, s) - if err != nil { - return "", err - } - hash := sha256.New() - modContent, err := file.Format() - if err != nil { - return "", err - } - sumContent, err := s.workspace.sumFile(ctx, s) - if err != nil { - return "", err - } - hash.Write(modContent) - hash.Write(sumContent) - key := workspaceDirKey(hash.Sum(nil)) - s.mu.Lock() - h = s.generation.Bind(key, func(context.Context, memoize.Arg) interface{} { - tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod") - if err != nil { - return &workspaceDirData{err: err} - } - - for name, content := range map[string][]byte{ - "go.mod": modContent, - "go.sum": sumContent, - } { - filename := filepath.Join(tmpdir, name) - if err := ioutil.WriteFile(filename, content, 0644); err != nil { - os.RemoveAll(tmpdir) - return &workspaceDirData{err: err} - } - } - - return &workspaceDirData{dir: tmpdir} - }, func(v interface{}) { - d := v.(*workspaceDirData) - if d.dir != "" { - if err := os.RemoveAll(d.dir); err != nil { - event.Error(context.Background(), "cleaning workspace dir", err) - } - } - }) - s.workspaceDirHandle = h - s.mu.Unlock() - return getWorkspaceDir(ctx, h, s.generation) -} - -func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generation) (span.URI, error) { - v, err := h.Get(ctx, g, nil) - if err != nil { - return "", err - } - return span.URIFromPath(v.(*workspaceDirData).dir), nil -} - -// setMetadataLocked extracts metadata from pkg and records it in s. It -// recurs through pkg.Imports to ensure that metadata exists for all -// dependencies. -func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[PackageID]struct{}) (*Metadata, error) { - id := PackageID(pkg.ID) - if source.IsCommandLineArguments(pkg.ID) { - suffix := ":" + strings.Join(query, ",") - id = PackageID(string(id) + suffix) - pkgPath = PackagePath(string(pkgPath) + suffix) - } - if _, ok := seen[id]; ok { - return nil, fmt.Errorf("import cycle detected: %q", id) - } - // Recreate the metadata rather than reusing it to avoid locking. - m := &Metadata{ - ID: id, - PkgPath: pkgPath, - Name: PackageName(pkg.Name), - ForTest: PackagePath(packagesinternal.GetForTest(pkg)), - TypesSizes: pkg.TypesSizes, - Config: cfg, - Module: pkg.Module, - depsErrors: packagesinternal.GetDepsErrors(pkg), - } - - // Identify intermediate test variants for later filtering. See the - // documentation of IsIntermediateTestVariant for more information. - if m.ForTest != "" && m.ForTest != m.PkgPath && m.ForTest+"_test" != m.PkgPath { - m.IsIntermediateTestVariant = true - } - - for _, err := range pkg.Errors { - // Filter out parse errors from go list. We'll get them when we - // actually parse, and buggy overlay support may generate spurious - // errors. (See TestNewModule_Issue38207.) - if strings.Contains(err.Msg, "expected '") { - continue - } - m.Errors = append(m.Errors, err) - } - - uris := map[span.URI]struct{}{} - for _, filename := range pkg.CompiledGoFiles { - uri := span.URIFromPath(filename) - m.CompiledGoFiles = append(m.CompiledGoFiles, uri) - uris[uri] = struct{}{} - } - for _, filename := range pkg.GoFiles { - uri := span.URIFromPath(filename) - m.GoFiles = append(m.GoFiles, uri) - uris[uri] = struct{}{} - } - s.updateIDForURIsLocked(id, uris) - - // TODO(rstambler): is this still necessary? - copied := map[PackageID]struct{}{ - id: {}, - } - for k, v := range seen { - copied[k] = v - } - for importPath, importPkg := range pkg.Imports { - importPkgPath := PackagePath(importPath) - importID := PackageID(importPkg.ID) - - m.Deps = append(m.Deps, importID) - - // Don't remember any imports with significant errors. - if importPkgPath != "unsafe" && len(importPkg.CompiledGoFiles) == 0 { - if m.MissingDeps == nil { - m.MissingDeps = make(map[PackagePath]struct{}) - } - m.MissingDeps[importPkgPath] = struct{}{} - continue - } - if s.noValidMetadataForIDLocked(importID) { - if _, err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, copied); err != nil { - event.Error(ctx, "error in dependency", err) - } - } - } - - // Add the metadata to the cache. - - // If we've already set the metadata for this snapshot, reuse it. - if original, ok := s.meta.metadata[m.ID]; ok && original.Valid { - // Since we've just reloaded, clear out shouldLoad. - original.ShouldLoad = false - m = original.Metadata - } else { - s.meta.metadata[m.ID] = &KnownMetadata{ - Metadata: m, - Valid: true, - } - // Invalidate any packages we may have associated with this metadata. - for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} { - key := packageKey{mode, m.ID} - delete(s.packages, key) - } - } - - // Set the workspace packages. If any of the package's files belong to the - // view, then the package may be a workspace package. - for _, uri := range append(m.CompiledGoFiles, m.GoFiles...) { - if !s.view.contains(uri) { - continue - } - - // The package's files are in this view. It may be a workspace package. - if strings.Contains(string(uri), "/vendor/") { - // Vendored packages are not likely to be interesting to the user. - continue - } - - switch { - case m.ForTest == "": - // A normal package. - s.workspacePackages[m.ID] = pkgPath - case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath: - // The test variant of some workspace package or its x_test. - // To load it, we need to load the non-test variant with -test. - s.workspacePackages[m.ID] = m.ForTest - } - } - return m, nil -} - -func isTestMain(pkg *packages.Package, gocache string) bool { - // Test mains must have an import path that ends with ".test". - if !strings.HasSuffix(pkg.PkgPath, ".test") { - return false - } - // Test main packages are always named "main". - if pkg.Name != "main" { - return false - } - // Test mains always have exactly one GoFile that is in the build cache. - if len(pkg.GoFiles) > 1 { - return false - } - if !source.InDir(gocache, pkg.GoFiles[0]) { - return false - } - return true -} diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go deleted file mode 100644 index c2a21969d88..00000000000 --- a/internal/lsp/cache/metadata.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "go/types" - - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/span" -) - -// Declare explicit types for package paths, names, and IDs to ensure that we -// never use an ID where a path belongs, and vice versa. If we confused these, -// it would result in confusing errors because package IDs often look like -// package paths. -type ( - PackageID string - PackagePath string - PackageName string -) - -// Metadata holds package Metadata extracted from a call to packages.Load. -type Metadata struct { - ID PackageID - PkgPath PackagePath - Name PackageName - GoFiles []span.URI - CompiledGoFiles []span.URI - ForTest PackagePath - TypesSizes types.Sizes - Errors []packages.Error - Deps []PackageID - MissingDeps map[PackagePath]struct{} - Module *packages.Module - depsErrors []*packagesinternal.PackageError - - // Config is the *packages.Config associated with the loaded package. - Config *packages.Config - - // IsIntermediateTestVariant reports whether the given package is an - // intermediate test variant, e.g. - // "golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test]". - // - // Such test variants arise when an x_test package (in this case source_test) - // imports a package (in this case cache) that itself imports the the - // non-x_test package (in this case source). - // - // This is done so that the forward transitive closure of source_test has - // only one package for the "golang.org/x/tools/internal/lsp/source" import. - // The intermediate test variant exists to hold the test variant import: - // - // golang.org/x/tools/internal/lsp/source_test [golang.org/x/tools/internal/lsp/source.test] - // | "golang.org/x/tools/internal/lsp/cache" -> golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test] - // | "golang.org/x/tools/internal/lsp/source" -> golang.org/x/tools/internal/lsp/source [golang.org/x/tools/internal/lsp/source.test] - // | ... - // - // golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test] - // | "golang.org/x/tools/internal/lsp/source" -> golang.org/x/tools/internal/lsp/source [golang.org/x/tools/internal/lsp/source.test] - // | ... - // - // We filter these variants out in certain places. For example, there is - // generally no reason to run diagnostics or analysis on them. - // - // TODO(rfindley): this can probably just be a method, since it is derived - // from other fields. - IsIntermediateTestVariant bool -} - -// Name implements the source.Metadata interface. -func (m *Metadata) PackageName() string { - return string(m.Name) -} - -// PkgPath implements the source.Metadata interface. -func (m *Metadata) PackagePath() string { - return string(m.PkgPath) -} - -// ModuleInfo implements the source.Metadata interface. -func (m *Metadata) ModuleInfo() *packages.Module { - return m.Module -} - -// KnownMetadata is a wrapper around metadata that tracks its validity. -type KnownMetadata struct { - *Metadata - - // Valid is true if the given metadata is Valid. - // Invalid metadata can still be used if a metadata reload fails. - Valid bool - - // ShouldLoad is true if the given metadata should be reloaded. - ShouldLoad bool -} diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go deleted file mode 100644 index 5ac199bd96b..00000000000 --- a/internal/lsp/cache/mod.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "regexp" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/module" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" -) - -type parseModHandle struct { - handle *memoize.Handle -} - -type parseModData struct { - parsed *source.ParsedModule - - // err is any error encountered while parsing the file. - err error -} - -func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) { - v, err := mh.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return nil, err - } - data := v.(*parseModData) - return data.parsed, data.err -} - -func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*source.ParsedModule, error) { - if handle := s.getParseModHandle(modFH.URI()); handle != nil { - return handle.parse(ctx, s) - } - h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { - _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI())) - defer done() - - contents, err := modFH.Read() - if err != nil { - return &parseModData{err: err} - } - m := protocol.NewColumnMapper(modFH.URI(), contents) - file, parseErr := modfile.Parse(modFH.URI().Filename(), contents, nil) - // Attempt to convert the error to a standardized parse error. - var parseErrors []*source.Diagnostic - if parseErr != nil { - mfErrList, ok := parseErr.(modfile.ErrorList) - if !ok { - return &parseModData{err: fmt.Errorf("unexpected parse error type %v", parseErr)} - } - for _, mfErr := range mfErrList { - rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) - if err != nil { - return &parseModData{err: err} - } - parseErrors = append(parseErrors, &source.Diagnostic{ - URI: modFH.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ParseError, - Message: mfErr.Err.Error(), - }) - } - } - return &parseModData{ - parsed: &source.ParsedModule{ - URI: modFH.URI(), - Mapper: m, - File: file, - ParseErrors: parseErrors, - }, - err: parseErr, - } - }, nil) - - pmh := &parseModHandle{handle: h} - s.mu.Lock() - s.parseModHandles[modFH.URI()] = pmh - s.mu.Unlock() - - return pmh.parse(ctx, s) -} - -type parseWorkHandle struct { - handle *memoize.Handle -} - -type parseWorkData struct { - parsed *source.ParsedWorkFile - - // err is any error encountered while parsing the file. - err error -} - -func (mh *parseWorkHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedWorkFile, error) { - v, err := mh.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return nil, err - } - data := v.(*parseWorkData) - return data.parsed, data.err -} - -func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*source.ParsedWorkFile, error) { - if handle := s.getParseWorkHandle(modFH.URI()); handle != nil { - return handle.parse(ctx, s) - } - h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { - _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI())) - defer done() - - contents, err := modFH.Read() - if err != nil { - return &parseWorkData{err: err} - } - m := protocol.NewColumnMapper(modFH.URI(), contents) - file, parseErr := modfile.ParseWork(modFH.URI().Filename(), contents, nil) - // Attempt to convert the error to a standardized parse error. - var parseErrors []*source.Diagnostic - if parseErr != nil { - mfErrList, ok := parseErr.(modfile.ErrorList) - if !ok { - return &parseWorkData{err: fmt.Errorf("unexpected parse error type %v", parseErr)} - } - for _, mfErr := range mfErrList { - rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) - if err != nil { - return &parseWorkData{err: err} - } - parseErrors = append(parseErrors, &source.Diagnostic{ - URI: modFH.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ParseError, - Message: mfErr.Err.Error(), - }) - } - } - return &parseWorkData{ - parsed: &source.ParsedWorkFile{ - URI: modFH.URI(), - Mapper: m, - File: file, - ParseErrors: parseErrors, - }, - err: parseErr, - } - }, nil) - - pwh := &parseWorkHandle{handle: h} - s.mu.Lock() - s.parseWorkHandles[modFH.URI()] = pwh - s.mu.Unlock() - - return pwh.parse(ctx, s) -} - -// goSum reads the go.sum file for the go.mod file at modURI, if it exists. If -// it doesn't exist, it returns nil. -func (s *snapshot) goSum(ctx context.Context, modURI span.URI) []byte { - // Get the go.sum file, either from the snapshot or directly from the - // cache. Avoid (*snapshot).GetFile here, as we don't want to add - // nonexistent file handles to the snapshot if the file does not exist. - sumURI := span.URIFromPath(sumFilename(modURI)) - var sumFH source.FileHandle = s.FindFile(sumURI) - if sumFH == nil { - var err error - sumFH, err = s.view.session.cache.getFile(ctx, sumURI) - if err != nil { - return nil - } - } - content, err := sumFH.Read() - if err != nil { - return nil - } - return content -} - -func sumFilename(modURI span.URI) string { - return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum" -} - -// modKey is uniquely identifies cached data for `go mod why` or dependencies -// to upgrade. -type modKey struct { - sessionID, env, view string - mod source.FileIdentity - verb modAction -} - -type modAction int - -const ( - why modAction = iota - upgrade -) - -type modWhyHandle struct { - handle *memoize.Handle -} - -type modWhyData struct { - // why keeps track of the `go mod why` results for each require statement - // in the go.mod file. - why map[string]string - - err error -} - -func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) { - v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return nil, err - } - data := v.(*modWhyData) - return data.why, data.err -} - -func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) { - if s.View().FileKind(fh) != source.Mod { - return nil, fmt.Errorf("%s is not a go.mod file", fh.URI()) - } - if handle := s.getModWhyHandle(fh.URI()); handle != nil { - return handle.why(ctx, s) - } - key := modKey{ - sessionID: s.view.session.id, - env: hashEnv(s), - mod: fh.FileIdentity(), - view: s.view.rootURI.Filename(), - verb: why, - } - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI())) - defer done() - - snapshot := arg.(*snapshot) - - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - return &modWhyData{err: err} - } - // No requires to explain. - if len(pm.File.Require) == 0 { - return &modWhyData{} - } - // Run `go mod why` on all the dependencies. - inv := &gocommand.Invocation{ - Verb: "mod", - Args: []string{"why", "-m"}, - WorkingDir: filepath.Dir(fh.URI().Filename()), - } - for _, req := range pm.File.Require { - inv.Args = append(inv.Args, req.Mod.Path) - } - stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv) - if err != nil { - return &modWhyData{err: err} - } - whyList := strings.Split(stdout.String(), "\n\n") - if len(whyList) != len(pm.File.Require) { - return &modWhyData{ - err: fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)), - } - } - why := make(map[string]string, len(pm.File.Require)) - for i, req := range pm.File.Require { - why[req.Mod.Path] = whyList[i] - } - return &modWhyData{why: why} - }, nil) - - mwh := &modWhyHandle{handle: h} - s.mu.Lock() - s.modWhyHandles[fh.URI()] = mwh - s.mu.Unlock() - - return mwh.why(ctx, s) -} - -// extractGoCommandError tries to parse errors that come from the go command -// and shape them into go.mod diagnostics. -// TODO: rename this to 'load errors' -func (s *snapshot) extractGoCommandErrors(ctx context.Context, goCmdError error) []*source.Diagnostic { - if goCmdError == nil { - return nil - } - - type locatedErr struct { - spn span.Span - msg string - } - diagLocations := map[*source.ParsedModule]locatedErr{} - backupDiagLocations := map[*source.ParsedModule]locatedErr{} - - // If moduleErrs is non-nil, go command errors are scoped to specific - // modules. - var moduleErrs *moduleErrorMap - _ = errors.As(goCmdError, &moduleErrs) - - // Match the error against all the mod files in the workspace. - for _, uri := range s.ModFiles() { - fh, err := s.GetFile(ctx, uri) - if err != nil { - event.Error(ctx, "getting modfile for Go command error", err) - continue - } - pm, err := s.ParseMod(ctx, fh) - if err != nil { - // Parsing errors are reported elsewhere - return nil - } - var msgs []string // error messages to consider - if moduleErrs != nil { - if pm.File.Module != nil { - for _, mes := range moduleErrs.errs[pm.File.Module.Mod.Path] { - msgs = append(msgs, mes.Error()) - } - } - } else { - msgs = append(msgs, goCmdError.Error()) - } - for _, msg := range msgs { - if strings.Contains(goCmdError.Error(), "errors parsing go.mod") { - // The go command emits parse errors for completely invalid go.mod files. - // Those are reported by our own diagnostics and can be ignored here. - // As of writing, we are not aware of any other errors that include - // file/position information, so don't even try to find it. - continue - } - spn, found, err := s.matchErrorToModule(ctx, pm, msg) - if err != nil { - event.Error(ctx, "matching error to module", err) - continue - } - le := locatedErr{ - spn: spn, - msg: msg, - } - if found { - diagLocations[pm] = le - } else { - backupDiagLocations[pm] = le - } - } - } - - // If we didn't find any good matches, assign diagnostics to all go.mod files. - if len(diagLocations) == 0 { - diagLocations = backupDiagLocations - } - - var srcErrs []*source.Diagnostic - for pm, le := range diagLocations { - diag, err := s.goCommandDiagnostic(pm, le.spn, le.msg) - if err != nil { - event.Error(ctx, "building go command diagnostic", err) - continue - } - srcErrs = append(srcErrs, diag) - } - return srcErrs -} - -var moduleVersionInErrorRe = regexp.MustCompile(`[:\s]([+-._~0-9A-Za-z]+)@([+-._~0-9A-Za-z]+)[:\s]`) - -// matchErrorToModule matches a go command error message to a go.mod file. -// Some examples: -// -// example.com@v1.2.2: reading example.com/@v/v1.2.2.mod: no such file or directory -// go: github.com/cockroachdb/apd/v2@v2.0.72: reading github.com/cockroachdb/apd/go.mod at revision v2.0.72: unknown revision v2.0.72 -// go: example.com@v1.2.3 requires\n\trandom.org@v1.2.3: parsing go.mod:\n\tmodule declares its path as: bob.org\n\tbut was required as: random.org -// -// It returns the location of a reference to the one of the modules and true -// if one exists. If none is found it returns a fallback location and false. -func (s *snapshot) matchErrorToModule(ctx context.Context, pm *source.ParsedModule, goCmdError string) (span.Span, bool, error) { - var reference *modfile.Line - matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1) - - for i := len(matches) - 1; i >= 0; i-- { - ver := module.Version{Path: matches[i][1], Version: matches[i][2]} - // Any module versions that come from the workspace module should not - // be shown to the user. - if source.IsWorkspaceModuleVersion(ver.Version) { - continue - } - if err := module.Check(ver.Path, ver.Version); err != nil { - continue - } - reference = findModuleReference(pm.File, ver) - if reference != nil { - break - } - } - - if reference == nil { - // No match for the module path was found in the go.mod file. - // Show the error on the module declaration, if one exists, or - // just the first line of the file. - if pm.File.Module == nil { - return span.New(pm.URI, span.NewPoint(1, 1, 0), span.Point{}), false, nil - } - spn, err := spanFromPositions(pm.Mapper, pm.File.Module.Syntax.Start, pm.File.Module.Syntax.End) - return spn, false, err - } - - spn, err := spanFromPositions(pm.Mapper, reference.Start, reference.End) - return spn, true, err -} - -// goCommandDiagnostic creates a diagnostic for a given go command error. -func (s *snapshot) goCommandDiagnostic(pm *source.ParsedModule, spn span.Span, goCmdError string) (*source.Diagnostic, error) { - rng, err := pm.Mapper.Range(spn) - if err != nil { - return nil, err - } - - matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1) - var innermost *module.Version - for i := len(matches) - 1; i >= 0; i-- { - ver := module.Version{Path: matches[i][1], Version: matches[i][2]} - // Any module versions that come from the workspace module should not - // be shown to the user. - if source.IsWorkspaceModuleVersion(ver.Version) { - continue - } - if err := module.Check(ver.Path, ver.Version); err != nil { - continue - } - innermost = &ver - break - } - - switch { - case strings.Contains(goCmdError, "inconsistent vendoring"): - cmd, err := command.NewVendorCommand("Run go mod vendor", command.URIArg{URI: protocol.URIFromSpanURI(pm.URI)}) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: pm.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: `Inconsistent vendoring detected. Please re-run "go mod vendor". -See https://github.com/golang/go/issues/39164 for more detail on this issue.`, - SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, - }, nil - - case strings.Contains(goCmdError, "updates to go.sum needed"), strings.Contains(goCmdError, "missing go.sum entry"): - var args []protocol.DocumentURI - for _, uri := range s.ModFiles() { - args = append(args, protocol.URIFromSpanURI(uri)) - } - tidyCmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: args}) - if err != nil { - return nil, err - } - updateCmd, err := command.NewUpdateGoSumCommand("Update go.sum", command.URIArgs{URIs: args}) - if err != nil { - return nil, err - } - msg := "go.sum is out of sync with go.mod. Please update it by applying the quick fix." - if innermost != nil { - msg = fmt.Sprintf("go.sum is out of sync with go.mod: entry for %v is missing. Please updating it by applying the quick fix.", innermost) - } - return &source.Diagnostic{ - URI: pm.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: msg, - SuggestedFixes: []source.SuggestedFix{ - source.SuggestedFixFromCommand(tidyCmd, protocol.QuickFix), - source.SuggestedFixFromCommand(updateCmd, protocol.QuickFix), - }, - }, nil - case strings.Contains(goCmdError, "disabled by GOPROXY=off") && innermost != nil: - title := fmt.Sprintf("Download %v@%v", innermost.Path, innermost.Version) - cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{ - URI: protocol.URIFromSpanURI(pm.URI), - AddRequire: false, - GoCmdArgs: []string{fmt.Sprintf("%v@%v", innermost.Path, innermost.Version)}, - }) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: pm.URI, - Range: rng, - Severity: protocol.SeverityError, - Message: fmt.Sprintf("%v@%v has not been downloaded", innermost.Path, innermost.Version), - Source: source.ListError, - SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, - }, nil - default: - return &source.Diagnostic{ - URI: pm.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: goCmdError, - }, nil - } -} - -func findModuleReference(mf *modfile.File, ver module.Version) *modfile.Line { - for _, req := range mf.Require { - if req.Mod == ver { - return req.Syntax - } - } - for _, ex := range mf.Exclude { - if ex.Mod == ver { - return ex.Syntax - } - } - for _, rep := range mf.Replace { - if rep.New == ver || rep.Old == ver { - return rep.Syntax - } - } - return nil -} diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go deleted file mode 100644 index aa525e7413d..00000000000 --- a/internal/lsp/cache/mod_tidy.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "fmt" - "go/ast" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" -) - -type modTidyKey struct { - sessionID string - env string - gomod source.FileIdentity - imports string - unsavedOverlays string - view string -} - -type modTidyHandle struct { - handle *memoize.Handle -} - -type modTidyData struct { - tidied *source.TidiedModule - err error -} - -func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) { - v, err := mth.handle.Get(ctx, snapshot.generation, snapshot) - if err != nil { - return nil, err - } - data := v.(*modTidyData) - return data.tidied, data.err -} - -func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { - if pm.File == nil { - return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", pm.URI) - } - if handle := s.getModTidyHandle(pm.URI); handle != nil { - return handle.tidy(ctx, s) - } - fh, err := s.GetFile(ctx, pm.URI) - if err != nil { - return nil, err - } - // If the file handle is an overlay, it may not be written to disk. - // The go.mod file has to be on disk for `go mod tidy` to work. - if _, ok := fh.(*overlay); ok { - if info, _ := os.Stat(fh.URI().Filename()); info == nil { - return nil, source.ErrNoModOnDisk - } - } - if criticalErr := s.GetCriticalError(ctx); criticalErr != nil { - return &source.TidiedModule{ - Diagnostics: criticalErr.DiagList, - }, nil - } - workspacePkgs, err := s.workspacePackageHandles(ctx) - if err != nil { - return nil, err - } - importHash, err := s.hashImports(ctx, workspacePkgs) - if err != nil { - return nil, err - } - - s.mu.Lock() - overlayHash := hashUnsavedOverlays(s.files) - s.mu.Unlock() - - key := modTidyKey{ - sessionID: s.view.session.id, - view: s.view.folder.Filename(), - imports: importHash, - unsavedOverlays: overlayHash, - gomod: fh.FileIdentity(), - env: hashEnv(s), - } - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(fh.URI())) - defer done() - - snapshot := arg.(*snapshot) - inv := &gocommand.Invocation{ - Verb: "mod", - Args: []string{"tidy"}, - WorkingDir: filepath.Dir(fh.URI().Filename()), - } - tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv) - if err != nil { - return &modTidyData{err: err} - } - // Keep the temporary go.mod file around long enough to parse it. - defer cleanup() - - if _, err := s.view.session.gocmdRunner.Run(ctx, *inv); err != nil { - return &modTidyData{err: err} - } - // Go directly to disk to get the temporary mod file, since it is - // always on disk. - tempContents, err := ioutil.ReadFile(tmpURI.Filename()) - if err != nil { - return &modTidyData{err: err} - } - ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil) - if err != nil { - // We do not need to worry about the temporary file's parse errors - // since it has been "tidied". - return &modTidyData{err: err} - } - // Compare the original and tidied go.mod files to compute errors and - // suggested fixes. - diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs) - if err != nil { - return &modTidyData{err: err} - } - return &modTidyData{ - tidied: &source.TidiedModule{ - Diagnostics: diagnostics, - TidiedContent: tempContents, - }, - } - }, nil) - - mth := &modTidyHandle{handle: h} - s.mu.Lock() - s.modTidyHandles[fh.URI()] = mth - s.mu.Unlock() - - return mth.tidy(ctx, s) -} - -func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) (string, error) { - seen := map[string]struct{}{} - var imports []string - for _, ph := range wsPackages { - for _, imp := range ph.imports(ctx, s) { - if _, ok := seen[imp]; !ok { - imports = append(imports, imp) - seen[imp] = struct{}{} - } - } - } - sort.Strings(imports) - hashed := strings.Join(imports, ",") - return hashContents([]byte(hashed)), nil -} - -// modTidyDiagnostics computes the differences between the original and tidied -// go.mod files to produce diagnostic and suggested fixes. Some diagnostics -// may appear on the Go files that import packages from missing modules. -func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *source.ParsedModule, ideal *modfile.File, workspacePkgs []*packageHandle) (diagnostics []*source.Diagnostic, err error) { - // First, determine which modules are unused and which are missing from the - // original go.mod file. - var ( - unused = make(map[string]*modfile.Require, len(pm.File.Require)) - missing = make(map[string]*modfile.Require, len(ideal.Require)) - wrongDirectness = make(map[string]*modfile.Require, len(pm.File.Require)) - ) - for _, req := range pm.File.Require { - unused[req.Mod.Path] = req - } - for _, req := range ideal.Require { - origReq := unused[req.Mod.Path] - if origReq == nil { - missing[req.Mod.Path] = req - continue - } else if origReq.Indirect != req.Indirect { - wrongDirectness[req.Mod.Path] = origReq - } - delete(unused, req.Mod.Path) - } - for _, req := range wrongDirectness { - // Handle dependencies that are incorrectly labeled indirect and - // vice versa. - srcDiag, err := directnessDiagnostic(pm.Mapper, req, snapshot.View().Options().ComputeEdits) - if err != nil { - // We're probably in a bad state if we can't compute a - // directnessDiagnostic, but try to keep going so as to not suppress - // other, valid diagnostics. - event.Error(ctx, "computing directness diagnostic", err) - continue - } - diagnostics = append(diagnostics, srcDiag) - } - // Next, compute any diagnostics for modules that are missing from the - // go.mod file. The fixes will be for the go.mod file, but the - // diagnostics should also appear in both the go.mod file and the import - // statements in the Go files in which the dependencies are used. - missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{} - for _, req := range missing { - srcDiag, err := missingModuleDiagnostic(pm, req) - if err != nil { - return nil, err - } - missingModuleFixes[req] = srcDiag.SuggestedFixes - diagnostics = append(diagnostics, srcDiag) - } - // Add diagnostics for missing modules anywhere they are imported in the - // workspace. - for _, ph := range workspacePkgs { - missingImports := map[string]*modfile.Require{} - - // If -mod=readonly is not set we may have successfully imported - // packages from missing modules. Otherwise they'll be in - // MissingDependencies. Combine both. - importedPkgs := ph.imports(ctx, snapshot) - - for _, imp := range importedPkgs { - if req, ok := missing[imp]; ok { - missingImports[imp] = req - break - } - // If the import is a package of the dependency, then add the - // package to the map, this will eliminate the need to do this - // prefix package search on each import for each file. - // Example: - // - // import ( - // "golang.org/x/tools/go/expect" - // "golang.org/x/tools/go/packages" - // ) - // They both are related to the same module: "golang.org/x/tools". - var match string - for _, req := range ideal.Require { - if strings.HasPrefix(imp, req.Mod.Path) && len(req.Mod.Path) > len(match) { - match = req.Mod.Path - } - } - if req, ok := missing[match]; ok { - missingImports[imp] = req - } - } - // None of this package's imports are from missing modules. - if len(missingImports) == 0 { - continue - } - for _, pgh := range ph.compiledGoFiles { - pgf, err := snapshot.ParseGo(ctx, pgh.file, source.ParseHeader) - if err != nil { - continue - } - file, m := pgf.File, pgf.Mapper - if file == nil || m == nil { - continue - } - imports := make(map[string]*ast.ImportSpec) - for _, imp := range file.Imports { - if imp.Path == nil { - continue - } - if target, err := strconv.Unquote(imp.Path.Value); err == nil { - imports[target] = imp - } - } - if len(imports) == 0 { - continue - } - for importPath, req := range missingImports { - imp, ok := imports[importPath] - if !ok { - continue - } - fixes, ok := missingModuleFixes[req] - if !ok { - return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path) - } - srcErr, err := missingModuleForImport(snapshot, m, imp, req, fixes) - if err != nil { - return nil, err - } - diagnostics = append(diagnostics, srcErr) - } - } - } - // Finally, add errors for any unused dependencies. - onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1 - for _, req := range unused { - srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic) - if err != nil { - return nil, err - } - diagnostics = append(diagnostics, srcErr) - } - return diagnostics, nil -} - -// unusedDiagnostic returns a source.Diagnostic for an unused require. -func unusedDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) { - rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End) - if err != nil { - return nil, err - } - title := fmt.Sprintf("Remove dependency: %s", req.Mod.Path) - cmd, err := command.NewRemoveDependencyCommand(title, command.RemoveDependencyArgs{ - URI: protocol.URIFromSpanURI(m.URI), - OnlyDiagnostic: onlyDiagnostic, - ModulePath: req.Mod.Path, - }) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: m.URI, - Range: rng, - Severity: protocol.SeverityWarning, - Source: source.ModTidyError, - Message: fmt.Sprintf("%s is not used in this module", req.Mod.Path), - SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, - }, nil -} - -// directnessDiagnostic extracts errors when a dependency is labeled indirect when -// it should be direct and vice versa. -func directnessDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, computeEdits diff.ComputeEdits) (*source.Diagnostic, error) { - rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End) - if err != nil { - return nil, err - } - direction := "indirect" - if req.Indirect { - direction = "direct" - - // If the dependency should be direct, just highlight the // indirect. - if comments := req.Syntax.Comment(); comments != nil && len(comments.Suffix) > 0 { - end := comments.Suffix[0].Start - end.LineRune += len(comments.Suffix[0].Token) - end.Byte += len([]byte(comments.Suffix[0].Token)) - rng, err = rangeFromPositions(m, comments.Suffix[0].Start, end) - if err != nil { - return nil, err - } - } - } - // If the dependency should be indirect, add the // indirect. - edits, err := switchDirectness(req, m, computeEdits) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: m.URI, - Range: rng, - Severity: protocol.SeverityWarning, - Source: source.ModTidyError, - Message: fmt.Sprintf("%s should be %s", req.Mod.Path, direction), - SuggestedFixes: []source.SuggestedFix{{ - Title: fmt.Sprintf("Change %s to %s", req.Mod.Path, direction), - Edits: map[span.URI][]protocol.TextEdit{ - m.URI: edits, - }, - ActionKind: protocol.QuickFix, - }}, - }, nil -} - -func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*source.Diagnostic, error) { - var rng protocol.Range - // Default to the start of the file if there is no module declaration. - if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil { - start, end := pm.File.Module.Syntax.Span() - var err error - rng, err = rangeFromPositions(pm.Mapper, start, end) - if err != nil { - return nil, err - } - } - title := fmt.Sprintf("Add %s to your go.mod file", req.Mod.Path) - cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{ - URI: protocol.URIFromSpanURI(pm.Mapper.URI), - AddRequire: !req.Indirect, - GoCmdArgs: []string{req.Mod.Path + "@" + req.Mod.Version}, - }) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: pm.Mapper.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.ModTidyError, - Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path), - SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, - }, nil -} - -// switchDirectness gets the edits needed to change an indirect dependency to -// direct and vice versa. -func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdits diff.ComputeEdits) ([]protocol.TextEdit, error) { - // We need a private copy of the parsed go.mod file, since we're going to - // modify it. - copied, err := modfile.Parse("", m.Content, nil) - if err != nil { - return nil, err - } - // Change the directness in the matching require statement. To avoid - // reordering the require statements, rewrite all of them. - var requires []*modfile.Require - seenVersions := make(map[string]string) - for _, r := range copied.Require { - if seen := seenVersions[r.Mod.Path]; seen != "" && seen != r.Mod.Version { - // Avoid a panic in SetRequire below, which panics on conflicting - // versions. - return nil, fmt.Errorf("%q has conflicting versions: %q and %q", r.Mod.Path, seen, r.Mod.Version) - } - seenVersions[r.Mod.Path] = r.Mod.Version - if r.Mod.Path == req.Mod.Path { - requires = append(requires, &modfile.Require{ - Mod: r.Mod, - Syntax: r.Syntax, - Indirect: !r.Indirect, - }) - continue - } - requires = append(requires, r) - } - copied.SetRequire(requires) - newContent, err := copied.Format() - if err != nil { - return nil, err - } - // Calculate the edits to be made due to the change. - diff, err := computeEdits(m.URI, string(m.Content), string(newContent)) - if err != nil { - return nil, err - } - return source.ToProtocolEdits(m, diff) -} - -// missingModuleForImport creates an error for a given import path that comes -// from a missing module. -func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) { - if req.Syntax == nil { - return nil, fmt.Errorf("no syntax for %v", req) - } - spn, err := span.NewRange(snapshot.FileSet(), imp.Path.Pos(), imp.Path.End()).Span() - if err != nil { - return nil, err - } - rng, err := m.Range(spn) - if err != nil { - return nil, err - } - return &source.Diagnostic{ - URI: m.URI, - Range: rng, - Severity: protocol.SeverityError, - Source: source.ModTidyError, - Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path), - SuggestedFixes: fixes, - }, nil -} - -func rangeFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (protocol.Range, error) { - spn, err := spanFromPositions(m, s, e) - if err != nil { - return protocol.Range{}, err - } - return m.Range(spn) -} - -func spanFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (span.Span, error) { - toPoint := func(offset int) (span.Point, error) { - l, c, err := span.ToPosition(m.TokFile, offset) - if err != nil { - return span.Point{}, err - } - return span.NewPoint(l, c, offset), nil - } - start, err := toPoint(s.Byte) - if err != nil { - return span.Span{}, err - } - end, err := toPoint(e.Byte) - if err != nil { - return span.Span{}, err - } - return span.New(m.URI, start, end), nil -} diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go deleted file mode 100644 index 668c437f5c9..00000000000 --- a/internal/lsp/cache/parse.go +++ /dev/null @@ -1,1475 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "bytes" - "context" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "path/filepath" - "reflect" - "strconv" - "strings" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" -) - -// parseKey uniquely identifies a parsed Go file. -type parseKey struct { - file source.FileIdentity - mode source.ParseMode -} - -type parseGoHandle struct { - handle *memoize.Handle - file source.FileHandle - mode source.ParseMode -} - -type parseGoData struct { - parsed *source.ParsedGoFile - - // If true, we adjusted the AST to make it type check better, and - // it may not match the source code. - fixed bool - err error // any other errors -} - -func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode source.ParseMode) *parseGoHandle { - key := parseKey{ - file: fh.FileIdentity(), - mode: mode, - } - if pgh := s.getGoFile(key); pgh != nil { - return pgh - } - parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - return parseGo(ctx, snapshot.FileSet(), fh, mode) - }, nil) - - pgh := &parseGoHandle{ - handle: parseHandle, - file: fh, - mode: mode, - } - return s.addGoFile(key, pgh) -} - -func (pgh *parseGoHandle) String() string { - return pgh.file.URI().Filename() -} - -func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { - pgf, _, err := s.parseGo(ctx, fh, mode) - return pgf, err -} - -func (s *snapshot) parseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, bool, error) { - if mode == source.ParseExported { - panic("only type checking should use Exported") - } - pgh := s.parseGoHandle(ctx, fh, mode) - d, err := pgh.handle.Get(ctx, s.generation, s) - if err != nil { - return nil, false, err - } - data := d.(*parseGoData) - return data.parsed, data.fixed, data.err -} - -// cachedPGF returns the cached ParsedGoFile for the given ParseMode, if it -// has already been computed. Otherwise, it returns nil. -func (s *snapshot) cachedPGF(fh source.FileHandle, mode source.ParseMode) *source.ParsedGoFile { - key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh := s.getGoFile(key); pgh != nil { - cached := pgh.handle.Cached(s.generation) - if cached != nil { - cached := cached.(*parseGoData) - if cached.parsed != nil { - return cached.parsed - } - } - } - return nil -} - -type astCacheKey struct { - pkg packageHandleKey - uri span.URI -} - -func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) { - pkg := spkg.(*pkg) - pkgHandle := s.getPackage(pkg.m.ID, pkg.mode) - if pkgHandle == nil { - return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID) - } - tok := s.FileSet().File(pos) - if tok == nil { - return nil, fmt.Errorf("no file for pos %v", pos) - } - pgf, err := pkg.File(span.URIFromPath(tok.Name())) - if err != nil { - return nil, err - } - astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} { - return buildASTCache(pgf) - }, nil) - - d, err := astHandle.Get(ctx, s.generation, s) - if err != nil { - return nil, err - } - data := d.(*astCacheData) - if data.err != nil { - return nil, data.err - } - return data, nil -} - -func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) { - data, err := s.astCacheData(ctx, spkg, pos) - if err != nil { - return nil, err - } - return data.posToDecl[pos], nil -} - -func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos token.Pos) (*ast.Field, error) { - data, err := s.astCacheData(ctx, spkg, pos) - if err != nil { - return nil, err - } - return data.posToField[pos], nil -} - -type astCacheData struct { - err error - - posToDecl map[token.Pos]ast.Decl - posToField map[token.Pos]*ast.Field -} - -// buildASTCache builds caches to aid in quickly going from the typed -// world to the syntactic world. -func buildASTCache(pgf *source.ParsedGoFile) *astCacheData { - var ( - // path contains all ancestors, including n. - path []ast.Node - // decls contains all ancestors that are decls. - decls []ast.Decl - ) - - data := &astCacheData{ - posToDecl: make(map[token.Pos]ast.Decl), - posToField: make(map[token.Pos]*ast.Field), - } - - ast.Inspect(pgf.File, func(n ast.Node) bool { - if n == nil { - lastP := path[len(path)-1] - path = path[:len(path)-1] - if len(decls) > 0 && decls[len(decls)-1] == lastP { - decls = decls[:len(decls)-1] - } - return false - } - - path = append(path, n) - - switch n := n.(type) { - case *ast.Field: - addField := func(f ast.Node) { - if f.Pos().IsValid() { - data.posToField[f.Pos()] = n - if len(decls) > 0 { - data.posToDecl[f.Pos()] = decls[len(decls)-1] - } - } - } - - // Add mapping for *ast.Field itself. This handles embedded - // fields which have no associated *ast.Ident name. - addField(n) - - // Add mapping for each field name since you can have - // multiple names for the same type expression. - for _, name := range n.Names { - addField(name) - } - - // Also map "X" in "...X" to the containing *ast.Field. This - // makes it easy to format variadic signature params - // properly. - if elips, ok := n.Type.(*ast.Ellipsis); ok && elips.Elt != nil { - addField(elips.Elt) - } - case *ast.FuncDecl: - decls = append(decls, n) - - if n.Name != nil && n.Name.Pos().IsValid() { - data.posToDecl[n.Name.Pos()] = n - } - case *ast.GenDecl: - decls = append(decls, n) - - for _, spec := range n.Specs { - switch spec := spec.(type) { - case *ast.TypeSpec: - if spec.Name != nil && spec.Name.Pos().IsValid() { - data.posToDecl[spec.Name.Pos()] = n - } - case *ast.ValueSpec: - for _, id := range spec.Names { - if id != nil && id.Pos().IsValid() { - data.posToDecl[id.Pos()] = n - } - } - } - } - } - - return true - }) - - return data -} - -func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) *parseGoData { - ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename())) - defer done() - - ext := filepath.Ext(fh.URI().Filename()) - if ext != ".go" && ext != "" { // files generated by cgo have no extension - return &parseGoData{err: fmt.Errorf("cannot parse non-Go file %s", fh.URI())} - } - src, err := fh.Read() - if err != nil { - return &parseGoData{err: err} - } - - parserMode := parser.AllErrors | parser.ParseComments - if mode == source.ParseHeader { - parserMode = parser.ImportsOnly | parser.ParseComments - } - - file, err := parser.ParseFile(fset, fh.URI().Filename(), src, parserMode) - var parseErr scanner.ErrorList - if err != nil { - // We passed a byte slice, so the only possible error is a parse error. - parseErr = err.(scanner.ErrorList) - } - - tok := fset.File(file.Pos()) - if tok == nil { - // file.Pos is the location of the package declaration. If there was - // none, we can't find the token.File that ParseFile created, and we - // have no choice but to recreate it. - tok = fset.AddFile(fh.URI().Filename(), -1, len(src)) - tok.SetLinesForContent(src) - } - - fixed := false - // If there were parse errors, attempt to fix them up. - if parseErr != nil { - // Fix any badly parsed parts of the AST. - fixed = fixAST(ctx, file, tok, src) - - for i := 0; i < 10; i++ { - // Fix certain syntax errors that render the file unparseable. - newSrc := fixSrc(file, tok, src) - if newSrc == nil { - break - } - - // If we thought there was something to fix 10 times in a row, - // it is likely we got stuck in a loop somehow. Log out a diff - // of the last changes we made to aid in debugging. - if i == 9 { - edits, err := myers.ComputeEdits(fh.URI(), string(src), string(newSrc)) - if err != nil { - event.Error(ctx, "error generating fixSrc diff", err, tag.File.Of(tok.Name())) - } else { - unified := diff.ToUnified("before", "after", string(src), edits) - event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name())) - } - } - - newFile, _ := parser.ParseFile(fset, fh.URI().Filename(), newSrc, parserMode) - if newFile != nil { - // Maintain the original parseError so we don't try formatting the doctored file. - file = newFile - src = newSrc - tok = fset.File(file.Pos()) - - fixed = fixAST(ctx, file, tok, src) - } - } - } - - return &parseGoData{ - parsed: &source.ParsedGoFile{ - URI: fh.URI(), - Mode: mode, - Src: src, - File: file, - Tok: tok, - Mapper: &protocol.ColumnMapper{ - URI: fh.URI(), - TokFile: tok, - Content: src, - }, - ParseErr: parseErr, - }, - fixed: fixed, - } -} - -// An unexportedFilter removes as much unexported AST from a set of Files as possible. -type unexportedFilter struct { - uses map[string]bool -} - -// Filter records uses of unexported identifiers and filters out all other -// unexported declarations. -func (f *unexportedFilter) Filter(files []*ast.File) { - // Iterate to fixed point -- unexported types can include other unexported types. - oldLen := len(f.uses) - for { - for _, file := range files { - f.recordUses(file) - } - if len(f.uses) == oldLen { - break - } - oldLen = len(f.uses) - } - - for _, file := range files { - var newDecls []ast.Decl - for _, decl := range file.Decls { - if f.filterDecl(decl) { - newDecls = append(newDecls, decl) - } - } - file.Decls = newDecls - file.Scope = nil - file.Unresolved = nil - file.Comments = nil - trimAST(file) - } -} - -func (f *unexportedFilter) keep(ident *ast.Ident) bool { - return ast.IsExported(ident.Name) || f.uses[ident.Name] -} - -func (f *unexportedFilter) filterDecl(decl ast.Decl) bool { - switch decl := decl.(type) { - case *ast.FuncDecl: - if ident := recvIdent(decl); ident != nil && !f.keep(ident) { - return false - } - return f.keep(decl.Name) - case *ast.GenDecl: - if decl.Tok == token.CONST { - // Constants can involve iota, and iota is hard to deal with. - return true - } - var newSpecs []ast.Spec - for _, spec := range decl.Specs { - if f.filterSpec(spec) { - newSpecs = append(newSpecs, spec) - } - } - decl.Specs = newSpecs - return len(newSpecs) != 0 - case *ast.BadDecl: - return false - } - panic(fmt.Sprintf("unknown ast.Decl %T", decl)) -} - -func (f *unexportedFilter) filterSpec(spec ast.Spec) bool { - switch spec := spec.(type) { - case *ast.ImportSpec: - return true - case *ast.ValueSpec: - var newNames []*ast.Ident - for _, name := range spec.Names { - if f.keep(name) { - newNames = append(newNames, name) - } - } - spec.Names = newNames - return len(spec.Names) != 0 - case *ast.TypeSpec: - if !f.keep(spec.Name) { - return false - } - switch typ := spec.Type.(type) { - case *ast.StructType: - f.filterFieldList(typ.Fields) - case *ast.InterfaceType: - f.filterFieldList(typ.Methods) - } - return true - } - panic(fmt.Sprintf("unknown ast.Spec %T", spec)) -} - -func (f *unexportedFilter) filterFieldList(fields *ast.FieldList) { - var newFields []*ast.Field - for _, field := range fields.List { - if len(field.Names) == 0 { - // Keep embedded fields: they can export methods and fields. - newFields = append(newFields, field) - } - for _, name := range field.Names { - if f.keep(name) { - newFields = append(newFields, field) - break - } - } - } - fields.List = newFields -} - -func (f *unexportedFilter) recordUses(file *ast.File) { - for _, decl := range file.Decls { - switch decl := decl.(type) { - case *ast.FuncDecl: - // Ignore methods on dropped types. - if ident := recvIdent(decl); ident != nil && !f.keep(ident) { - break - } - // Ignore functions with dropped names. - if !f.keep(decl.Name) { - break - } - f.recordFuncType(decl.Type) - case *ast.GenDecl: - for _, spec := range decl.Specs { - switch spec := spec.(type) { - case *ast.ValueSpec: - for i, name := range spec.Names { - // Don't mess with constants -- iota is hard. - if f.keep(name) || decl.Tok == token.CONST { - f.recordIdents(spec.Type) - if len(spec.Values) > i { - f.recordIdents(spec.Values[i]) - } - } - } - case *ast.TypeSpec: - switch typ := spec.Type.(type) { - case *ast.StructType: - f.recordFieldUses(false, typ.Fields) - case *ast.InterfaceType: - f.recordFieldUses(false, typ.Methods) - } - } - } - } - } -} - -// recvIdent returns the identifier of a method receiver, e.g. *int. -func recvIdent(decl *ast.FuncDecl) *ast.Ident { - if decl.Recv == nil || len(decl.Recv.List) == 0 { - return nil - } - x := decl.Recv.List[0].Type - if star, ok := x.(*ast.StarExpr); ok { - x = star.X - } - if ident, ok := x.(*ast.Ident); ok { - return ident - } - return nil -} - -// recordIdents records unexported identifiers in an Expr in uses. -// These may be types, e.g. in map[key]value, function names, e.g. in foo(), -// or simple variable references. References that will be discarded, such -// as those in function literal bodies, are ignored. -func (f *unexportedFilter) recordIdents(x ast.Expr) { - ast.Inspect(x, func(n ast.Node) bool { - if n == nil { - return false - } - if complit, ok := n.(*ast.CompositeLit); ok { - // We clear out composite literal contents; just record their type. - f.recordIdents(complit.Type) - return false - } - if flit, ok := n.(*ast.FuncLit); ok { - f.recordFuncType(flit.Type) - return false - } - if ident, ok := n.(*ast.Ident); ok && !ast.IsExported(ident.Name) { - f.uses[ident.Name] = true - } - return true - }) -} - -// recordFuncType records the types mentioned by a function type. -func (f *unexportedFilter) recordFuncType(x *ast.FuncType) { - f.recordFieldUses(true, x.Params) - f.recordFieldUses(true, x.Results) -} - -// recordFieldUses records unexported identifiers used in fields, which may be -// struct members, interface members, or function parameter/results. -func (f *unexportedFilter) recordFieldUses(isParams bool, fields *ast.FieldList) { - if fields == nil { - return - } - for _, field := range fields.List { - if isParams { - // Parameter types of retained functions need to be retained. - f.recordIdents(field.Type) - continue - } - if ft, ok := field.Type.(*ast.FuncType); ok { - // Function declarations in interfaces need all their types retained. - f.recordFuncType(ft) - continue - } - if len(field.Names) == 0 { - // Embedded fields might contribute exported names. - f.recordIdents(field.Type) - } - for _, name := range field.Names { - // We only need normal fields if they're exported. - if ast.IsExported(name.Name) { - f.recordIdents(field.Type) - break - } - } - } -} - -// ProcessErrors records additional uses from errors, returning the new uses -// and any unexpected errors. -func (f *unexportedFilter) ProcessErrors(errors []types.Error) (map[string]bool, []types.Error) { - var unexpected []types.Error - missing := map[string]bool{} - for _, err := range errors { - if strings.Contains(err.Msg, "missing return") { - continue - } - const undeclared = "undeclared name: " - if strings.HasPrefix(err.Msg, undeclared) { - missing[strings.TrimPrefix(err.Msg, undeclared)] = true - f.uses[strings.TrimPrefix(err.Msg, undeclared)] = true - continue - } - unexpected = append(unexpected, err) - } - return missing, unexpected -} - -// trimAST clears any part of the AST not relevant to type checking -// expressions at pos. -func trimAST(file *ast.File) { - ast.Inspect(file, func(n ast.Node) bool { - if n == nil { - return false - } - switch n := n.(type) { - case *ast.FuncDecl: - n.Body = nil - case *ast.BlockStmt: - n.List = nil - case *ast.CaseClause: - n.Body = nil - case *ast.CommClause: - n.Body = nil - case *ast.CompositeLit: - // types.Info.Types for long slice/array literals are particularly - // expensive. Try to clear them out. - at, ok := n.Type.(*ast.ArrayType) - if !ok { - // Composite literal. No harm removing all its fields. - n.Elts = nil - break - } - // Removing the elements from an ellipsis array changes its type. - // Try to set the length explicitly so we can continue. - if _, ok := at.Len.(*ast.Ellipsis); ok { - length, ok := arrayLength(n) - if !ok { - break - } - at.Len = &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprint(length), - ValuePos: at.Len.Pos(), - } - } - n.Elts = nil - } - return true - }) -} - -// arrayLength returns the length of some simple forms of ellipsis array literal. -// Notably, it handles the tables in golang.org/x/text. -func arrayLength(array *ast.CompositeLit) (int, bool) { - litVal := func(expr ast.Expr) (int, bool) { - lit, ok := expr.(*ast.BasicLit) - if !ok { - return 0, false - } - val, err := strconv.ParseInt(lit.Value, 10, 64) - if err != nil { - return 0, false - } - return int(val), true - } - largestKey := -1 - for _, elt := range array.Elts { - kve, ok := elt.(*ast.KeyValueExpr) - if !ok { - continue - } - switch key := kve.Key.(type) { - case *ast.BasicLit: - if val, ok := litVal(key); ok && largestKey < val { - largestKey = val - } - case *ast.BinaryExpr: - // golang.org/x/text uses subtraction (and only subtraction) in its indices. - if key.Op != token.SUB { - break - } - x, ok := litVal(key.X) - if !ok { - break - } - y, ok := litVal(key.Y) - if !ok { - break - } - if val := x - y; largestKey < val { - largestKey = val - } - } - } - if largestKey != -1 { - return largestKey + 1, true - } - return len(array.Elts), true -} - -// fixAST inspects the AST and potentially modifies any *ast.BadStmts so that it can be -// type-checked more effectively. -// -// If fixAST returns true, the resulting AST is considered "fixed", meaning -// positions have been mangled, and type checker errors may not make sense. -func fixAST(ctx context.Context, n ast.Node, tok *token.File, src []byte) (fixed bool) { - var err error - walkASTWithParent(n, func(n, parent ast.Node) bool { - switch n := n.(type) { - case *ast.BadStmt: - if fixed = fixDeferOrGoStmt(n, parent, tok, src); fixed { - // Recursively fix in our fixed node. - _ = fixAST(ctx, parent, tok, src) - } else { - err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err) - } - return false - case *ast.BadExpr: - if fixed = fixArrayType(n, parent, tok, src); fixed { - // Recursively fix in our fixed node. - _ = fixAST(ctx, parent, tok, src) - return false - } - - // Fix cases where parser interprets if/for/switch "init" - // statement as "cond" expression, e.g.: - // - // // "i := foo" is init statement, not condition. - // for i := foo - // - fixInitStmt(n, parent, tok, src) - - return false - case *ast.SelectorExpr: - // Fix cases where a keyword prefix results in a phantom "_" selector, e.g.: - // - // foo.var<> // want to complete to "foo.variance" - // - fixPhantomSelector(n, tok, src) - return true - - case *ast.BlockStmt: - switch parent.(type) { - case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - // Adjust closing curly brace of empty switch/select - // statements so we can complete inside them. - fixEmptySwitch(n, tok, src) - } - - return true - default: - return true - } - }) - return fixed -} - -// walkASTWithParent walks the AST rooted at n. The semantics are -// similar to ast.Inspect except it does not call f(nil). -func walkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { - var ancestors []ast.Node - ast.Inspect(n, func(n ast.Node) (recurse bool) { - defer func() { - if recurse { - ancestors = append(ancestors, n) - } - }() - - if n == nil { - ancestors = ancestors[:len(ancestors)-1] - return false - } - - var parent ast.Node - if len(ancestors) > 0 { - parent = ancestors[len(ancestors)-1] - } - - return f(n, parent) - }) -} - -// fixSrc attempts to modify the file's source code to fix certain -// syntax errors that leave the rest of the file unparsed. -func fixSrc(f *ast.File, tf *token.File, src []byte) (newSrc []byte) { - walkASTWithParent(f, func(n, parent ast.Node) bool { - if newSrc != nil { - return false - } - - switch n := n.(type) { - case *ast.BlockStmt: - newSrc = fixMissingCurlies(f, n, parent, tf, src) - case *ast.SelectorExpr: - newSrc = fixDanglingSelector(n, tf, src) - } - - return newSrc == nil - }) - - return newSrc -} - -// fixMissingCurlies adds in curly braces for block statements that -// are missing curly braces. For example: -// -// if foo -// -// becomes -// -// if foo {} -func fixMissingCurlies(f *ast.File, b *ast.BlockStmt, parent ast.Node, tok *token.File, src []byte) []byte { - // If the "{" is already in the source code, there isn't anything to - // fix since we aren't missing curlies. - if b.Lbrace.IsValid() { - braceOffset, err := safetoken.Offset(tok, b.Lbrace) - if err != nil { - return nil - } - if braceOffset < len(src) && src[braceOffset] == '{' { - return nil - } - } - - parentLine := tok.Line(parent.Pos()) - - if parentLine >= tok.LineCount() { - // If we are the last line in the file, no need to fix anything. - return nil - } - - // Insert curlies at the end of parent's starting line. The parent - // is the statement that contains the block, e.g. *ast.IfStmt. The - // block's Pos()/End() can't be relied upon because they are based - // on the (missing) curly braces. We assume the statement is a - // single line for now and try sticking the curly braces at the end. - insertPos := tok.LineStart(parentLine+1) - 1 - - // Scootch position backwards until it's not in a comment. For example: - // - // if foo<> // some amazing comment | - // someOtherCode() - // - // insertPos will be located at "|", so we back it out of the comment. - didSomething := true - for didSomething { - didSomething = false - for _, c := range f.Comments { - if c.Pos() < insertPos && insertPos <= c.End() { - insertPos = c.Pos() - didSomething = true - } - } - } - - // Bail out if line doesn't end in an ident or ".". This is to avoid - // cases like below where we end up making things worse by adding - // curlies: - // - // if foo && - // bar<> - switch precedingToken(insertPos, tok, src) { - case token.IDENT, token.PERIOD: - // ok - default: - return nil - } - - var buf bytes.Buffer - buf.Grow(len(src) + 3) - offset, err := safetoken.Offset(tok, insertPos) - if err != nil { - return nil - } - buf.Write(src[:offset]) - - // Detect if we need to insert a semicolon to fix "for" loop situations like: - // - // for i := foo(); foo<> - // - // Just adding curlies is not sufficient to make things parse well. - if fs, ok := parent.(*ast.ForStmt); ok { - if _, ok := fs.Cond.(*ast.BadExpr); !ok { - if xs, ok := fs.Post.(*ast.ExprStmt); ok { - if _, ok := xs.X.(*ast.BadExpr); ok { - buf.WriteByte(';') - } - } - } - } - - // Insert "{}" at insertPos. - buf.WriteByte('{') - buf.WriteByte('}') - buf.Write(src[offset:]) - return buf.Bytes() -} - -// fixEmptySwitch moves empty switch/select statements' closing curly -// brace down one line. This allows us to properly detect incomplete -// "case" and "default" keywords as inside the switch statement. For -// example: -// -// switch { -// def<> -// } -// -// gets parsed like: -// -// switch { -// } -// -// Later we manually pull out the "def" token, but we need to detect -// that our "<>" position is inside the switch block. To do that we -// move the curly brace so it looks like: -// -// switch { -// -// } -func fixEmptySwitch(body *ast.BlockStmt, tok *token.File, src []byte) { - // We only care about empty switch statements. - if len(body.List) > 0 || !body.Rbrace.IsValid() { - return - } - - // If the right brace is actually in the source code at the - // specified position, don't mess with it. - braceOffset, err := safetoken.Offset(tok, body.Rbrace) - if err != nil { - return - } - if braceOffset < len(src) && src[braceOffset] == '}' { - return - } - - braceLine := tok.Line(body.Rbrace) - if braceLine >= tok.LineCount() { - // If we are the last line in the file, no need to fix anything. - return - } - - // Move the right brace down one line. - body.Rbrace = tok.LineStart(braceLine + 1) -} - -// fixDanglingSelector inserts real "_" selector expressions in place -// of phantom "_" selectors. For example: -// -// func _() { -// x.<> -// } -// -// var x struct { i int } -// -// To fix completion at "<>", we insert a real "_" after the "." so the -// following declaration of "x" can be parsed and type checked -// normally. -func fixDanglingSelector(s *ast.SelectorExpr, tf *token.File, src []byte) []byte { - if !isPhantomUnderscore(s.Sel, tf, src) { - return nil - } - - if !s.X.End().IsValid() { - return nil - } - - insertOffset, err := safetoken.Offset(tf, s.X.End()) - if err != nil { - return nil - } - // Insert directly after the selector's ".". - insertOffset++ - if src[insertOffset-1] != '.' { - return nil - } - - var buf bytes.Buffer - buf.Grow(len(src) + 1) - buf.Write(src[:insertOffset]) - buf.WriteByte('_') - buf.Write(src[insertOffset:]) - return buf.Bytes() -} - -// fixPhantomSelector tries to fix selector expressions with phantom -// "_" selectors. In particular, we check if the selector is a -// keyword, and if so we swap in an *ast.Ident with the keyword text. For example: -// -// foo.var -// -// yields a "_" selector instead of "var" since "var" is a keyword. -// -// TODO(rfindley): should this constitute an ast 'fix'? -func fixPhantomSelector(sel *ast.SelectorExpr, tf *token.File, src []byte) { - if !isPhantomUnderscore(sel.Sel, tf, src) { - return - } - - // Only consider selectors directly abutting the selector ".". This - // avoids false positives in cases like: - // - // foo. // don't think "var" is our selector - // var bar = 123 - // - if sel.Sel.Pos() != sel.X.End()+1 { - return - } - - maybeKeyword := readKeyword(sel.Sel.Pos(), tf, src) - if maybeKeyword == "" { - return - } - - replaceNode(sel, sel.Sel, &ast.Ident{ - Name: maybeKeyword, - NamePos: sel.Sel.Pos(), - }) -} - -// isPhantomUnderscore reports whether the given ident is a phantom -// underscore. The parser sometimes inserts phantom underscores when -// it encounters otherwise unparseable situations. -func isPhantomUnderscore(id *ast.Ident, tok *token.File, src []byte) bool { - if id == nil || id.Name != "_" { - return false - } - - // Phantom underscore means the underscore is not actually in the - // program text. - offset, err := safetoken.Offset(tok, id.Pos()) - if err != nil { - return false - } - return len(src) <= offset || src[offset] != '_' -} - -// fixInitStmt fixes cases where the parser misinterprets an -// if/for/switch "init" statement as the "cond" conditional. In cases -// like "if i := 0" the user hasn't typed the semicolon yet so the -// parser is looking for the conditional expression. However, "i := 0" -// are not valid expressions, so we get a BadExpr. -// -// fixInitStmt returns valid AST for the original source. -func fixInitStmt(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) { - if !bad.Pos().IsValid() || !bad.End().IsValid() { - return - } - - // Try to extract a statement from the BadExpr. - start, err := safetoken.Offset(tok, bad.Pos()) - if err != nil { - return - } - end, err := safetoken.Offset(tok, bad.End()-1) - if err != nil { - return - } - stmtBytes := src[start : end+1] - stmt, err := parseStmt(bad.Pos(), stmtBytes) - if err != nil { - return - } - - // If the parent statement doesn't already have an "init" statement, - // move the extracted statement into the "init" field and insert a - // dummy expression into the required "cond" field. - switch p := parent.(type) { - case *ast.IfStmt: - if p.Init != nil { - return - } - p.Init = stmt - p.Cond = &ast.Ident{ - Name: "_", - NamePos: stmt.End(), - } - case *ast.ForStmt: - if p.Init != nil { - return - } - p.Init = stmt - p.Cond = &ast.Ident{ - Name: "_", - NamePos: stmt.End(), - } - case *ast.SwitchStmt: - if p.Init != nil { - return - } - p.Init = stmt - p.Tag = nil - } -} - -// readKeyword reads the keyword starting at pos, if any. -func readKeyword(pos token.Pos, tok *token.File, src []byte) string { - var kwBytes []byte - offset, err := safetoken.Offset(tok, pos) - if err != nil { - return "" - } - for i := offset; i < len(src); i++ { - // Use a simplified identifier check since keywords are always lowercase ASCII. - if src[i] < 'a' || src[i] > 'z' { - break - } - kwBytes = append(kwBytes, src[i]) - - // Stop search at arbitrarily chosen too-long-for-a-keyword length. - if len(kwBytes) > 15 { - return "" - } - } - - if kw := string(kwBytes); token.Lookup(kw).IsKeyword() { - return kw - } - - return "" -} - -// fixArrayType tries to parse an *ast.BadExpr into an *ast.ArrayType. -// go/parser often turns lone array types like "[]int" into BadExprs -// if it isn't expecting a type. -func fixArrayType(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) bool { - // Our expected input is a bad expression that looks like "[]someExpr". - - from := bad.Pos() - to := bad.End() - - if !from.IsValid() || !to.IsValid() { - return false - } - - exprBytes := make([]byte, 0, int(to-from)+3) - // Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF. - // It also panics if the position is not in the range of the file, and - // badExprs may not necessarily have good positions, so check first. - fromOffset, err := safetoken.Offset(tok, from) - if err != nil { - return false - } - toOffset, err := safetoken.Offset(tok, to-1) - if err != nil { - return false - } - exprBytes = append(exprBytes, src[fromOffset:toOffset+1]...) - exprBytes = bytes.TrimSpace(exprBytes) - - // If our expression ends in "]" (e.g. "[]"), add a phantom selector - // so we can complete directly after the "[]". - if len(exprBytes) > 0 && exprBytes[len(exprBytes)-1] == ']' { - exprBytes = append(exprBytes, '_') - } - - // Add "{}" to turn our ArrayType into a CompositeLit. This is to - // handle the case of "[...]int" where we must make it a composite - // literal to be parseable. - exprBytes = append(exprBytes, '{', '}') - - expr, err := parseExpr(from, exprBytes) - if err != nil { - return false - } - - cl, _ := expr.(*ast.CompositeLit) - if cl == nil { - return false - } - - at, _ := cl.Type.(*ast.ArrayType) - if at == nil { - return false - } - - return replaceNode(parent, bad, at) -} - -// precedingToken scans src to find the token preceding pos. -func precedingToken(pos token.Pos, tok *token.File, src []byte) token.Token { - s := &scanner.Scanner{} - s.Init(tok, src, nil, 0) - - var lastTok token.Token - for { - p, t, _ := s.Scan() - if t == token.EOF || p >= pos { - break - } - - lastTok = t - } - return lastTok -} - -// fixDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement. -// -// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because -// it does not include a call expression. This means that go/types skips type-checking -// this statement entirely, and we can't use the type information when completing. -// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST, -// instead of the *ast.BadStmt. -func fixDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) bool { - // Check if we have a bad statement containing either a "go" or "defer". - s := &scanner.Scanner{} - s.Init(tok, src, nil, 0) - - var ( - pos token.Pos - tkn token.Token - ) - for { - if tkn == token.EOF { - return false - } - if pos >= bad.From { - break - } - pos, tkn, _ = s.Scan() - } - - var stmt ast.Stmt - switch tkn { - case token.DEFER: - stmt = &ast.DeferStmt{ - Defer: pos, - } - case token.GO: - stmt = &ast.GoStmt{ - Go: pos, - } - default: - return false - } - - var ( - from, to, last token.Pos - lastToken token.Token - braceDepth int - phantomSelectors []token.Pos - ) -FindTo: - for { - to, tkn, _ = s.Scan() - - if from == token.NoPos { - from = to - } - - switch tkn { - case token.EOF: - break FindTo - case token.SEMICOLON: - // If we aren't in nested braces, end of statement means - // end of expression. - if braceDepth == 0 { - break FindTo - } - case token.LBRACE: - braceDepth++ - } - - // This handles the common dangling selector case. For example in - // - // defer fmt. - // y := 1 - // - // we notice the dangling period and end our expression. - // - // If the previous token was a "." and we are looking at a "}", - // the period is likely a dangling selector and needs a phantom - // "_". Likewise if the current token is on a different line than - // the period, the period is likely a dangling selector. - if lastToken == token.PERIOD && (tkn == token.RBRACE || tok.Line(to) > tok.Line(last)) { - // Insert phantom "_" selector after the dangling ".". - phantomSelectors = append(phantomSelectors, last+1) - // If we aren't in a block then end the expression after the ".". - if braceDepth == 0 { - to = last + 1 - break - } - } - - lastToken = tkn - last = to - - switch tkn { - case token.RBRACE: - braceDepth-- - if braceDepth <= 0 { - if braceDepth == 0 { - // +1 to include the "}" itself. - to += 1 - } - break FindTo - } - } - } - - fromOffset, err := safetoken.Offset(tok, from) - if err != nil { - return false - } - if !from.IsValid() || fromOffset >= len(src) { - return false - } - - toOffset, err := safetoken.Offset(tok, to) - if err != nil { - return false - } - if !to.IsValid() || toOffset >= len(src) { - return false - } - - // Insert any phantom selectors needed to prevent dangling "." from messing - // up the AST. - exprBytes := make([]byte, 0, int(to-from)+len(phantomSelectors)) - for i, b := range src[fromOffset:toOffset] { - if len(phantomSelectors) > 0 && from+token.Pos(i) == phantomSelectors[0] { - exprBytes = append(exprBytes, '_') - phantomSelectors = phantomSelectors[1:] - } - exprBytes = append(exprBytes, b) - } - - if len(phantomSelectors) > 0 { - exprBytes = append(exprBytes, '_') - } - - expr, err := parseExpr(from, exprBytes) - if err != nil { - return false - } - - // Package the expression into a fake *ast.CallExpr and re-insert - // into the function. - call := &ast.CallExpr{ - Fun: expr, - Lparen: to, - Rparen: to, - } - - switch stmt := stmt.(type) { - case *ast.DeferStmt: - stmt.Call = call - case *ast.GoStmt: - stmt.Call = call - } - - return replaceNode(parent, bad, stmt) -} - -// parseStmt parses the statement in src and updates its position to -// start at pos. -func parseStmt(pos token.Pos, src []byte) (ast.Stmt, error) { - // Wrap our expression to make it a valid Go file we can pass to ParseFile. - fileSrc := bytes.Join([][]byte{ - []byte("package fake;func _(){"), - src, - []byte("}"), - }, nil) - - // Use ParseFile instead of ParseExpr because ParseFile has - // best-effort behavior, whereas ParseExpr fails hard on any error. - fakeFile, err := parser.ParseFile(token.NewFileSet(), "", fileSrc, 0) - if fakeFile == nil { - return nil, fmt.Errorf("error reading fake file source: %v", err) - } - - // Extract our expression node from inside the fake file. - if len(fakeFile.Decls) == 0 { - return nil, fmt.Errorf("error parsing fake file: %v", err) - } - - fakeDecl, _ := fakeFile.Decls[0].(*ast.FuncDecl) - if fakeDecl == nil || len(fakeDecl.Body.List) == 0 { - return nil, fmt.Errorf("no statement in %s: %v", src, err) - } - - stmt := fakeDecl.Body.List[0] - - // parser.ParseFile returns undefined positions. - // Adjust them for the current file. - offsetPositions(stmt, pos-1-(stmt.Pos()-1)) - - return stmt, nil -} - -// parseExpr parses the expression in src and updates its position to -// start at pos. -func parseExpr(pos token.Pos, src []byte) (ast.Expr, error) { - stmt, err := parseStmt(pos, src) - if err != nil { - return nil, err - } - - exprStmt, ok := stmt.(*ast.ExprStmt) - if !ok { - return nil, fmt.Errorf("no expr in %s: %v", src, err) - } - - return exprStmt.X, nil -} - -var tokenPosType = reflect.TypeOf(token.NoPos) - -// offsetPositions applies an offset to the positions in an ast.Node. -func offsetPositions(n ast.Node, offset token.Pos) { - ast.Inspect(n, func(n ast.Node) bool { - if n == nil { - return false - } - - v := reflect.ValueOf(n).Elem() - - switch v.Kind() { - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - f := v.Field(i) - if f.Type() != tokenPosType { - continue - } - - if !f.CanSet() { - continue - } - - // Don't offset invalid positions: they should stay invalid. - if !token.Pos(f.Int()).IsValid() { - continue - } - - f.SetInt(f.Int() + int64(offset)) - } - } - - return true - }) -} - -// replaceNode updates parent's child oldChild to be newChild. It -// returns whether it replaced successfully. -func replaceNode(parent, oldChild, newChild ast.Node) bool { - if parent == nil || oldChild == nil || newChild == nil { - return false - } - - parentVal := reflect.ValueOf(parent).Elem() - if parentVal.Kind() != reflect.Struct { - return false - } - - newChildVal := reflect.ValueOf(newChild) - - tryReplace := func(v reflect.Value) bool { - if !v.CanSet() || !v.CanInterface() { - return false - } - - // If the existing value is oldChild, we found our child. Make - // sure our newChild is assignable and then make the swap. - if v.Interface() == oldChild && newChildVal.Type().AssignableTo(v.Type()) { - v.Set(newChildVal) - return true - } - - return false - } - - // Loop over parent's struct fields. - for i := 0; i < parentVal.NumField(); i++ { - f := parentVal.Field(i) - - switch f.Kind() { - // Check interface and pointer fields. - case reflect.Interface, reflect.Ptr: - if tryReplace(f) { - return true - } - - // Search through any slice fields. - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - if tryReplace(f.Index(i)) { - return true - } - } - } - } - - return false -} diff --git a/internal/lsp/cache/pkg.go b/internal/lsp/cache/pkg.go deleted file mode 100644 index 1217ec29fd4..00000000000 --- a/internal/lsp/cache/pkg.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "fmt" - "go/ast" - "go/scanner" - "go/types" - - "golang.org/x/mod/module" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -// pkg contains the type information needed by the source package. -type pkg struct { - m *Metadata - mode source.ParseMode - goFiles []*source.ParsedGoFile - compiledGoFiles []*source.ParsedGoFile - diagnostics []*source.Diagnostic - imports map[PackagePath]*pkg - version *module.Version - parseErrors []scanner.ErrorList - typeErrors []types.Error - types *types.Package - typesInfo *types.Info - typesSizes types.Sizes - hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors -} - -// Declare explicit types for files and directories to distinguish between the two. -type ( - fileURI span.URI - moduleLoadScope string - viewLoadScope span.URI -) - -func (p *pkg) ID() string { - return string(p.m.ID) -} - -func (p *pkg) Name() string { - return string(p.m.Name) -} - -func (p *pkg) PkgPath() string { - return string(p.m.PkgPath) -} - -func (p *pkg) ParseMode() source.ParseMode { - return p.mode -} - -func (p *pkg) CompiledGoFiles() []*source.ParsedGoFile { - return p.compiledGoFiles -} - -func (p *pkg) File(uri span.URI) (*source.ParsedGoFile, error) { - for _, cgf := range p.compiledGoFiles { - if cgf.URI == uri { - return cgf, nil - } - } - for _, gf := range p.goFiles { - if gf.URI == uri { - return gf, nil - } - } - return nil, fmt.Errorf("no parsed file for %s in %v", uri, p.m.ID) -} - -func (p *pkg) GetSyntax() []*ast.File { - var syntax []*ast.File - for _, pgf := range p.compiledGoFiles { - syntax = append(syntax, pgf.File) - } - return syntax -} - -func (p *pkg) GetTypes() *types.Package { - return p.types -} - -func (p *pkg) GetTypesInfo() *types.Info { - return p.typesInfo -} - -func (p *pkg) GetTypesSizes() types.Sizes { - return p.typesSizes -} - -func (p *pkg) IsIllTyped() bool { - return p.types == nil || p.typesInfo == nil || p.typesSizes == nil -} - -func (p *pkg) ForTest() string { - return string(p.m.ForTest) -} - -func (p *pkg) GetImport(pkgPath string) (source.Package, error) { - if imp := p.imports[PackagePath(pkgPath)]; imp != nil { - return imp, nil - } - // Don't return a nil pointer because that still satisfies the interface. - return nil, fmt.Errorf("no imported package for %s", pkgPath) -} - -func (p *pkg) MissingDependencies() []string { - // We don't invalidate metadata for import deletions, so check the package - // imports via the *types.Package. Only use metadata if p.types is nil. - if p.types == nil { - var md []string - for i := range p.m.MissingDeps { - md = append(md, string(i)) - } - return md - } - var md []string - for _, pkg := range p.types.Imports() { - if _, ok := p.m.MissingDeps[PackagePath(pkg.Path())]; ok { - md = append(md, pkg.Path()) - } - } - return md -} - -func (p *pkg) Imports() []source.Package { - var result []source.Package - for _, imp := range p.imports { - result = append(result, imp) - } - return result -} - -func (p *pkg) Version() *module.Version { - return p.version -} - -func (p *pkg) HasListOrParseErrors() bool { - return len(p.m.Errors) != 0 || len(p.parseErrors) != 0 -} - -func (p *pkg) HasTypeErrors() bool { - return len(p.typeErrors) != 0 -} diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go deleted file mode 100644 index e018cb33bd8..00000000000 --- a/internal/lsp/cache/session.go +++ /dev/null @@ -1,738 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "fmt" - "strconv" - "sync" - "sync/atomic" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/progress" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -type Session struct { - cache *Cache - id string - - optionsMu sync.Mutex - options *source.Options - - viewMu sync.RWMutex - views []*View - viewMap map[span.URI]*View // map of URI->best view - - overlayMu sync.Mutex - overlays map[span.URI]*overlay - - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - - progress *progress.Tracker -} - -type overlay struct { - session *Session - uri span.URI - text []byte - hash string - version int32 - kind source.FileKind - - // saved is true if a file matches the state on disk, - // and therefore does not need to be part of the overlay sent to go/packages. - saved bool -} - -func (o *overlay) Read() ([]byte, error) { - return o.text, nil -} - -func (o *overlay) FileIdentity() source.FileIdentity { - return source.FileIdentity{ - URI: o.uri, - Hash: o.hash, - } -} - -func (o *overlay) VersionedFileIdentity() source.VersionedFileIdentity { - return source.VersionedFileIdentity{ - URI: o.uri, - SessionID: o.session.id, - Version: o.version, - } -} - -func (o *overlay) Kind() source.FileKind { - return o.kind -} - -func (o *overlay) URI() span.URI { - return o.uri -} - -func (o *overlay) Version() int32 { - return o.version -} - -func (o *overlay) Session() string { - return o.session.id -} - -func (o *overlay) Saved() bool { - return o.saved -} - -// closedFile implements LSPFile for a file that the editor hasn't told us about. -type closedFile struct { - source.FileHandle -} - -func (c *closedFile) VersionedFileIdentity() source.VersionedFileIdentity { - return source.VersionedFileIdentity{ - URI: c.FileHandle.URI(), - SessionID: "", - Version: 0, - } -} - -func (c *closedFile) Saved() bool { - return true -} - -func (c *closedFile) Session() string { - return "" -} - -func (c *closedFile) Version() int32 { - return 0 -} - -func (s *Session) ID() string { return s.id } -func (s *Session) String() string { return s.id } - -func (s *Session) Options() *source.Options { - s.optionsMu.Lock() - defer s.optionsMu.Unlock() - return s.options -} - -func (s *Session) SetOptions(options *source.Options) { - s.optionsMu.Lock() - defer s.optionsMu.Unlock() - s.options = options -} - -func (s *Session) SetProgressTracker(tracker *progress.Tracker) { - // The progress tracker should be set before any view is initialized. - s.progress = tracker -} - -func (s *Session) Shutdown(ctx context.Context) { - var views []*View - s.viewMu.Lock() - views = append(views, s.views...) - s.views = nil - s.viewMap = nil - s.viewMu.Unlock() - for _, view := range views { - view.shutdown(ctx) - } - event.Log(ctx, "Shutdown session", KeyShutdownSession.Of(s)) -} - -func (s *Session) Cache() interface{} { - return s.cache -} - -func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (source.View, source.Snapshot, func(), error) { - s.viewMu.Lock() - defer s.viewMu.Unlock() - for _, view := range s.views { - if span.CompareURI(view.folder, folder) == 0 { - return nil, nil, nil, source.ErrViewExists - } - } - view, snapshot, release, err := s.createView(ctx, name, folder, options, 0) - if err != nil { - return nil, nil, func() {}, err - } - s.views = append(s.views, view) - // we always need to drop the view map - s.viewMap = make(map[span.URI]*View) - return view, snapshot, release, nil -} - -func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, snapshotID uint64) (*View, *snapshot, func(), error) { - index := atomic.AddInt64(&viewIndex, 1) - - if s.cache.options != nil { - s.cache.options(options) - } - - // Set the module-specific information. - ws, err := s.getWorkspaceInformation(ctx, folder, options) - if err != nil { - return nil, nil, func() {}, err - } - root := folder - if options.ExpandWorkspaceToModule { - root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), options.ExperimentalWorkspaceModule) - if err != nil { - return nil, nil, func() {}, err - } - } - - // Build the gopls workspace, collecting active modules in the view. - workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule) - if err != nil { - return nil, nil, func() {}, err - } - - // We want a true background context and not a detached context here - // the spans need to be unrelated and no tag values should pollute it. - baseCtx := event.Detach(xcontext.Detach(ctx)) - backgroundCtx, cancel := context.WithCancel(baseCtx) - - v := &View{ - session: s, - initialWorkspaceLoad: make(chan struct{}), - initializationSema: make(chan struct{}, 1), - id: strconv.FormatInt(index, 10), - options: options, - baseCtx: baseCtx, - name: name, - folder: folder, - moduleUpgrades: map[string]string{}, - filesByURI: map[span.URI]*fileBase{}, - filesByBase: map[string][]*fileBase{}, - rootURI: root, - workspaceInformation: *ws, - } - v.importsState = &importsState{ - ctx: backgroundCtx, - processEnv: &imports.ProcessEnv{ - GocmdRunner: s.gocmdRunner, - }, - } - v.snapshot = &snapshot{ - id: snapshotID, - view: v, - backgroundCtx: backgroundCtx, - cancel: cancel, - initializeOnce: &sync.Once{}, - generation: s.cache.store.Generation(generationName(v, 0)), - packages: make(map[packageKey]*packageHandle), - meta: NewMetadataGraph(), - files: make(map[span.URI]source.VersionedFileHandle), - goFiles: make(map[parseKey]*parseGoHandle), - symbols: make(map[span.URI]*symbolHandle), - actions: make(map[actionKey]*actionHandle), - workspacePackages: make(map[PackageID]PackagePath), - unloadableFiles: make(map[span.URI]struct{}), - parseModHandles: make(map[span.URI]*parseModHandle), - parseWorkHandles: make(map[span.URI]*parseWorkHandle), - modTidyHandles: make(map[span.URI]*modTidyHandle), - modWhyHandles: make(map[span.URI]*modWhyHandle), - workspace: workspace, - } - - // Initialize the view without blocking. - initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx)) - v.initCancelFirstAttempt = initCancel - snapshot := v.snapshot - release := snapshot.generation.Acquire() - go func() { - defer release() - snapshot.initialize(initCtx, true) - }() - return v, snapshot, snapshot.generation.Acquire(), nil -} - -// View returns the view by name. -func (s *Session) View(name string) source.View { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - for _, view := range s.views { - if view.Name() == name { - return view - } - } - return nil -} - -// ViewOf returns a view corresponding to the given URI. -// If the file is not already associated with a view, pick one using some heuristics. -func (s *Session) ViewOf(uri span.URI) (source.View, error) { - return s.viewOf(uri) -} - -func (s *Session) viewOf(uri span.URI) (*View, error) { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - // Check if we already know this file. - if v, found := s.viewMap[uri]; found { - return v, nil - } - // Pick the best view for this file and memoize the result. - if len(s.views) == 0 { - return nil, fmt.Errorf("no views in session") - } - s.viewMap[uri] = bestViewForURI(uri, s.views) - return s.viewMap[uri], nil -} - -func (s *Session) viewsOf(uri span.URI) []*View { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - - var views []*View - for _, view := range s.views { - if source.InDir(view.folder.Filename(), uri.Filename()) { - views = append(views, view) - } - } - return views -} - -func (s *Session) Views() []source.View { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - result := make([]source.View, len(s.views)) - for i, v := range s.views { - result[i] = v - } - return result -} - -// bestViewForURI returns the most closely matching view for the given URI -// out of the given set of views. -func bestViewForURI(uri span.URI, views []*View) *View { - // we need to find the best view for this file - var longest *View - for _, view := range views { - if longest != nil && len(longest.Folder()) > len(view.Folder()) { - continue - } - if view.contains(uri) { - longest = view - } - } - if longest != nil { - return longest - } - // Try our best to return a view that knows the file. - for _, view := range views { - if view.knownFile(uri) { - return view - } - } - // TODO: are there any more heuristics we can use? - return views[0] -} - -func (s *Session) removeView(ctx context.Context, view *View) error { - s.viewMu.Lock() - defer s.viewMu.Unlock() - i, err := s.dropView(ctx, view) - if err != nil { - return err - } - // delete this view... we don't care about order but we do want to make - // sure we can garbage collect the view - s.views[i] = s.views[len(s.views)-1] - s.views[len(s.views)-1] = nil - s.views = s.views[:len(s.views)-1] - return nil -} - -func (s *Session) updateView(ctx context.Context, view *View, options *source.Options) (*View, error) { - s.viewMu.Lock() - defer s.viewMu.Unlock() - - // Preserve the snapshot ID if we are recreating the view. - view.snapshotMu.Lock() - if view.snapshot == nil { - view.snapshotMu.Unlock() - panic("updateView called after View was already shut down") - } - snapshotID := view.snapshot.id - view.snapshotMu.Unlock() - - i, err := s.dropView(ctx, view) - if err != nil { - return nil, err - } - - v, _, release, err := s.createView(ctx, view.name, view.folder, options, snapshotID) - release() - - if err != nil { - // we have dropped the old view, but could not create the new one - // this should not happen and is very bad, but we still need to clean - // up the view array if it happens - s.views[i] = s.views[len(s.views)-1] - s.views[len(s.views)-1] = nil - s.views = s.views[:len(s.views)-1] - return nil, err - } - // substitute the new view into the array where the old view was - s.views[i] = v - return v, nil -} - -func (s *Session) dropView(ctx context.Context, v *View) (int, error) { - // we always need to drop the view map - s.viewMap = make(map[span.URI]*View) - for i := range s.views { - if v == s.views[i] { - // we found the view, drop it and return the index it was found at - s.views[i] = nil - v.shutdown(ctx) - return i, nil - } - } - return -1, fmt.Errorf("view %s for %v not found", v.Name(), v.Folder()) -} - -func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error { - _, releases, err := s.DidModifyFiles(ctx, changes) - for _, release := range releases { - release() - } - return err -} - -type fileChange struct { - content []byte - exists bool - fileHandle source.VersionedFileHandle - - // isUnchanged indicates whether the file action is one that does not - // change the actual contents of the file. Opens and closes should not - // be treated like other changes, since the file content doesn't change. - isUnchanged bool -} - -func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - views := make(map[*View]map[span.URI]*fileChange) - affectedViews := map[span.URI][]*View{} - - overlays, err := s.updateOverlays(ctx, changes) - if err != nil { - return nil, nil, err - } - var forceReloadMetadata bool - for _, c := range changes { - if c.Action == source.InvalidateMetadata { - forceReloadMetadata = true - } - - // Build the list of affected views. - var changedViews []*View - for _, view := range s.views { - // Don't propagate changes that are outside of the view's scope - // or knowledge. - if !view.relevantChange(c) { - continue - } - changedViews = append(changedViews, view) - } - // If the change is not relevant to any view, but the change is - // happening in the editor, assign it the most closely matching view. - if len(changedViews) == 0 { - if c.OnDisk { - continue - } - bestView, err := s.viewOf(c.URI) - if err != nil { - return nil, nil, err - } - changedViews = append(changedViews, bestView) - } - affectedViews[c.URI] = changedViews - - isUnchanged := c.Action == source.Open || c.Action == source.Close - - // Apply the changes to all affected views. - for _, view := range changedViews { - // Make sure that the file is added to the view. - _ = view.getFile(c.URI) - if _, ok := views[view]; !ok { - views[view] = make(map[span.URI]*fileChange) - } - if fh, ok := overlays[c.URI]; ok { - views[view][c.URI] = &fileChange{ - content: fh.text, - exists: true, - fileHandle: fh, - isUnchanged: isUnchanged, - } - } else { - fsFile, err := s.cache.getFile(ctx, c.URI) - if err != nil { - return nil, nil, err - } - content, err := fsFile.Read() - fh := &closedFile{fsFile} - views[view][c.URI] = &fileChange{ - content: content, - exists: err == nil, - fileHandle: fh, - isUnchanged: isUnchanged, - } - } - } - } - - var releases []func() - viewToSnapshot := map[*View]*snapshot{} - for view, changed := range views { - snapshot, release := view.invalidateContent(ctx, changed, forceReloadMetadata) - releases = append(releases, release) - viewToSnapshot[view] = snapshot - } - - // We only want to diagnose each changed file once, in the view to which - // it "most" belongs. We do this by picking the best view for each URI, - // and then aggregating the set of snapshots and their URIs (to avoid - // diagnosing the same snapshot multiple times). - snapshotURIs := map[source.Snapshot][]span.URI{} - for _, mod := range changes { - viewSlice, ok := affectedViews[mod.URI] - if !ok || len(viewSlice) == 0 { - continue - } - view := bestViewForURI(mod.URI, viewSlice) - snapshot, ok := viewToSnapshot[view] - if !ok { - panic(fmt.Sprintf("no snapshot for view %s", view.Folder())) - } - snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI) - } - return snapshotURIs, releases, nil -} - -func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - var snapshots []*snapshot - for _, v := range s.views { - snapshot, release := v.getSnapshot() - defer release() - snapshots = append(snapshots, snapshot) - } - knownDirs := knownDirectories(ctx, snapshots) - var result []source.FileModification - for _, c := range changes { - if _, ok := knownDirs[c.URI]; !ok { - result = append(result, c) - continue - } - affectedFiles := knownFilesInDir(ctx, snapshots, c.URI) - var fileChanges []source.FileModification - for uri := range affectedFiles { - fileChanges = append(fileChanges, source.FileModification{ - URI: uri, - Action: c.Action, - LanguageID: "", - OnDisk: c.OnDisk, - // changes to directories cannot include text or versions - }) - } - result = append(result, fileChanges...) - } - return result -} - -// knownDirectories returns all of the directories known to the given -// snapshots, including workspace directories and their subdirectories. -func knownDirectories(ctx context.Context, snapshots []*snapshot) map[span.URI]struct{} { - result := map[span.URI]struct{}{} - for _, snapshot := range snapshots { - dirs := snapshot.workspace.dirs(ctx, snapshot) - for _, dir := range dirs { - result[dir] = struct{}{} - } - for _, dir := range snapshot.getKnownSubdirs(dirs) { - result[dir] = struct{}{} - } - } - return result -} - -// knownFilesInDir returns the files known to the snapshots in the session. -// It does not respect symlinks. -func knownFilesInDir(ctx context.Context, snapshots []*snapshot, dir span.URI) map[span.URI]struct{} { - files := map[span.URI]struct{}{} - - for _, snapshot := range snapshots { - for _, uri := range snapshot.knownFilesInDir(ctx, dir) { - files[uri] = struct{}{} - } - } - return files -} - -func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModification) (map[span.URI]*overlay, error) { - s.overlayMu.Lock() - defer s.overlayMu.Unlock() - - for _, c := range changes { - // Don't update overlays for metadata invalidations. - if c.Action == source.InvalidateMetadata { - continue - } - - o, ok := s.overlays[c.URI] - - // If the file is not opened in an overlay and the change is on disk, - // there's no need to update an overlay. If there is an overlay, we - // may need to update the overlay's saved value. - if !ok && c.OnDisk { - continue - } - - // Determine the file kind on open, otherwise, assume it has been cached. - var kind source.FileKind - switch c.Action { - case source.Open: - kind = source.FileKindForLang(c.LanguageID) - default: - if !ok { - return nil, fmt.Errorf("updateOverlays: modifying unopened overlay %v", c.URI) - } - kind = o.kind - } - - // Closing a file just deletes its overlay. - if c.Action == source.Close { - delete(s.overlays, c.URI) - continue - } - - // If the file is on disk, check if its content is the same as in the - // overlay. Saves and on-disk file changes don't come with the file's - // content. - text := c.Text - if text == nil && (c.Action == source.Save || c.OnDisk) { - if !ok { - return nil, fmt.Errorf("no known content for overlay for %s", c.Action) - } - text = o.text - } - // On-disk changes don't come with versions. - version := c.Version - if c.OnDisk || c.Action == source.Save { - version = o.version - } - hash := hashContents(text) - var sameContentOnDisk bool - switch c.Action { - case source.Delete: - // Do nothing. sameContentOnDisk should be false. - case source.Save: - // Make sure the version and content (if present) is the same. - if false && o.version != version { // Client no longer sends the version - return nil, fmt.Errorf("updateOverlays: saving %s at version %v, currently at %v", c.URI, c.Version, o.version) - } - if c.Text != nil && o.hash != hash { - return nil, fmt.Errorf("updateOverlays: overlay %s changed on save", c.URI) - } - sameContentOnDisk = true - default: - fh, err := s.cache.getFile(ctx, c.URI) - if err != nil { - return nil, err - } - _, readErr := fh.Read() - sameContentOnDisk = (readErr == nil && fh.FileIdentity().Hash == hash) - } - o = &overlay{ - session: s, - uri: c.URI, - version: version, - text: text, - kind: kind, - hash: hash, - saved: sameContentOnDisk, - } - - // When opening files, ensure that we actually have a well-defined view and file kind. - if c.Action == source.Open { - view, err := s.ViewOf(o.uri) - if err != nil { - return nil, fmt.Errorf("updateOverlays: finding view for %s: %v", o.uri, err) - } - if kind := view.FileKind(o); kind == source.UnknownKind { - return nil, fmt.Errorf("updateOverlays: unknown file kind for %s", o.uri) - } - } - - s.overlays[c.URI] = o - } - - // Get the overlays for each change while the session's overlay map is - // locked. - overlays := make(map[span.URI]*overlay) - for _, c := range changes { - if o, ok := s.overlays[c.URI]; ok { - overlays[c.URI] = o - } - } - return overlays, nil -} - -func (s *Session) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { - if overlay := s.readOverlay(uri); overlay != nil { - return overlay, nil - } - // Fall back to the cache-level file system. - return s.cache.getFile(ctx, uri) -} - -func (s *Session) readOverlay(uri span.URI) *overlay { - s.overlayMu.Lock() - defer s.overlayMu.Unlock() - - if overlay, ok := s.overlays[uri]; ok { - return overlay - } - return nil -} - -func (s *Session) Overlays() []source.Overlay { - s.overlayMu.Lock() - defer s.overlayMu.Unlock() - - overlays := make([]source.Overlay, 0, len(s.overlays)) - for _, overlay := range s.overlays { - overlays = append(overlays, overlay) - } - return overlays -} - -func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - patterns := map[string]struct{}{} - for _, view := range s.views { - snapshot, release := view.getSnapshot() - for k, v := range snapshot.fileWatchingGlobPatterns(ctx) { - patterns[k] = v - } - release() - } - return patterns -} diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go deleted file mode 100644 index a219935aa66..00000000000 --- a/internal/lsp/cache/snapshot.go +++ /dev/null @@ -1,2549 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "bytes" - "context" - "errors" - "fmt" - "go/ast" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/module" - "golang.org/x/mod/semver" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/debug/log" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typesinternal" -) - -type snapshot struct { - memoize.Arg // allow as a memoize.Function arg - - id uint64 - view *View - - cancel func() - backgroundCtx context.Context - - // the cache generation that contains the data for this snapshot. - generation *memoize.Generation - - // The snapshot's initialization state is controlled by the fields below. - // - // initializeOnce guards snapshot initialization. Each snapshot is - // initialized at most once: reinitialization is triggered on later snapshots - // by invalidating this field. - initializeOnce *sync.Once - // initializedErr holds the last error resulting from initialization. If - // initialization fails, we only retry when the the workspace modules change, - // to avoid too many go/packages calls. - initializedErr *source.CriticalError - - // mu guards all of the maps in the snapshot, as well as the builtin URI. - mu sync.Mutex - - // builtin pins the AST and package for builtin.go in memory. - builtin span.URI - - // meta holds loaded metadata. - meta *metadataGraph - - // files maps file URIs to their corresponding FileHandles. - // It may invalidated when a file's content changes. - files map[span.URI]source.VersionedFileHandle - - // goFiles maps a parseKey to its parseGoHandle. - goFiles map[parseKey]*parseGoHandle - - // TODO(rfindley): consider merging this with files to reduce burden on clone. - symbols map[span.URI]*symbolHandle - - // packages maps a packageKey to a set of packageHandles to which that file belongs. - // It may be invalidated when a file's content changes. - packages map[packageKey]*packageHandle - - // actions maps an actionkey to its actionHandle. - actions map[actionKey]*actionHandle - - // workspacePackages contains the workspace's packages, which are loaded - // when the view is created. - workspacePackages map[PackageID]PackagePath - - // unloadableFiles keeps track of files that we've failed to load. - unloadableFiles map[span.URI]struct{} - - // parseModHandles keeps track of any parseModHandles for the snapshot. - // The handles need not refer to only the view's go.mod file. - parseModHandles map[span.URI]*parseModHandle - - // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. - // The handles need not refer to only the view's go.work file. - parseWorkHandles map[span.URI]*parseWorkHandle - - // Preserve go.mod-related handles to avoid garbage-collecting the results - // of various calls to the go command. The handles need not refer to only - // the view's go.mod file. - modTidyHandles map[span.URI]*modTidyHandle - modWhyHandles map[span.URI]*modWhyHandle - - workspace *workspace - workspaceDirHandle *memoize.Handle - - // knownSubdirs is the set of subdirectories in the workspace, used to - // create glob patterns for file watching. - knownSubdirs map[span.URI]struct{} - // unprocessedSubdirChanges are any changes that might affect the set of - // subdirectories in the workspace. They are not reflected to knownSubdirs - // during the snapshot cloning step as it can slow down cloning. - unprocessedSubdirChanges []*fileChange -} - -type packageKey struct { - mode source.ParseMode - id PackageID -} - -type actionKey struct { - pkg packageKey - analyzer *analysis.Analyzer -} - -func (s *snapshot) ID() uint64 { - return s.id -} - -func (s *snapshot) View() source.View { - return s.view -} - -func (s *snapshot) BackgroundContext() context.Context { - return s.backgroundCtx -} - -func (s *snapshot) FileSet() *token.FileSet { - return s.view.session.cache.fset -} - -func (s *snapshot) ModFiles() []span.URI { - var uris []span.URI - for modURI := range s.workspace.getActiveModFiles() { - uris = append(uris, modURI) - } - return uris -} - -func (s *snapshot) WorkFile() span.URI { - return s.workspace.workFile -} - -func (s *snapshot) Templates() map[span.URI]source.VersionedFileHandle { - s.mu.Lock() - defer s.mu.Unlock() - - tmpls := map[span.URI]source.VersionedFileHandle{} - for k, fh := range s.files { - if s.view.FileKind(fh) == source.Tmpl { - tmpls[k] = fh - } - } - return tmpls -} - -func (s *snapshot) ValidBuildConfiguration() bool { - return validBuildConfiguration(s.view.rootURI, &s.view.workspaceInformation, s.workspace.getActiveModFiles()) -} - -// workspaceMode describes the way in which the snapshot's workspace should -// be loaded. -func (s *snapshot) workspaceMode() workspaceMode { - var mode workspaceMode - - // If the view has an invalid configuration, don't build the workspace - // module. - validBuildConfiguration := s.ValidBuildConfiguration() - if !validBuildConfiguration { - return mode - } - // If the view is not in a module and contains no modules, but still has a - // valid workspace configuration, do not create the workspace module. - // It could be using GOPATH or a different build system entirely. - if len(s.workspace.getActiveModFiles()) == 0 && validBuildConfiguration { - return mode - } - mode |= moduleMode - options := s.view.Options() - // The -modfile flag is available for Go versions >= 1.14. - if options.TempModfile && s.view.workspaceInformation.goversion >= 14 { - mode |= tempModfile - } - return mode -} - -// config returns the configuration used for the snapshot's interaction with -// the go/packages API. It uses the given working directory. -// -// TODO(rstambler): go/packages requires that we do not provide overlays for -// multiple modules in on config, so buildOverlay needs to filter overlays by -// module. -func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config { - s.view.optionsMu.Lock() - verboseOutput := s.view.options.VerboseOutput - s.view.optionsMu.Unlock() - - cfg := &packages.Config{ - Context: ctx, - Dir: inv.WorkingDir, - Env: inv.Env, - BuildFlags: inv.BuildFlags, - Mode: packages.NeedName | - packages.NeedFiles | - packages.NeedCompiledGoFiles | - packages.NeedImports | - packages.NeedDeps | - packages.NeedTypesSizes | - packages.NeedModule | - packages.LoadMode(packagesinternal.DepsErrors) | - packages.LoadMode(packagesinternal.ForTest), - Fset: s.FileSet(), - Overlay: s.buildOverlay(), - ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) { - panic("go/packages must not be used to parse files") - }, - Logf: func(format string, args ...interface{}) { - if verboseOutput { - event.Log(ctx, fmt.Sprintf(format, args...)) - } - }, - Tests: true, - } - packagesinternal.SetModFile(cfg, inv.ModFile) - packagesinternal.SetModFlag(cfg, inv.ModFlag) - // We want to type check cgo code if go/types supports it. - if typesinternal.SetUsesCgo(&types.Config{}) { - cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo) - } - packagesinternal.SetGoCmdRunner(cfg, s.view.session.gocmdRunner) - return cfg -} - -func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) { - _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) - if err != nil { - return nil, err - } - defer cleanup() - - return s.view.session.gocmdRunner.Run(ctx, *inv) -} - -func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error { - _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv) - if err != nil { - return err - } - defer cleanup() - return s.view.session.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr) -} - -func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) { - var flags source.InvocationFlags - if s.workspaceMode()&tempModfile != 0 { - flags = source.WriteTemporaryModFile - } else { - flags = source.Normal - } - if allowNetwork { - flags |= source.AllowNetwork - } - tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd}) - if err != nil { - return false, nil, nil, err - } - defer cleanup() - invoke := func(args ...string) (*bytes.Buffer, error) { - inv.Verb = args[0] - inv.Args = args[1:] - return s.view.session.gocmdRunner.Run(ctx, *inv) - } - if err := run(invoke); err != nil { - return false, nil, nil, err - } - if flags.Mode() != source.WriteTemporaryModFile { - return false, nil, nil, nil - } - var modBytes, sumBytes []byte - modBytes, err = ioutil.ReadFile(tmpURI.Filename()) - if err != nil && !os.IsNotExist(err) { - return false, nil, nil, err - } - sumBytes, err = ioutil.ReadFile(strings.TrimSuffix(tmpURI.Filename(), ".mod") + ".sum") - if err != nil && !os.IsNotExist(err) { - return false, nil, nil, err - } - return true, modBytes, sumBytes, nil -} - -func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) { - s.view.optionsMu.Lock() - allowModfileModificationOption := s.view.options.AllowModfileModifications - allowNetworkOption := s.view.options.AllowImplicitNetworkAccess - inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.effectiveGo111Module) - inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...) - s.view.optionsMu.Unlock() - cleanup = func() {} // fallback - - // All logic below is for module mode. - if s.workspaceMode()&moduleMode == 0 { - return "", inv, cleanup, nil - } - - mode, allowNetwork := flags.Mode(), flags.AllowNetwork() - if !allowNetwork && !allowNetworkOption { - inv.Env = append(inv.Env, "GOPROXY=off") - } - - // What follows is rather complicated logic for how to actually run the go - // command. A word of warning: this is the result of various incremental - // features added to gopls, and varying behavior of the Go command across Go - // versions. It can surely be cleaned up significantly, but tread carefully. - // - // Roughly speaking we need to resolve four things: - // - the working directory. - // - the -mod flag - // - the -modfile flag - // - the -workfile flag - // - // These are dependent on a number of factors: whether we need to run in a - // synthetic workspace, whether flags are supported at the current go - // version, and what we're actually trying to achieve (the - // source.InvocationFlags). - - var modURI span.URI - // Select the module context to use. - // If we're type checking, we need to use the workspace context, meaning - // the main (workspace) module. Otherwise, we should use the module for - // the passed-in working dir. - if mode == source.LoadWorkspace { - switch s.workspace.moduleSource { - case legacyWorkspace: - for m := range s.workspace.getActiveModFiles() { // range to access the only element - modURI = m - } - case goWorkWorkspace: - if s.view.goversion >= 18 { - break - } - // Before go 1.18, the Go command did not natively support go.work files, - // so we 'fake' them with a workspace module. - fallthrough - case fileSystemWorkspace, goplsModWorkspace: - var tmpDir span.URI - var err error - tmpDir, err = s.getWorkspaceDir(ctx) - if err != nil { - return "", nil, cleanup, err - } - inv.WorkingDir = tmpDir.Filename() - modURI = span.URIFromPath(filepath.Join(tmpDir.Filename(), "go.mod")) - } - } else { - modURI = s.GoModForFile(span.URIFromPath(inv.WorkingDir)) - } - - var modContent []byte - if modURI != "" { - modFH, err := s.GetFile(ctx, modURI) - if err != nil { - return "", nil, cleanup, err - } - modContent, err = modFH.Read() - if err != nil { - return "", nil, cleanup, err - } - } - - vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent) - if err != nil { - return "", nil, cleanup, err - } - - mutableModFlag := "" - // If the mod flag isn't set, populate it based on the mode and workspace. - if inv.ModFlag == "" { - if s.view.goversion >= 16 { - mutableModFlag = "mod" - } - - switch mode { - case source.LoadWorkspace, source.Normal: - if vendorEnabled { - inv.ModFlag = "vendor" - } else if !allowModfileModificationOption { - inv.ModFlag = "readonly" - } else { - inv.ModFlag = mutableModFlag - } - case source.WriteTemporaryModFile: - inv.ModFlag = mutableModFlag - // -mod must be readonly when using go.work files - see issue #48941 - inv.Env = append(inv.Env, "GOWORK=off") - } - } - - // Only use a temp mod file if the modfile can actually be mutated. - needTempMod := inv.ModFlag == mutableModFlag - useTempMod := s.workspaceMode()&tempModfile != 0 - if needTempMod && !useTempMod { - return "", nil, cleanup, source.ErrTmpModfileUnsupported - } - - // We should use -workfile if: - // 1. We're not actively trying to mutate a modfile. - // 2. We have an active go.work file. - // 3. We're using at least Go 1.18. - useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18 - if useWorkFile { - // TODO(#51215): build a temp workfile and set GOWORK in the environment. - } else if useTempMod { - if modURI == "" { - return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) - } - modFH, err := s.GetFile(ctx, modURI) - if err != nil { - return "", nil, cleanup, err - } - // Use the go.sum if it happens to be available. - gosum := s.goSum(ctx, modURI) - tmpURI, cleanup, err = tempModFile(modFH, gosum) - if err != nil { - return "", nil, cleanup, err - } - inv.ModFile = tmpURI.Filename() - } - - return tmpURI, inv, cleanup, nil -} - -func (s *snapshot) buildOverlay() map[string][]byte { - s.mu.Lock() - defer s.mu.Unlock() - - overlays := make(map[string][]byte) - for uri, fh := range s.files { - overlay, ok := fh.(*overlay) - if !ok { - continue - } - if overlay.saved { - continue - } - // TODO(rstambler): Make sure not to send overlays outside of the current view. - overlays[uri.Filename()] = overlay.text - } - return overlays -} - -func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string { - var unsaved []string - for uri, fh := range files { - if overlay, ok := fh.(*overlay); ok && !overlay.saved { - unsaved = append(unsaved, uri.Filename()) - } - } - sort.Strings(unsaved) - return hashContents([]byte(strings.Join(unsaved, ""))) -} - -func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) { - ctx = event.Label(ctx, tag.URI.Of(uri)) - - phs, err := s.packageHandlesForFile(ctx, uri, mode, includeTestVariants) - if err != nil { - return nil, err - } - var pkgs []source.Package - for _, ph := range phs { - pkg, err := ph.check(ctx, s) - if err != nil { - return nil, err - } - pkgs = append(pkgs, pkg) - } - return pkgs, nil -} - -func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, pkgPolicy source.PackageFilter) (source.Package, error) { - ctx = event.Label(ctx, tag.URI.Of(uri)) - - phs, err := s.packageHandlesForFile(ctx, uri, mode, false) - if err != nil { - return nil, err - } - - if len(phs) < 1 { - return nil, fmt.Errorf("no packages") - } - - ph := phs[0] - for _, handle := range phs[1:] { - switch pkgPolicy { - case source.WidestPackage: - if ph == nil || len(handle.CompiledGoFiles()) > len(ph.CompiledGoFiles()) { - ph = handle - } - case source.NarrowestPackage: - if ph == nil || len(handle.CompiledGoFiles()) < len(ph.CompiledGoFiles()) { - ph = handle - } - } - } - if ph == nil { - return nil, fmt.Errorf("no packages in input") - } - - return ph.check(ctx, s) -} - -func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) { - // Check if we should reload metadata for the file. We don't invalidate IDs - // (though we should), so the IDs will be a better source of truth than the - // metadata. If there are no IDs for the file, then we should also reload. - fh, err := s.GetFile(ctx, uri) - if err != nil { - return nil, err - } - if kind := s.view.FileKind(fh); kind != source.Go { - return nil, fmt.Errorf("no packages for non-Go file %s (%v)", uri, kind) - } - knownIDs, err := s.getOrLoadIDsForURI(ctx, uri) - if err != nil { - return nil, err - } - - var phs []*packageHandle - for _, id := range knownIDs { - // Filter out any intermediate test variants. We typically aren't - // interested in these packages for file= style queries. - if m := s.getMetadata(id); m != nil && m.IsIntermediateTestVariant && !includeTestVariants { - continue - } - var parseModes []source.ParseMode - switch mode { - case source.TypecheckAll: - if s.workspaceParseMode(id) == source.ParseFull { - parseModes = []source.ParseMode{source.ParseFull} - } else { - parseModes = []source.ParseMode{source.ParseExported, source.ParseFull} - } - case source.TypecheckFull: - parseModes = []source.ParseMode{source.ParseFull} - case source.TypecheckWorkspace: - parseModes = []source.ParseMode{s.workspaceParseMode(id)} - } - - for _, parseMode := range parseModes { - ph, err := s.buildPackageHandle(ctx, id, parseMode) - if err != nil { - return nil, err - } - phs = append(phs, ph) - } - } - return phs, nil -} - -func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) { - knownIDs := s.getIDsForURI(uri) - reload := len(knownIDs) == 0 - for _, id := range knownIDs { - // Reload package metadata if any of the metadata has missing - // dependencies, in case something has changed since the last time we - // reloaded it. - if s.noValidMetadataForID(id) { - reload = true - break - } - // TODO(golang/go#36918): Previously, we would reload any package with - // missing dependencies. This is expensive and results in too many - // calls to packages.Load. Determine what we should do instead. - } - if reload { - err := s.load(ctx, false, fileURI(uri)) - - if !s.useInvalidMetadata() && err != nil { - return nil, err - } - // We've tried to reload and there are still no known IDs for the URI. - // Return the load error, if there was one. - knownIDs = s.getIDsForURI(uri) - if len(knownIDs) == 0 { - return nil, err - } - } - return knownIDs, nil -} - -// Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has -// issues with overlays that will cause confusing error messages if we reuse -// old metadata. -func (s *snapshot) useInvalidMetadata() bool { - return s.view.goversion >= 13 && s.view.Options().ExperimentalUseInvalidMetadata -} - -func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) { - if err := s.awaitLoaded(ctx); err != nil { - return nil, err - } - ids := make(map[PackageID]struct{}) - s.transitiveReverseDependencies(PackageID(id), ids) - - // Make sure to delete the original package ID from the map. - delete(ids, PackageID(id)) - - var pkgs []source.Package - for id := range ids { - pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id)) - if err != nil { - return nil, err - } - pkgs = append(pkgs, pkg) - } - return pkgs, nil -} - -func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source.ParseMode) (*pkg, error) { - ph, err := s.buildPackageHandle(ctx, id, mode) - if err != nil { - return nil, err - } - return ph.check(ctx, s) -} - -// transitiveReverseDependencies populates the ids map with package IDs -// belonging to the provided package and its transitive reverse dependencies. -func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID]struct{}) { - if _, ok := ids[id]; ok { - return - } - m := s.getMetadata(id) - // Only use invalid metadata if we support it. - if m == nil || !(m.Valid || s.useInvalidMetadata()) { - return - } - ids[id] = struct{}{} - importedBy := s.getImportedBy(id) - for _, parentID := range importedBy { - s.transitiveReverseDependencies(parentID, ids) - } -} - -func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.goFiles[key] -} - -func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle { - s.mu.Lock() - defer s.mu.Unlock() - if existing, ok := s.goFiles[key]; ok { - return existing - } - s.goFiles[key] = pgh - return pgh -} - -func (s *snapshot) getParseModHandle(uri span.URI) *parseModHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.parseModHandles[uri] -} - -func (s *snapshot) getParseWorkHandle(uri span.URI) *parseWorkHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.parseWorkHandles[uri] -} - -func (s *snapshot) getModWhyHandle(uri span.URI) *modWhyHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.modWhyHandles[uri] -} - -func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.modTidyHandles[uri] -} - -func (s *snapshot) getImportedBy(id PackageID) []PackageID { - s.mu.Lock() - defer s.mu.Unlock() - return s.getImportedByLocked(id) -} - -func (s *snapshot) getImportedByLocked(id PackageID) []PackageID { - // If we haven't rebuilt the import graph since creating the snapshot. - if len(s.meta.importedBy) == 0 { - s.rebuildImportGraph() - } - return s.meta.importedBy[id] -} - -func (s *snapshot) clearAndRebuildImportGraph() { - s.mu.Lock() - defer s.mu.Unlock() - - // Completely invalidate the original map. - s.meta.importedBy = make(map[PackageID][]PackageID) - s.rebuildImportGraph() -} - -func (s *snapshot) rebuildImportGraph() { - for id, m := range s.meta.metadata { - for _, importID := range m.Deps { - s.meta.importedBy[importID] = append(s.meta.importedBy[importID], id) - } - } -} - -func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle { - s.mu.Lock() - defer s.mu.Unlock() - - // If the package handle has already been cached, - // return the cached handle instead of overriding it. - if ph, ok := s.packages[ph.packageKey()]; ok { - return ph - } - s.packages[ph.packageKey()] = ph - return ph -} - -func (s *snapshot) workspacePackageIDs() (ids []PackageID) { - s.mu.Lock() - defer s.mu.Unlock() - - for id := range s.workspacePackages { - ids = append(ids, id) - } - return ids -} - -func (s *snapshot) activePackageIDs() (ids []PackageID) { - if s.view.Options().MemoryMode == source.ModeNormal { - return s.workspacePackageIDs() - } - - s.mu.Lock() - defer s.mu.Unlock() - - seen := make(map[PackageID]bool) - for id := range s.workspacePackages { - if s.isActiveLocked(id, seen) { - ids = append(ids, id) - } - } - return ids -} - -func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) { - if seen == nil { - seen = make(map[PackageID]bool) - } - if seen, ok := seen[id]; ok { - return seen - } - defer func() { - seen[id] = active - }() - m, ok := s.meta.metadata[id] - if !ok { - return false - } - for _, cgf := range m.CompiledGoFiles { - if s.isOpenLocked(cgf) { - return true - } - } - for _, dep := range m.Deps { - if s.isActiveLocked(dep, seen) { - return true - } - } - return false -} - -const fileExtensions = "go,mod,sum,work" - -func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { - extensions := fileExtensions - for _, ext := range s.View().Options().TemplateExtensions { - extensions += "," + ext - } - // Work-around microsoft/vscode#100870 by making sure that we are, - // at least, watching the user's entire workspace. This will still be - // applied to every folder in the workspace. - patterns := map[string]struct{}{ - fmt.Sprintf("**/*.{%s}", extensions): {}, - } - dirs := s.workspace.dirs(ctx, s) - for _, dir := range dirs { - dirName := dir.Filename() - - // If the directory is within the view's folder, we're already watching - // it with the pattern above. - if source.InDir(s.view.folder.Filename(), dirName) { - continue - } - // TODO(rstambler): If microsoft/vscode#3025 is resolved before - // microsoft/vscode#101042, we will need a work-around for Windows - // drive letter casing. - patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{} - } - - // Some clients do not send notifications for changes to directories that - // contain Go code (golang/go#42348). To handle this, explicitly watch all - // of the directories in the workspace. We find them by adding the - // directories of every file in the snapshot's workspace directories. - var dirNames []string - for _, uri := range s.getKnownSubdirs(dirs) { - dirNames = append(dirNames, uri.Filename()) - } - sort.Strings(dirNames) - if len(dirNames) > 0 { - patterns[fmt.Sprintf("{%s}", strings.Join(dirNames, ","))] = struct{}{} - } - return patterns -} - -// collectAllKnownSubdirs collects all of the subdirectories within the -// snapshot's workspace directories. None of the workspace directories are -// included. -func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { - dirs := s.workspace.dirs(ctx, s) - - s.mu.Lock() - defer s.mu.Unlock() - - s.knownSubdirs = map[span.URI]struct{}{} - for uri := range s.files { - s.addKnownSubdirLocked(uri, dirs) - } -} - -func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { - s.mu.Lock() - defer s.mu.Unlock() - - // First, process any pending changes and update the set of known - // subdirectories. - for _, c := range s.unprocessedSubdirChanges { - if c.isUnchanged { - continue - } - if !c.exists { - s.removeKnownSubdirLocked(c.fileHandle.URI()) - } else { - s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs) - } - } - s.unprocessedSubdirChanges = nil - - var result []span.URI - for uri := range s.knownSubdirs { - result = append(result, uri) - } - return result -} - -func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { - dir := filepath.Dir(uri.Filename()) - // First check if the directory is already known, because then we can - // return early. - if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok { - return - } - var matched span.URI - for _, wsDir := range dirs { - if source.InDir(wsDir.Filename(), dir) { - matched = wsDir - break - } - } - // Don't watch any directory outside of the workspace directories. - if matched == "" { - return - } - for { - if dir == "" || dir == matched.Filename() { - break - } - uri := span.URIFromPath(dir) - if _, ok := s.knownSubdirs[uri]; ok { - break - } - s.knownSubdirs[uri] = struct{}{} - dir = filepath.Dir(dir) - } -} - -func (s *snapshot) removeKnownSubdirLocked(uri span.URI) { - dir := filepath.Dir(uri.Filename()) - for dir != "" { - uri := span.URIFromPath(dir) - if _, ok := s.knownSubdirs[uri]; !ok { - break - } - if info, _ := os.Stat(dir); info == nil { - delete(s.knownSubdirs, uri) - } - dir = filepath.Dir(dir) - } -} - -// knownFilesInDir returns the files known to the given snapshot that are in -// the given directory. It does not respect symlinks. -func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI { - var files []span.URI - s.mu.Lock() - defer s.mu.Unlock() - - for uri := range s.files { - if source.InDir(dir.Filename(), uri.Filename()) { - files = append(files, uri) - } - } - return files -} - -func (s *snapshot) workspacePackageHandles(ctx context.Context) ([]*packageHandle, error) { - if err := s.awaitLoaded(ctx); err != nil { - return nil, err - } - var phs []*packageHandle - for _, pkgID := range s.workspacePackageIDs() { - ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID)) - if err != nil { - return nil, err - } - phs = append(phs, ph) - } - return phs, nil -} - -func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) { - phs, err := s.activePackageHandles(ctx) - if err != nil { - return nil, err - } - var pkgs []source.Package - for _, ph := range phs { - pkg, err := ph.check(ctx, s) - if err != nil { - return nil, err - } - pkgs = append(pkgs, pkg) - } - return pkgs, nil -} - -func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, error) { - if err := s.awaitLoaded(ctx); err != nil { - return nil, err - } - var phs []*packageHandle - for _, pkgID := range s.activePackageIDs() { - ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID)) - if err != nil { - return nil, err - } - phs = append(phs, ph) - } - return phs, nil -} - -func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) { - result := make(map[span.URI][]source.Symbol) - - // Keep going on errors, but log the first failure. Partial symbol results - // are better than no symbol results. - var firstErr error - for uri, f := range s.files { - sh := s.buildSymbolHandle(ctx, f) - v, err := sh.handle.Get(ctx, s.generation, s) - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - data := v.(*symbolData) - result[uri] = data.symbols - } - if firstErr != nil { - event.Error(ctx, "getting snapshot symbols", firstErr) - } - return result, nil -} - -func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]source.Metadata, error) { - knownIDs, err := s.getOrLoadIDsForURI(ctx, uri) - if err != nil { - return nil, err - } - var mds []source.Metadata - for _, id := range knownIDs { - md := s.getMetadata(id) - // TODO(rfindley): knownIDs and metadata should be in sync, but existing - // code is defensive of nil metadata. - if md != nil { - mds = append(mds, md) - } - } - return mds, nil -} - -func (s *snapshot) KnownPackages(ctx context.Context) ([]source.Package, error) { - if err := s.awaitLoaded(ctx); err != nil { - return nil, err - } - - // The WorkspaceSymbols implementation relies on this function returning - // workspace packages first. - ids := s.workspacePackageIDs() - s.mu.Lock() - for id := range s.meta.metadata { - if _, ok := s.workspacePackages[id]; ok { - continue - } - ids = append(ids, id) - } - s.mu.Unlock() - - var pkgs []source.Package - for _, id := range ids { - pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id)) - if err != nil { - return nil, err - } - pkgs = append(pkgs, pkg) - } - return pkgs, nil -} - -func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) { - // Don't reload workspace package metadata. - // This function is meant to only return currently cached information. - s.AwaitInitialized(ctx) - - s.mu.Lock() - defer s.mu.Unlock() - - results := map[string]source.Package{} - for _, ph := range s.packages { - cachedPkg, err := ph.cached(s.generation) - if err != nil { - continue - } - for importPath, newPkg := range cachedPkg.imports { - if oldPkg, ok := results[string(importPath)]; ok { - // Using the same trick as NarrowestPackage, prefer non-variants. - if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) { - results[string(importPath)] = newPkg - } - } else { - results[string(importPath)] = newPkg - } - } - } - return results, nil -} - -func (s *snapshot) GoModForFile(uri span.URI) span.URI { - return moduleForURI(s.workspace.activeModFiles, uri) -} - -func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { - var match span.URI - for modURI := range modFiles { - if !source.InDir(dirURI(modURI).Filename(), uri.Filename()) { - continue - } - if len(modURI) > len(match) { - match = modURI - } - } - return match -} - -func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle { - s.mu.Lock() - defer s.mu.Unlock() - - key := packageKey{ - id: id, - mode: mode, - } - return s.packages[key] -} - -func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle { - s.mu.Lock() - defer s.mu.Unlock() - - return s.symbols[uri] -} - -func (s *snapshot) addSymbolHandle(sh *symbolHandle) *symbolHandle { - s.mu.Lock() - defer s.mu.Unlock() - - uri := sh.fh.URI() - // If the package handle has already been cached, - // return the cached handle instead of overriding it. - if sh, ok := s.symbols[uri]; ok { - return sh - } - s.symbols[uri] = sh - return sh -} - -func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle { - s.mu.Lock() - defer s.mu.Unlock() - - key := actionKey{ - pkg: packageKey{ - id: id, - mode: m, - }, - analyzer: a, - } - return s.actions[key] -} - -func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle { - s.mu.Lock() - defer s.mu.Unlock() - - key := actionKey{ - analyzer: ah.analyzer, - pkg: packageKey{ - id: ah.pkg.m.ID, - mode: ah.pkg.mode, - }, - } - if ah, ok := s.actions[key]; ok { - return ah - } - s.actions[key] = ah - return ah -} - -func (s *snapshot) getIDsForURI(uri span.URI) []PackageID { - s.mu.Lock() - defer s.mu.Unlock() - - return s.meta.ids[uri] -} - -func (s *snapshot) getMetadata(id PackageID) *KnownMetadata { - s.mu.Lock() - defer s.mu.Unlock() - - return s.meta.metadata[id] -} - -func (s *snapshot) shouldLoad(scope interface{}) bool { - s.mu.Lock() - defer s.mu.Unlock() - - switch scope := scope.(type) { - case PackagePath: - var meta *KnownMetadata - for _, m := range s.meta.metadata { - if m.PkgPath != scope { - continue - } - meta = m - } - if meta == nil || meta.ShouldLoad { - return true - } - return false - case fileURI: - uri := span.URI(scope) - ids := s.meta.ids[uri] - if len(ids) == 0 { - return true - } - for _, id := range ids { - m, ok := s.meta.metadata[id] - if !ok || m.ShouldLoad { - return true - } - } - return false - default: - return true - } -} - -func (s *snapshot) clearShouldLoad(scope interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - - switch scope := scope.(type) { - case PackagePath: - var meta *KnownMetadata - for _, m := range s.meta.metadata { - if m.PkgPath == scope { - meta = m - } - } - if meta == nil { - return - } - meta.ShouldLoad = false - case fileURI: - uri := span.URI(scope) - ids := s.meta.ids[uri] - if len(ids) == 0 { - return - } - for _, id := range ids { - if m, ok := s.meta.metadata[id]; ok { - m.ShouldLoad = false - } - } - } -} - -// noValidMetadataForURILocked reports whether there is any valid metadata for -// the given URI. -func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { - ids, ok := s.meta.ids[uri] - if !ok { - return true - } - for _, id := range ids { - if m, ok := s.meta.metadata[id]; ok && m.Valid { - return false - } - } - return true -} - -// noValidMetadataForID reports whether there is no valid metadata for the -// given ID. -func (s *snapshot) noValidMetadataForID(id PackageID) bool { - s.mu.Lock() - defer s.mu.Unlock() - return s.noValidMetadataForIDLocked(id) -} - -func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool { - m := s.meta.metadata[id] - return m == nil || !m.Valid -} - -// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI. -// Any existing invalid IDs are removed from the set of known IDs. IDs that are -// not "command-line-arguments" are preferred, so if a new ID comes in for a -// URI that previously only had "command-line-arguments", the new ID will -// replace the "command-line-arguments" ID. -func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) { - for uri := range uris { - // Collect the new set of IDs, preserving any valid existing IDs. - newIDs := []PackageID{id} - for _, existingID := range s.meta.ids[uri] { - // Don't set duplicates of the same ID. - if existingID == id { - continue - } - // If the package previously only had a command-line-arguments ID, - // delete the command-line-arguments workspace package. - if source.IsCommandLineArguments(string(existingID)) { - delete(s.workspacePackages, existingID) - continue - } - // If the metadata for an existing ID is invalid, and we are - // setting metadata for a new, valid ID--don't preserve the old ID. - if m, ok := s.meta.metadata[existingID]; !ok || !m.Valid { - continue - } - newIDs = append(newIDs, existingID) - } - sort.Slice(newIDs, func(i, j int) bool { - return newIDs[i] < newIDs[j] - }) - s.meta.ids[uri] = newIDs - } -} - -func (s *snapshot) isWorkspacePackage(id PackageID) bool { - s.mu.Lock() - defer s.mu.Unlock() - - _, ok := s.workspacePackages[id] - return ok -} - -func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle { - f := s.view.getFile(uri) - - s.mu.Lock() - defer s.mu.Unlock() - - return s.files[f.URI()] -} - -// GetVersionedFile returns a File for the given URI. If the file is unknown it -// is added to the managed set. -// -// GetVersionedFile succeeds even if the file does not exist. A non-nil error return -// indicates some type of internal error, for example if ctx is cancelled. -func (s *snapshot) GetVersionedFile(ctx context.Context, uri span.URI) (source.VersionedFileHandle, error) { - f := s.view.getFile(uri) - - s.mu.Lock() - defer s.mu.Unlock() - return s.getFileLocked(ctx, f) -} - -// GetFile implements the fileSource interface by wrapping GetVersionedFile. -func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { - return s.GetVersionedFile(ctx, uri) -} - -func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) { - if fh, ok := s.files[f.URI()]; ok { - return fh, nil - } - - fh, err := s.view.session.cache.getFile(ctx, f.URI()) - if err != nil { - return nil, err - } - closed := &closedFile{fh} - s.files[f.URI()] = closed - return closed, nil -} - -func (s *snapshot) IsOpen(uri span.URI) bool { - s.mu.Lock() - defer s.mu.Unlock() - return s.isOpenLocked(uri) - -} - -func (s *snapshot) openFiles() []source.VersionedFileHandle { - s.mu.Lock() - defer s.mu.Unlock() - - var open []source.VersionedFileHandle - for _, fh := range s.files { - if s.isOpenLocked(fh.URI()) { - open = append(open, fh) - } - } - return open -} - -func (s *snapshot) isOpenLocked(uri span.URI) bool { - _, open := s.files[uri].(*overlay) - return open -} - -func (s *snapshot) awaitLoaded(ctx context.Context) error { - loadErr := s.awaitLoadedAllErrors(ctx) - - s.mu.Lock() - defer s.mu.Unlock() - - // If we still have absolutely no metadata, check if the view failed to - // initialize and return any errors. - if s.useInvalidMetadata() && len(s.meta.metadata) > 0 { - return nil - } - for _, m := range s.meta.metadata { - if m.Valid { - return nil - } - } - if loadErr != nil { - return loadErr.MainError - } - return nil -} - -func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError { - loadErr := s.awaitLoadedAllErrors(ctx) - if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) { - return nil - } - - // Even if packages didn't fail to load, we still may want to show - // additional warnings. - if loadErr == nil { - wsPkgs, _ := s.ActivePackages(ctx) - if msg := shouldShowAdHocPackagesWarning(s, wsPkgs); msg != "" { - return &source.CriticalError{ - MainError: errors.New(msg), - } - } - // Even if workspace packages were returned, there still may be an error - // with the user's workspace layout. Workspace packages that only have the - // ID "command-line-arguments" are usually a symptom of a bad workspace - // configuration. - if containsCommandLineArguments(wsPkgs) { - return s.workspaceLayoutError(ctx) - } - return nil - } - - if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") { - return s.workspaceLayoutError(ctx) - } - return loadErr -} - -const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src. -If you are using modules, please open your editor to a directory in your module. -If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.` - -func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, pkgs []source.Package) string { - if snapshot.ValidBuildConfiguration() { - return "" - } - for _, pkg := range pkgs { - if len(pkg.MissingDependencies()) > 0 { - return adHocPackagesWarning - } - } - return "" -} - -func containsCommandLineArguments(pkgs []source.Package) bool { - for _, pkg := range pkgs { - if source.IsCommandLineArguments(pkg.ID()) { - return true - } - } - return false -} - -func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError { - // Do not return results until the snapshot's view has been initialized. - s.AwaitInitialized(ctx) - - // TODO(rstambler): Should we be more careful about returning the - // initialization error? Is it possible for the initialization error to be - // corrected without a successful reinitialization? - s.mu.Lock() - initializedErr := s.initializedErr - s.mu.Unlock() - if initializedErr != nil { - return initializedErr - } - - if ctx.Err() != nil { - return &source.CriticalError{MainError: ctx.Err()} - } - - if err := s.reloadWorkspace(ctx); err != nil { - diags := s.extractGoCommandErrors(ctx, err) - return &source.CriticalError{ - MainError: err, - DiagList: diags, - } - } - if err := s.reloadOrphanedFiles(ctx); err != nil { - diags := s.extractGoCommandErrors(ctx, err) - return &source.CriticalError{ - MainError: err, - DiagList: diags, - } - } - return nil -} - -func (s *snapshot) getInitializationError(ctx context.Context) *source.CriticalError { - s.mu.Lock() - defer s.mu.Unlock() - - return s.initializedErr -} - -func (s *snapshot) AwaitInitialized(ctx context.Context) { - select { - case <-ctx.Done(): - return - case <-s.view.initialWorkspaceLoad: - } - // We typically prefer to run something as intensive as the IWL without - // blocking. I'm not sure if there is a way to do that here. - s.initialize(ctx, false) -} - -// reloadWorkspace reloads the metadata for all invalidated workspace packages. -func (s *snapshot) reloadWorkspace(ctx context.Context) error { - // See which of the workspace packages are missing metadata. - s.mu.Lock() - missingMetadata := len(s.workspacePackages) == 0 || len(s.meta.metadata) == 0 - pkgPathSet := map[PackagePath]struct{}{} - for id, pkgPath := range s.workspacePackages { - if m, ok := s.meta.metadata[id]; ok && m.Valid { - continue - } - missingMetadata = true - - // Don't try to reload "command-line-arguments" directly. - if source.IsCommandLineArguments(string(pkgPath)) { - continue - } - pkgPathSet[pkgPath] = struct{}{} - } - s.mu.Unlock() - - // If the view's build configuration is invalid, we cannot reload by - // package path. Just reload the directory instead. - if missingMetadata && !s.ValidBuildConfiguration() { - return s.load(ctx, false, viewLoadScope("LOAD_INVALID_VIEW")) - } - - if len(pkgPathSet) == 0 { - return nil - } - - var pkgPaths []interface{} - for pkgPath := range pkgPathSet { - pkgPaths = append(pkgPaths, pkgPath) - } - return s.load(ctx, false, pkgPaths...) -} - -func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error { - // When we load ./... or a package path directly, we may not get packages - // that exist only in overlays. As a workaround, we search all of the files - // available in the snapshot and reload their metadata individually using a - // file= query if the metadata is unavailable. - files := s.orphanedFiles() - - // Files without a valid package declaration can't be loaded. Don't try. - var scopes []interface{} - for _, file := range files { - pgf, err := s.ParseGo(ctx, file, source.ParseHeader) - if err != nil { - continue - } - if !pgf.File.Package.IsValid() { - continue - } - scopes = append(scopes, fileURI(file.URI())) - } - - if len(scopes) == 0 { - return nil - } - - // The regtests match this exact log message, keep them in sync. - event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes)) - err := s.load(ctx, false, scopes...) - - // If we failed to load some files, i.e. they have no metadata, - // mark the failures so we don't bother retrying until the file's - // content changes. - // - // TODO(rstambler): This may be an overestimate if the load stopped - // early for an unrelated errors. Add a fallback? - // - // Check for context cancellation so that we don't incorrectly mark files - // as unloadable, but don't return before setting all workspace packages. - if ctx.Err() == nil && err != nil { - event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes)) - s.mu.Lock() - for _, scope := range scopes { - uri := span.URI(scope.(fileURI)) - if s.noValidMetadataForURILocked(uri) { - s.unloadableFiles[uri] = struct{}{} - } - } - s.mu.Unlock() - } - return nil -} - -func (s *snapshot) orphanedFiles() []source.VersionedFileHandle { - s.mu.Lock() - defer s.mu.Unlock() - - var files []source.VersionedFileHandle - for uri, fh := range s.files { - // Don't try to reload metadata for go.mod files. - if s.view.FileKind(fh) != source.Go { - continue - } - // If the URI doesn't belong to this view, then it's not in a workspace - // package and should not be reloaded directly. - if !contains(s.view.session.viewsOf(uri), s.view) { - continue - } - // If the file is not open and is in a vendor directory, don't treat it - // like a workspace package. - if _, ok := fh.(*overlay); !ok && inVendor(uri) { - continue - } - // Don't reload metadata for files we've already deemed unloadable. - if _, ok := s.unloadableFiles[uri]; ok { - continue - } - if s.noValidMetadataForURILocked(uri) { - files = append(files, fh) - } - } - return files -} - -func contains(views []*View, view *View) bool { - for _, v := range views { - if v == view { - return true - } - } - return false -} - -func inVendor(uri span.URI) bool { - if !strings.Contains(string(uri), "/vendor/") { - return false - } - // Only packages in _subdirectories_ of /vendor/ are considered vendored - // (/vendor/a/foo.go is vendored, /vendor/foo.go is not). - split := strings.Split(string(uri), "/vendor/") - if len(split) < 2 { - return false - } - return strings.Contains(split[1], "/") -} - -func generationName(v *View, snapshotID uint64) string { - return fmt.Sprintf("v%v/%v", v.id, snapshotID) -} - -// checkSnapshotLocked verifies that some invariants are preserved on the -// snapshot. -func checkSnapshotLocked(ctx context.Context, s *snapshot) { - // Check that every go file for a workspace package is identified as - // belonging to that workspace package. - for wsID := range s.workspacePackages { - if m, ok := s.meta.metadata[wsID]; ok { - for _, uri := range m.GoFiles { - found := false - for _, id := range s.meta.ids[uri] { - if id == wsID { - found = true - break - } - } - if !found { - log.Error.Logf(ctx, "workspace package %v not associated with %v", wsID, uri) - } - } - } - } -} - -// unappliedChanges is a file source that handles an uncloned snapshot. -type unappliedChanges struct { - originalSnapshot *snapshot - changes map[span.URI]*fileChange -} - -func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { - if c, ok := ac.changes[uri]; ok { - return c.fileHandle, nil - } - return ac.originalSnapshot.GetFile(ctx, uri) -} - -func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot { - var vendorChanged bool - newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{ - originalSnapshot: s, - changes: changes, - }) - - s.mu.Lock() - defer s.mu.Unlock() - - checkSnapshotLocked(ctx, s) - - newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1)) - bgCtx, cancel := context.WithCancel(bgCtx) - result := &snapshot{ - id: s.id + 1, - generation: newGen, - view: s.view, - backgroundCtx: bgCtx, - cancel: cancel, - builtin: s.builtin, - initializeOnce: s.initializeOnce, - initializedErr: s.initializedErr, - meta: NewMetadataGraph(), - packages: make(map[packageKey]*packageHandle, len(s.packages)), - actions: make(map[actionKey]*actionHandle, len(s.actions)), - files: make(map[span.URI]source.VersionedFileHandle, len(s.files)), - goFiles: make(map[parseKey]*parseGoHandle, len(s.goFiles)), - symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), - workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), - unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), - parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)), - parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)), - modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)), - modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)), - knownSubdirs: make(map[span.URI]struct{}, len(s.knownSubdirs)), - workspace: newWorkspace, - } - - if !workspaceChanged && s.workspaceDirHandle != nil { - result.workspaceDirHandle = s.workspaceDirHandle - newGen.Inherit(s.workspaceDirHandle) - } - - // Copy all of the FileHandles. - for k, v := range s.files { - result.files[k] = v - } - for k, v := range s.symbols { - if change, ok := changes[k]; ok { - if change.exists { - result.symbols[k] = result.buildSymbolHandle(ctx, change.fileHandle) - } - continue - } - newGen.Inherit(v.handle) - result.symbols[k] = v - } - - // Copy the set of unloadable files. - for k, v := range s.unloadableFiles { - result.unloadableFiles[k] = v - } - // Copy all of the modHandles. - for k, v := range s.parseModHandles { - result.parseModHandles[k] = v - } - // Copy all of the parseWorkHandles. - for k, v := range s.parseWorkHandles { - result.parseWorkHandles[k] = v - } - - for k, v := range s.goFiles { - if _, ok := changes[k.file.URI]; ok { - continue - } - newGen.Inherit(v.handle) - result.goFiles[k] = v - } - - // Copy all of the go.mod-related handles. They may be invalidated later, - // so we inherit them at the end of the function. - for k, v := range s.modTidyHandles { - if _, ok := changes[k]; ok { - continue - } - result.modTidyHandles[k] = v - } - for k, v := range s.modWhyHandles { - if _, ok := changes[k]; ok { - continue - } - result.modWhyHandles[k] = v - } - - // Add all of the known subdirectories, but don't update them for the - // changed files. We need to rebuild the workspace module to know the - // true set of known subdirectories, but we don't want to do that in clone. - for k, v := range s.knownSubdirs { - result.knownSubdirs[k] = v - } - for _, c := range changes { - result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c) - } - - // directIDs keeps track of package IDs that have directly changed. - // It maps id->invalidateMetadata. - directIDs := map[PackageID]bool{} - - // Invalidate all package metadata if the workspace module has changed. - if workspaceReload { - for k := range s.meta.metadata { - directIDs[k] = true - } - } - - changedPkgFiles := map[PackageID]struct{}{} // packages whose file set may have changed - anyImportDeleted := false - for uri, change := range changes { - // Maybe reinitialize the view if we see a change in the vendor - // directory. - if inVendor(uri) { - vendorChanged = true - } - - // The original FileHandle for this URI is cached on the snapshot. - originalFH := s.files[uri] - - // If uri is a Go file, check if it has changed in a way that would - // invalidate metadata. Note that we can't use s.view.FileKind here, - // because the file type that matters is not what the *client* tells us, - // but what the Go command sees. - var invalidateMetadata, pkgFileChanged, importDeleted bool - if strings.HasSuffix(uri.Filename(), ".go") { - invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle) - } - - invalidateMetadata = invalidateMetadata || forceReloadMetadata || workspaceReload - anyImportDeleted = anyImportDeleted || importDeleted - - // Mark all of the package IDs containing the given file. - filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged) - if pkgFileChanged { - for id := range filePackageIDs { - changedPkgFiles[id] = struct{}{} - } - } - for id := range filePackageIDs { - directIDs[id] = directIDs[id] || invalidateMetadata - } - - // Invalidate the previous modTidyHandle if any of the files have been - // saved or if any of the metadata has been invalidated. - if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) { - // TODO(rstambler): Only delete mod handles for which the - // withoutURI is relevant. - for k := range s.modTidyHandles { - delete(result.modTidyHandles, k) - } - for k := range s.modWhyHandles { - delete(result.modWhyHandles, k) - } - } - delete(result.parseModHandles, uri) - delete(result.parseWorkHandles, uri) - // Handle the invalidated file; it may have new contents or not exist. - if !change.exists { - delete(result.files, uri) - } else { - result.files[uri] = change.fileHandle - } - - // Make sure to remove the changed file from the unloadable set. - delete(result.unloadableFiles, uri) - } - - // Deleting an import can cause list errors due to import cycles to be - // resolved. The best we can do without parsing the list error message is to - // hope that list errors may have been resolved by a deleted import. - // - // We could do better by parsing the list error message. We already do this - // to assign a better range to the list error, but for such critical - // functionality as metadata, it's better to be conservative until it proves - // impractical. - // - // We could also do better by looking at which imports were deleted and - // trying to find cycles they are involved in. This fails when the file goes - // from an unparseable state to a parseable state, as we don't have a - // starting point to compare with. - if anyImportDeleted { - for id, metadata := range s.meta.metadata { - if len(metadata.Errors) > 0 { - directIDs[id] = true - } - } - } - - // Invalidate reverse dependencies too. - // TODO(heschi): figure out the locking model and use transitiveReverseDeps? - // idsToInvalidate keeps track of transitive reverse dependencies. - // If an ID is present in the map, invalidate its types. - // If an ID's value is true, invalidate its metadata too. - idsToInvalidate := map[PackageID]bool{} - var addRevDeps func(PackageID, bool) - addRevDeps = func(id PackageID, invalidateMetadata bool) { - current, seen := idsToInvalidate[id] - newInvalidateMetadata := current || invalidateMetadata - - // If we've already seen this ID, and the value of invalidate - // metadata has not changed, we can return early. - if seen && current == newInvalidateMetadata { - return - } - idsToInvalidate[id] = newInvalidateMetadata - for _, rid := range s.getImportedByLocked(id) { - addRevDeps(rid, invalidateMetadata) - } - } - for id, invalidateMetadata := range directIDs { - addRevDeps(id, invalidateMetadata) - } - - // Copy the package type information. - for k, v := range s.packages { - if _, ok := idsToInvalidate[k.id]; ok { - continue - } - newGen.Inherit(v.handle) - result.packages[k] = v - } - // Copy the package analysis information. - for k, v := range s.actions { - if _, ok := idsToInvalidate[k.pkg.id]; ok { - continue - } - newGen.Inherit(v.handle) - result.actions[k] = v - } - - // If the workspace mode has changed, we must delete all metadata, as it - // is unusable and may produce confusing or incorrect diagnostics. - // If a file has been deleted, we must delete metadata all packages - // containing that file. - workspaceModeChanged := s.workspaceMode() != result.workspaceMode() - skipID := map[PackageID]bool{} - for _, c := range changes { - if c.exists { - continue - } - // The file has been deleted. - if ids, ok := s.meta.ids[c.fileHandle.URI()]; ok { - for _, id := range ids { - skipID[id] = true - } - } - } - - // Collect all of the IDs that are reachable from the workspace packages. - // Any unreachable IDs will have their metadata deleted outright. - reachableID := map[PackageID]bool{} - var addForwardDeps func(PackageID) - addForwardDeps = func(id PackageID) { - if reachableID[id] { - return - } - reachableID[id] = true - m, ok := s.meta.metadata[id] - if !ok { - return - } - for _, depID := range m.Deps { - addForwardDeps(depID) - } - } - for id := range s.workspacePackages { - addForwardDeps(id) - } - - // Copy the URI to package ID mappings, skipping only those URIs whose - // metadata will be reloaded in future calls to load. - deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged - idsInSnapshot := map[PackageID]bool{} // track all known IDs - for uri, ids := range s.meta.ids { - var resultIDs []PackageID - for _, id := range ids { - if skipID[id] || deleteInvalidMetadata && idsToInvalidate[id] { - continue - } - // The ID is not reachable from any workspace package, so it should - // be deleted. - if !reachableID[id] { - continue - } - idsInSnapshot[id] = true - resultIDs = append(resultIDs, id) - } - result.meta.ids[uri] = resultIDs - } - - // Copy the package metadata. We only need to invalidate packages directly - // containing the affected file, and only if it changed in a relevant way. - for k, v := range s.meta.metadata { - if !idsInSnapshot[k] { - // Delete metadata for IDs that are no longer reachable from files - // in the snapshot. - continue - } - invalidateMetadata := idsToInvalidate[k] - // Mark invalidated metadata rather than deleting it outright. - result.meta.metadata[k] = &KnownMetadata{ - Metadata: v.Metadata, - Valid: v.Valid && !invalidateMetadata, - ShouldLoad: v.ShouldLoad || invalidateMetadata, - } - } - - // Copy the set of initially loaded packages. - for id, pkgPath := range s.workspacePackages { - // Packages with the id "command-line-arguments" are generated by the - // go command when the user is outside of GOPATH and outside of a - // module. Do not cache them as workspace packages for longer than - // necessary. - if source.IsCommandLineArguments(string(id)) { - if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok { - continue - } - } - - // If all the files we know about in a package have been deleted, - // the package is gone and we should no longer try to load it. - if m := s.meta.metadata[id]; m != nil { - hasFiles := false - for _, uri := range s.meta.metadata[id].GoFiles { - // For internal tests, we need _test files, not just the normal - // ones. External tests only have _test files, but we can check - // them anyway. - if m.ForTest != "" && !strings.HasSuffix(string(uri), "_test.go") { - continue - } - if _, ok := result.files[uri]; ok { - hasFiles = true - break - } - } - if !hasFiles { - continue - } - } - - // If the package name of a file in the package has changed, it's - // possible that the package ID may no longer exist. Delete it from - // the set of workspace packages, on the assumption that we will add it - // back when the relevant files are reloaded. - if _, ok := changedPkgFiles[id]; ok { - continue - } - - result.workspacePackages[id] = pkgPath - } - - // Inherit all of the go.mod-related handles. - for _, v := range result.modTidyHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.modWhyHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.parseModHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.parseWorkHandles { - newGen.Inherit(v.handle) - } - // Don't bother copying the importedBy graph, - // as it changes each time we update metadata. - - // If the snapshot's workspace mode has changed, the packages loaded using - // the previous mode are no longer relevant, so clear them out. - if workspaceModeChanged { - result.workspacePackages = map[PackageID]PackagePath{} - } - - // The snapshot may need to be reinitialized. - if workspaceReload || vendorChanged { - if workspaceChanged || result.initializedErr != nil { - result.initializeOnce = &sync.Once{} - } - } - return result -} - -// invalidatedPackageIDs returns all packages invalidated by a change to uri. -// If we haven't seen this URI before, we guess based on files in the same -// directory. This is of course incorrect in build systems where packages are -// not organized by directory. -// -// If packageFileChanged is set, the file is either a new file, or has a new -// package name. In this case, all known packages in the directory will be -// invalidated. -func invalidatedPackageIDs(uri span.URI, known map[span.URI][]PackageID, packageFileChanged bool) map[PackageID]struct{} { - invalidated := make(map[PackageID]struct{}) - - // At a minimum, we invalidate packages known to contain uri. - for _, id := range known[uri] { - invalidated[id] = struct{}{} - } - - // If the file didn't move to a new package, we should only invalidate the - // packages it is currently contained inside. - if !packageFileChanged && len(invalidated) > 0 { - return invalidated - } - - // This is a file we don't yet know about, or which has moved packages. Guess - // relevant packages by considering files in the same directory. - - // Cache of FileInfo to avoid unnecessary stats for multiple files in the - // same directory. - stats := make(map[string]struct { - os.FileInfo - error - }) - getInfo := func(dir string) (os.FileInfo, error) { - if res, ok := stats[dir]; ok { - return res.FileInfo, res.error - } - fi, err := os.Stat(dir) - stats[dir] = struct { - os.FileInfo - error - }{fi, err} - return fi, err - } - dir := filepath.Dir(uri.Filename()) - fi, err := getInfo(dir) - if err == nil { - // Aggregate all possibly relevant package IDs. - for knownURI, ids := range known { - knownDir := filepath.Dir(knownURI.Filename()) - knownFI, err := getInfo(knownDir) - if err != nil { - continue - } - if os.SameFile(fi, knownFI) { - for _, id := range ids { - invalidated[id] = struct{}{} - } - } - } - } - return invalidated -} - -// fileWasSaved reports whether the FileHandle passed in has been saved. It -// accomplishes this by checking to see if the original and current FileHandles -// are both overlays, and if the current FileHandle is saved while the original -// FileHandle was not saved. -func fileWasSaved(originalFH, currentFH source.FileHandle) bool { - c, ok := currentFH.(*overlay) - if !ok || c == nil { - return true - } - o, ok := originalFH.(*overlay) - if !ok || o == nil { - return c.saved - } - return !o.saved && c.saved -} - -// metadataChanges detects features of the change from oldFH->newFH that may -// affect package metadata. -// -// It uses lockedSnapshot to access cached parse information. lockedSnapshot -// must be locked. -// -// The result parameters have the following meaning: -// - invalidate means that package metadata for packages containing the file -// should be invalidated. -// - pkgFileChanged means that the file->package associates for the file have -// changed (possibly because the file is new, or because its package name has -// changed). -// - importDeleted means that an import has been deleted, or we can't -// determine if an import was deleted due to errors. -func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH source.FileHandle) (invalidate, pkgFileChanged, importDeleted bool) { - if oldFH == nil || newFH == nil { // existential changes - changed := (oldFH == nil) != (newFH == nil) - return changed, changed, (newFH == nil) // we don't know if an import was deleted - } - - // If the file hasn't changed, there's no need to reload. - if oldFH.FileIdentity() == newFH.FileIdentity() { - return false, false, false - } - - // Parse headers to compare package names and imports. - oldHead, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseHeader) - newHead, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseHeader) - - if oldErr != nil || newErr != nil { - // TODO(rfindley): we can get here if newFH does not exists. There is - // asymmetry here, in that newFH may be non-nil even if the underlying file - // does not exist. - // - // We should not produce a non-nil filehandle for a file that does not exist. - errChanged := (oldErr == nil) != (newErr == nil) - return errChanged, errChanged, (newErr != nil) // we don't know if an import was deleted - } - - // `go list` fails completely if the file header cannot be parsed. If we go - // from a non-parsing state to a parsing state, we should reload. - if oldHead.ParseErr != nil && newHead.ParseErr == nil { - return true, true, true // We don't know what changed, so fall back on full invalidation. - } - - // If a package name has changed, the set of package imports may have changed - // in ways we can't detect here. Assume an import has been deleted. - if oldHead.File.Name.Name != newHead.File.Name.Name { - return true, true, true - } - - // Check whether package imports have changed. Only consider potentially - // valid imports paths. - oldImports := validImports(oldHead.File.Imports) - newImports := validImports(newHead.File.Imports) - - for path := range newImports { - if _, ok := oldImports[path]; ok { - delete(oldImports, path) - } else { - invalidate = true // a new, potentially valid import was added - } - } - - if len(oldImports) > 0 { - invalidate = true - importDeleted = true - } - - // If the change does not otherwise invalidate metadata, get the full ASTs in - // order to check magic comments. - // - // Note: if this affects performance we can probably avoid parsing in the - // common case by first scanning the source for potential comments. - if !invalidate { - origFull, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseFull) - currFull, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseFull) - if oldErr == nil && newErr == nil { - invalidate = magicCommentsChanged(origFull.File, currFull.File) - } else { - // At this point, we shouldn't ever fail to produce a ParsedGoFile, as - // we're already past header parsing. - bug.Reportf("metadataChanges: unparseable file %v (old error: %v, new error: %v)", oldFH.URI(), oldErr, newErr) - } - } - - return invalidate, pkgFileChanged, importDeleted -} - -// peekOrParse returns the cached ParsedGoFile if it exists, otherwise parses -// without caching. -// -// It returns an error if the file could not be read (note that parsing errors -// are stored in ParsedGoFile.ParseErr). -// -// lockedSnapshot must be locked. -func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { - key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh := lockedSnapshot.goFiles[key]; pgh != nil { - cached := pgh.handle.Cached(lockedSnapshot.generation) - if cached != nil { - cached := cached.(*parseGoData) - if cached.parsed != nil { - return cached.parsed, nil - } - } - } - - fset := token.NewFileSet() - data := parseGo(ctx, fset, fh, mode) - return data.parsed, data.err -} - -func magicCommentsChanged(original *ast.File, current *ast.File) bool { - oldComments := extractMagicComments(original) - newComments := extractMagicComments(current) - if len(oldComments) != len(newComments) { - return true - } - for i := range oldComments { - if oldComments[i] != newComments[i] { - return true - } - } - return false -} - -// validImports extracts the set of valid import paths from imports. -func validImports(imports []*ast.ImportSpec) map[string]struct{} { - m := make(map[string]struct{}) - for _, spec := range imports { - if path := spec.Path.Value; validImportPath(path) { - m[path] = struct{}{} - } - } - return m -} - -func validImportPath(path string) bool { - path, err := strconv.Unquote(path) - if err != nil { - return false - } - if path == "" { - return false - } - if path[len(path)-1] == '/' { - return false - } - return true -} - -var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`) - -// extractMagicComments finds magic comments that affect metadata in f. -func extractMagicComments(f *ast.File) []string { - var results []string - for _, cg := range f.Comments { - for _, c := range cg.List { - if buildConstraintOrEmbedRe.MatchString(c.Text) { - results = append(results, c.Text) - } - } - } - return results -} - -func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) { - s.AwaitInitialized(ctx) - - s.mu.Lock() - builtin := s.builtin - s.mu.Unlock() - - if builtin == "" { - return nil, fmt.Errorf("no builtin package for view %s", s.view.name) - } - - fh, err := s.GetFile(ctx, builtin) - if err != nil { - return nil, err - } - return s.ParseGo(ctx, fh, source.ParseFull) -} - -func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool { - s.mu.Lock() - defer s.mu.Unlock() - // We should always get the builtin URI in a canonical form, so use simple - // string comparison here. span.CompareURI is too expensive. - return uri == s.builtin -} - -func (s *snapshot) setBuiltin(path string) { - s.mu.Lock() - defer s.mu.Unlock() - - s.builtin = span.URIFromPath(path) -} - -// BuildGoplsMod generates a go.mod file for all modules in the workspace. It -// bypasses any existing gopls.mod. -func (s *snapshot) BuildGoplsMod(ctx context.Context) (*modfile.File, error) { - allModules, err := findModules(s.view.folder, pathExcludedByFilterFunc(s.view.rootURI.Filename(), s.view.gomodcache, s.View().Options()), 0) - if err != nil { - return nil, err - } - return buildWorkspaceModFile(ctx, allModules, s) -} - -// TODO(rfindley): move this to workspacemodule.go -func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) (*modfile.File, error) { - file := &modfile.File{} - file.AddModuleStmt("gopls-workspace") - // Track the highest Go version, to be set on the workspace module. - // Fall back to 1.12 -- old versions insist on having some version. - goVersion := "1.12" - - paths := map[string]span.URI{} - excludes := map[string][]string{} - var sortedModURIs []span.URI - for uri := range modFiles { - sortedModURIs = append(sortedModURIs, uri) - } - sort.Slice(sortedModURIs, func(i, j int) bool { - return sortedModURIs[i] < sortedModURIs[j] - }) - for _, modURI := range sortedModURIs { - fh, err := fs.GetFile(ctx, modURI) - if err != nil { - return nil, err - } - content, err := fh.Read() - if err != nil { - return nil, err - } - parsed, err := modfile.Parse(fh.URI().Filename(), content, nil) - if err != nil { - return nil, err - } - if file == nil || parsed.Module == nil { - return nil, fmt.Errorf("no module declaration for %s", modURI) - } - // Prepend "v" to go versions to make them valid semver. - if parsed.Go != nil && semver.Compare("v"+goVersion, "v"+parsed.Go.Version) < 0 { - goVersion = parsed.Go.Version - } - path := parsed.Module.Mod.Path - if _, ok := paths[path]; ok { - return nil, fmt.Errorf("found module %q twice in the workspace", path) - } - paths[path] = modURI - // If the module's path includes a major version, we expect it to have - // a matching major version. - _, majorVersion, _ := module.SplitPathVersion(path) - if majorVersion == "" { - majorVersion = "/v0" - } - majorVersion = strings.TrimLeft(majorVersion, "/.") // handle gopkg.in versions - file.AddNewRequire(path, source.WorkspaceModuleVersion(majorVersion), false) - if err := file.AddReplace(path, "", dirURI(modURI).Filename(), ""); err != nil { - return nil, err - } - for _, exclude := range parsed.Exclude { - excludes[exclude.Mod.Path] = append(excludes[exclude.Mod.Path], exclude.Mod.Version) - } - } - if goVersion != "" { - file.AddGoStmt(goVersion) - } - // Go back through all of the modules to handle any of their replace - // statements. - for _, modURI := range sortedModURIs { - fh, err := fs.GetFile(ctx, modURI) - if err != nil { - return nil, err - } - content, err := fh.Read() - if err != nil { - return nil, err - } - parsed, err := modfile.Parse(fh.URI().Filename(), content, nil) - if err != nil { - return nil, err - } - // If any of the workspace modules have replace directives, they need - // to be reflected in the workspace module. - for _, rep := range parsed.Replace { - // Don't replace any modules that are in our workspace--we should - // always use the version in the workspace. - if _, ok := paths[rep.Old.Path]; ok { - continue - } - newPath := rep.New.Path - newVersion := rep.New.Version - // If a replace points to a module in the workspace, make sure we - // direct it to version of the module in the workspace. - if m, ok := paths[rep.New.Path]; ok { - newPath = dirURI(m).Filename() - newVersion = "" - } else if rep.New.Version == "" && !filepath.IsAbs(rep.New.Path) { - // Make any relative paths absolute. - newPath = filepath.Join(dirURI(modURI).Filename(), rep.New.Path) - } - if err := file.AddReplace(rep.Old.Path, rep.Old.Version, newPath, newVersion); err != nil { - return nil, err - } - } - } - for path, versions := range excludes { - for _, version := range versions { - file.AddExclude(path, version) - } - } - file.SortBlocks() - return file, nil -} - -func buildWorkspaceSumFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) ([]byte, error) { - allSums := map[module.Version][]string{} - for modURI := range modFiles { - // TODO(rfindley): factor out this pattern into a uripath package. - sumURI := span.URIFromPath(filepath.Join(filepath.Dir(modURI.Filename()), "go.sum")) - fh, err := fs.GetFile(ctx, sumURI) - if err != nil { - continue - } - data, err := fh.Read() - if os.IsNotExist(err) { - continue - } - if err != nil { - return nil, fmt.Errorf("reading go sum: %w", err) - } - if err := readGoSum(allSums, sumURI.Filename(), data); err != nil { - return nil, err - } - } - // This logic to write go.sum is copied (with minor modifications) from - // https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=631;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0 - var mods []module.Version - for m := range allSums { - mods = append(mods, m) - } - module.Sort(mods) - - var buf bytes.Buffer - for _, m := range mods { - list := allSums[m] - sort.Strings(list) - // Note (rfindley): here we add all sum lines without verification, because - // the assumption is that if they come from a go.sum file, they are - // trusted. - for _, h := range list { - fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h) - } - } - return buf.Bytes(), nil -} - -// readGoSum is copied (with minor modifications) from -// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=398;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0 -func readGoSum(dst map[module.Version][]string, file string, data []byte) error { - lineno := 0 - for len(data) > 0 { - var line []byte - lineno++ - i := bytes.IndexByte(data, '\n') - if i < 0 { - line, data = data, nil - } else { - line, data = data[:i], data[i+1:] - } - f := strings.Fields(string(line)) - if len(f) == 0 { - // blank line; skip it - continue - } - if len(f) != 3 { - return fmt.Errorf("malformed go.sum:\n%s:%d: wrong number of fields %v", file, lineno, len(f)) - } - mod := module.Version{Path: f[0], Version: f[1]} - dst[mod] = append(dst[mod], f[2]) - } - return nil -} diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go deleted file mode 100644 index db68912015e..00000000000 --- a/internal/lsp/cache/symbols.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "go/ast" - "go/parser" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/internal/lsp/lsppos" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" -) - -type symbolHandle struct { - handle *memoize.Handle - - fh source.FileHandle - - // key is the hashed key for the package. - key symbolHandleKey -} - -// symbolData contains the data produced by extracting symbols from a file. -type symbolData struct { - symbols []source.Symbol - err error -} - -type symbolHandleKey string - -func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle { - if h := s.getSymbolHandle(fh.URI()); h != nil { - return h - } - key := symbolHandleKey(fh.FileIdentity().Hash) - h := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - data := &symbolData{} - data.symbols, data.err = symbolize(snapshot, fh) - return data - }, nil) - - sh := &symbolHandle{ - handle: h, - fh: fh, - key: key, - } - return s.addSymbolHandle(sh) -} - -// symbolize extracts symbols from a file. It uses a parsed file already -// present in the cache but otherwise does not populate the cache. -func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) { - src, err := fh.Read() - if err != nil { - return nil, err - } - - var ( - file *ast.File - fileDesc *token.File - ) - - // If the file has already been fully parsed through the cache, we can just - // use the result. - if pgf := snapshot.cachedPGF(fh, source.ParseFull); pgf != nil { - file = pgf.File - fileDesc = pgf.Tok - } - - // Otherwise, we parse the file ourselves. Notably we don't use parseGo here, - // so that we can avoid parsing comments and can skip object resolution, - // which has a meaningful impact on performance. Neither comments nor objects - // are necessary for symbol construction. - if file == nil { - fset := token.NewFileSet() - file, err = parser.ParseFile(fset, fh.URI().Filename(), src, skipObjectResolution) - if file == nil { - return nil, err - } - fileDesc = fset.File(file.Package) - } - - w := &symbolWalker{ - mapper: lsppos.NewTokenMapper(src, fileDesc), - } - - w.fileDecls(file.Decls) - - return w.symbols, w.firstError -} - -type symbolWalker struct { - mapper *lsppos.TokenMapper // for computing positions - - symbols []source.Symbol - firstError error -} - -func (w *symbolWalker) atNode(node ast.Node, name string, kind protocol.SymbolKind, path ...*ast.Ident) { - var b strings.Builder - for _, ident := range path { - if ident != nil { - b.WriteString(ident.Name) - b.WriteString(".") - } - } - b.WriteString(name) - - rng, err := w.mapper.Range(node.Pos(), node.End()) - if err != nil { - w.error(err) - return - } - sym := source.Symbol{ - Name: b.String(), - Kind: kind, - Range: rng, - } - w.symbols = append(w.symbols, sym) -} - -func (w *symbolWalker) error(err error) { - if err != nil && w.firstError == nil { - w.firstError = err - } -} - -func (w *symbolWalker) fileDecls(decls []ast.Decl) { - for _, decl := range decls { - switch decl := decl.(type) { - case *ast.FuncDecl: - kind := protocol.Function - var recv *ast.Ident - if decl.Recv.NumFields() > 0 { - kind = protocol.Method - recv = unpackRecv(decl.Recv.List[0].Type) - } - w.atNode(decl.Name, decl.Name.Name, kind, recv) - case *ast.GenDecl: - for _, spec := range decl.Specs { - switch spec := spec.(type) { - case *ast.TypeSpec: - kind := guessKind(spec) - w.atNode(spec.Name, spec.Name.Name, kind) - w.walkType(spec.Type, spec.Name) - case *ast.ValueSpec: - for _, name := range spec.Names { - kind := protocol.Variable - if decl.Tok == token.CONST { - kind = protocol.Constant - } - w.atNode(name, name.Name, kind) - } - } - } - } - } -} - -func guessKind(spec *ast.TypeSpec) protocol.SymbolKind { - switch spec.Type.(type) { - case *ast.InterfaceType: - return protocol.Interface - case *ast.StructType: - return protocol.Struct - case *ast.FuncType: - return protocol.Function - } - return protocol.Class -} - -func unpackRecv(rtyp ast.Expr) *ast.Ident { - // Extract the receiver identifier. Lifted from go/types/resolver.go -L: - for { - switch t := rtyp.(type) { - case *ast.ParenExpr: - rtyp = t.X - case *ast.StarExpr: - rtyp = t.X - default: - break L - } - } - if name, _ := rtyp.(*ast.Ident); name != nil { - return name - } - return nil -} - -// walkType processes symbols related to a type expression. path is path of -// nested type identifiers to the type expression. -func (w *symbolWalker) walkType(typ ast.Expr, path ...*ast.Ident) { - switch st := typ.(type) { - case *ast.StructType: - for _, field := range st.Fields.List { - w.walkField(field, protocol.Field, protocol.Field, path...) - } - case *ast.InterfaceType: - for _, field := range st.Methods.List { - w.walkField(field, protocol.Interface, protocol.Method, path...) - } - } -} - -// walkField processes symbols related to the struct field or interface method. -// -// unnamedKind and namedKind are the symbol kinds if the field is resp. unnamed -// or named. path is the path of nested identifiers containing the field. -func (w *symbolWalker) walkField(field *ast.Field, unnamedKind, namedKind protocol.SymbolKind, path ...*ast.Ident) { - if len(field.Names) == 0 { - switch typ := field.Type.(type) { - case *ast.SelectorExpr: - // embedded qualified type - w.atNode(field, typ.Sel.Name, unnamedKind, path...) - default: - w.atNode(field, types.ExprString(field.Type), unnamedKind, path...) - } - } - for _, name := range field.Names { - w.atNode(name, name.Name, namedKind, path...) - w.walkType(field.Type, append(path, name)...) - } -} diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go deleted file mode 100644 index b0390a3fbde..00000000000 --- a/internal/lsp/cache/view.go +++ /dev/null @@ -1,1078 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cache implements the caching layer for gopls. -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/semver" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -type View struct { - session *Session - id string - - optionsMu sync.Mutex - options *source.Options - - // mu protects most mutable state of the view. - mu sync.Mutex - - // baseCtx is the context handed to NewView. This is the parent of all - // background contexts created for this view. - baseCtx context.Context - - // cancel is called when all action being performed by the current view - // should be stopped. - cancel context.CancelFunc - - // name is the user visible name of this view. - name string - - // folder is the folder with which this view was constructed. - folder span.URI - - importsState *importsState - - // moduleUpgrades tracks known upgrades for module paths. - moduleUpgrades map[string]string - - // keep track of files by uri and by basename, a single file may be mapped - // to multiple uris, and the same basename may map to multiple files - filesByURI map[span.URI]*fileBase - filesByBase map[string][]*fileBase - - // initCancelFirstAttempt can be used to terminate the view's first - // attempt at initialization. - initCancelFirstAttempt context.CancelFunc - - snapshotMu sync.Mutex - snapshot *snapshot // nil after shutdown has been called - - // initialWorkspaceLoad is closed when the first workspace initialization has - // completed. If we failed to load, we only retry if the go.mod file changes, - // to avoid too many go/packages calls. - initialWorkspaceLoad chan struct{} - - // initializationSema is used limit concurrent initialization of snapshots in - // the view. We use a channel instead of a mutex to avoid blocking when a - // context is canceled. - initializationSema chan struct{} - - // rootURI is the rootURI directory of this view. If we are in GOPATH mode, this - // is just the folder. If we are in module mode, this is the module rootURI. - rootURI span.URI - - // workspaceInformation tracks various details about this view's - // environment variables, go version, and use of modules. - workspaceInformation -} - -type workspaceInformation struct { - // The Go version in use: X in Go 1.X. - goversion int - - // hasGopackagesDriver is true if the user has a value set for the - // GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on - // their machine. - hasGopackagesDriver bool - - // `go env` variables that need to be tracked by gopls. - environmentVariables - - // userGo111Module is the user's value of GO111MODULE. - userGo111Module go111module - - // The value of GO111MODULE we want to run with. - effectiveGo111Module string - - // goEnv is the `go env` output collected when a view is created. - // It includes the values of the environment variables above. - goEnv map[string]string -} - -type go111module int - -const ( - off = go111module(iota) - auto - on -) - -type environmentVariables struct { - gocache, gopath, goroot, goprivate, gomodcache, go111module string -} - -type workspaceMode int - -const ( - moduleMode workspaceMode = 1 << iota - - // tempModfile indicates whether or not the -modfile flag should be used. - tempModfile -) - -// fileBase holds the common functionality for all files. -// It is intended to be embedded in the file implementations -type fileBase struct { - uris []span.URI - fname string - - view *View -} - -func (f *fileBase) URI() span.URI { - return f.uris[0] -} - -func (f *fileBase) filename() string { - return f.fname -} - -func (f *fileBase) addURI(uri span.URI) int { - f.uris = append(f.uris, uri) - return len(f.uris) -} - -func (v *View) ID() string { return v.id } - -// tempModFile creates a temporary go.mod file based on the contents of the -// given go.mod file. It is the caller's responsibility to clean up the files -// when they are done using them. -func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) { - filenameHash := hashContents([]byte(modFh.URI().Filename())) - tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash)) - if err != nil { - return "", nil, err - } - defer tmpMod.Close() - - tmpURI = span.URIFromPath(tmpMod.Name()) - tmpSumName := sumFilename(tmpURI) - - content, err := modFh.Read() - if err != nil { - return "", nil, err - } - - if _, err := tmpMod.Write(content); err != nil { - return "", nil, err - } - - cleanup = func() { - _ = os.Remove(tmpSumName) - _ = os.Remove(tmpURI.Filename()) - } - - // Be careful to clean up if we return an error from this function. - defer func() { - if err != nil { - cleanup() - cleanup = nil - } - }() - - // Create an analogous go.sum, if one exists. - if gosum != nil { - if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil { - return "", cleanup, err - } - } - - return tmpURI, cleanup, nil -} - -// Name returns the user visible name of this view. -func (v *View) Name() string { - return v.name -} - -// Folder returns the folder at the base of this view. -func (v *View) Folder() span.URI { - return v.folder -} - -func (v *View) Options() *source.Options { - v.optionsMu.Lock() - defer v.optionsMu.Unlock() - return v.options -} - -func (v *View) FileKind(fh source.FileHandle) source.FileKind { - if o, ok := fh.(source.Overlay); ok { - if o.Kind() != source.UnknownKind { - return o.Kind() - } - } - fext := filepath.Ext(fh.URI().Filename()) - switch fext { - case ".go": - return source.Go - case ".mod": - return source.Mod - case ".sum": - return source.Sum - case ".work": - return source.Work - } - exts := v.Options().TemplateExtensions - for _, ext := range exts { - if fext == ext || fext == "."+ext { - return source.Tmpl - } - } - // and now what? This should never happen, but it does for cgo before go1.15 - return source.Go -} - -func minorOptionsChange(a, b *source.Options) bool { - // Check if any of the settings that modify our understanding of files have been changed - if !reflect.DeepEqual(a.Env, b.Env) { - return false - } - if !reflect.DeepEqual(a.DirectoryFilters, b.DirectoryFilters) { - return false - } - if a.MemoryMode != b.MemoryMode { - return false - } - aBuildFlags := make([]string, len(a.BuildFlags)) - bBuildFlags := make([]string, len(b.BuildFlags)) - copy(aBuildFlags, a.BuildFlags) - copy(bBuildFlags, b.BuildFlags) - sort.Strings(aBuildFlags) - sort.Strings(bBuildFlags) - // the rest of the options are benign - return reflect.DeepEqual(aBuildFlags, bBuildFlags) -} - -func (v *View) SetOptions(ctx context.Context, options *source.Options) (source.View, error) { - // no need to rebuild the view if the options were not materially changed - v.optionsMu.Lock() - if minorOptionsChange(v.options, options) { - v.options = options - v.optionsMu.Unlock() - return v, nil - } - v.optionsMu.Unlock() - newView, err := v.session.updateView(ctx, v, options) - return newView, err -} - -func (v *View) Rebuild(ctx context.Context) (source.Snapshot, func(), error) { - newView, err := v.session.updateView(ctx, v, v.Options()) - if err != nil { - return nil, func() {}, err - } - snapshot, release := newView.Snapshot(ctx) - return snapshot, release, nil -} - -func (s *snapshot) WriteEnv(ctx context.Context, w io.Writer) error { - s.view.optionsMu.Lock() - env := s.view.options.EnvSlice() - buildFlags := append([]string{}, s.view.options.BuildFlags...) - s.view.optionsMu.Unlock() - - fullEnv := make(map[string]string) - for k, v := range s.view.goEnv { - fullEnv[k] = v - } - for _, v := range env { - s := strings.SplitN(v, "=", 2) - if len(s) != 2 { - continue - } - if _, ok := fullEnv[s[0]]; ok { - fullEnv[s[0]] = s[1] - } - } - goVersion, err := s.view.session.gocmdRunner.Run(ctx, gocommand.Invocation{ - Verb: "version", - Env: env, - WorkingDir: s.view.rootURI.Filename(), - }) - if err != nil { - return err - } - fmt.Fprintf(w, `go env for %v -(root %s) -(go version %s) -(valid build configuration = %v) -(build flags: %v) -`, - s.view.folder.Filename(), - s.view.rootURI.Filename(), - strings.TrimRight(goVersion.String(), "\n"), - s.ValidBuildConfiguration(), - buildFlags) - for k, v := range fullEnv { - fmt.Fprintf(w, "%s=%s\n", k, v) - } - return nil -} - -func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error { - return s.view.importsState.runProcessEnvFunc(ctx, s, fn) -} - -// separated out from its sole use in locateTemplateFiles for testability -func fileHasExtension(path string, suffixes []string) bool { - ext := filepath.Ext(path) - if ext != "" && ext[0] == '.' { - ext = ext[1:] - } - for _, s := range suffixes { - if s != "" && ext == s { - return true - } - } - return false -} - -func (s *snapshot) locateTemplateFiles(ctx context.Context) { - if len(s.view.Options().TemplateExtensions) == 0 { - return - } - suffixes := s.view.Options().TemplateExtensions - - // The workspace root may have been expanded to a module, but we should apply - // directory filters based on the configured workspace folder. - // - // TODO(rfindley): we should be more principled about paths outside of the - // workspace folder: do we even consider them? Do we support absolute - // exclusions? Relative exclusions starting with ..? - dir := s.workspace.root.Filename() - relativeTo := s.view.folder.Filename() - - searched := 0 - // Change to WalkDir when we move up to 1.16 - err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - relpath := strings.TrimPrefix(path, relativeTo) - excluded := pathExcludedByFilter(relpath, dir, s.view.gomodcache, s.view.options) - if fileHasExtension(path, suffixes) && !excluded && !fi.IsDir() { - k := span.URIFromPath(path) - _, err := s.GetVersionedFile(ctx, k) - if err != nil { - return nil - } - } - searched++ - if fileLimit > 0 && searched > fileLimit { - return errExhausted - } - return nil - }) - if err != nil { - event.Error(ctx, "searching for template files failed", err) - } -} - -func (v *View) contains(uri span.URI) bool { - inRoot := source.InDir(v.rootURI.Filename(), uri.Filename()) - inFolder := source.InDir(v.folder.Filename(), uri.Filename()) - if !inRoot && !inFolder { - return false - } - // Filters are applied relative to the workspace folder. - if inFolder { - return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options()) - } - return true -} - -func (v *View) mapFile(uri span.URI, f *fileBase) { - v.filesByURI[uri] = f - if f.addURI(uri) == 1 { - basename := basename(f.filename()) - v.filesByBase[basename] = append(v.filesByBase[basename], f) - } -} - -func basename(filename string) string { - return strings.ToLower(filepath.Base(filename)) -} - -func (v *View) relevantChange(c source.FileModification) bool { - // If the file is known to the view, the change is relevant. - if v.knownFile(c.URI) { - return true - } - // The go.work/gopls.mod may not be "known" because we first access it - // through the session. As a result, treat changes to the view's go.work or - // gopls.mod file as always relevant, even if they are only on-disk - // changes. - // TODO(rstambler): Make sure the go.work/gopls.mod files are always known - // to the view. - for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} { - if c.URI == uriForSource(v.rootURI, src) { - return true - } - } - // If the file is not known to the view, and the change is only on-disk, - // we should not invalidate the snapshot. This is necessary because Emacs - // sends didChangeWatchedFiles events for temp files. - if c.OnDisk && (c.Action == source.Change || c.Action == source.Delete) { - return false - } - return v.contains(c.URI) -} - -func (v *View) knownFile(uri span.URI) bool { - v.mu.Lock() - defer v.mu.Unlock() - - f, err := v.findFile(uri) - return f != nil && err == nil -} - -// getFile returns a file for the given URI. -func (v *View) getFile(uri span.URI) *fileBase { - v.mu.Lock() - defer v.mu.Unlock() - - f, _ := v.findFile(uri) - if f != nil { - return f - } - f = &fileBase{ - view: v, - fname: uri.Filename(), - } - v.mapFile(uri, f) - return f -} - -// findFile checks the cache for any file matching the given uri. -// -// An error is only returned for an irreparable failure, for example, if the -// filename in question does not exist. -func (v *View) findFile(uri span.URI) (*fileBase, error) { - if f := v.filesByURI[uri]; f != nil { - // a perfect match - return f, nil - } - // no exact match stored, time to do some real work - // check for any files with the same basename - fname := uri.Filename() - basename := basename(fname) - if candidates := v.filesByBase[basename]; candidates != nil { - pathStat, err := os.Stat(fname) - if os.IsNotExist(err) { - return nil, err - } - if err != nil { - return nil, nil // the file may exist, return without an error - } - for _, c := range candidates { - if cStat, err := os.Stat(c.filename()); err == nil { - if os.SameFile(pathStat, cStat) { - // same file, map it - v.mapFile(uri, c) - return c, nil - } - } - } - } - // no file with a matching name was found, it wasn't in our cache - return nil, nil -} - -func (v *View) Shutdown(ctx context.Context) { - v.session.removeView(ctx, v) -} - -// TODO(rFindley): probably some of this should also be one in View.Shutdown -// above? -func (v *View) shutdown(ctx context.Context) { - // Cancel the initial workspace load if it is still running. - v.initCancelFirstAttempt() - - v.mu.Lock() - if v.cancel != nil { - v.cancel() - v.cancel = nil - } - v.mu.Unlock() - v.snapshotMu.Lock() - if v.snapshot != nil { - go v.snapshot.generation.Destroy("View.shutdown") - v.snapshot = nil - } - v.snapshotMu.Unlock() - v.importsState.destroy() -} - -func (v *View) Session() *Session { - return v.session -} - -func (s *snapshot) IgnoredFile(uri span.URI) bool { - filename := uri.Filename() - var prefixes []string - if len(s.workspace.getActiveModFiles()) == 0 { - for _, entry := range filepath.SplitList(s.view.gopath) { - prefixes = append(prefixes, filepath.Join(entry, "src")) - } - } else { - prefixes = append(prefixes, s.view.gomodcache) - for m := range s.workspace.getActiveModFiles() { - prefixes = append(prefixes, dirURI(m).Filename()) - } - } - for _, prefix := range prefixes { - if strings.HasPrefix(filename, prefix) { - return checkIgnored(filename[len(prefix):]) - } - } - return false -} - -// checkIgnored implements go list's exclusion rules. -// Quoting ā€œgo help listā€: -// -// Directory and file names that begin with "." or "_" are ignored -// by the go tool, as are directories named "testdata". -func checkIgnored(suffix string) bool { - for _, component := range strings.Split(suffix, string(filepath.Separator)) { - if len(component) == 0 { - continue - } - if component[0] == '.' || component[0] == '_' || component == "testdata" { - return true - } - } - return false -} - -func (v *View) Snapshot(ctx context.Context) (source.Snapshot, func()) { - return v.getSnapshot() -} - -func (v *View) getSnapshot() (*snapshot, func()) { - v.snapshotMu.Lock() - defer v.snapshotMu.Unlock() - if v.snapshot == nil { - panic("getSnapshot called after shutdown") - } - return v.snapshot, v.snapshot.generation.Acquire() -} - -func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) { - select { - case <-ctx.Done(): - return - case s.view.initializationSema <- struct{}{}: - } - - defer func() { - <-s.view.initializationSema - }() - - if s.initializeOnce == nil { - return - } - s.initializeOnce.Do(func() { - s.loadWorkspace(ctx, firstAttempt) - s.collectAllKnownSubdirs(ctx) - }) -} - -func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) { - defer func() { - s.initializeOnce = nil - if firstAttempt { - close(s.view.initialWorkspaceLoad) - } - }() - - // If we have multiple modules, we need to load them by paths. - var scopes []interface{} - var modDiagnostics []*source.Diagnostic - addError := func(uri span.URI, err error) { - modDiagnostics = append(modDiagnostics, &source.Diagnostic{ - URI: uri, - Severity: protocol.SeverityError, - Source: source.ListError, - Message: err.Error(), - }) - } - s.locateTemplateFiles(ctx) - if len(s.workspace.getActiveModFiles()) > 0 { - for modURI := range s.workspace.getActiveModFiles() { - fh, err := s.GetFile(ctx, modURI) - if err != nil { - addError(modURI, err) - continue - } - parsed, err := s.ParseMod(ctx, fh) - if err != nil { - addError(modURI, err) - continue - } - if parsed.File == nil || parsed.File.Module == nil { - addError(modURI, fmt.Errorf("no module path for %s", modURI)) - continue - } - path := parsed.File.Module.Mod.Path - scopes = append(scopes, moduleLoadScope(path)) - } - } else { - scopes = append(scopes, viewLoadScope("LOAD_VIEW")) - } - - // If we're loading anything, ensure we also load builtin. - // TODO(rstambler): explain the rationale for this. - if len(scopes) > 0 { - scopes = append(scopes, PackagePath("builtin")) - } - err := s.load(ctx, firstAttempt, scopes...) - - // If the context is canceled on the first attempt, loading has failed - // because the go command has timed out--that should be a critical error. - if err != nil && !firstAttempt && ctx.Err() != nil { - return - } - - var criticalErr *source.CriticalError - switch { - case err != nil && ctx.Err() != nil: - event.Error(ctx, fmt.Sprintf("initial workspace load: %v", err), err) - criticalErr = &source.CriticalError{ - MainError: err, - } - case err != nil: - event.Error(ctx, "initial workspace load failed", err) - extractedDiags := s.extractGoCommandErrors(ctx, err) - criticalErr = &source.CriticalError{ - MainError: err, - DiagList: append(modDiagnostics, extractedDiags...), - } - case len(modDiagnostics) == 1: - criticalErr = &source.CriticalError{ - MainError: fmt.Errorf(modDiagnostics[0].Message), - DiagList: modDiagnostics, - } - case len(modDiagnostics) > 1: - criticalErr = &source.CriticalError{ - MainError: fmt.Errorf("error loading module names"), - DiagList: modDiagnostics, - } - } - - // Lock the snapshot when setting the initialized error. - s.mu.Lock() - defer s.mu.Unlock() - s.initializedErr = criticalErr -} - -// invalidateContent invalidates the content of a Go file, -// including any position and type information that depends on it. -// -// invalidateContent returns a non-nil snapshot for the new content, along with -// a callback which the caller must invoke to release that snapshot. -func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) { - // Detach the context so that content invalidation cannot be canceled. - ctx = xcontext.Detach(ctx) - - // This should be the only time we hold the view's snapshot lock for any period of time. - v.snapshotMu.Lock() - defer v.snapshotMu.Unlock() - - if v.snapshot == nil { - panic("invalidateContent called after shutdown") - } - - // Cancel all still-running previous requests, since they would be - // operating on stale data. - v.snapshot.cancel() - - // Do not clone a snapshot until its view has finished initializing. - v.snapshot.AwaitInitialized(ctx) - - oldSnapshot := v.snapshot - - v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) - go oldSnapshot.generation.Destroy("View.invalidateContent") - - return v.snapshot, v.snapshot.generation.Acquire() -} - -func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (*workspaceInformation, error) { - if err := checkPathCase(folder.Filename()); err != nil { - return nil, fmt.Errorf("invalid workspace folder path: %w; check that the casing of the configured workspace folder path agrees with the casing reported by the operating system", err) - } - var err error - inv := gocommand.Invocation{ - WorkingDir: folder.Filename(), - Env: options.EnvSlice(), - } - goversion, err := gocommand.GoVersion(ctx, inv, s.gocmdRunner) - if err != nil { - return nil, err - } - - go111module := os.Getenv("GO111MODULE") - if v, ok := options.Env["GO111MODULE"]; ok { - go111module = v - } - // Make sure to get the `go env` before continuing with initialization. - envVars, env, err := s.getGoEnv(ctx, folder.Filename(), goversion, go111module, options.EnvSlice()) - if err != nil { - return nil, err - } - // If using 1.16, change the default back to auto. The primary effect of - // GO111MODULE=on is to break GOPATH, which we aren't too interested in. - if goversion >= 16 && go111module == "" { - go111module = "auto" - } - // The value of GOPACKAGESDRIVER is not returned through the go command. - gopackagesdriver := os.Getenv("GOPACKAGESDRIVER") - for _, s := range env { - split := strings.SplitN(s, "=", 2) - if split[0] == "GOPACKAGESDRIVER" { - gopackagesdriver = split[1] - } - } - - // A user may also have a gopackagesdriver binary on their machine, which - // works the same way as setting GOPACKAGESDRIVER. - tool, _ := exec.LookPath("gopackagesdriver") - hasGopackagesDriver := gopackagesdriver != "off" && (gopackagesdriver != "" || tool != "") - - return &workspaceInformation{ - hasGopackagesDriver: hasGopackagesDriver, - effectiveGo111Module: go111module, - userGo111Module: go111moduleForVersion(go111module, goversion), - goversion: goversion, - environmentVariables: envVars, - goEnv: env, - }, nil -} - -func go111moduleForVersion(go111module string, goversion int) go111module { - // Off by default until Go 1.12. - if go111module == "off" || (goversion < 12 && go111module == "") { - return off - } - // On by default as of Go 1.16. - if go111module == "on" || (goversion >= 16 && go111module == "") { - return on - } - return auto -} - -// findWorkspaceRoot searches for the best workspace root according to the -// following heuristics: -// - First, look for a parent directory containing a gopls.mod file -// (experimental only). -// - Then, a parent directory containing a go.mod file. -// - Then, a child directory containing a go.mod file, if there is exactly -// one (non-experimental only). -// -// Otherwise, it returns folder. -// TODO (rFindley): move this to workspace.go -// TODO (rFindley): simplify this once workspace modules are enabled by default. -func findWorkspaceRoot(ctx context.Context, folder span.URI, fs source.FileSource, excludePath func(string) bool, experimental bool) (span.URI, error) { - patterns := []string{"go.work", "go.mod"} - if experimental { - patterns = []string{"go.work", "gopls.mod", "go.mod"} - } - for _, basename := range patterns { - dir, err := findRootPattern(ctx, folder, basename, fs) - if err != nil { - return "", fmt.Errorf("finding %s: %w", basename, err) - } - if dir != "" { - return dir, nil - } - } - - // The experimental workspace can handle nested modules at this point... - if experimental { - return folder, nil - } - - // ...else we should check if there's exactly one nested module. - all, err := findModules(folder, excludePath, 2) - if err == errExhausted { - // Fall-back behavior: if we don't find any modules after searching 10000 - // files, assume there are none. - event.Log(ctx, fmt.Sprintf("stopped searching for modules after %d files", fileLimit)) - return folder, nil - } - if err != nil { - return "", err - } - if len(all) == 1 { - // range to access first element. - for uri := range all { - return dirURI(uri), nil - } - } - return folder, nil -} - -func findRootPattern(ctx context.Context, folder span.URI, basename string, fs source.FileSource) (span.URI, error) { - dir := folder.Filename() - for dir != "" { - target := filepath.Join(dir, basename) - exists, err := fileExists(ctx, span.URIFromPath(target), fs) - if err != nil { - return "", err - } - if exists { - return span.URIFromPath(dir), nil - } - // Trailing separators must be trimmed, otherwise filepath.Split is a noop. - next, _ := filepath.Split(strings.TrimRight(dir, string(filepath.Separator))) - if next == dir { - break - } - dir = next - } - return "", nil -} - -// OS-specific path case check, for case-insensitive filesystems. -var checkPathCase = defaultCheckPathCase - -func defaultCheckPathCase(path string) error { - return nil -} - -func validBuildConfiguration(folder span.URI, ws *workspaceInformation, modFiles map[span.URI]struct{}) bool { - // Since we only really understand the `go` command, if the user has a - // different GOPACKAGESDRIVER, assume that their configuration is valid. - if ws.hasGopackagesDriver { - return true - } - // Check if the user is working within a module or if we have found - // multiple modules in the workspace. - if len(modFiles) > 0 { - return true - } - // The user may have a multiple directories in their GOPATH. - // Check if the workspace is within any of them. - for _, gp := range filepath.SplitList(ws.gopath) { - if source.InDir(filepath.Join(gp, "src"), folder.Filename()) { - return true - } - } - return false -} - -// getGoEnv gets the view's various GO* values. -func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, go111module string, configEnv []string) (environmentVariables, map[string]string, error) { - envVars := environmentVariables{} - vars := map[string]*string{ - "GOCACHE": &envVars.gocache, - "GOPATH": &envVars.gopath, - "GOROOT": &envVars.goroot, - "GOPRIVATE": &envVars.goprivate, - "GOMODCACHE": &envVars.gomodcache, - "GO111MODULE": &envVars.go111module, - } - - // We can save ~200 ms by requesting only the variables we care about. - args := append([]string{"-json"}, imports.RequiredGoEnvVars...) - for k := range vars { - args = append(args, k) - } - args = append(args, "GOWORK") - - inv := gocommand.Invocation{ - Verb: "env", - Args: args, - Env: configEnv, - WorkingDir: folder, - } - // Don't go through runGoCommand, as we don't need a temporary -modfile to - // run `go env`. - stdout, err := s.gocmdRunner.Run(ctx, inv) - if err != nil { - return environmentVariables{}, nil, err - } - env := make(map[string]string) - if err := json.Unmarshal(stdout.Bytes(), &env); err != nil { - return environmentVariables{}, nil, err - } - - for key, ptr := range vars { - *ptr = env[key] - } - - // Old versions of Go don't have GOMODCACHE, so emulate it. - if envVars.gomodcache == "" && envVars.gopath != "" { - envVars.gomodcache = filepath.Join(filepath.SplitList(envVars.gopath)[0], "pkg/mod") - } - // GO111MODULE does not appear in `go env` output until Go 1.13. - if goversion < 13 { - envVars.go111module = go111module - } - return envVars, env, err -} - -func (v *View) IsGoPrivatePath(target string) bool { - return globsMatchPath(v.goprivate, target) -} - -func (v *View) ModuleUpgrades() map[string]string { - v.mu.Lock() - defer v.mu.Unlock() - - upgrades := map[string]string{} - for mod, ver := range v.moduleUpgrades { - upgrades[mod] = ver - } - return upgrades -} - -func (v *View) RegisterModuleUpgrades(upgrades map[string]string) { - v.mu.Lock() - defer v.mu.Unlock() - - for mod, ver := range upgrades { - v.moduleUpgrades[mod] = ver - } -} - -// Copied from -// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a -func globsMatchPath(globs, target string) bool { - for globs != "" { - // Extract next non-empty glob in comma-separated list. - var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] - } else { - glob, globs = globs, "" - } - if glob == "" { - continue - } - - // A glob with N+1 path elements (N slashes) needs to be matched - // against the first N+1 path elements of target, - // which end just before the N+1'th slash. - n := strings.Count(glob, "/") - prefix := target - // Walk target, counting slashes, truncating at the N+1'th slash. - for i := 0; i < len(target); i++ { - if target[i] == '/' { - if n == 0 { - prefix = target[:i] - break - } - n-- - } - } - if n > 0 { - // Not enough prefix elements. - continue - } - matched, _ := path.Match(glob, prefix) - if matched { - return true - } - } - return false -} - -var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) - -// TODO(rstambler): Consolidate modURI and modContent back into a FileHandle -// after we have a version of the workspace go.mod file on disk. Getting a -// FileHandle from the cache for temporary files is problematic, since we -// cannot delete it. -func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modContent []byte) (bool, error) { - if s.workspaceMode()&moduleMode == 0 { - return false, nil - } - matches := modFlagRegexp.FindStringSubmatch(s.view.goEnv["GOFLAGS"]) - var modFlag string - if len(matches) != 0 { - modFlag = matches[1] - } - if modFlag != "" { - // Don't override an explicit '-mod=vendor' argument. - // We do want to override '-mod=readonly': it would break various module code lenses, - // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway. - return modFlag == "vendor", nil - } - - modFile, err := modfile.Parse(modURI.Filename(), modContent, nil) - if err != nil { - return false, err - } - if fi, err := os.Stat(filepath.Join(s.view.rootURI.Filename(), "vendor")); err != nil || !fi.IsDir() { - return false, nil - } - vendorEnabled := modFile.Go != nil && modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0 - return vendorEnabled, nil -} - -func (v *View) allFilesExcluded(pkg *packages.Package) bool { - opts := v.Options() - folder := filepath.ToSlash(v.folder.Filename()) - for _, f := range pkg.GoFiles { - f = filepath.ToSlash(f) - if !strings.HasPrefix(f, folder) { - return false - } - if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) { - return false - } - } - return true -} - -func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool { - return func(path string) bool { - return pathExcludedByFilter(path, root, gomodcache, opts) - } -} - -// pathExcludedByFilter reports whether the path (relative to the workspace -// folder) should be excluded by the configured directory filters. -// -// TODO(rfindley): passing root and gomodcache here makes it confusing whether -// path should be absolute or relative, and has already caused at least one -// bug. -func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool { - path = strings.TrimPrefix(filepath.ToSlash(path), "/") - gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/") - filters := opts.DirectoryFilters - if gomodcache != "" { - filters = append(filters, "-"+gomodcache) - } - return source.FiltersDisallow(path, filters) -} diff --git a/internal/lsp/cache/view_test.go b/internal/lsp/cache/view_test.go deleted file mode 100644 index d76dcda8ed4..00000000000 --- a/internal/lsp/cache/view_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package cache - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func TestCaseInsensitiveFilesystem(t *testing.T) { - base, err := ioutil.TempDir("", t.Name()) - if err != nil { - t.Fatal(err) - } - - inner := filepath.Join(base, "a/B/c/DEFgh") - if err := os.MkdirAll(inner, 0777); err != nil { - t.Fatal(err) - } - file := filepath.Join(inner, "f.go") - if err := ioutil.WriteFile(file, []byte("hi"), 0777); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(filepath.Join(inner, "F.go")); err != nil { - t.Skip("filesystem is case-sensitive") - } - - tests := []struct { - path string - err bool - }{ - {file, false}, - {filepath.Join(inner, "F.go"), true}, - {filepath.Join(base, "a/b/c/defgh/f.go"), true}, - } - for _, tt := range tests { - err := checkPathCase(tt.path) - if err != nil != tt.err { - t.Errorf("checkPathCase(%q) = %v, wanted error: %v", tt.path, err, tt.err) - } - } -} - -func TestFindWorkspaceRoot(t *testing.T) { - workspace := ` --- a/go.mod -- -module a --- a/x/x.go -package x --- a/x/y/y.go -package x --- b/go.mod -- -module b --- b/c/go.mod -- -module bc --- d/gopls.mod -- -module d-goplsworkspace --- d/e/go.mod -- -module de --- f/g/go.mod -- -module fg -` - dir, err := fake.Tempdir(fake.UnpackTxt(workspace)) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - tests := []struct { - folder, want string - experimental bool - }{ - {"", "", false}, // no module at root, and more than one nested module - {"a", "a", false}, - {"a/x", "a", false}, - {"a/x/y", "a", false}, - {"b/c", "b/c", false}, - {"d", "d/e", false}, - {"d", "d", true}, - {"d/e", "d/e", false}, - {"d/e", "d", true}, - {"f", "f/g", false}, - {"f", "f", true}, - } - - for _, test := range tests { - ctx := context.Background() - rel := fake.RelativeTo(dir) - folderURI := span.URIFromPath(rel.AbsPath(test.folder)) - excludeNothing := func(string) bool { return false } - got, err := findWorkspaceRoot(ctx, folderURI, &osFileSource{}, excludeNothing, test.experimental) - if err != nil { - t.Fatal(err) - } - if gotf, wantf := filepath.Clean(got.Filename()), rel.AbsPath(test.want); gotf != wantf { - t.Errorf("findWorkspaceRoot(%q, %t) = %q, want %q", test.folder, test.experimental, gotf, wantf) - } - } -} - -func TestInVendor(t *testing.T) { - for _, tt := range []struct { - path string - inVendor bool - }{ - { - path: "foo/vendor/x.go", - inVendor: false, - }, - { - path: "foo/vendor/x/x.go", - inVendor: true, - }, - { - path: "foo/x.go", - inVendor: false, - }, - } { - if got := inVendor(span.URIFromPath(tt.path)); got != tt.inVendor { - t.Errorf("expected %s inVendor %v, got %v", tt.path, tt.inVendor, got) - } - } -} - -func TestFilters(t *testing.T) { - tests := []struct { - filters []string - included []string - excluded []string - }{ - { - included: []string{"x"}, - }, - { - filters: []string{"-"}, - excluded: []string{"x", "x/a"}, - }, - { - filters: []string{"-x", "+y"}, - included: []string{"y", "y/a", "z"}, - excluded: []string{"x", "x/a"}, - }, - { - filters: []string{"-x", "+x/y", "-x/y/z"}, - included: []string{"x/y", "x/y/a", "a"}, - excluded: []string{"x", "x/a", "x/y/z/a"}, - }, - { - filters: []string{"+foobar", "-foo"}, - included: []string{"foobar", "foobar/a"}, - excluded: []string{"foo", "foo/a"}, - }, - } - - for _, tt := range tests { - opts := &source.Options{} - opts.DirectoryFilters = tt.filters - for _, inc := range tt.included { - if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) { - t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc) - } - } - for _, exc := range tt.excluded { - if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) { - t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc) - } - } - } -} - -func TestSuffixes(t *testing.T) { - type file struct { - path string - want bool - } - type cases struct { - option []string - files []file - } - tests := []cases{ - {[]string{"tmpl", "gotmpl"}, []file{ // default - {"foo", false}, - {"foo.tmpl", true}, - {"foo.gotmpl", true}, - {"tmpl", false}, - {"tmpl.go", false}}, - }, - {[]string{"tmpl", "gotmpl", "html", "gohtml"}, []file{ - {"foo.gotmpl", true}, - {"foo.html", true}, - {"foo.gohtml", true}, - {"html", false}}, - }, - {[]string{"tmpl", "gotmpl", ""}, []file{ // possible user mistake - {"foo.gotmpl", true}, - {"foo.go", false}, - {"foo", false}}, - }, - } - for _, a := range tests { - suffixes := a.option - for _, b := range a.files { - got := fileHasExtension(b.path, suffixes) - if got != b.want { - t.Errorf("got %v, want %v, option %q, file %q (%+v)", - got, b.want, a.option, b.path, b) - } - } - } -} diff --git a/internal/lsp/cache/workspace.go b/internal/lsp/cache/workspace.go deleted file mode 100644 index 669ce9290c9..00000000000 --- a/internal/lsp/cache/workspace.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -// workspaceSource reports how the set of active modules has been derived. -type workspaceSource int - -const ( - legacyWorkspace = iota // non-module or single module mode - goplsModWorkspace // modules provided by a gopls.mod file - goWorkWorkspace // modules provided by a go.work file - fileSystemWorkspace // modules scanned from the filesystem -) - -func (s workspaceSource) String() string { - switch s { - case legacyWorkspace: - return "legacy" - case goplsModWorkspace: - return "gopls.mod" - case goWorkWorkspace: - return "go.work" - case fileSystemWorkspace: - return "file system" - default: - return "!(unknown module source)" - } -} - -// workspace tracks go.mod files in the workspace, along with the -// gopls.mod file, to provide support for multi-module workspaces. -// -// Specifically, it provides: -// - the set of modules contained within in the workspace root considered to -// be 'active' -// - the workspace modfile, to be used for the go command `-modfile` flag -// - the set of workspace directories -// -// This type is immutable (or rather, idempotent), so that it may be shared -// across multiple snapshots. -type workspace struct { - root span.URI - excludePath func(string) bool - moduleSource workspaceSource - - // activeModFiles holds the active go.mod files. - activeModFiles map[span.URI]struct{} - - // knownModFiles holds the set of all go.mod files in the workspace. - // In all modes except for legacy, this is equivalent to modFiles. - knownModFiles map[span.URI]struct{} - - // workFile, if nonEmpty, is the go.work file for the workspace. - workFile span.URI - - // The workspace module is lazily re-built once after being invalidated. - // buildMu+built guards this reconstruction. - // - // file and wsDirs may be non-nil even if built == false, if they were copied - // from the previous workspace module version. In this case, they will be - // preserved if building fails. - buildMu sync.Mutex - built bool - buildErr error - mod *modfile.File - sum []byte - wsDirs map[span.URI]struct{} -} - -// newWorkspace creates a new workspace at the given root directory, -// determining its module source based on the presence of a gopls.mod or -// go.work file, and the go111moduleOff and useWsModule settings. -// -// If useWsModule is set, the workspace may use a synthetic mod file replacing -// all modules in the root. -// -// If there is no active workspace file (a gopls.mod or go.work), newWorkspace -// scans the filesystem to find modules. -func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff bool, useWsModule bool) (*workspace, error) { - ws := &workspace{ - root: root, - excludePath: excludePath, - } - - // The user may have a gopls.mod or go.work file that defines their - // workspace. - if err := loadExplicitWorkspaceFile(ctx, ws, fs); err == nil { - return ws, nil - } - - // Otherwise, in all other modes, search for all of the go.mod files in the - // workspace. - knownModFiles, err := findModules(root, excludePath, 0) - if err != nil { - return nil, err - } - ws.knownModFiles = knownModFiles - - switch { - case go111moduleOff: - ws.moduleSource = legacyWorkspace - case useWsModule: - ws.activeModFiles = knownModFiles - ws.moduleSource = fileSystemWorkspace - default: - ws.moduleSource = legacyWorkspace - activeModFiles, err := getLegacyModules(ctx, root, fs) - if err != nil { - return nil, err - } - ws.activeModFiles = activeModFiles - } - return ws, nil -} - -// loadExplicitWorkspaceFile loads workspace information from go.work or -// gopls.mod files, setting the active modules, mod file, and module source -// accordingly. -func loadExplicitWorkspaceFile(ctx context.Context, ws *workspace, fs source.FileSource) error { - for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} { - fh, err := fs.GetFile(ctx, uriForSource(ws.root, src)) - if err != nil { - return err - } - contents, err := fh.Read() - if err != nil { - continue - } - var file *modfile.File - var activeModFiles map[span.URI]struct{} - switch src { - case goWorkWorkspace: - file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs) - ws.workFile = fh.URI() - case goplsModWorkspace: - file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents) - } - if err != nil { - ws.buildMu.Lock() - ws.built = true - ws.buildErr = err - ws.buildMu.Unlock() - } - ws.mod = file - ws.activeModFiles = activeModFiles - ws.moduleSource = src - return nil - } - return noHardcodedWorkspace -} - -var noHardcodedWorkspace = errors.New("no hardcoded workspace") - -func (w *workspace) getKnownModFiles() map[span.URI]struct{} { - return w.knownModFiles -} - -func (w *workspace) getActiveModFiles() map[span.URI]struct{} { - return w.activeModFiles -} - -// modFile gets the workspace modfile associated with this workspace, -// computing it if it doesn't exist. -// -// A fileSource must be passed in to solve a chicken-egg problem: it is not -// correct to pass in the snapshot file source to newWorkspace when -// invalidating, because at the time these are called the snapshot is locked. -// So we must pass it in later on when actually using the modFile. -func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) { - w.build(ctx, fs) - return w.mod, w.buildErr -} - -func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) { - w.build(ctx, fs) - return w.sum, w.buildErr -} - -func (w *workspace) build(ctx context.Context, fs source.FileSource) { - w.buildMu.Lock() - defer w.buildMu.Unlock() - - if w.built { - return - } - // Building should never be cancelled. Since the workspace module is shared - // across multiple snapshots, doing so would put us in a bad state, and it - // would not be obvious to the user how to recover. - ctx = xcontext.Detach(ctx) - - // If our module source is not gopls.mod, try to build the workspace module - // from modules. Fall back on the pre-existing mod file if parsing fails. - if w.moduleSource != goplsModWorkspace { - file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs) - switch { - case err == nil: - w.mod = file - case w.mod != nil: - // Parsing failed, but we have a previous file version. - event.Error(ctx, "building workspace mod file", err) - default: - // No file to fall back on. - w.buildErr = err - } - } - if w.mod != nil { - w.wsDirs = map[span.URI]struct{}{ - w.root: {}, - } - for _, r := range w.mod.Replace { - // We may be replacing a module with a different version, not a path - // on disk. - if r.New.Version != "" { - continue - } - w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{} - } - } - // Ensure that there is always at least the root dir. - if len(w.wsDirs) == 0 { - w.wsDirs = map[span.URI]struct{}{ - w.root: {}, - } - } - sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs) - if err == nil { - w.sum = sum - } else { - event.Error(ctx, "building workspace sum file", err) - } - w.built = true -} - -// dirs returns the workspace directories for the loaded modules. -func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI { - w.build(ctx, fs) - var dirs []span.URI - for d := range w.wsDirs { - dirs = append(dirs, d) - } - sort.Slice(dirs, func(i, j int) bool { - return source.CompareURI(dirs[i], dirs[j]) < 0 - }) - return dirs -} - -// invalidate returns a (possibly) new workspace after invalidating the changed -// files. If w is still valid in the presence of changedURIs, it returns itself -// unmodified. -// -// The returned changed and reload flags control the level of invalidation. -// Some workspace changes may affect workspace contents without requiring a -// reload of metadata (for example, unsaved changes to a go.mod or go.sum -// file). -func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, changed, reload bool) { - // Prevent races to w.modFile or w.wsDirs below, if w has not yet been built. - w.buildMu.Lock() - defer w.buildMu.Unlock() - - // Clone the workspace. This may be discarded if nothing changed. - result := &workspace{ - root: w.root, - moduleSource: w.moduleSource, - knownModFiles: make(map[span.URI]struct{}), - activeModFiles: make(map[span.URI]struct{}), - workFile: w.workFile, - mod: w.mod, - sum: w.sum, - wsDirs: w.wsDirs, - excludePath: w.excludePath, - } - for k, v := range w.knownModFiles { - result.knownModFiles[k] = v - } - for k, v := range w.activeModFiles { - result.activeModFiles[k] = v - } - - // First handle changes to the go.work or gopls.mod file. This must be - // considered before any changes to go.mod or go.sum files, as these files - // determine which modules we care about. If go.work/gopls.mod has changed - // we need to either re-read it if it exists or walk the filesystem if it - // has been deleted. go.work should override the gopls.mod if both exist. - changed, reload = handleWorkspaceFileChanges(ctx, result, changes, fs) - // Next, handle go.mod changes that could affect our workspace. - for uri, change := range changes { - // Otherwise, we only care about go.mod files in the workspace directory. - if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) { - continue - } - changed = true - active := result.moduleSource != legacyWorkspace || source.CompareURI(modURI(w.root), uri) == 0 - reload = reload || (active && change.fileHandle.Saved()) - // Don't mess with the list of mod files if using go.work or gopls.mod. - if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace { - continue - } - if change.exists { - result.knownModFiles[uri] = struct{}{} - if active { - result.activeModFiles[uri] = struct{}{} - } - } else { - delete(result.knownModFiles, uri) - delete(result.activeModFiles, uri) - } - } - - // Finally, process go.sum changes for any modules that are now active. - for uri, change := range changes { - if !isGoSum(uri) { - continue - } - // TODO(rFindley) factor out this URI mangling. - dir := filepath.Dir(uri.Filename()) - modURI := span.URIFromPath(filepath.Join(dir, "go.mod")) - if _, active := result.activeModFiles[modURI]; !active { - continue - } - // Only changes to active go.sum files actually cause the workspace to - // change. - changed = true - reload = reload || change.fileHandle.Saved() - } - - if !changed { - return w, false, false - } - - return result, changed, reload -} - -// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod -// file, updating ws accordingly. ws.root must be set. -func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) { - // If go.work/gopls.mod has changed we need to either re-read it if it - // exists or walk the filesystem if it has been deleted. - // go.work should override the gopls.mod if both exist. - for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} { - uri := uriForSource(ws.root, src) - // File opens/closes are just no-ops. - change, ok := changes[uri] - if !ok { - continue - } - if change.isUnchanged { - break - } - if change.exists { - // Only invalidate if the file if it actually parses. - // Otherwise, stick with the current file. - var parsedFile *modfile.File - var parsedModules map[span.URI]struct{} - var err error - switch src { - case goWorkWorkspace: - parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs) - case goplsModWorkspace: - parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content) - } - if err != nil { - // An unparseable file should not invalidate the workspace: - // nothing good could come from changing the workspace in - // this case. - event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err) - } else { - // only update the modfile if it parsed. - changed = true - reload = change.fileHandle.Saved() - ws.mod = parsedFile - ws.moduleSource = src - ws.knownModFiles = parsedModules - ws.activeModFiles = make(map[span.URI]struct{}) - for k, v := range parsedModules { - ws.activeModFiles[k] = v - } - } - break // We've found an explicit workspace file, so can stop looking. - } else { - // go.work/gopls.mod is deleted. search for modules again. - changed = true - reload = true - ws.moduleSource = fileSystemWorkspace - // The parsed file is no longer valid. - ws.mod = nil - knownModFiles, err := findModules(ws.root, ws.excludePath, 0) - if err != nil { - ws.knownModFiles = nil - ws.activeModFiles = nil - event.Error(ctx, "finding file system modules", err) - } else { - ws.knownModFiles = knownModFiles - ws.activeModFiles = make(map[span.URI]struct{}) - for k, v := range ws.knownModFiles { - ws.activeModFiles[k] = v - } - } - } - } - return changed, reload -} - -// goplsModURI returns the URI for the gopls.mod file contained in root. -func uriForSource(root span.URI, src workspaceSource) span.URI { - var basename string - switch src { - case goplsModWorkspace: - basename = "gopls.mod" - case goWorkWorkspace: - basename = "go.work" - default: - return "" - } - return span.URIFromPath(filepath.Join(root.Filename(), basename)) -} - -// modURI returns the URI for the go.mod file contained in root. -func modURI(root span.URI) span.URI { - return span.URIFromPath(filepath.Join(root.Filename(), "go.mod")) -} - -// isGoMod reports if uri is a go.mod file. -func isGoMod(uri span.URI) bool { - return filepath.Base(uri.Filename()) == "go.mod" -} - -func isGoSum(uri span.URI) bool { - return filepath.Base(uri.Filename()) == "go.sum" || filepath.Base(uri.Filename()) == "go.work.sum" -} - -// fileExists reports if the file uri exists within source. -func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) { - fh, err := source.GetFile(ctx, uri) - if err != nil { - return false, err - } - return fileHandleExists(fh) -} - -// fileHandleExists reports if the file underlying fh actually exits. -func fileHandleExists(fh source.FileHandle) (bool, error) { - _, err := fh.Read() - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -// TODO(rFindley): replace this (and similar) with a uripath package analogous -// to filepath. -func dirURI(uri span.URI) span.URI { - return span.URIFromPath(filepath.Dir(uri.Filename())) -} - -// getLegacyModules returns a module set containing at most the root module. -func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) { - uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod")) - modules := make(map[span.URI]struct{}) - exists, err := fileExists(ctx, uri, fs) - if err != nil { - return nil, err - } - if exists { - modules[uri] = struct{}{} - } - return modules, nil -} - -func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) { - workFile, err := modfile.ParseWork(uri.Filename(), contents, nil) - if err != nil { - return nil, nil, fmt.Errorf("parsing go.work: %w", err) - } - modFiles := make(map[span.URI]struct{}) - for _, dir := range workFile.Use { - // The resulting modfile must use absolute paths, so that it can be - // written to a temp directory. - dir.Path = absolutePath(root, dir.Path) - modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod")) - modFiles[modURI] = struct{}{} - } - modFile, err := buildWorkspaceModFile(ctx, modFiles, fs) - if err != nil { - return nil, nil, err - } - - // Require a go directive, per the spec. - if workFile.Go == nil || workFile.Go.Version == "" { - return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive") - } - if err := modFile.AddGoStmt(workFile.Go.Version); err != nil { - return nil, nil, err - } - - return modFile, modFiles, nil -} - -func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) { - modFile, err := modfile.Parse(uri.Filename(), contents, nil) - if err != nil { - return nil, nil, fmt.Errorf("parsing gopls.mod: %w", err) - } - modFiles := make(map[span.URI]struct{}) - for _, replace := range modFile.Replace { - if replace.New.Version != "" { - return nil, nil, fmt.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version) - } - // The resulting modfile must use absolute paths, so that it can be - // written to a temp directory. - replace.New.Path = absolutePath(root, replace.New.Path) - modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod")) - modFiles[modURI] = struct{}{} - } - return modFile, modFiles, nil -} - -func absolutePath(root span.URI, path string) string { - dirFP := filepath.FromSlash(path) - if !filepath.IsAbs(dirFP) { - dirFP = filepath.Join(root.Filename(), dirFP) - } - return dirFP -} - -// errExhausted is returned by findModules if the file scan limit is reached. -var errExhausted = errors.New("exhausted") - -// Limit go.mod search to 1 million files. As a point of reference, -// Kubernetes has 22K files (as of 2020-11-24). -const fileLimit = 1000000 - -// findModules recursively walks the root directory looking for go.mod files, -// returning the set of modules it discovers. If modLimit is non-zero, -// searching stops once modLimit modules have been found. -// -// TODO(rfindley): consider overlays. -func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) { - // Walk the view's folder to find all modules in the view. - modFiles := make(map[span.URI]struct{}) - searched := 0 - errDone := errors.New("done") - err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error { - if err != nil { - // Probably a permission error. Keep looking. - return filepath.SkipDir - } - // For any path that is not the workspace folder, check if the path - // would be ignored by the go command. Vendor directories also do not - // contain workspace modules. - if info.IsDir() && path != root.Filename() { - suffix := strings.TrimPrefix(path, root.Filename()) - switch { - case checkIgnored(suffix), - strings.Contains(filepath.ToSlash(suffix), "/vendor/"), - excludePath(suffix): - return filepath.SkipDir - } - } - // We're only interested in go.mod files. - uri := span.URIFromPath(path) - if isGoMod(uri) { - modFiles[uri] = struct{}{} - } - if modLimit > 0 && len(modFiles) >= modLimit { - return errDone - } - searched++ - if fileLimit > 0 && searched >= fileLimit { - return errExhausted - } - return nil - }) - if err == errDone { - return modFiles, nil - } - return modFiles, err -} diff --git a/internal/lsp/cache/workspace_test.go b/internal/lsp/cache/workspace_test.go deleted file mode 100644 index b809ad196a6..00000000000 --- a/internal/lsp/cache/workspace_test.go +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cache - -import ( - "context" - "errors" - "os" - "strings" - "testing" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -// osFileSource is a fileSource that just reads from the operating system. -type osFileSource struct { - overlays map[span.URI]fakeOverlay -} - -type fakeOverlay struct { - source.VersionedFileHandle - uri span.URI - content string - err error - saved bool -} - -func (o fakeOverlay) Saved() bool { return o.saved } - -func (o fakeOverlay) Read() ([]byte, error) { - if o.err != nil { - return nil, o.err - } - return []byte(o.content), nil -} - -func (o fakeOverlay) URI() span.URI { - return o.uri -} - -// change updates the file source with the given file content. For convenience, -// empty content signals a deletion. If saved is true, these changes are -// persisted to disk. -func (s *osFileSource) change(ctx context.Context, uri span.URI, content string, saved bool) (*fileChange, error) { - if content == "" { - delete(s.overlays, uri) - if saved { - if err := os.Remove(uri.Filename()); err != nil { - return nil, err - } - } - fh, err := s.GetFile(ctx, uri) - if err != nil { - return nil, err - } - data, err := fh.Read() - return &fileChange{exists: err == nil, content: data, fileHandle: &closedFile{fh}}, nil - } - if s.overlays == nil { - s.overlays = map[span.URI]fakeOverlay{} - } - s.overlays[uri] = fakeOverlay{uri: uri, content: content, saved: saved} - return &fileChange{ - exists: content != "", - content: []byte(content), - fileHandle: s.overlays[uri], - }, nil -} - -func (s *osFileSource) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { - if overlay, ok := s.overlays[uri]; ok { - return overlay, nil - } - fi, statErr := os.Stat(uri.Filename()) - if statErr != nil { - return &fileHandle{ - err: statErr, - uri: uri, - }, nil - } - fh, err := readFile(ctx, uri, fi) - if err != nil { - return nil, err - } - return fh, nil -} - -type wsState struct { - source workspaceSource - modules []string - dirs []string - sum string -} - -type wsChange struct { - content string - saved bool -} - -func TestWorkspaceModule(t *testing.T) { - tests := []struct { - desc string - initial string // txtar-encoded - legacyMode bool - initialState wsState - updates map[string]wsChange - wantChanged bool - wantReload bool - finalState wsState - }{ - { - desc: "legacy mode", - initial: ` --- go.mod -- -module mod.com --- go.sum -- -golang.org/x/mod v0.3.0 h1:deadbeef --- a/go.mod -- -module moda.com`, - legacyMode: true, - initialState: wsState{ - modules: []string{"./go.mod"}, - source: legacyWorkspace, - dirs: []string{"."}, - sum: "golang.org/x/mod v0.3.0 h1:deadbeef\n", - }, - }, - { - desc: "nested module", - initial: ` --- go.mod -- -module mod.com --- a/go.mod -- -module moda.com`, - initialState: wsState{ - modules: []string{"./go.mod", "a/go.mod"}, - source: fileSystemWorkspace, - dirs: []string{".", "a"}, - }, - }, - { - desc: "removing module", - initial: ` --- a/go.mod -- -module moda.com --- a/go.sum -- -golang.org/x/mod v0.3.0 h1:deadbeef --- b/go.mod -- -module modb.com --- b/go.sum -- -golang.org/x/mod v0.3.0 h1:beefdead`, - initialState: wsState{ - modules: []string{"a/go.mod", "b/go.mod"}, - source: fileSystemWorkspace, - dirs: []string{".", "a", "b"}, - sum: "golang.org/x/mod v0.3.0 h1:beefdead\ngolang.org/x/mod v0.3.0 h1:deadbeef\n", - }, - updates: map[string]wsChange{ - "gopls.mod": {`module gopls-workspace - -require moda.com v0.0.0-goplsworkspace -replace moda.com => $SANDBOX_WORKDIR/a`, true}, - }, - wantChanged: true, - wantReload: true, - finalState: wsState{ - modules: []string{"a/go.mod"}, - source: goplsModWorkspace, - dirs: []string{".", "a"}, - sum: "golang.org/x/mod v0.3.0 h1:deadbeef\n", - }, - }, - { - desc: "adding module", - initial: ` --- gopls.mod -- -require moda.com v0.0.0-goplsworkspace -replace moda.com => $SANDBOX_WORKDIR/a --- a/go.mod -- -module moda.com --- b/go.mod -- -module modb.com`, - initialState: wsState{ - modules: []string{"a/go.mod"}, - source: goplsModWorkspace, - dirs: []string{".", "a"}, - }, - updates: map[string]wsChange{ - "gopls.mod": {`module gopls-workspace - -require moda.com v0.0.0-goplsworkspace -require modb.com v0.0.0-goplsworkspace - -replace moda.com => $SANDBOX_WORKDIR/a -replace modb.com => $SANDBOX_WORKDIR/b`, true}, - }, - wantChanged: true, - wantReload: true, - finalState: wsState{ - modules: []string{"a/go.mod", "b/go.mod"}, - source: goplsModWorkspace, - dirs: []string{".", "a", "b"}, - }, - }, - { - desc: "deleting gopls.mod", - initial: ` --- gopls.mod -- -module gopls-workspace - -require moda.com v0.0.0-goplsworkspace -replace moda.com => $SANDBOX_WORKDIR/a --- a/go.mod -- -module moda.com --- b/go.mod -- -module modb.com`, - initialState: wsState{ - modules: []string{"a/go.mod"}, - source: goplsModWorkspace, - dirs: []string{".", "a"}, - }, - updates: map[string]wsChange{ - "gopls.mod": {"", true}, - }, - wantChanged: true, - wantReload: true, - finalState: wsState{ - modules: []string{"a/go.mod", "b/go.mod"}, - source: fileSystemWorkspace, - dirs: []string{".", "a", "b"}, - }, - }, - { - desc: "broken module parsing", - initial: ` --- a/go.mod -- -module moda.com - -require gopls.test v0.0.0-goplsworkspace -replace gopls.test => ../../gopls.test // (this path shouldn't matter) --- b/go.mod -- -module modb.com`, - initialState: wsState{ - modules: []string{"a/go.mod", "b/go.mod"}, - source: fileSystemWorkspace, - dirs: []string{".", "a", "b", "../gopls.test"}, - }, - updates: map[string]wsChange{ - "a/go.mod": {`modul moda.com - -require gopls.test v0.0.0-goplsworkspace -replace gopls.test => ../../gopls.test2`, false}, - }, - wantChanged: true, - wantReload: false, - finalState: wsState{ - modules: []string{"a/go.mod", "b/go.mod"}, - source: fileSystemWorkspace, - // finalDirs should be unchanged: we should preserve dirs in the presence - // of a broken modfile. - dirs: []string{".", "a", "b", "../gopls.test"}, - }, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - ctx := context.Background() - dir, err := fake.Tempdir(fake.UnpackTxt(test.initial)) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - root := span.URIFromPath(dir) - - fs := &osFileSource{} - excludeNothing := func(string) bool { return false } - w, err := newWorkspace(ctx, root, fs, excludeNothing, false, !test.legacyMode) - if err != nil { - t.Fatal(err) - } - rel := fake.RelativeTo(dir) - checkState(ctx, t, fs, rel, w, test.initialState) - - // Apply updates. - if test.updates != nil { - changes := make(map[span.URI]*fileChange) - for k, v := range test.updates { - content := strings.ReplaceAll(v.content, "$SANDBOX_WORKDIR", string(rel)) - uri := span.URIFromPath(rel.AbsPath(k)) - changes[uri], err = fs.change(ctx, uri, content, v.saved) - if err != nil { - t.Fatal(err) - } - } - got, gotChanged, gotReload := w.invalidate(ctx, changes, fs) - if gotChanged != test.wantChanged { - t.Errorf("w.invalidate(): got changed %t, want %t", gotChanged, test.wantChanged) - } - if gotReload != test.wantReload { - t.Errorf("w.invalidate(): got reload %t, want %t", gotReload, test.wantReload) - } - checkState(ctx, t, fs, rel, got, test.finalState) - } - }) - } -} - -func workspaceFromTxtar(t *testing.T, files string) (*workspace, func(), error) { - ctx := context.Background() - dir, err := fake.Tempdir(fake.UnpackTxt(files)) - if err != nil { - return nil, func() {}, err - } - cleanup := func() { - os.RemoveAll(dir) - } - root := span.URIFromPath(dir) - - fs := &osFileSource{} - excludeNothing := func(string) bool { return false } - workspace, err := newWorkspace(ctx, root, fs, excludeNothing, false, false) - return workspace, cleanup, err -} - -func TestWorkspaceParseError(t *testing.T) { - w, cleanup, err := workspaceFromTxtar(t, ` --- go.work -- -go 1.18 - -usa ./typo --- typo/go.mod -- -module foo -`) - defer cleanup() - if err != nil { - t.Fatalf("error creating workspace: %v; want no error", err) - } - w.buildMu.Lock() - built, buildErr := w.built, w.buildErr - w.buildMu.Unlock() - if !built || buildErr == nil { - t.Fatalf("built, buildErr: got %v, %v; want true, non-nil", built, buildErr) - } - var errList modfile.ErrorList - if !errors.As(buildErr, &errList) { - t.Fatalf("expected error to be an errorlist; got %v", buildErr) - } - if len(errList) != 1 { - t.Fatalf("expected errorList to have one element; got %v elements", len(errList)) - } - parseErr := errList[0] - if parseErr.Pos.Line != 3 { - t.Fatalf("expected error to be on line 3; got %v", parseErr.Pos.Line) - } -} - -func TestWorkspaceMissingModFile(t *testing.T) { - w, cleanup, err := workspaceFromTxtar(t, ` --- go.work -- -go 1.18 - -use ./missing -`) - defer cleanup() - if err != nil { - t.Fatalf("error creating workspace: %v; want no error", err) - } - w.buildMu.Lock() - built, buildErr := w.built, w.buildErr - w.buildMu.Unlock() - if !built || buildErr == nil { - t.Fatalf("built, buildErr: got %v, %v; want true, non-nil", built, buildErr) - } -} - -func checkState(ctx context.Context, t *testing.T, fs source.FileSource, rel fake.RelativeTo, got *workspace, want wsState) { - t.Helper() - if got.moduleSource != want.source { - t.Errorf("module source = %v, want %v", got.moduleSource, want.source) - } - modules := make(map[span.URI]struct{}) - for k := range got.getActiveModFiles() { - modules[k] = struct{}{} - } - for _, modPath := range want.modules { - path := rel.AbsPath(modPath) - uri := span.URIFromPath(path) - if _, ok := modules[uri]; !ok { - t.Errorf("missing module %q", uri) - } - delete(modules, uri) - } - for remaining := range modules { - t.Errorf("unexpected module %q", remaining) - } - gotDirs := got.dirs(ctx, fs) - gotM := make(map[span.URI]bool) - for _, dir := range gotDirs { - gotM[dir] = true - } - for _, dir := range want.dirs { - path := rel.AbsPath(dir) - uri := span.URIFromPath(path) - if !gotM[uri] { - t.Errorf("missing dir %q", uri) - } - delete(gotM, uri) - } - for remaining := range gotM { - t.Errorf("unexpected dir %q", remaining) - } - gotSumBytes, err := got.sumFile(ctx, fs) - if err != nil { - t.Fatal(err) - } - if gotSum := string(gotSumBytes); gotSum != want.sum { - t.Errorf("got final sum %q, want %q", gotSum, want.sum) - } -} diff --git a/internal/lsp/call_hierarchy.go b/internal/lsp/call_hierarchy.go deleted file mode 100644 index 43c4ea8d5b7..00000000000 --- a/internal/lsp/call_hierarchy.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) prepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - - return source.PrepareCallHierarchy(ctx, snapshot, fh, params.Position) -} - -func (s *Server) incomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go) - defer release() - if !ok { - return nil, err - } - - return source.IncomingCalls(ctx, snapshot, fh, params.Item.Range.Start) -} - -func (s *Server) outgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go) - defer release() - if !ok { - return nil, err - } - - return source.OutgoingCalls(ctx, snapshot, fh, params.Item.Range.Start) -} diff --git a/internal/lsp/cmd/call_hierarchy.go b/internal/lsp/cmd/call_hierarchy.go deleted file mode 100644 index c9f9e73e0e2..00000000000 --- a/internal/lsp/cmd/call_hierarchy.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "strings" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// callHierarchy implements the callHierarchy verb for gopls. -type callHierarchy struct { - app *Application -} - -func (c *callHierarchy) Name() string { return "call_hierarchy" } -func (c *callHierarchy) Parent() string { return c.app.Name() } -func (c *callHierarchy) Usage() string { return "" } -func (c *callHierarchy) ShortHelp() string { return "display selected identifier's call hierarchy" } -func (c *callHierarchy) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls call_hierarchy helper/helper.go:8:6 - $ gopls call_hierarchy helper/helper.go:#53 -`) - printFlagDefaults(f) -} - -func (c *callHierarchy) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("call_hierarchy expects 1 argument (position)") - } - - conn, err := c.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - - p := protocol.CallHierarchyPrepareParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - - callItems, err := conn.PrepareCallHierarchy(ctx, &p) - if err != nil { - return err - } - if len(callItems) == 0 { - return fmt.Errorf("function declaration identifier not found at %v", args[0]) - } - - for _, item := range callItems { - incomingCalls, err := conn.IncomingCalls(ctx, &protocol.CallHierarchyIncomingCallsParams{Item: item}) - if err != nil { - return err - } - for i, call := range incomingCalls { - // From the spec: CallHierarchyIncomingCall.FromRanges is relative to - // the caller denoted by CallHierarchyIncomingCall.from. - printString, err := callItemPrintString(ctx, conn, call.From, call.From.URI, call.FromRanges) - if err != nil { - return err - } - fmt.Printf("caller[%d]: %s\n", i, printString) - } - - printString, err := callItemPrintString(ctx, conn, item, "", nil) - if err != nil { - return err - } - fmt.Printf("identifier: %s\n", printString) - - outgoingCalls, err := conn.OutgoingCalls(ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: item}) - if err != nil { - return err - } - for i, call := range outgoingCalls { - // From the spec: CallHierarchyOutgoingCall.FromRanges is the range - // relative to the caller, e.g the item passed to - printString, err := callItemPrintString(ctx, conn, call.To, item.URI, call.FromRanges) - if err != nil { - return err - } - fmt.Printf("callee[%d]: %s\n", i, printString) - } - } - - return nil -} - -// callItemPrintString returns a protocol.CallHierarchyItem object represented as a string. -// item and call ranges (protocol.Range) are converted to user friendly spans (1-indexed). -func callItemPrintString(ctx context.Context, conn *connection, item protocol.CallHierarchyItem, callsURI protocol.DocumentURI, calls []protocol.Range) (string, error) { - itemFile := conn.AddFile(ctx, item.URI.SpanURI()) - if itemFile.err != nil { - return "", itemFile.err - } - itemSpan, err := itemFile.mapper.Span(protocol.Location{URI: item.URI, Range: item.Range}) - if err != nil { - return "", err - } - - callsFile := conn.AddFile(ctx, callsURI.SpanURI()) - if callsURI != "" && callsFile.err != nil { - return "", callsFile.err - } - var callRanges []string - for _, rng := range calls { - callSpan, err := callsFile.mapper.Span(protocol.Location{URI: item.URI, Range: rng}) - if err != nil { - return "", err - } - - spn := fmt.Sprint(callSpan) - callRanges = append(callRanges, fmt.Sprint(spn[strings.Index(spn, ":")+1:])) - } - - printString := fmt.Sprintf("function %s in %v", item.Name, itemSpan) - if len(calls) > 0 { - printString = fmt.Sprintf("ranges %s in %s from/to %s", strings.Join(callRanges, ", "), callsURI.SpanURI().Filename(), printString) - } - return printString, nil -} diff --git a/internal/lsp/cmd/check.go b/internal/lsp/cmd/check.go deleted file mode 100644 index 9a136699270..00000000000 --- a/internal/lsp/cmd/check.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - - "golang.org/x/tools/internal/span" -) - -// check implements the check verb for gopls. -type check struct { - app *Application -} - -func (c *check) Name() string { return "check" } -func (c *check) Parent() string { return c.app.Name() } -func (c *check) Usage() string { return "" } -func (c *check) ShortHelp() string { return "show diagnostic results for the specified file" } -func (c *check) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: show the diagnostic results of this file: - - $ gopls check internal/lsp/cmd/check.go -`) - printFlagDefaults(f) -} - -// Run performs the check on the files specified by args and prints the -// results to stdout. -func (c *check) Run(ctx context.Context, args ...string) error { - if len(args) == 0 { - // no files, so no results - return nil - } - checking := map[span.URI]*cmdFile{} - var uris []span.URI - // now we ready to kick things off - conn, err := c.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - for _, arg := range args { - uri := span.URIFromPath(arg) - uris = append(uris, uri) - file := conn.AddFile(ctx, uri) - if file.err != nil { - return file.err - } - checking[uri] = file - } - if err := conn.diagnoseFiles(ctx, uris); err != nil { - return err - } - conn.Client.filesMu.Lock() - defer conn.Client.filesMu.Unlock() - - for _, file := range checking { - for _, d := range file.diagnostics { - spn, err := file.mapper.RangeSpan(d.Range) - if err != nil { - return fmt.Errorf("Could not convert position %v for %q", d.Range, d.Message) - } - fmt.Printf("%v: %v\n", spn, d.Message) - } - } - return nil -} diff --git a/internal/lsp/cmd/cmd_test.go b/internal/lsp/cmd/cmd_test.go deleted file mode 100644 index c44bd5722cb..00000000000 --- a/internal/lsp/cmd/cmd_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd_test - -import ( - "os" - "testing" - - "golang.org/x/tools/internal/lsp/bug" - cmdtest "golang.org/x/tools/internal/lsp/cmd/test" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/testenv" -) - -func TestMain(m *testing.M) { - bug.PanicOnBugs = true - testenv.ExitIfSmallMachine() - os.Exit(m.Run()) -} - -func TestCommandLine(t *testing.T) { - cmdtest.TestCommandLine(t, "../testdata", tests.DefaultOptions) -} diff --git a/internal/lsp/cmd/definition.go b/internal/lsp/cmd/definition.go deleted file mode 100644 index 44e6fc8c717..00000000000 --- a/internal/lsp/cmd/definition.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - "strings" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// A Definition is the result of a 'definition' query. -type Definition struct { - Span span.Span `json:"span"` // span of the definition - Description string `json:"description"` // description of the denoted object -} - -// These constant is printed in the help, and then used in a test to verify the -// help is still valid. -// They refer to "Set" in "flag.FlagSet" from the DetailedHelp method below. -const ( - exampleLine = 44 - exampleColumn = 47 - exampleOffset = 1270 -) - -// definition implements the definition verb for gopls. -type definition struct { - app *Application - - JSON bool `flag:"json" help:"emit output in JSON format"` - MarkdownSupported bool `flag:"markdown" help:"support markdown in responses"` -} - -func (d *definition) Name() string { return "definition" } -func (d *definition) Parent() string { return d.app.Name() } -func (d *definition) Usage() string { return "[definition-flags] " } -func (d *definition) ShortHelp() string { return "show declaration of selected identifier" } -func (d *definition) DetailedHelp(f *flag.FlagSet) { - fmt.Fprintf(f.Output(), ` -Example: show the definition of the identifier at syntax at offset %[1]v in this file (flag.FlagSet): - - $ gopls definition internal/lsp/cmd/definition.go:%[1]v:%[2]v - $ gopls definition internal/lsp/cmd/definition.go:#%[3]v - -definition-flags: -`, exampleLine, exampleColumn, exampleOffset) - printFlagDefaults(f) -} - -// Run performs the definition query as specified by args and prints the -// results to stdout. -func (d *definition) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("definition expects 1 argument") - } - // Plaintext makes more sense for the command line. - opts := d.app.options - d.app.options = func(o *source.Options) { - if opts != nil { - opts(o) - } - o.PreferredContentFormat = protocol.PlainText - if d.MarkdownSupported { - o.PreferredContentFormat = protocol.Markdown - } - } - conn, err := d.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - p := protocol.DefinitionParams{ - TextDocumentPositionParams: tdpp, - } - locs, err := conn.Definition(ctx, &p) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - - if len(locs) == 0 { - return fmt.Errorf("%v: not an identifier", from) - } - q := protocol.HoverParams{ - TextDocumentPositionParams: tdpp, - } - hover, err := conn.Hover(ctx, &q) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - if hover == nil { - return fmt.Errorf("%v: not an identifier", from) - } - file = conn.AddFile(ctx, fileURI(locs[0].URI)) - if file.err != nil { - return fmt.Errorf("%v: %v", from, file.err) - } - definition, err := file.mapper.Span(locs[0]) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - description := strings.TrimSpace(hover.Contents.Value) - result := &Definition{ - Span: definition, - Description: description, - } - if d.JSON { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", "\t") - return enc.Encode(result) - } - fmt.Printf("%v: defined here as %s", result.Span, result.Description) - return nil -} diff --git a/internal/lsp/cmd/export_test.go b/internal/lsp/cmd/export_test.go deleted file mode 100644 index 05b3cd31261..00000000000 --- a/internal/lsp/cmd/export_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -const ( - ExampleLine = exampleLine - ExampleColumn = exampleColumn - ExampleOffset = exampleOffset -) diff --git a/internal/lsp/cmd/folding_range.go b/internal/lsp/cmd/folding_range.go deleted file mode 100644 index 513c9bdd227..00000000000 --- a/internal/lsp/cmd/folding_range.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// foldingRanges implements the folding_ranges verb for gopls -type foldingRanges struct { - app *Application -} - -func (r *foldingRanges) Name() string { return "folding_ranges" } -func (r *foldingRanges) Parent() string { return r.app.Name() } -func (r *foldingRanges) Usage() string { return "" } -func (r *foldingRanges) ShortHelp() string { return "display selected file's folding ranges" } -func (r *foldingRanges) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ gopls folding_ranges helper/helper.go -`) - printFlagDefaults(f) -} - -func (r *foldingRanges) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("folding_ranges expects 1 argument (file)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - - p := protocol.FoldingRangeParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(from.URI()), - }, - } - - ranges, err := conn.FoldingRange(ctx, &p) - if err != nil { - return err - } - - for _, r := range ranges { - fmt.Printf("%v:%v-%v:%v\n", - r.StartLine+1, - r.StartCharacter+1, - r.EndLine+1, - r.EndCharacter, - ) - } - - return nil -} diff --git a/internal/lsp/cmd/format.go b/internal/lsp/cmd/format.go deleted file mode 100644 index 5e17ed4a570..00000000000 --- a/internal/lsp/cmd/format.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -// format implements the format verb for gopls. -type format struct { - Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` - Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` - List bool `flag:"l,list" help:"list files whose formatting differs from gofmt's"` - - app *Application -} - -func (c *format) Name() string { return "format" } -func (c *format) Parent() string { return c.app.Name() } -func (c *format) Usage() string { return "[format-flags] " } -func (c *format) ShortHelp() string { return "format the code according to the go standard" } -func (c *format) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -The arguments supplied may be simple file names, or ranges within files. - -Example: reformat this file: - - $ gopls format -w internal/lsp/cmd/check.go - -format-flags: -`) - printFlagDefaults(f) -} - -// Run performs the check on the files specified by args and prints the -// results to stdout. -func (c *format) Run(ctx context.Context, args ...string) error { - if len(args) == 0 { - // no files, so no results - return nil - } - // now we ready to kick things off - conn, err := c.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - for _, arg := range args { - spn := span.Parse(arg) - file := conn.AddFile(ctx, spn.URI()) - if file.err != nil { - return file.err - } - filename := spn.URI().Filename() - loc, err := file.mapper.Location(spn) - if err != nil { - return err - } - if loc.Range.Start != loc.Range.End { - return fmt.Errorf("only full file formatting supported") - } - p := protocol.DocumentFormattingParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - } - edits, err := conn.Formatting(ctx, &p) - if err != nil { - return fmt.Errorf("%v: %v", spn, err) - } - sedits, err := source.FromProtocolEdits(file.mapper, edits) - if err != nil { - return fmt.Errorf("%v: %v", spn, err) - } - formatted := diff.ApplyEdits(string(file.mapper.Content), sedits) - printIt := true - if c.List { - printIt = false - if len(edits) > 0 { - fmt.Println(filename) - } - } - if c.Write { - printIt = false - if len(edits) > 0 { - ioutil.WriteFile(filename, []byte(formatted), 0644) - } - } - if c.Diff { - printIt = false - u := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) - fmt.Print(u) - } - if printIt { - fmt.Print(formatted) - } - } - return nil -} diff --git a/internal/lsp/cmd/highlight.go b/internal/lsp/cmd/highlight.go deleted file mode 100644 index a325a2d53d9..00000000000 --- a/internal/lsp/cmd/highlight.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "sort" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// highlight implements the highlight verb for gopls. -type highlight struct { - app *Application -} - -func (r *highlight) Name() string { return "highlight" } -func (r *highlight) Parent() string { return r.app.Name() } -func (r *highlight) Usage() string { return "" } -func (r *highlight) ShortHelp() string { return "display selected identifier's highlights" } -func (r *highlight) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls highlight helper/helper.go:8:6 - $ gopls highlight helper/helper.go:#53 -`) - printFlagDefaults(f) -} - -func (r *highlight) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("highlight expects 1 argument (position)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - - p := protocol.DocumentHighlightParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - highlights, err := conn.DocumentHighlight(ctx, &p) - if err != nil { - return err - } - - var results []span.Span - for _, h := range highlights { - l := protocol.Location{Range: h.Range} - s, err := file.mapper.Span(l) - if err != nil { - return err - } - results = append(results, s) - } - // Sort results to make tests deterministic since DocumentHighlight uses a map. - sort.SliceStable(results, func(i, j int) bool { - return span.Compare(results[i], results[j]) == -1 - }) - - for _, s := range results { - fmt.Println(s) - } - return nil -} diff --git a/internal/lsp/cmd/implementation.go b/internal/lsp/cmd/implementation.go deleted file mode 100644 index 7b42d994303..00000000000 --- a/internal/lsp/cmd/implementation.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "sort" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// implementation implements the implementation verb for gopls -type implementation struct { - app *Application -} - -func (i *implementation) Name() string { return "implementation" } -func (i *implementation) Parent() string { return i.app.Name() } -func (i *implementation) Usage() string { return "" } -func (i *implementation) ShortHelp() string { return "display selected identifier's implementation" } -func (i *implementation) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls implementation helper/helper.go:8:6 - $ gopls implementation helper/helper.go:#53 -`) - printFlagDefaults(f) -} - -func (i *implementation) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("implementation expects 1 argument (position)") - } - - conn, err := i.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - - p := protocol.ImplementationParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - - implementations, err := conn.Implementation(ctx, &p) - if err != nil { - return err - } - - var spans []string - for _, impl := range implementations { - f := conn.AddFile(ctx, fileURI(impl.URI)) - span, err := f.mapper.Span(impl) - if err != nil { - return err - } - spans = append(spans, fmt.Sprint(span)) - } - sort.Strings(spans) - - for _, s := range spans { - fmt.Println(s) - } - - return nil -} diff --git a/internal/lsp/cmd/imports.go b/internal/lsp/cmd/imports.go deleted file mode 100644 index 49778603d23..00000000000 --- a/internal/lsp/cmd/imports.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// imports implements the import verb for gopls. -type imports struct { - Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` - Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` - - app *Application -} - -func (t *imports) Name() string { return "imports" } -func (t *imports) Parent() string { return t.app.Name() } -func (t *imports) Usage() string { return "[imports-flags] " } -func (t *imports) ShortHelp() string { return "updates import statements" } -func (t *imports) DetailedHelp(f *flag.FlagSet) { - fmt.Fprintf(f.Output(), ` -Example: update imports statements in a file: - - $ gopls imports -w internal/lsp/cmd/check.go - -imports-flags: -`) - printFlagDefaults(f) -} - -// Run performs diagnostic checks on the file specified and either; -// - if -w is specified, updates the file in place; -// - if -d is specified, prints out unified diffs of the changes; or -// - otherwise, prints the new versions to stdout. -func (t *imports) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("imports expects 1 argument") - } - conn, err := t.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - uri := from.URI() - file := conn.AddFile(ctx, uri) - if file.err != nil { - return file.err - } - actions, err := conn.CodeAction(ctx, &protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - var edits []protocol.TextEdit - for _, a := range actions { - if a.Title != "Organize Imports" { - continue - } - for _, c := range a.Edit.DocumentChanges { - if fileURI(c.TextDocument.URI) == uri { - edits = append(edits, c.Edits...) - } - } - } - sedits, err := source.FromProtocolEdits(file.mapper, edits) - if err != nil { - return fmt.Errorf("%v: %v", edits, err) - } - newContent := diff.ApplyEdits(string(file.mapper.Content), sedits) - - filename := file.uri.Filename() - switch { - case t.Write: - if len(edits) > 0 { - ioutil.WriteFile(filename, []byte(newContent), 0644) - } - case t.Diff: - diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) - fmt.Print(diffs) - default: - fmt.Print(string(newContent)) - } - return nil -} diff --git a/internal/lsp/cmd/info.go b/internal/lsp/cmd/info.go deleted file mode 100644 index 8e581a37cb1..00000000000 --- a/internal/lsp/cmd/info.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "bytes" - "context" - "encoding/json" - "flag" - "fmt" - "net/url" - "os" - "strings" - - "golang.org/x/tools/internal/lsp/browser" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/tool" -) - -// help implements the help command. -type help struct { - app *Application -} - -func (h *help) Name() string { return "help" } -func (h *help) Parent() string { return h.app.Name() } -func (h *help) Usage() string { return "" } -func (h *help) ShortHelp() string { return "print usage information for subcommands" } -func (h *help) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` - -Examples: -$ gopls help # main gopls help message -$ gopls help remote # help on 'remote' command -$ gopls help remote sessions # help on 'remote sessions' subcommand -`) - printFlagDefaults(f) -} - -// Run prints help information about a subcommand. -func (h *help) Run(ctx context.Context, args ...string) error { - find := func(cmds []tool.Application, name string) tool.Application { - for _, cmd := range cmds { - if cmd.Name() == name { - return cmd - } - } - return nil - } - - // Find the subcommand denoted by args (empty => h.app). - var cmd tool.Application = h.app - for i, arg := range args { - cmd = find(getSubcommands(cmd), arg) - if cmd == nil { - return tool.CommandLineErrorf( - "no such subcommand: %s", strings.Join(args[:i+1], " ")) - } - } - - // 'gopls help cmd subcmd' is equivalent to 'gopls cmd subcmd -h'. - // The flag package prints the usage information (defined by tool.Run) - // when it sees the -h flag. - fs := flag.NewFlagSet(cmd.Name(), flag.ExitOnError) - return tool.Run(ctx, fs, h.app, append(args[:len(args):len(args)], "-h")) -} - -// version implements the version command. -type version struct { - JSON bool `flag:"json" help:"outputs in json format."` - - app *Application -} - -func (v *version) Name() string { return "version" } -func (v *version) Parent() string { return v.app.Name() } -func (v *version) Usage() string { return "" } -func (v *version) ShortHelp() string { return "print the gopls version information" } -func (v *version) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ``) - printFlagDefaults(f) -} - -// Run prints version information to stdout. -func (v *version) Run(ctx context.Context, args ...string) error { - var mode = debug.PlainText - if v.JSON { - mode = debug.JSON - } - - return debug.PrintVersionInfo(ctx, os.Stdout, v.app.verbose(), mode) -} - -// bug implements the bug command. -type bug struct { - app *Application -} - -func (b *bug) Name() string { return "bug" } -func (b *bug) Parent() string { return b.app.Name() } -func (b *bug) Usage() string { return "" } -func (b *bug) ShortHelp() string { return "report a bug in gopls" } -func (b *bug) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ``) - printFlagDefaults(f) -} - -const goplsBugPrefix = "x/tools/gopls: " -const goplsBugHeader = `ATTENTION: Please answer these questions BEFORE submitting your issue. Thanks! - -#### What did you do? -If possible, provide a recipe for reproducing the error. -A complete runnable program is good. -A link on play.golang.org is better. -A failing unit test is the best. - -#### What did you expect to see? - - -#### What did you see instead? - - -` - -// Run collects some basic information and then prepares an issue ready to -// be reported. -func (b *bug) Run(ctx context.Context, args ...string) error { - buf := &bytes.Buffer{} - fmt.Fprint(buf, goplsBugHeader) - debug.PrintVersionInfo(ctx, buf, true, debug.Markdown) - body := buf.String() - title := strings.Join(args, " ") - if !strings.HasPrefix(title, goplsBugPrefix) { - title = goplsBugPrefix + title - } - if !browser.Open("https://github.com/golang/go/issues/new?title=" + url.QueryEscape(title) + "&body=" + url.QueryEscape(body)) { - fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n") - fmt.Print(body) - } - return nil -} - -type apiJSON struct { - app *Application -} - -func (j *apiJSON) Name() string { return "api-json" } -func (j *apiJSON) Parent() string { return j.app.Name() } -func (j *apiJSON) Usage() string { return "" } -func (j *apiJSON) ShortHelp() string { return "print json describing gopls API" } -func (j *apiJSON) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ``) - printFlagDefaults(f) -} - -func (j *apiJSON) Run(ctx context.Context, args ...string) error { - js, err := json.MarshalIndent(source.GeneratedAPIJSON, "", "\t") - if err != nil { - return err - } - fmt.Fprint(os.Stdout, string(js)) - return nil -} - -type licenses struct { - app *Application -} - -func (l *licenses) Name() string { return "licenses" } -func (l *licenses) Parent() string { return l.app.Name() } -func (l *licenses) Usage() string { return "" } -func (l *licenses) ShortHelp() string { return "print licenses of included software" } -func (l *licenses) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ``) - printFlagDefaults(f) -} - -const licensePreamble = ` -gopls is made available under the following BSD-style license: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -gopls implements the LSP specification, which is made available under the following license: - -Copyright (c) Microsoft Corporation - -All rights reserved. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT -OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -gopls also includes software made available under these licenses: -` - -func (l *licenses) Run(ctx context.Context, args ...string) error { - opts := source.DefaultOptions() - l.app.options(opts) - txt := licensePreamble - if opts.LicensesText == "" { - txt += "(development gopls, license information not available)" - } else { - txt += opts.LicensesText - } - fmt.Fprint(os.Stdout, txt) - return nil -} diff --git a/internal/lsp/cmd/links.go b/internal/lsp/cmd/links.go deleted file mode 100644 index 1c48c8c50b9..00000000000 --- a/internal/lsp/cmd/links.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// links implements the links verb for gopls. -type links struct { - JSON bool `flag:"json" help:"emit document links in JSON format"` - - app *Application -} - -func (l *links) Name() string { return "links" } -func (l *links) Parent() string { return l.app.Name() } -func (l *links) Usage() string { return "[links-flags] " } -func (l *links) ShortHelp() string { return "list links in a file" } -func (l *links) DetailedHelp(f *flag.FlagSet) { - fmt.Fprintf(f.Output(), ` -Example: list links contained within a file: - - $ gopls links internal/lsp/cmd/check.go - -links-flags: -`) - printFlagDefaults(f) -} - -// Run finds all the links within a document -// - if -json is specified, outputs location range and uri -// - otherwise, prints the a list of unique links -func (l *links) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("links expects 1 argument") - } - conn, err := l.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - uri := from.URI() - file := conn.AddFile(ctx, uri) - if file.err != nil { - return file.err - } - results, err := conn.DocumentLink(ctx, &protocol.DocumentLinkParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - if l.JSON { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", "\t") - return enc.Encode(results) - } - for _, v := range results { - fmt.Println(v.Target) - } - return nil -} diff --git a/internal/lsp/cmd/prepare_rename.go b/internal/lsp/cmd/prepare_rename.go deleted file mode 100644 index 44a192b5be3..00000000000 --- a/internal/lsp/cmd/prepare_rename.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "errors" - "flag" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// prepareRename implements the prepare_rename verb for gopls. -type prepareRename struct { - app *Application -} - -func (r *prepareRename) Name() string { return "prepare_rename" } -func (r *prepareRename) Parent() string { return r.app.Name() } -func (r *prepareRename) Usage() string { return "" } -func (r *prepareRename) ShortHelp() string { return "test validity of a rename operation at location" } -func (r *prepareRename) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls prepare_rename helper/helper.go:8:6 - $ gopls prepare_rename helper/helper.go:#53 -`) - printFlagDefaults(f) -} - -// ErrInvalidRenamePosition is returned when prepareRename is run at a position that -// is not a candidate for renaming. -var ErrInvalidRenamePosition = errors.New("request is not valid at the given position") - -func (r *prepareRename) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("prepare_rename expects 1 argument (file)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - p := protocol.PrepareRenameParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - result, err := conn.PrepareRename(ctx, &p) - if err != nil { - return fmt.Errorf("prepare_rename failed: %w", err) - } - if result == nil { - return ErrInvalidRenamePosition - } - - l := protocol.Location{Range: result.Range} - s, err := file.mapper.Span(l) - if err != nil { - return err - } - - fmt.Println(s) - return nil -} diff --git a/internal/lsp/cmd/references.go b/internal/lsp/cmd/references.go deleted file mode 100644 index 0697d2e11b7..00000000000 --- a/internal/lsp/cmd/references.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "sort" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// references implements the references verb for gopls -type references struct { - IncludeDeclaration bool `flag:"d,declaration" help:"include the declaration of the specified identifier in the results"` - - app *Application -} - -func (r *references) Name() string { return "references" } -func (r *references) Parent() string { return r.app.Name() } -func (r *references) Usage() string { return "[references-flags] " } -func (r *references) ShortHelp() string { return "display selected identifier's references" } -func (r *references) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls references helper/helper.go:8:6 - $ gopls references helper/helper.go:#53 - -references-flags: -`) - printFlagDefaults(f) -} - -func (r *references) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("references expects 1 argument (position)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - p := protocol.ReferenceParams{ - Context: protocol.ReferenceContext{ - IncludeDeclaration: r.IncludeDeclaration, - }, - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - locations, err := conn.References(ctx, &p) - if err != nil { - return err - } - var spans []string - for _, l := range locations { - f := conn.AddFile(ctx, fileURI(l.URI)) - // convert location to span for user-friendly 1-indexed line - // and column numbers - span, err := f.mapper.Span(l) - if err != nil { - return err - } - spans = append(spans, fmt.Sprint(span)) - } - - sort.Strings(spans) - for _, s := range spans { - fmt.Println(s) - } - return nil -} diff --git a/internal/lsp/cmd/rename.go b/internal/lsp/cmd/rename.go deleted file mode 100644 index 9411275949f..00000000000 --- a/internal/lsp/cmd/rename.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// rename implements the rename verb for gopls. -type rename struct { - Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` - Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` - Preserve bool `flag:"preserve" help:"preserve original files"` - - app *Application -} - -func (r *rename) Name() string { return "rename" } -func (r *rename) Parent() string { return r.app.Name() } -func (r *rename) Usage() string { return "[rename-flags] " } -func (r *rename) ShortHelp() string { return "rename selected identifier" } -func (r *rename) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-based location (:line:column or :#position) of the thing to change - $ gopls rename helper/helper.go:8:6 Foo - $ gopls rename helper/helper.go:#53 Foo - -rename-flags: -`) - printFlagDefaults(f) -} - -// Run renames the specified identifier and either; -// - if -w is specified, updates the file(s) in place; -// - if -d is specified, prints out unified diffs of the changes; or -// - otherwise, prints the new versions to stdout. -func (r *rename) Run(ctx context.Context, args ...string) error { - if len(args) != 2 { - return tool.CommandLineErrorf("definition expects 2 arguments (position, new name)") - } - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - p := protocol.RenameParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - NewName: args[1], - } - edit, err := conn.Rename(ctx, &p) - if err != nil { - return err - } - var orderedURIs []string - edits := map[span.URI][]protocol.TextEdit{} - for _, c := range edit.DocumentChanges { - uri := fileURI(c.TextDocument.URI) - edits[uri] = append(edits[uri], c.Edits...) - orderedURIs = append(orderedURIs, string(uri)) - } - sort.Strings(orderedURIs) - changeCount := len(orderedURIs) - - for _, u := range orderedURIs { - uri := span.URIFromURI(u) - cmdFile := conn.AddFile(ctx, uri) - filename := cmdFile.uri.Filename() - - // convert LSP-style edits to []diff.TextEdit cuz Spans are handy - renameEdits, err := source.FromProtocolEdits(cmdFile.mapper, edits[uri]) - if err != nil { - return fmt.Errorf("%v: %v", edits, err) - } - newContent := diff.ApplyEdits(string(cmdFile.mapper.Content), renameEdits) - - switch { - case r.Write: - fmt.Fprintln(os.Stderr, filename) - if r.Preserve { - if err := os.Rename(filename, filename+".orig"); err != nil { - return fmt.Errorf("%v: %v", edits, err) - } - } - ioutil.WriteFile(filename, []byte(newContent), 0644) - case r.Diff: - diffs := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits) - fmt.Print(diffs) - default: - if len(orderedURIs) > 1 { - fmt.Printf("%s:\n", filepath.Base(filename)) - } - fmt.Print(string(newContent)) - if changeCount > 1 { // if this wasn't last change, print newline - fmt.Println() - } - changeCount -= 1 - } - } - return nil -} diff --git a/internal/lsp/cmd/serve.go b/internal/lsp/cmd/serve.go deleted file mode 100644 index 1c229a422b4..00000000000 --- a/internal/lsp/cmd/serve.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "errors" - "flag" - "fmt" - "io" - "log" - "os" - "time" - - "golang.org/x/tools/internal/fakenet" - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/lsprpc" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/tool" -) - -// Serve is a struct that exposes the configurable parts of the LSP server as -// flags, in the right form for tool.Main to consume. -type Serve struct { - Logfile string `flag:"logfile" help:"filename to log to. if value is \"auto\", then logging to a default output file is enabled"` - Mode string `flag:"mode" help:"no effect"` - Port int `flag:"port" help:"port on which to run gopls for debugging purposes"` - Address string `flag:"listen" help:"address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used."` - IdleTimeout time.Duration `flag:"listen.timeout" help:"when used with -listen, shut down the server when there are no connected clients for this duration"` - Trace bool `flag:"rpc.trace" help:"print the full rpc trace in lsp inspector format"` - Debug string `flag:"debug" help:"serve debug information on the supplied address"` - - RemoteListenTimeout time.Duration `flag:"remote.listen.timeout" help:"when used with -remote=auto, the -listen.timeout value used to start the daemon"` - RemoteDebug string `flag:"remote.debug" help:"when used with -remote=auto, the -debug value used to start the daemon"` - RemoteLogfile string `flag:"remote.logfile" help:"when used with -remote=auto, the -logfile value used to start the daemon"` - - app *Application -} - -func (s *Serve) Name() string { return "serve" } -func (s *Serve) Parent() string { return s.app.Name() } -func (s *Serve) Usage() string { return "[server-flags]" } -func (s *Serve) ShortHelp() string { - return "run a server for Go code using the Language Server Protocol" -} -func (s *Serve) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` gopls [flags] [server-flags] - -The server communicates using JSONRPC2 on stdin and stdout, and is intended to be run directly as -a child of an editor process. - -server-flags: -`) - printFlagDefaults(f) -} - -func (s *Serve) remoteArgs(network, address string) []string { - args := []string{"serve", - "-listen", fmt.Sprintf(`%s;%s`, network, address), - } - if s.RemoteDebug != "" { - args = append(args, "-debug", s.RemoteDebug) - } - if s.RemoteListenTimeout != 0 { - args = append(args, "-listen.timeout", s.RemoteListenTimeout.String()) - } - if s.RemoteLogfile != "" { - args = append(args, "-logfile", s.RemoteLogfile) - } - return args -} - -// Run configures a server based on the flags, and then runs it. -// It blocks until the server shuts down. -func (s *Serve) Run(ctx context.Context, args ...string) error { - if len(args) > 0 { - return tool.CommandLineErrorf("server does not take arguments, got %v", args) - } - - di := debug.GetInstance(ctx) - isDaemon := s.Address != "" || s.Port != 0 - if di != nil { - closeLog, err := di.SetLogFile(s.Logfile, isDaemon) - if err != nil { - return err - } - defer closeLog() - di.ServerAddress = s.Address - di.MonitorMemory(ctx) - di.Serve(ctx, s.Debug) - } - var ss jsonrpc2.StreamServer - if s.app.Remote != "" { - var err error - ss, err = lsprpc.NewForwarder(s.app.Remote, s.remoteArgs) - if err != nil { - return fmt.Errorf("creating forwarder: %w", err) - } - } else { - ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon) - } - - var network, addr string - if s.Address != "" { - network, addr = lsprpc.ParseAddr(s.Address) - } - if s.Port != 0 { - network = "tcp" - addr = fmt.Sprintf(":%v", s.Port) - } - if addr != "" { - log.Printf("Gopls daemon: listening on %s network, address %s...", network, addr) - defer log.Printf("Gopls daemon: exiting") - return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout) - } - stream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout)) - if s.Trace && di != nil { - stream = protocol.LoggingStream(stream, di.LogWriter) - } - conn := jsonrpc2.NewConn(stream) - err := ss.ServeStream(ctx, conn) - if errors.Is(err, io.EOF) { - return nil - } - return err -} diff --git a/internal/lsp/cmd/signature.go b/internal/lsp/cmd/signature.go deleted file mode 100644 index db948430183..00000000000 --- a/internal/lsp/cmd/signature.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// signature implements the signature verb for gopls -type signature struct { - app *Application -} - -func (r *signature) Name() string { return "signature" } -func (r *signature) Parent() string { return r.app.Name() } -func (r *signature) Usage() string { return "" } -func (r *signature) ShortHelp() string { return "display selected identifier's signature" } -func (r *signature) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ # 1-indexed location (:line:column or :#offset) of the target identifier - $ gopls signature helper/helper.go:8:6 - $ gopls signature helper/helper.go:#53 -`) - printFlagDefaults(f) -} - -func (r *signature) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("signature expects 1 argument (position)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - file := conn.AddFile(ctx, from.URI()) - if file.err != nil { - return file.err - } - - loc, err := file.mapper.Location(from) - if err != nil { - return err - } - - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(from.URI()), - }, - Position: loc.Range.Start, - } - p := protocol.SignatureHelpParams{ - TextDocumentPositionParams: tdpp, - } - - s, err := conn.SignatureHelp(ctx, &p) - if err != nil { - return err - } - - if s == nil || len(s.Signatures) == 0 { - return tool.CommandLineErrorf("%v: not a function", from) - } - - // there is only ever one possible signature, - // see toProtocolSignatureHelp in lsp/signature_help.go - signature := s.Signatures[0] - fmt.Printf("%s\n", signature.Label) - if signature.Documentation != "" { - fmt.Printf("\n%s\n", signature.Documentation) - } - - return nil -} diff --git a/internal/lsp/cmd/suggested_fix.go b/internal/lsp/cmd/suggested_fix.go deleted file mode 100644 index c6f26e2d685..00000000000 --- a/internal/lsp/cmd/suggested_fix.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// suggestedFix implements the fix verb for gopls. -type suggestedFix struct { - Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"` - Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"` - All bool `flag:"a,all" help:"apply all fixes, not just preferred fixes"` - - app *Application -} - -func (s *suggestedFix) Name() string { return "fix" } -func (s *suggestedFix) Parent() string { return s.app.Name() } -func (s *suggestedFix) Usage() string { return "[fix-flags] " } -func (s *suggestedFix) ShortHelp() string { return "apply suggested fixes" } -func (s *suggestedFix) DetailedHelp(f *flag.FlagSet) { - fmt.Fprintf(f.Output(), ` -Example: apply suggested fixes for this file - $ gopls fix -w internal/lsp/cmd/check.go - -fix-flags: -`) - printFlagDefaults(f) -} - -// Run performs diagnostic checks on the file specified and either; -// - if -w is specified, updates the file in place; -// - if -d is specified, prints out unified diffs of the changes; or -// - otherwise, prints the new versions to stdout. -func (s *suggestedFix) Run(ctx context.Context, args ...string) error { - if len(args) < 1 { - return tool.CommandLineErrorf("fix expects at least 1 argument") - } - conn, err := s.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - uri := from.URI() - file := conn.AddFile(ctx, uri) - if file.err != nil { - return file.err - } - - if err := conn.diagnoseFiles(ctx, []span.URI{uri}); err != nil { - return err - } - conn.Client.filesMu.Lock() - defer conn.Client.filesMu.Unlock() - - codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix} - if len(args) > 1 { - codeActionKinds = []protocol.CodeActionKind{} - for _, k := range args[1:] { - codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k)) - } - } - - rng, err := file.mapper.Range(from) - if err != nil { - return err - } - p := protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - Context: protocol.CodeActionContext{ - Only: codeActionKinds, - Diagnostics: file.diagnostics, - }, - Range: rng, - } - actions, err := conn.CodeAction(ctx, &p) - if err != nil { - return fmt.Errorf("%v: %v", from, err) - } - var edits []protocol.TextEdit - for _, a := range actions { - if a.Command != nil { - return fmt.Errorf("ExecuteCommand is not yet supported on the command line") - } - if !a.IsPreferred && !s.All { - continue - } - if !from.HasPosition() { - for _, c := range a.Edit.DocumentChanges { - if fileURI(c.TextDocument.URI) == uri { - edits = append(edits, c.Edits...) - } - } - continue - } - // If the span passed in has a position, then we need to find - // the codeaction that has the same range as the passed in span. - for _, diag := range a.Diagnostics { - spn, err := file.mapper.RangeSpan(diag.Range) - if err != nil { - continue - } - if span.ComparePoint(from.Start(), spn.Start()) == 0 { - for _, c := range a.Edit.DocumentChanges { - if fileURI(c.TextDocument.URI) == uri { - edits = append(edits, c.Edits...) - } - } - break - } - } - - // If suggested fix is not a diagnostic, still must collect edits. - if len(a.Diagnostics) == 0 { - for _, c := range a.Edit.DocumentChanges { - if fileURI(c.TextDocument.URI) == uri { - edits = append(edits, c.Edits...) - } - } - } - } - - sedits, err := source.FromProtocolEdits(file.mapper, edits) - if err != nil { - return fmt.Errorf("%v: %v", edits, err) - } - newContent := diff.ApplyEdits(string(file.mapper.Content), sedits) - - filename := file.uri.Filename() - switch { - case s.Write: - if len(edits) > 0 { - ioutil.WriteFile(filename, []byte(newContent), 0644) - } - case s.Diff: - diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits) - fmt.Print(diffs) - default: - fmt.Print(string(newContent)) - } - return nil -} diff --git a/internal/lsp/cmd/symbols.go b/internal/lsp/cmd/symbols.go deleted file mode 100644 index b43a6dcd1f7..00000000000 --- a/internal/lsp/cmd/symbols.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "sort" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -// symbols implements the symbols verb for gopls -type symbols struct { - app *Application -} - -func (r *symbols) Name() string { return "symbols" } -func (r *symbols) Parent() string { return r.app.Name() } -func (r *symbols) Usage() string { return "" } -func (r *symbols) ShortHelp() string { return "display selected file's symbols" } -func (r *symbols) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - $ gopls symbols helper/helper.go -`) - printFlagDefaults(f) -} -func (r *symbols) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("symbols expects 1 argument (position)") - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - from := span.Parse(args[0]) - p := protocol.DocumentSymbolParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(from.URI()), - }, - } - symbols, err := conn.DocumentSymbol(ctx, &p) - if err != nil { - return err - } - for _, s := range symbols { - if m, ok := s.(map[string]interface{}); ok { - s, err = mapToSymbol(m) - if err != nil { - return err - } - } - switch t := s.(type) { - case protocol.DocumentSymbol: - printDocumentSymbol(t) - case protocol.SymbolInformation: - printSymbolInformation(t) - } - } - return nil -} - -func mapToSymbol(m map[string]interface{}) (interface{}, error) { - b, err := json.Marshal(m) - if err != nil { - return nil, err - } - - if _, ok := m["selectionRange"]; ok { - var s protocol.DocumentSymbol - if err := json.Unmarshal(b, &s); err != nil { - return nil, err - } - return s, nil - } - - var s protocol.SymbolInformation - if err := json.Unmarshal(b, &s); err != nil { - return nil, err - } - return s, nil -} - -func printDocumentSymbol(s protocol.DocumentSymbol) { - fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.SelectionRange)) - // Sort children for consistency - sort.Slice(s.Children, func(i, j int) bool { - return s.Children[i].Name < s.Children[j].Name - }) - for _, c := range s.Children { - fmt.Printf("\t%s %s %s\n", c.Name, c.Kind, positionToString(c.SelectionRange)) - } -} - -func printSymbolInformation(s protocol.SymbolInformation) { - fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.Location.Range)) -} - -func positionToString(r protocol.Range) string { - return fmt.Sprintf("%v:%v-%v:%v", - r.Start.Line+1, - r.Start.Character+1, - r.End.Line+1, - r.End.Character+1, - ) -} diff --git a/internal/lsp/cmd/test/call_hierarchy.go b/internal/lsp/cmd/test/call_hierarchy.go deleted file mode 100644 index 38f8ed707a4..00000000000 --- a/internal/lsp/cmd/test/call_hierarchy.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "sort" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) { - collectCallSpansString := func(callItems []protocol.CallHierarchyItem) string { - var callSpans []string - for _, call := range callItems { - mapper, err := r.data.Mapper(call.URI.SpanURI()) - if err != nil { - t.Fatal(err) - } - callSpan, err := mapper.Span(protocol.Location{URI: call.URI, Range: call.Range}) - if err != nil { - t.Fatal(err) - } - callSpans = append(callSpans, fmt.Sprint(callSpan)) - } - // to make tests deterministic - sort.Strings(callSpans) - return r.Normalize(strings.Join(callSpans, "\n")) - } - - expectIn, expectOut := collectCallSpansString(expectedCalls.IncomingCalls), collectCallSpansString(expectedCalls.OutgoingCalls) - expectIdent := r.Normalize(fmt.Sprint(spn)) - - uri := spn.URI() - filename := uri.Filename() - target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column()) - - got, stderr := r.NormalizeGoplsCmd(t, "call_hierarchy", target) - if stderr != "" { - t.Fatalf("call_hierarchy failed for %s: %s", target, stderr) - } - - gotIn, gotIdent, gotOut := cleanCallHierarchyCmdResult(got) - if expectIn != gotIn { - t.Errorf("incoming calls call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectIn, gotIn) - } - if expectIdent != gotIdent { - t.Errorf("call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectIdent, gotIdent) - } - if expectOut != gotOut { - t.Errorf("outgoing calls call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectOut, gotOut) - } - -} - -// parses function URI and Range from call hierarchy cmd output to -// incoming, identifier and outgoing calls (returned in that order) -// ex: "identifier: function d at .../callhierarchy/callhierarchy.go:19:6-7" -> ".../callhierarchy/callhierarchy.go:19:6-7" -func cleanCallHierarchyCmdResult(output string) (incoming, ident, outgoing string) { - var incomingCalls, outgoingCalls []string - for _, out := range strings.Split(output, "\n") { - if out == "" { - continue - } - - callLocation := out[strings.LastIndex(out, " ")+1:] - if strings.HasPrefix(out, "caller") { - incomingCalls = append(incomingCalls, callLocation) - } else if strings.HasPrefix(out, "callee") { - outgoingCalls = append(outgoingCalls, callLocation) - } else { - ident = callLocation - } - } - sort.Strings(incomingCalls) - sort.Strings(outgoingCalls) - incoming, outgoing = strings.Join(incomingCalls, "\n"), strings.Join(outgoingCalls, "\n") - return -} diff --git a/internal/lsp/cmd/test/check.go b/internal/lsp/cmd/test/check.go deleted file mode 100644 index 6a53925051f..00000000000 --- a/internal/lsp/cmd/test/check.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "io/ioutil" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) { - if len(want) == 1 && want[0].Message == "" { - return - } - fname := uri.Filename() - out, _ := r.runGoplsCmd(t, "check", fname) - // parse got into a collection of reports - got := map[string]struct{}{} - for _, l := range strings.Split(out, "\n") { - if len(l) == 0 { - continue - } - // parse and reprint to normalize the span - bits := strings.SplitN(l, ": ", 2) - if len(bits) == 2 { - spn := span.Parse(strings.TrimSpace(bits[0])) - spn = span.New(spn.URI(), spn.Start(), span.Point{}) - data, err := ioutil.ReadFile(fname) - if err != nil { - t.Fatal(err) - } - converter := span.NewTokenFile(fname, data) - s, err := spn.WithPosition(converter) - if err != nil { - t.Fatal(err) - } - l = fmt.Sprintf("%s: %s", s, strings.TrimSpace(bits[1])) - } - got[r.NormalizePrefix(l)] = struct{}{} - } - for _, diag := range want { - expect := fmt.Sprintf("%v:%v:%v: %v", uri.Filename(), diag.Range.Start.Line+1, diag.Range.Start.Character+1, diag.Message) - if diag.Range.Start.Character == 0 { - expect = fmt.Sprintf("%v:%v: %v", uri.Filename(), diag.Range.Start.Line+1, diag.Message) - } - expect = r.NormalizePrefix(expect) - _, found := got[expect] - if !found { - t.Errorf("missing diagnostic %q, %v", expect, got) - } else { - delete(got, expect) - } - } - for extra := range got { - t.Errorf("extra diagnostic %q", extra) - } -} diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go deleted file mode 100644 index 312f7b8b435..00000000000 --- a/internal/lsp/cmd/test/cmdtest.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cmdtest contains the test suite for the command line behavior of gopls. -package cmdtest - -import ( - "bytes" - "context" - "flag" - "fmt" - "io" - "os" - "sync" - "testing" - - "golang.org/x/tools/internal/jsonrpc2/servertest" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/cmd" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/lsprpc" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/tool" -) - -type runner struct { - data *tests.Data - ctx context.Context - options func(*source.Options) - normalizers []tests.Normalizer - remote string -} - -func TestCommandLine(t *testing.T, testdata string, options func(*source.Options)) { - // On Android, the testdata directory is not copied to the runner. - if stat, err := os.Stat(testdata); err != nil || !stat.IsDir() { - t.Skip("testdata directory not present") - } - tests.RunTests(t, testdata, false, func(t *testing.T, datum *tests.Data) { - ctx := tests.Context(t) - ts := NewTestServer(ctx, options) - tests.Run(t, NewRunner(datum, ctx, ts.Addr, options), datum) - cmd.CloseTestConnections(ctx) - }) -} - -func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer { - ctx = debug.WithInstance(ctx, "", "") - cache := cache.New(options) - ss := lsprpc.NewStreamServer(cache, false) - return servertest.NewTCPServer(ctx, ss, nil) -} - -func NewRunner(data *tests.Data, ctx context.Context, remote string, options func(*source.Options)) *runner { - return &runner{ - data: data, - ctx: ctx, - options: options, - normalizers: tests.CollectNormalizers(data.Exported), - remote: remote, - } -} - -func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) { - //TODO: add command line completions tests when it works -} - -func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - //TODO: add command line completions tests when it works -} - -func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) { - //TODO: function extraction not supported on command line -} - -func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) { - //TODO: function extraction not supported on command line -} - -func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) { - //TODO: import addition not supported on command line -} - -func (r *runner) Hover(t *testing.T, spn span.Span, info string) { - //TODO: hovering not supported on command line -} - -func (r *runner) runGoplsCmd(t testing.TB, args ...string) (string, string) { - rStdout, wStdout, err := os.Pipe() - if err != nil { - t.Fatal(err) - } - oldStdout := os.Stdout - rStderr, wStderr, err := os.Pipe() - if err != nil { - t.Fatal(err) - } - oldStderr := os.Stderr - stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(stdout, rStdout) - wg.Done() - }() - go func() { - io.Copy(stderr, rStderr) - wg.Done() - }() - os.Stdout, os.Stderr = wStdout, wStderr - app := cmd.New("gopls-test", r.data.Config.Dir, r.data.Exported.Config.Env, r.options) - remote := r.remote - s := flag.NewFlagSet(app.Name(), flag.ExitOnError) - err = tool.Run(tests.Context(t), s, - app, - append([]string{fmt.Sprintf("-remote=internal@%s", remote)}, args...)) - if err != nil { - fmt.Fprint(os.Stderr, err) - } - wStdout.Close() - wStderr.Close() - wg.Wait() - os.Stdout, os.Stderr = oldStdout, oldStderr - rStdout.Close() - rStderr.Close() - return stdout.String(), stderr.String() -} - -// NormalizeGoplsCmd runs the gopls command and normalizes its output. -func (r *runner) NormalizeGoplsCmd(t testing.TB, args ...string) (string, string) { - stdout, stderr := r.runGoplsCmd(t, args...) - return r.Normalize(stdout), r.Normalize(stderr) -} - -func (r *runner) Normalize(s string) string { - return tests.Normalize(s, r.normalizers) -} - -func (r *runner) NormalizePrefix(s string) string { - return tests.NormalizePrefix(s, r.normalizers) -} diff --git a/internal/lsp/cmd/test/definition.go b/internal/lsp/cmd/test/definition.go deleted file mode 100644 index c82d9a6c1ae..00000000000 --- a/internal/lsp/cmd/test/definition.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "runtime" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -type godefMode int - -const ( - plainGodef = godefMode(1 << iota) - jsonGoDef -) - -var godefModes = []godefMode{ - plainGodef, - jsonGoDef, -} - -func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) { - if d.IsType || d.OnlyHover { - // TODO: support type definition, hover queries - return - } - d.Src = span.New(d.Src.URI(), span.NewPoint(0, 0, d.Src.Start().Offset()), span.Point{}) - for _, mode := range godefModes { - args := []string{"definition", "-markdown"} - tag := d.Name + "-definition" - if mode&jsonGoDef != 0 { - tag += "-json" - args = append(args, "-json") - } - uri := d.Src.URI() - args = append(args, fmt.Sprint(d.Src)) - got, _ := r.NormalizeGoplsCmd(t, args...) - if mode&jsonGoDef != 0 && runtime.GOOS == "windows" { - got = strings.Replace(got, "file:///", "file://", -1) - } - expect := strings.TrimSpace(string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - }))) - if expect != "" && !strings.HasPrefix(got, expect) { - d, err := myers.ComputeEdits("", expect, got) - if err != nil { - t.Fatal(err) - } - t.Errorf("definition %v failed with %#v\n%s", tag, args, diff.ToUnified("expect", "got", expect, d)) - } - } -} diff --git a/internal/lsp/cmd/test/folding_range.go b/internal/lsp/cmd/test/folding_range.go deleted file mode 100644 index 4478687b549..00000000000 --- a/internal/lsp/cmd/test/folding_range.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "testing" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) FoldingRanges(t *testing.T, spn span.Span) { - goldenTag := "foldingRange-cmd" - uri := spn.URI() - filename := uri.Filename() - got, _ := r.NormalizeGoplsCmd(t, "folding_ranges", filename) - expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) { - return []byte(got), nil - })) - - if expect != got { - t.Errorf("folding_ranges failed failed for %s expected:\n%s\ngot:\n%s", filename, expect, got) - } -} diff --git a/internal/lsp/cmd/test/format.go b/internal/lsp/cmd/test/format.go deleted file mode 100644 index 77eedd440e4..00000000000 --- a/internal/lsp/cmd/test/format.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "bytes" - exec "golang.org/x/sys/execabs" - "io/ioutil" - "os" - "regexp" - "strings" - "testing" - - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/testenv" -) - -func (r *runner) Format(t *testing.T, spn span.Span) { - tag := "gofmt" - uri := spn.URI() - filename := uri.Filename() - expect := string(r.data.Golden(tag, filename, func() ([]byte, error) { - cmd := exec.Command("gofmt", filename) - contents, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files - contents = []byte(r.Normalize(fixFileHeader(string(contents)))) - return contents, nil - })) - if expect == "" { - //TODO: our error handling differs, for now just skip unformattable files - t.Skip("Unformattable file") - } - got, _ := r.NormalizeGoplsCmd(t, "format", filename) - if expect != got { - t.Errorf("format failed for %s expected:\n%s\ngot:\n%s", filename, expect, got) - } - // now check we can build a valid unified diff - unified, _ := r.NormalizeGoplsCmd(t, "format", "-d", filename) - checkUnified(t, filename, expect, unified) -} - -var unifiedHeader = regexp.MustCompile(`^diff -u.*\n(---\s+\S+\.go\.orig)\s+[\d-:. ]+(\n\+\+\+\s+\S+\.go)\s+[\d-:. ]+(\n@@)`) - -func fixFileHeader(s string) string { - match := unifiedHeader.FindStringSubmatch(s) - if match == nil { - return s - } - return strings.Join(append(match[1:], s[len(match[0]):]), "") -} - -func checkUnified(t *testing.T, filename string, expect string, patch string) { - testenv.NeedsTool(t, "patch") - if strings.Count(patch, "\n+++ ") > 1 { - // TODO(golang/go/#34580) - t.Skip("multi-file patch tests not supported yet") - } - applied := "" - if patch == "" { - applied = expect - } else { - temp, err := ioutil.TempFile("", "applied") - if err != nil { - t.Fatal(err) - } - temp.Close() - defer os.Remove(temp.Name()) - cmd := exec.Command("patch", "-u", "-p0", "-o", temp.Name(), filename) - cmd.Stdin = bytes.NewBuffer([]byte(patch)) - msg, err := cmd.CombinedOutput() - if err != nil { - t.Errorf("failed applying patch to %s: %v\ngot:\n%s\npatch:\n%s", filename, err, msg, patch) - return - } - out, err := ioutil.ReadFile(temp.Name()) - if err != nil { - t.Errorf("failed reading patched output for %s: %v\n", filename, err) - return - } - applied = string(out) - } - if expect != applied { - t.Errorf("apply unified gave wrong result for %s expected:\n%s\ngot:\n%s\npatch:\n%s", filename, expect, applied, patch) - } -} diff --git a/internal/lsp/cmd/test/highlight.go b/internal/lsp/cmd/test/highlight.go deleted file mode 100644 index 99e8b2c3fc7..00000000000 --- a/internal/lsp/cmd/test/highlight.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "testing" - - "fmt" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) Highlight(t *testing.T, spn span.Span, spans []span.Span) { - var expect string - for _, l := range spans { - expect += fmt.Sprintln(l) - } - expect = r.Normalize(expect) - - uri := spn.URI() - filename := uri.Filename() - target := filename + ":" + fmt.Sprint(spn.Start().Line()) + ":" + fmt.Sprint(spn.Start().Column()) - got, _ := r.NormalizeGoplsCmd(t, "highlight", target) - if expect != got { - t.Errorf("highlight failed for %s expected:\n%s\ngot:\n%s", target, expect, got) - } -} diff --git a/internal/lsp/cmd/test/implementation.go b/internal/lsp/cmd/test/implementation.go deleted file mode 100644 index 189452466ce..00000000000 --- a/internal/lsp/cmd/test/implementation.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "sort" - "testing" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) Implementation(t *testing.T, spn span.Span, imps []span.Span) { - var itemStrings []string - for _, i := range imps { - itemStrings = append(itemStrings, fmt.Sprint(i)) - } - sort.Strings(itemStrings) - var expect string - for _, i := range itemStrings { - expect += i + "\n" - } - expect = r.Normalize(expect) - - uri := spn.URI() - filename := uri.Filename() - target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column()) - - got, stderr := r.NormalizeGoplsCmd(t, "implementation", target) - if stderr != "" { - t.Errorf("implementation failed for %s: %s", target, stderr) - } else if expect != got { - t.Errorf("implementation failed for %s expected:\n%s\ngot:\n%s", target, expect, got) - } -} diff --git a/internal/lsp/cmd/test/imports.go b/internal/lsp/cmd/test/imports.go deleted file mode 100644 index ce8aee55dfa..00000000000 --- a/internal/lsp/cmd/test/imports.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/span" -) - -func (r *runner) Import(t *testing.T, spn span.Span) { - uri := spn.URI() - filename := uri.Filename() - got, _ := r.NormalizeGoplsCmd(t, "imports", filename) - want := string(r.data.Golden("goimports", filename, func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - d, err := myers.ComputeEdits(uri, want, got) - if err != nil { - t.Fatal(err) - } - t.Errorf("imports failed for %s, expected:\n%s", filename, diff.ToUnified("want", "got", want, d)) - } -} diff --git a/internal/lsp/cmd/test/links.go b/internal/lsp/cmd/test/links.go deleted file mode 100644 index 88df768323a..00000000000 --- a/internal/lsp/cmd/test/links.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "encoding/json" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) { - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - out, _ := r.NormalizeGoplsCmd(t, "links", "-json", uri.Filename()) - var got []protocol.DocumentLink - err = json.Unmarshal([]byte(out), &got) - if err != nil { - t.Fatal(err) - } - if diff := tests.DiffLinks(m, wantLinks, got); diff != "" { - t.Error(diff) - } -} diff --git a/internal/lsp/cmd/test/prepare_rename.go b/internal/lsp/cmd/test/prepare_rename.go deleted file mode 100644 index b5359e57b42..00000000000 --- a/internal/lsp/cmd/test/prepare_rename.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/lsp/cmd" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) { - m, err := r.data.Mapper(src.URI()) - if err != nil { - t.Errorf("prepare_rename failed: %v", err) - } - - var ( - target = fmt.Sprintf("%v", src) - args = []string{"prepare_rename", target} - stdOut, stdErr = r.NormalizeGoplsCmd(t, args...) - expect string - ) - - if want.Text == "" { - if stdErr != "" && stdErr != cmd.ErrInvalidRenamePosition.Error() { - t.Errorf("prepare_rename failed for %s,\nexpected:\n`%v`\ngot:\n`%v`", target, expect, stdErr) - } - return - } - - ws, err := m.Span(protocol.Location{Range: want.Range}) - if err != nil { - t.Errorf("prepare_rename failed: %v", err) - } - - expect = r.Normalize(fmt.Sprintln(ws)) - if expect != stdOut { - t.Errorf("prepare_rename failed for %s expected:\n`%s`\ngot:\n`%s`\n", target, expect, stdOut) - } -} diff --git a/internal/lsp/cmd/test/references.go b/internal/lsp/cmd/test/references.go deleted file mode 100644 index 66d0d066286..00000000000 --- a/internal/lsp/cmd/test/references.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "sort" - "testing" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) References(t *testing.T, spn span.Span, itemList []span.Span) { - for _, includeDeclaration := range []bool{true, false} { - t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) { - var itemStrings []string - for i, s := range itemList { - // We don't want the first result if we aren't including the declaration. - if i == 0 && !includeDeclaration { - continue - } - itemStrings = append(itemStrings, fmt.Sprint(s)) - } - sort.Strings(itemStrings) - var expect string - for _, s := range itemStrings { - expect += s + "\n" - } - expect = r.Normalize(expect) - - uri := spn.URI() - filename := uri.Filename() - target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column()) - args := []string{"references"} - if includeDeclaration { - args = append(args, "-d") - } - args = append(args, target) - got, stderr := r.NormalizeGoplsCmd(t, args...) - if stderr != "" { - t.Errorf("references failed for %s: %s", target, stderr) - } else if expect != got { - t.Errorf("references failed for %s expected:\n%s\ngot:\n%s", target, expect, got) - } - }) - } -} diff --git a/internal/lsp/cmd/test/rename.go b/internal/lsp/cmd/test/rename.go deleted file mode 100644 index 0fe2d1e1825..00000000000 --- a/internal/lsp/cmd/test/rename.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) Rename(t *testing.T, spn span.Span, newText string) { - filename := spn.URI().Filename() - goldenTag := newText + "-rename" - loc := fmt.Sprintf("%v", spn) - got, err := r.NormalizeGoplsCmd(t, "rename", loc, newText) - got += err - expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) { - return []byte(got), nil - })) - if expect != got { - t.Errorf("rename failed with %v %v\nexpected:\n%s\ngot:\n%s", loc, newText, expect, got) - } - // now check we can build a valid unified diff - unified, _ := r.NormalizeGoplsCmd(t, "rename", "-d", loc, newText) - checkUnified(t, filename, expect, unified) -} diff --git a/internal/lsp/cmd/test/semanticdriver.go b/internal/lsp/cmd/test/semanticdriver.go deleted file mode 100644 index 247f755bf20..00000000000 --- a/internal/lsp/cmd/test/semanticdriver.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "strings" - "testing" - - "golang.org/x/tools/internal/span" -) - -func (r *runner) SemanticTokens(t *testing.T, spn span.Span) { - uri := spn.URI() - filename := uri.Filename() - got, stderr := r.NormalizeGoplsCmd(t, "semtok", filename) - if stderr != "" { - t.Fatalf("%s: %q", filename, stderr) - } - want := string(r.data.Golden("semantic", filename, func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - lwant := strings.Split(want, "\n") - lgot := strings.Split(got, "\n") - t.Errorf("want(%d-%d) != got(%d-%d) for %s", len(want), len(lwant), len(got), len(lgot), r.Normalize(filename)) - for i := 0; i < len(lwant) && i < len(lgot); i++ { - if lwant[i] != lgot[i] { - // This is the line number in the golden file. - // It is one larger than the line number in the source file. - t.Errorf("line %d:\nwant%q\ngot %q\n", i+2, lwant[i], lgot[i]) - } - } - } -} diff --git a/internal/lsp/cmd/test/signature.go b/internal/lsp/cmd/test/signature.go deleted file mode 100644 index f6bdaebf312..00000000000 --- a/internal/lsp/cmd/test/signature.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) { - uri := spn.URI() - filename := uri.Filename() - target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column()) - got, _ := r.NormalizeGoplsCmd(t, "signature", target) - if want == nil { - if got != "" { - t.Fatalf("want nil, but got %s", got) - } - return - } - goldenTag := want.Signatures[0].Label + "-signature" - expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) { - return []byte(got), nil - })) - if tests.NormalizeAny(expect) != tests.NormalizeAny(got) { - t.Errorf("signature failed for %s expected:\n%q\ngot:\n%q'", filename, expect, got) - } -} diff --git a/internal/lsp/cmd/test/suggested_fix.go b/internal/lsp/cmd/test/suggested_fix.go deleted file mode 100644 index c819e051735..00000000000 --- a/internal/lsp/cmd/test/suggested_fix.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { - uri := spn.URI() - filename := uri.Filename() - args := []string{"fix", "-a", fmt.Sprintf("%s", spn)} - for _, kind := range actionKinds { - if kind == "refactor.rewrite" { - t.Skip("refactor.rewrite is not yet supported on the command line") - } - } - args = append(args, actionKinds...) - got, stderr := r.NormalizeGoplsCmd(t, args...) - if stderr == "ExecuteCommand is not yet supported on the command line" { - return // don't skip to keep the summary counts correct - } - want := string(r.data.Golden("suggestedfix_"+tests.SpanName(spn), filename, func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - t.Errorf("suggested fixes failed for %s:\n%s", filename, tests.Diff(t, want, got)) - } -} diff --git a/internal/lsp/cmd/test/symbols.go b/internal/lsp/cmd/test/symbols.go deleted file mode 100644 index 055be030829..00000000000 --- a/internal/lsp/cmd/test/symbols.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) { - filename := uri.Filename() - got, _ := r.NormalizeGoplsCmd(t, "symbols", filename) - expect := string(r.data.Golden("symbols", filename, func() ([]byte, error) { - return []byte(got), nil - })) - if expect != got { - t.Errorf("symbols failed for %s expected:\n%s\ngot:\n%s", filename, expect, got) - } -} diff --git a/internal/lsp/cmd/test/workspace_symbol.go b/internal/lsp/cmd/test/workspace_symbol.go deleted file mode 100644 index ce965f03a31..00000000000 --- a/internal/lsp/cmd/test/workspace_symbol.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmdtest - -import ( - "fmt" - "path/filepath" - "sort" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { - var matcher string - switch typ { - case tests.WorkspaceSymbolsFuzzy: - matcher = "fuzzy" - case tests.WorkspaceSymbolsCaseSensitive: - matcher = "caseSensitive" - case tests.WorkspaceSymbolsDefault: - matcher = "caseInsensitive" - } - r.runWorkspaceSymbols(t, uri, matcher, query) -} - -func (r *runner) runWorkspaceSymbols(t *testing.T, uri span.URI, matcher, query string) { - t.Helper() - - out, _ := r.runGoplsCmd(t, "workspace_symbol", "-matcher", matcher, query) - var filtered []string - dir := filepath.Dir(uri.Filename()) - for _, line := range strings.Split(out, "\n") { - if source.InDir(dir, line) { - filtered = append(filtered, filepath.ToSlash(line)) - } - } - sort.Strings(filtered) - got := r.Normalize(strings.Join(filtered, "\n") + "\n") - - expect := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if expect != got { - t.Errorf("workspace_symbol failed for %s:\n%s", query, tests.Diff(t, expect, got)) - } -} diff --git a/internal/lsp/cmd/usage/vulncheck.hlp b/internal/lsp/cmd/usage/vulncheck.hlp deleted file mode 100644 index 19a674b2ea7..00000000000 --- a/internal/lsp/cmd/usage/vulncheck.hlp +++ /dev/null @@ -1,15 +0,0 @@ -run experimental vulncheck analysis (experimental: under development) - -Usage: - gopls [flags] vulncheck - - WARNING: this command is experimental. - - By default, the command outputs a JSON-encoded - golang.org/x/tools/internal/lsp/command.VulncheckResult - message. - Example: - $ gopls vulncheck - - -config - If true, the command reads a JSON-encoded package load configuration from stdin diff --git a/internal/lsp/cmd/usage/workspace.hlp b/internal/lsp/cmd/usage/workspace.hlp deleted file mode 100644 index 912cf294610..00000000000 --- a/internal/lsp/cmd/usage/workspace.hlp +++ /dev/null @@ -1,7 +0,0 @@ -manage the gopls workspace (experimental: under development) - -Usage: - gopls [flags] workspace [arg]... - -Subcommand: - generate generate a gopls.mod file for a workspace diff --git a/internal/lsp/cmd/vulncheck.go b/internal/lsp/cmd/vulncheck.go deleted file mode 100644 index 4d245cecb60..00000000000 --- a/internal/lsp/cmd/vulncheck.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/tool" -) - -// vulncheck implements the vulncheck command. -type vulncheck struct { - Config bool `flag:"config" help:"If true, the command reads a JSON-encoded package load configuration from stdin"` - app *Application -} - -type pkgLoadConfig struct { - // BuildFlags is a list of command-line flags to be passed through to - // the build system's query tool. - BuildFlags []string - - // Env is the environment to use when invoking the build system's query tool. - // If Env is nil, the current environment is used. - Env []string - - // If Tests is set, the loader includes related test packages. - Tests bool -} - -// TODO(hyangah): document pkgLoadConfig - -func (v *vulncheck) Name() string { return "vulncheck" } -func (v *vulncheck) Parent() string { return v.app.Name() } -func (v *vulncheck) Usage() string { return "" } -func (v *vulncheck) ShortHelp() string { - return "run experimental vulncheck analysis (experimental: under development)" -} -func (v *vulncheck) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` - WARNING: this command is experimental. - - By default, the command outputs a JSON-encoded - golang.org/x/tools/internal/lsp/command.VulncheckResult - message. - Example: - $ gopls vulncheck - -`) - printFlagDefaults(f) -} - -func (v *vulncheck) Run(ctx context.Context, args ...string) error { - if len(args) > 1 { - return tool.CommandLineErrorf("vulncheck accepts at most one package pattern") - } - pattern := "." - if len(args) == 1 { - pattern = args[0] - } - - cwd, err := os.Getwd() - if err != nil { - return tool.CommandLineErrorf("failed to get current directory: %v", err) - } - var cfg pkgLoadConfig - if v.Config { - if err := json.NewDecoder(os.Stdin).Decode(&cfg); err != nil { - return tool.CommandLineErrorf("failed to parse cfg: %v", err) - } - } - - opts := source.DefaultOptions().Clone() - v.app.options(opts) // register hook - if opts == nil || opts.Hooks.Govulncheck == nil { - return tool.CommandLineErrorf("vulncheck feature is not available") - } - - loadCfg := &packages.Config{ - Context: ctx, - Tests: cfg.Tests, - BuildFlags: cfg.BuildFlags, - Env: cfg.Env, - } - - res, err := opts.Hooks.Govulncheck(ctx, loadCfg, command.VulncheckArgs{ - Dir: protocol.URIFromPath(cwd), - Pattern: pattern, - }) - if err != nil { - return tool.CommandLineErrorf("govulncheck failed: %v", err) - } - data, err := json.MarshalIndent(res, " ", " ") - if err != nil { - return tool.CommandLineErrorf("failed to decode results: %v", err) - } - fmt.Printf("%s", data) - return nil -} diff --git a/internal/lsp/cmd/workspace.go b/internal/lsp/cmd/workspace.go deleted file mode 100644 index c0ddd9eb46e..00000000000 --- a/internal/lsp/cmd/workspace.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -// workspace is a top-level command for working with the gopls workspace. This -// is experimental and subject to change. The idea is that subcommands could be -// used for manipulating the workspace mod file, rather than editing it -// manually. -type workspace struct { - app *Application - subcommands -} - -func newWorkspace(app *Application) *workspace { - return &workspace{ - app: app, - subcommands: subcommands{ - &generateWorkspaceMod{app: app}, - }, - } -} - -func (w *workspace) Name() string { return "workspace" } -func (w *workspace) Parent() string { return w.app.Name() } -func (w *workspace) ShortHelp() string { - return "manage the gopls workspace (experimental: under development)" -} - -// generateWorkspaceMod (re)generates the gopls.mod file for the current -// workspace. -type generateWorkspaceMod struct { - app *Application -} - -func (c *generateWorkspaceMod) Name() string { return "generate" } -func (c *generateWorkspaceMod) Usage() string { return "" } -func (c *generateWorkspaceMod) ShortHelp() string { - return "generate a gopls.mod file for a workspace" -} - -func (c *generateWorkspaceMod) DetailedHelp(f *flag.FlagSet) { - printFlagDefaults(f) -} - -func (c *generateWorkspaceMod) Run(ctx context.Context, args ...string) error { - origOptions := c.app.options - c.app.options = func(opts *source.Options) { - origOptions(opts) - opts.ExperimentalWorkspaceModule = true - } - conn, err := c.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - cmd, err := command.NewGenerateGoplsModCommand("", command.URIArg{}) - if err != nil { - return err - } - params := &protocol.ExecuteCommandParams{Command: cmd.Command, Arguments: cmd.Arguments} - if _, err := conn.ExecuteCommand(ctx, params); err != nil { - return fmt.Errorf("executing server command: %v", err) - } - return nil -} diff --git a/internal/lsp/cmd/workspace_symbol.go b/internal/lsp/cmd/workspace_symbol.go deleted file mode 100644 index 38fe5decf7f..00000000000 --- a/internal/lsp/cmd/workspace_symbol.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmd - -import ( - "context" - "flag" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/tool" -) - -// workspaceSymbol implements the workspace_symbol verb for gopls. -type workspaceSymbol struct { - Matcher string `flag:"matcher" help:"specifies the type of matcher: fuzzy, caseSensitive, or caseInsensitive.\nThe default is caseInsensitive."` - - app *Application -} - -func (r *workspaceSymbol) Name() string { return "workspace_symbol" } -func (r *workspaceSymbol) Parent() string { return r.app.Name() } -func (r *workspaceSymbol) Usage() string { return "[workspace_symbol-flags] " } -func (r *workspaceSymbol) ShortHelp() string { return "search symbols in workspace" } -func (r *workspaceSymbol) DetailedHelp(f *flag.FlagSet) { - fmt.Fprint(f.Output(), ` -Example: - - $ gopls workspace_symbol -matcher fuzzy 'wsymbols' - -workspace_symbol-flags: -`) - printFlagDefaults(f) -} - -func (r *workspaceSymbol) Run(ctx context.Context, args ...string) error { - if len(args) != 1 { - return tool.CommandLineErrorf("workspace_symbol expects 1 argument") - } - - opts := r.app.options - r.app.options = func(o *source.Options) { - if opts != nil { - opts(o) - } - switch r.Matcher { - case "fuzzy": - o.SymbolMatcher = source.SymbolFuzzy - case "caseSensitive": - o.SymbolMatcher = source.SymbolCaseSensitive - case "fastfuzzy": - o.SymbolMatcher = source.SymbolFastFuzzy - default: - o.SymbolMatcher = source.SymbolCaseInsensitive - } - } - - conn, err := r.app.connect(ctx) - if err != nil { - return err - } - defer conn.terminate(ctx) - - p := protocol.WorkspaceSymbolParams{ - Query: args[0], - } - - symbols, err := conn.Symbol(ctx, &p) - if err != nil { - return err - } - for _, s := range symbols { - f := conn.AddFile(ctx, fileURI(s.Location.URI)) - span, err := f.mapper.Span(s.Location) - if err != nil { - return err - } - fmt.Printf("%s %s %s\n", span, s.Name, s.Kind) - } - - return nil -} diff --git a/internal/lsp/code_lens.go b/internal/lsp/code_lens.go deleted file mode 100644 index e1944583883..00000000000 --- a/internal/lsp/code_lens.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "fmt" - "sort" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - var lenses map[command.Command]source.LensFunc - switch snapshot.View().FileKind(fh) { - case source.Mod: - lenses = mod.LensFuncs() - case source.Go: - lenses = source.LensFuncs() - default: - // Unsupported file kind for a code lens. - return nil, nil - } - var result []protocol.CodeLens - for cmd, lf := range lenses { - if !snapshot.View().Options().Codelenses[string(cmd)] { - continue - } - added, err := lf(ctx, snapshot, fh) - // Code lens is called on every keystroke, so we should just operate in - // a best-effort mode, ignoring errors. - if err != nil { - event.Error(ctx, fmt.Sprintf("code lens %s failed", cmd), err) - continue - } - result = append(result, added...) - } - sort.Slice(result, func(i, j int) bool { - a, b := result[i], result[j] - if protocol.CompareRange(a.Range, b.Range) == 0 { - return a.Command.Command < b.Command.Command - } - return protocol.CompareRange(a.Range, b.Range) < 0 - }) - return result, nil -} diff --git a/internal/lsp/command.go b/internal/lsp/command.go deleted file mode 100644 index 862af6088ec..00000000000 --- a/internal/lsp/command.go +++ /dev/null @@ -1,823 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/progress" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -func (s *Server) executeCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) { - var found bool - for _, name := range s.session.Options().SupportedCommands { - if name == params.Command { - found = true - break - } - } - if !found { - return nil, fmt.Errorf("%s is not a supported command", params.Command) - } - - handler := &commandHandler{ - s: s, - params: params, - } - return command.Dispatch(ctx, params, handler) -} - -type commandHandler struct { - s *Server - params *protocol.ExecuteCommandParams -} - -// commandConfig configures common command set-up and execution. -type commandConfig struct { - async bool // whether to run the command asynchronously. Async commands can only return errors. - requireSave bool // whether all files must be saved for the command to work - progress string // title to use for progress reporting. If empty, no progress will be reported. - forURI protocol.DocumentURI // URI to resolve to a snapshot. If unset, snapshot will be nil. -} - -// commandDeps is evaluated from a commandConfig. Note that not all fields may -// be populated, depending on which configuration is set. See comments in-line -// for details. -type commandDeps struct { - snapshot source.Snapshot // present if cfg.forURI was set - fh source.VersionedFileHandle // present if cfg.forURI was set - work *progress.WorkDone // present cfg.progress was set -} - -type commandFunc func(context.Context, commandDeps) error - -func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run commandFunc) (err error) { - if cfg.requireSave { - var unsaved []string - for _, overlay := range c.s.session.Overlays() { - if !overlay.Saved() { - unsaved = append(unsaved, overlay.URI().Filename()) - } - } - if len(unsaved) > 0 { - return fmt.Errorf("All files must be saved first (unsaved: %v).", unsaved) - } - } - var deps commandDeps - if cfg.forURI != "" { - var ok bool - var release func() - deps.snapshot, deps.fh, ok, release, err = c.s.beginFileRequest(ctx, cfg.forURI, source.UnknownKind) - defer release() - if !ok { - if err != nil { - return err - } - return fmt.Errorf("invalid file URL: %v", cfg.forURI) - } - } - ctx, cancel := context.WithCancel(xcontext.Detach(ctx)) - if cfg.progress != "" { - deps.work = c.s.progress.Start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel) - } - runcmd := func() error { - defer cancel() - err := run(ctx, deps) - if deps.work != nil { - switch { - case errors.Is(err, context.Canceled): - deps.work.End(ctx, "canceled") - case err != nil: - event.Error(ctx, "command error", err) - deps.work.End(ctx, "failed") - default: - deps.work.End(ctx, "completed") - } - } - return err - } - if cfg.async { - go func() { - if err := runcmd(); err != nil { - if showMessageErr := c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ - Type: protocol.Error, - Message: err.Error(), - }); showMessageErr != nil { - event.Error(ctx, fmt.Sprintf("failed to show message: %q", err.Error()), showMessageErr) - } - } - }() - return nil - } - return runcmd() -} - -func (c *commandHandler) ApplyFix(ctx context.Context, args command.ApplyFixArgs) error { - return c.run(ctx, commandConfig{ - // Note: no progress here. Applying fixes should be quick. - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - edits, err := source.ApplyFix(ctx, args.Fix, deps.snapshot, deps.fh, args.Range) - if err != nil { - return err - } - r, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ - Edit: protocol.WorkspaceEdit{ - DocumentChanges: edits, - }, - }) - if err != nil { - return err - } - if !r.Applied { - return errors.New(r.FailureReason) - } - return nil - }) -} - -func (c *commandHandler) RegenerateCgo(ctx context.Context, args command.URIArg) error { - return c.run(ctx, commandConfig{ - progress: "Regenerating Cgo", - }, func(ctx context.Context, deps commandDeps) error { - mod := source.FileModification{ - URI: args.URI.SpanURI(), - Action: source.InvalidateMetadata, - } - return c.s.didModifyFiles(ctx, []source.FileModification{mod}, FromRegenerateCgo) - }) -} - -func (c *commandHandler) CheckUpgrades(ctx context.Context, args command.CheckUpgradesArgs) error { - return c.run(ctx, commandConfig{ - forURI: args.URI, - progress: "Checking for upgrades", - }, func(ctx context.Context, deps commandDeps) error { - upgrades, err := c.s.getUpgrades(ctx, deps.snapshot, args.URI.SpanURI(), args.Modules) - if err != nil { - return err - } - deps.snapshot.View().RegisterModuleUpgrades(upgrades) - // Re-diagnose the snapshot to publish the new module diagnostics. - c.s.diagnoseSnapshot(deps.snapshot, nil, false) - return nil - }) -} - -func (c *commandHandler) AddDependency(ctx context.Context, args command.DependencyArgs) error { - return c.GoGetModule(ctx, args) -} - -func (c *commandHandler) UpgradeDependency(ctx context.Context, args command.DependencyArgs) error { - return c.GoGetModule(ctx, args) -} - -func (c *commandHandler) GoGetModule(ctx context.Context, args command.DependencyArgs) error { - return c.run(ctx, commandConfig{ - progress: "Running go get", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - return runGoGetModule(invoke, args.AddRequire, args.GoCmdArgs) - }) - }) -} - -// TODO(rFindley): UpdateGoSum, Tidy, and Vendor could probably all be one command. -func (c *commandHandler) UpdateGoSum(ctx context.Context, args command.URIArgs) error { - return c.run(ctx, commandConfig{ - progress: "Updating go.sum", - }, func(ctx context.Context, deps commandDeps) error { - for _, uri := range args.URIs { - snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind) - defer release() - if !ok { - return err - } - if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - _, err := invoke("list", "all") - return err - }); err != nil { - return err - } - } - return nil - }) -} - -func (c *commandHandler) Tidy(ctx context.Context, args command.URIArgs) error { - return c.run(ctx, commandConfig{ - requireSave: true, - progress: "Running go mod tidy", - }, func(ctx context.Context, deps commandDeps) error { - for _, uri := range args.URIs { - snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind) - defer release() - if !ok { - return err - } - if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - _, err := invoke("mod", "tidy") - return err - }); err != nil { - return err - } - } - return nil - }) -} - -func (c *commandHandler) Vendor(ctx context.Context, args command.URIArg) error { - return c.run(ctx, commandConfig{ - requireSave: true, - progress: "Running go mod vendor", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - // Use RunGoCommandPiped here so that we don't compete with any other go - // command invocations. go mod vendor deletes modules.txt before recreating - // it, and therefore can run into file locking issues on Windows if that - // file is in use by another process, such as go list. - // - // If golang/go#44119 is resolved, go mod vendor will instead modify - // modules.txt in-place. In that case we could theoretically allow this - // command to run concurrently. - err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{ - Verb: "mod", - Args: []string{"vendor"}, - WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()), - }, &bytes.Buffer{}, &bytes.Buffer{}) - return err - }) -} - -func (c *commandHandler) EditGoDirective(ctx context.Context, args command.EditGoDirectiveArgs) error { - return c.run(ctx, commandConfig{ - requireSave: true, // if go.mod isn't saved it could cause a problem - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, args.URI, source.UnknownKind) - defer release() - if !ok { - return err - } - if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - _, err := invoke("mod", "edit", "-go", args.Version) - return err - }); err != nil { - return err - } - return nil - }) -} - -func (c *commandHandler) RemoveDependency(ctx context.Context, args command.RemoveDependencyArgs) error { - return c.run(ctx, commandConfig{ - progress: "Removing dependency", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - // If the module is tidied apart from the one unused diagnostic, we can - // run `go get module@none`, and then run `go mod tidy`. Otherwise, we - // must make textual edits. - // TODO(rstambler): In Go 1.17+, we will be able to use the go command - // without checking if the module is tidy. - if args.OnlyDiagnostic { - return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - if err := runGoGetModule(invoke, false, []string{args.ModulePath + "@none"}); err != nil { - return err - } - _, err := invoke("mod", "tidy") - return err - }) - } - pm, err := deps.snapshot.ParseMod(ctx, deps.fh) - if err != nil { - return err - } - edits, err := dropDependency(deps.snapshot, pm, args.ModulePath) - if err != nil { - return err - } - response, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ - Edit: protocol.WorkspaceEdit{ - DocumentChanges: []protocol.TextDocumentEdit{{ - TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ - Version: deps.fh.Version(), - TextDocumentIdentifier: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(deps.fh.URI()), - }, - }, - Edits: edits, - }}, - }, - }) - if err != nil { - return err - } - if !response.Applied { - return fmt.Errorf("edits not applied because of %s", response.FailureReason) - } - return nil - }) -} - -// dropDependency returns the edits to remove the given require from the go.mod -// file. -func dropDependency(snapshot source.Snapshot, pm *source.ParsedModule, modulePath string) ([]protocol.TextEdit, error) { - // We need a private copy of the parsed go.mod file, since we're going to - // modify it. - copied, err := modfile.Parse("", pm.Mapper.Content, nil) - if err != nil { - return nil, err - } - if err := copied.DropRequire(modulePath); err != nil { - return nil, err - } - copied.Cleanup() - newContent, err := copied.Format() - if err != nil { - return nil, err - } - // Calculate the edits to be made due to the change. - diff, err := snapshot.View().Options().ComputeEdits(pm.URI, string(pm.Mapper.Content), string(newContent)) - if err != nil { - return nil, err - } - return source.ToProtocolEdits(pm.Mapper, diff) -} - -func (c *commandHandler) Test(ctx context.Context, uri protocol.DocumentURI, tests, benchmarks []string) error { - return c.RunTests(ctx, command.RunTestsArgs{ - URI: uri, - Tests: tests, - Benchmarks: benchmarks, - }) -} - -func (c *commandHandler) RunTests(ctx context.Context, args command.RunTestsArgs) error { - return c.run(ctx, commandConfig{ - async: true, - progress: "Running go test", - requireSave: true, - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - if err := c.runTests(ctx, deps.snapshot, deps.work, args.URI, args.Tests, args.Benchmarks); err != nil { - return fmt.Errorf("running tests failed: %w", err) - } - return nil - }) -} - -func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error { - // TODO: fix the error reporting when this runs async. - pkgs, err := snapshot.PackagesForFile(ctx, uri.SpanURI(), source.TypecheckWorkspace, false) - if err != nil { - return err - } - if len(pkgs) == 0 { - return fmt.Errorf("package could not be found for file: %s", uri.SpanURI().Filename()) - } - pkgPath := pkgs[0].ForTest() - - // create output - buf := &bytes.Buffer{} - ew := progress.NewEventWriter(ctx, "test") - out := io.MultiWriter(ew, progress.NewWorkDoneWriter(work), buf) - - // Run `go test -run Func` on each test. - var failedTests int - for _, funcName := range tests { - inv := &gocommand.Invocation{ - Verb: "test", - Args: []string{pkgPath, "-v", "-count=1", "-run", fmt.Sprintf("^%s$", funcName)}, - WorkingDir: filepath.Dir(uri.SpanURI().Filename()), - } - if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil { - if errors.Is(err, context.Canceled) { - return err - } - failedTests++ - } - } - - // Run `go test -run=^$ -bench Func` on each test. - var failedBenchmarks int - for _, funcName := range benchmarks { - inv := &gocommand.Invocation{ - Verb: "test", - Args: []string{pkgPath, "-v", "-run=^$", "-bench", fmt.Sprintf("^%s$", funcName)}, - WorkingDir: filepath.Dir(uri.SpanURI().Filename()), - } - if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil { - if errors.Is(err, context.Canceled) { - return err - } - failedBenchmarks++ - } - } - - var title string - if len(tests) > 0 && len(benchmarks) > 0 { - title = "tests and benchmarks" - } else if len(tests) > 0 { - title = "tests" - } else if len(benchmarks) > 0 { - title = "benchmarks" - } else { - return errors.New("No functions were provided") - } - message := fmt.Sprintf("all %s passed", title) - if failedTests > 0 && failedBenchmarks > 0 { - message = fmt.Sprintf("%d / %d tests failed and %d / %d benchmarks failed", failedTests, len(tests), failedBenchmarks, len(benchmarks)) - } else if failedTests > 0 { - message = fmt.Sprintf("%d / %d tests failed", failedTests, len(tests)) - } else if failedBenchmarks > 0 { - message = fmt.Sprintf("%d / %d benchmarks failed", failedBenchmarks, len(benchmarks)) - } - if failedTests > 0 || failedBenchmarks > 0 { - message += "\n" + buf.String() - } - - return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ - Type: protocol.Info, - Message: message, - }) -} - -func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs) error { - title := "Running go generate ." - if args.Recursive { - title = "Running go generate ./..." - } - return c.run(ctx, commandConfig{ - requireSave: true, - progress: title, - forURI: args.Dir, - }, func(ctx context.Context, deps commandDeps) error { - er := progress.NewEventWriter(ctx, "generate") - - pattern := "." - if args.Recursive { - pattern = "./..." - } - inv := &gocommand.Invocation{ - Verb: "generate", - Args: []string{"-x", pattern}, - WorkingDir: args.Dir.SpanURI().Filename(), - } - stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(deps.work)) - if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil { - return err - } - return nil - }) -} - -func (c *commandHandler) GoGetPackage(ctx context.Context, args command.GoGetPackageArgs) error { - return c.run(ctx, commandConfig{ - forURI: args.URI, - progress: "Running go get", - }, func(ctx context.Context, deps commandDeps) error { - // Run on a throwaway go.mod, otherwise it'll write to the real one. - stdout, err := deps.snapshot.RunGoCommandDirect(ctx, source.WriteTemporaryModFile|source.AllowNetwork, &gocommand.Invocation{ - Verb: "list", - Args: []string{"-f", "{{.Module.Path}}@{{.Module.Version}}", args.Pkg}, - WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()), - }) - if err != nil { - return err - } - ver := strings.TrimSpace(stdout.String()) - return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { - if args.AddRequire { - if err := addModuleRequire(invoke, []string{ver}); err != nil { - return err - } - } - _, err := invoke(append([]string{"get", "-d"}, args.Pkg)...) - return err - }) - }) -} - -func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Snapshot, uri span.URI, run func(invoke func(...string) (*bytes.Buffer, error)) error) error { - tmpModfile, newModBytes, newSumBytes, err := snapshot.RunGoCommands(ctx, true, filepath.Dir(uri.Filename()), run) - if err != nil { - return err - } - if !tmpModfile { - return nil - } - modURI := snapshot.GoModForFile(uri) - sumURI := span.URIFromPath(strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum") - modEdits, err := applyFileEdits(ctx, snapshot, modURI, newModBytes) - if err != nil { - return err - } - sumEdits, err := applyFileEdits(ctx, snapshot, sumURI, newSumBytes) - if err != nil { - return err - } - changes := append(sumEdits, modEdits...) - if len(changes) == 0 { - return nil - } - response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ - Edit: protocol.WorkspaceEdit{ - DocumentChanges: changes, - }, - }) - if err != nil { - return err - } - if !response.Applied { - return fmt.Errorf("edits not applied because of %s", response.FailureReason) - } - return nil -} - -func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) { - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - return nil, err - } - oldContent, err := fh.Read() - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if bytes.Equal(oldContent, newContent) { - return nil, nil - } - - // Sending a workspace edit to a closed file causes VS Code to open the - // file and leave it unsaved. We would rather apply the changes directly, - // especially to go.sum, which should be mostly invisible to the user. - if !snapshot.IsOpen(uri) { - err := ioutil.WriteFile(uri.Filename(), newContent, 0666) - return nil, err - } - - m := protocol.NewColumnMapper(fh.URI(), oldContent) - diff, err := snapshot.View().Options().ComputeEdits(uri, string(oldContent), string(newContent)) - if err != nil { - return nil, err - } - edits, err := source.ToProtocolEdits(m, diff) - if err != nil { - return nil, err - } - return []protocol.TextDocumentEdit{{ - TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ - Version: fh.Version(), - TextDocumentIdentifier: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }, - Edits: edits, - }}, nil -} - -func runGoGetModule(invoke func(...string) (*bytes.Buffer, error), addRequire bool, args []string) error { - if addRequire { - if err := addModuleRequire(invoke, args); err != nil { - return err - } - } - _, err := invoke(append([]string{"get", "-d"}, args...)...) - return err -} - -func addModuleRequire(invoke func(...string) (*bytes.Buffer, error), args []string) error { - // Using go get to create a new dependency results in an - // `// indirect` comment we may not want. The only way to avoid it - // is to add the require as direct first. Then we can use go get to - // update go.sum and tidy up. - _, err := invoke(append([]string{"mod", "edit", "-require"}, args...)...) - return err -} - -func (s *Server) getUpgrades(ctx context.Context, snapshot source.Snapshot, uri span.URI, modules []string) (map[string]string, error) { - stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{ - Verb: "list", - Args: append([]string{"-m", "-u", "-json"}, modules...), - WorkingDir: filepath.Dir(uri.Filename()), - ModFlag: "readonly", - }) - if err != nil { - return nil, err - } - - upgrades := map[string]string{} - for dec := json.NewDecoder(stdout); dec.More(); { - mod := &gocommand.ModuleJSON{} - if err := dec.Decode(mod); err != nil { - return nil, err - } - if mod.Update == nil { - continue - } - upgrades[mod.Path] = mod.Update.Version - } - return upgrades, nil -} - -func (c *commandHandler) GCDetails(ctx context.Context, uri protocol.DocumentURI) error { - return c.ToggleGCDetails(ctx, command.URIArg{URI: uri}) -} - -func (c *commandHandler) ToggleGCDetails(ctx context.Context, args command.URIArg) error { - return c.run(ctx, commandConfig{ - requireSave: true, - progress: "Toggling GC Details", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - pkg, err := deps.snapshot.PackageForFile(ctx, deps.fh.URI(), source.TypecheckWorkspace, source.NarrowestPackage) - if err != nil { - return err - } - c.s.gcOptimizationDetailsMu.Lock() - if _, ok := c.s.gcOptimizationDetails[pkg.ID()]; ok { - delete(c.s.gcOptimizationDetails, pkg.ID()) - c.s.clearDiagnosticSource(gcDetailsSource) - } else { - c.s.gcOptimizationDetails[pkg.ID()] = struct{}{} - } - c.s.gcOptimizationDetailsMu.Unlock() - c.s.diagnoseSnapshot(deps.snapshot, nil, false) - return nil - }) -} - -func (c *commandHandler) GenerateGoplsMod(ctx context.Context, args command.URIArg) error { - // TODO: go back to using URI - return c.run(ctx, commandConfig{ - requireSave: true, - progress: "Generating gopls.mod", - }, func(ctx context.Context, deps commandDeps) error { - views := c.s.session.Views() - if len(views) != 1 { - return fmt.Errorf("cannot resolve view: have %d views", len(views)) - } - v := views[0] - snapshot, release := v.Snapshot(ctx) - defer release() - modFile, err := snapshot.BuildGoplsMod(ctx) - if err != nil { - return fmt.Errorf("getting workspace mod file: %w", err) - } - content, err := modFile.Format() - if err != nil { - return fmt.Errorf("formatting mod file: %w", err) - } - filename := filepath.Join(snapshot.View().Folder().Filename(), "gopls.mod") - if err := ioutil.WriteFile(filename, content, 0644); err != nil { - return fmt.Errorf("writing mod file: %w", err) - } - return nil - }) -} - -func (c *commandHandler) ListKnownPackages(ctx context.Context, args command.URIArg) (command.ListKnownPackagesResult, error) { - var result command.ListKnownPackagesResult - err := c.run(ctx, commandConfig{ - progress: "Listing packages", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - var err error - result.Packages, err = source.KnownPackages(ctx, deps.snapshot, deps.fh) - return err - }) - return result, err -} - -func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (command.ListImportsResult, error) { - var result command.ListImportsResult - err := c.run(ctx, commandConfig{ - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - pkg, err := deps.snapshot.PackageForFile(ctx, args.URI.SpanURI(), source.TypecheckWorkspace, source.NarrowestPackage) - if err != nil { - return err - } - pgf, err := pkg.File(args.URI.SpanURI()) - if err != nil { - return err - } - for _, group := range astutil.Imports(deps.snapshot.FileSet(), pgf.File) { - for _, imp := range group { - if imp.Path == nil { - continue - } - var name string - if imp.Name != nil { - name = imp.Name.Name - } - result.Imports = append(result.Imports, command.FileImport{ - Path: source.ImportPath(imp), - Name: name, - }) - } - } - for _, imp := range pkg.Imports() { - result.PackageImports = append(result.PackageImports, command.PackageImport{ - Path: imp.PkgPath(), // This might be the vendored path under GOPATH vendoring, in which case it's a bug. - }) - } - sort.Slice(result.PackageImports, func(i, j int) bool { - return result.PackageImports[i].Path < result.PackageImports[j].Path - }) - return nil - }) - return result, err -} - -func (c *commandHandler) AddImport(ctx context.Context, args command.AddImportArgs) error { - return c.run(ctx, commandConfig{ - progress: "Adding import", - forURI: args.URI, - }, func(ctx context.Context, deps commandDeps) error { - edits, err := source.AddImport(ctx, deps.snapshot, deps.fh, args.ImportPath) - if err != nil { - return fmt.Errorf("could not add import: %v", err) - } - if _, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ - Edit: protocol.WorkspaceEdit{ - DocumentChanges: documentChanges(deps.fh, edits), - }, - }); err != nil { - return fmt.Errorf("could not apply import edits: %v", err) - } - return nil - }) -} - -func (c *commandHandler) StartDebugging(ctx context.Context, args command.DebuggingArgs) (result command.DebuggingResult, _ error) { - addr := args.Addr - if addr == "" { - addr = "localhost:0" - } - di := debug.GetInstance(ctx) - if di == nil { - return result, errors.New("internal error: server has no debugging instance") - } - listenedAddr, err := di.Serve(ctx, addr) - if err != nil { - return result, fmt.Errorf("starting debug server: %w", err) - } - result.URLs = []string{"http://" + listenedAddr} - return result, nil -} - -func (c *commandHandler) RunVulncheckExp(ctx context.Context, args command.VulncheckArgs) (result command.VulncheckResult, _ error) { - err := c.run(ctx, commandConfig{ - progress: "Running vulncheck", - requireSave: true, - forURI: args.Dir, // Will dir work? - }, func(ctx context.Context, deps commandDeps) error { - view := deps.snapshot.View() - opts := view.Options() - if opts == nil || opts.Hooks.Govulncheck == nil { - return errors.New("vulncheck feature is not available") - } - - buildFlags := opts.BuildFlags // XXX: is session.Options equivalent to view.Options? - var viewEnv []string - if e := opts.EnvSlice(); e != nil { - viewEnv = append(os.Environ(), e...) - } - cfg := &packages.Config{ - Context: ctx, - Tests: true, // TODO(hyangah): add a field in args. - BuildFlags: buildFlags, - Env: viewEnv, - Dir: args.Dir.SpanURI().Filename(), - // TODO(hyangah): configure overlay - } - var err error - result, err = opts.Hooks.Govulncheck(ctx, cfg, args) - return err - }) - return result, err -} diff --git a/internal/lsp/command/generate.go b/internal/lsp/command/generate.go deleted file mode 100644 index 14628c733b5..00000000000 --- a/internal/lsp/command/generate.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ignore -// +build ignore - -package main - -import ( - "fmt" - "io/ioutil" - "os" - - "golang.org/x/tools/internal/lsp/command/gen" -) - -func main() { - content, err := gen.Generate() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - ioutil.WriteFile("command_gen.go", content, 0644) -} diff --git a/internal/lsp/command/interface_test.go b/internal/lsp/command/interface_test.go deleted file mode 100644 index 9ea30b4463e..00000000000 --- a/internal/lsp/command/interface_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package command_test - -import ( - "bytes" - "io/ioutil" - "testing" - - "golang.org/x/tools/internal/lsp/command/gen" - "golang.org/x/tools/internal/testenv" -) - -func TestGenerated(t *testing.T) { - testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code. - - onDisk, err := ioutil.ReadFile("command_gen.go") - if err != nil { - t.Fatal(err) - } - - generated, err := gen.Generate() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(onDisk, generated) { - t.Error("command_gen.go is stale -- regenerate") - } -} diff --git a/internal/lsp/completion.go b/internal/lsp/completion.go deleted file mode 100644 index 06af1bdaec0..00000000000 --- a/internal/lsp/completion.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/lsppos" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/source/completion" - "golang.org/x/tools/internal/lsp/template" - "golang.org/x/tools/internal/lsp/work" -) - -func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - var candidates []completion.CompletionItem - var surrounding *completion.Selection - switch snapshot.View().FileKind(fh) { - case source.Go: - candidates, surrounding, err = completion.Completion(ctx, snapshot, fh, params.Position, params.Context) - case source.Mod: - candidates, surrounding = nil, nil - case source.Work: - cl, err := work.Completion(ctx, snapshot, fh, params.Position) - if err != nil { - break - } - return cl, nil - case source.Tmpl: - var cl *protocol.CompletionList - cl, err = template.Completion(ctx, snapshot, fh, params.Position, params.Context) - if err != nil { - break // use common error handling, candidates==nil - } - return cl, nil - } - if err != nil { - event.Error(ctx, "no completions found", err, tag.Position.Of(params.Position)) - } - if candidates == nil { - return &protocol.CompletionList{ - IsIncomplete: true, - Items: []protocol.CompletionItem{}, - }, nil - } - - // Map positions to LSP positions using the original content, rather than - // internal/span, as the latter treats end of file as the beginning of the - // next line, even when it's not newline-terminated. See golang/go#41029 for - // more details. - src, err := fh.Read() - if err != nil { - return nil, err - } - tf := snapshot.FileSet().File(surrounding.Start()) - mapper := lsppos.NewTokenMapper(src, tf) - rng, err := mapper.Range(surrounding.Start(), surrounding.End()) - if err != nil { - return nil, err - } - - // When using deep completions/fuzzy matching, report results as incomplete so - // client fetches updated completions after every key stroke. - options := snapshot.View().Options() - incompleteResults := options.DeepCompletion || options.Matcher == source.Fuzzy - - items := toProtocolCompletionItems(candidates, rng, options) - - return &protocol.CompletionList{ - IsIncomplete: incompleteResults, - Items: items, - }, nil -} - -func toProtocolCompletionItems(candidates []completion.CompletionItem, rng protocol.Range, options *source.Options) []protocol.CompletionItem { - var ( - items = make([]protocol.CompletionItem, 0, len(candidates)) - numDeepCompletionsSeen int - ) - for i, candidate := range candidates { - // Limit the number of deep completions to not overwhelm the user in cases - // with dozens of deep completion matches. - if candidate.Depth > 0 { - if !options.DeepCompletion { - continue - } - if numDeepCompletionsSeen >= completion.MaxDeepCompletions { - continue - } - numDeepCompletionsSeen++ - } - insertText := candidate.InsertText - if options.InsertTextFormat == protocol.SnippetTextFormat { - insertText = candidate.Snippet() - } - - // This can happen if the client has snippets disabled but the - // candidate only supports snippet insertion. - if insertText == "" { - continue - } - - item := protocol.CompletionItem{ - Label: candidate.Label, - Detail: candidate.Detail, - Kind: candidate.Kind, - TextEdit: &protocol.TextEdit{ - NewText: insertText, - Range: rng, - }, - InsertTextFormat: options.InsertTextFormat, - AdditionalTextEdits: candidate.AdditionalTextEdits, - // This is a hack so that the client sorts completion results in the order - // according to their score. This can be removed upon the resolution of - // https://github.com/Microsoft/language-server-protocol/issues/348. - SortText: fmt.Sprintf("%05d", i), - - // Trim operators (VSCode doesn't like weird characters in - // filterText). - FilterText: strings.TrimLeft(candidate.InsertText, "&*"), - - Preselect: i == 0, - Documentation: candidate.Documentation, - Tags: candidate.Tags, - Deprecated: candidate.Deprecated, - } - items = append(items, item) - } - return items -} diff --git a/internal/lsp/completion_test.go b/internal/lsp/completion_test.go deleted file mode 100644 index d496a40a5cc..00000000000 --- a/internal/lsp/completion_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" -) - -func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = false - opts.Matcher = source.CaseInsensitive - opts.CompleteUnimported = false - opts.InsertTextFormat = protocol.SnippetTextFormat - opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal") - opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix") - }) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) - if diff := tests.DiffCompletionItems(want, got); diff != "" { - t.Errorf("%s", diff) - } -} - -func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) { - list := r.callCompletion(t, src, func(opts *source.Options) { - opts.UsePlaceholders = placeholders - opts.DeepCompletion = true - opts.Matcher = source.Fuzzy - opts.CompleteUnimported = false - }) - got := tests.FindItem(list, *items[expected.CompletionItem]) - want := expected.PlainSnippet - if placeholders { - want = expected.PlaceholderSnippet - } - if diff := tests.DiffSnippets(want, got); diff != "" { - t.Errorf("%s", diff) - } -} - -func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) {}) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) - if diff := tests.CheckCompletionOrder(want, got, false); diff != "" { - t.Errorf("%s", diff) - } -} - -func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.CaseInsensitive - opts.CompleteUnimported = false - }) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) - if msg := tests.DiffCompletionItems(want, got); msg != "" { - t.Errorf("%s", msg) - } -} - -func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.Fuzzy - opts.CompleteUnimported = false - }) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) - if msg := tests.DiffCompletionItems(want, got); msg != "" { - t.Errorf("%s", msg) - } -} - -func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) { - opts.Matcher = source.CaseSensitive - opts.CompleteUnimported = false - }) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) - if msg := tests.DiffCompletionItems(want, got); msg != "" { - t.Errorf("%s", msg) - } -} - -func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.Fuzzy - opts.CompleteUnimported = false - opts.LiteralCompletions = true - opts.ExperimentalPostfixCompletions = true - }) - want := expected(t, test, items) - if msg := tests.CheckCompletionOrder(want, got, true); msg != "" { - t.Errorf("%s", msg) - } -} - -func expected(t *testing.T, test tests.Completion, items tests.CompletionItems) []protocol.CompletionItem { - t.Helper() - - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - item := items[pos] - want = append(want, tests.ToProtocolCompletionItem(*item)) - } - return want -} - -func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) []protocol.CompletionItem { - t.Helper() - - view, err := r.server.session.ViewOf(src.URI()) - if err != nil { - t.Fatal(err) - } - original := view.Options() - modified := view.Options().Clone() - options(modified) - view, err = view.SetOptions(r.ctx, modified) - if err != nil { - t.Error(err) - return nil - } - defer view.SetOptions(r.ctx, original) - - list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(src.URI()), - }, - Position: protocol.Position{ - Line: uint32(src.Start().Line() - 1), - Character: uint32(src.Start().Column() - 1), - }, - }, - }) - if err != nil { - t.Fatal(err) - } - return list.Items -} diff --git a/internal/lsp/debug/info.go b/internal/lsp/debug/info.go deleted file mode 100644 index bcc2f4f0605..00000000000 --- a/internal/lsp/debug/info.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package debug exports debug information for gopls. -package debug - -import ( - "context" - "encoding/json" - "fmt" - "io" - "reflect" - "runtime" - "runtime/debug" - "sort" - "strings" - - "golang.org/x/tools/internal/lsp/source" -) - -type PrintMode int - -const ( - PlainText = PrintMode(iota) - Markdown - HTML - JSON -) - -// Version is a manually-updated mechanism for tracking versions. -const Version = "master" - -// ServerVersion is the format used by gopls to report its version to the -// client. This format is structured so that the client can parse it easily. -type ServerVersion struct { - *BuildInfo - Version string -} - -type Module struct { - ModuleVersion - Replace *ModuleVersion `json:"replace,omitempty"` -} - -type ModuleVersion struct { - Path string `json:"path,omitempty"` - Version string `json:"version,omitempty"` - Sum string `json:"sum,omitempty"` -} - -// VersionInfo returns the build info for the gopls process. If it was not -// built in module mode, we return a GOPATH-specific message with the -// hardcoded version. -func VersionInfo() *ServerVersion { - if info, ok := readBuildInfo(); ok { - return getVersion(info) - } - buildInfo := &BuildInfo{} - // go1.17 or earlier, part of s.BuildInfo are embedded fields. - buildInfo.Path = "gopls, built in GOPATH mode" - buildInfo.GoVersion = runtime.Version() - return &ServerVersion{ - Version: Version, - BuildInfo: buildInfo, - } -} - -func getVersion(info *BuildInfo) *ServerVersion { - return &ServerVersion{ - Version: Version, - BuildInfo: info, - } -} - -// PrintServerInfo writes HTML debug info to w for the Instance. -func (i *Instance) PrintServerInfo(ctx context.Context, w io.Writer) { - section(w, HTML, "Server Instance", func() { - fmt.Fprintf(w, "Start time: %v\n", i.StartTime) - fmt.Fprintf(w, "LogFile: %s\n", i.Logfile) - fmt.Fprintf(w, "Working directory: %s\n", i.Workdir) - fmt.Fprintf(w, "Address: %s\n", i.ServerAddress) - fmt.Fprintf(w, "Debug address: %s\n", i.DebugAddress()) - }) - PrintVersionInfo(ctx, w, true, HTML) - section(w, HTML, "Command Line", func() { - fmt.Fprintf(w, "cmdline") - }) -} - -// PrintVersionInfo writes version information to w, using the output format -// specified by mode. verbose controls whether additional information is -// written, including section headers. -func PrintVersionInfo(_ context.Context, w io.Writer, verbose bool, mode PrintMode) error { - info := VersionInfo() - if mode == JSON { - return printVersionInfoJSON(w, info) - } - - if !verbose { - printBuildInfo(w, info, false, mode) - return nil - } - section(w, mode, "Build info", func() { - printBuildInfo(w, info, true, mode) - }) - return nil -} - -func printVersionInfoJSON(w io.Writer, info *ServerVersion) error { - js, err := json.MarshalIndent(info, "", "\t") - if err != nil { - return err - } - _, err = fmt.Fprint(w, string(js)) - return err -} - -func section(w io.Writer, mode PrintMode, title string, body func()) { - switch mode { - case PlainText: - fmt.Fprintln(w, title) - fmt.Fprintln(w, strings.Repeat("-", len(title))) - body() - case Markdown: - fmt.Fprintf(w, "#### %s\n\n```\n", title) - body() - fmt.Fprintf(w, "```\n") - case HTML: - fmt.Fprintf(w, "

    %s

    \n
    \n", title)
    -		body()
    -		fmt.Fprint(w, "
    \n") - } -} - -func printBuildInfo(w io.Writer, info *ServerVersion, verbose bool, mode PrintMode) { - fmt.Fprintf(w, "%v %v\n", info.Path, Version) - printModuleInfo(w, info.Main, mode) - if !verbose { - return - } - for _, dep := range info.Deps { - printModuleInfo(w, *dep, mode) - } - fmt.Fprintf(w, "go: %v\n", info.GoVersion) -} - -func printModuleInfo(w io.Writer, m debug.Module, _ PrintMode) { - fmt.Fprintf(w, " %s@%s", m.Path, m.Version) - if m.Sum != "" { - fmt.Fprintf(w, " %s", m.Sum) - } - if m.Replace != nil { - fmt.Fprintf(w, " => %v", m.Replace.Path) - } - fmt.Fprintf(w, "\n") -} - -type field struct { - index []int -} - -var fields []field - -// find all the options. The presumption is that the Options are nested structs -// and that pointers don't need to be dereferenced -func swalk(t reflect.Type, ix []int, indent string) { - switch t.Kind() { - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - ixx := append(append([]int{}, ix...), i) - swalk(fld.Type, ixx, indent+". ") - } - default: - // everything is either a struct or a field (that's an assumption about Options) - fields = append(fields, field{ix}) - } -} - -type sessionOption struct { - Name string - Type string - Current string - Default string -} - -func showOptions(o *source.Options) []sessionOption { - var out []sessionOption - t := reflect.TypeOf(*o) - swalk(t, []int{}, "") - v := reflect.ValueOf(*o) - do := reflect.ValueOf(*source.DefaultOptions()) - for _, f := range fields { - val := v.FieldByIndex(f.index) - def := do.FieldByIndex(f.index) - tx := t.FieldByIndex(f.index) - is := strVal(val) - was := strVal(def) - out = append(out, sessionOption{ - Name: tx.Name, - Type: tx.Type.String(), - Current: is, - Default: was, - }) - } - sort.Slice(out, func(i, j int) bool { - rd := out[i].Current == out[i].Default - ld := out[j].Current == out[j].Default - if rd != ld { - return ld - } - return out[i].Name < out[j].Name - }) - return out -} - -func strVal(val reflect.Value) string { - switch val.Kind() { - case reflect.Bool: - return fmt.Sprintf("%v", val.Interface()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return fmt.Sprintf("%v", val.Interface()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return fmt.Sprintf("%v", val.Interface()) - case reflect.Uintptr, reflect.UnsafePointer: - return fmt.Sprintf("0x%x", val.Pointer()) - case reflect.Complex64, reflect.Complex128: - return fmt.Sprintf("%v", val.Complex()) - case reflect.Array, reflect.Slice: - ans := []string{} - for i := 0; i < val.Len(); i++ { - ans = append(ans, strVal(val.Index(i))) - } - sort.Strings(ans) - return fmt.Sprintf("%v", ans) - case reflect.Chan, reflect.Func, reflect.Ptr: - return val.Kind().String() - case reflect.Struct: - var x source.Analyzer - if val.Type() != reflect.TypeOf(x) { - return val.Kind().String() - } - // this is sort of ugly, but usable - str := val.FieldByName("Analyzer").Elem().FieldByName("Doc").String() - ix := strings.Index(str, "\n") - if ix == -1 { - ix = len(str) - } - return str[:ix] - case reflect.String: - return fmt.Sprintf("%q", val.Interface()) - case reflect.Map: - ans := []string{} - iter := val.MapRange() - for iter.Next() { - k := iter.Key() - v := iter.Value() - ans = append(ans, fmt.Sprintf("%s:%s, ", strVal(k), strVal(v))) - } - sort.Strings(ans) - return fmt.Sprintf("%v", ans) - } - return fmt.Sprintf("??%s??", val.Type()) -} diff --git a/internal/lsp/debug/serve.go b/internal/lsp/debug/serve.go deleted file mode 100644 index 0bdee92c5e0..00000000000 --- a/internal/lsp/debug/serve.go +++ /dev/null @@ -1,922 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package debug - -import ( - "archive/zip" - "bytes" - "context" - "errors" - "fmt" - "html/template" - "io" - stdlog "log" - "net" - "net/http" - "net/http/pprof" - "os" - "path" - "path/filepath" - "runtime" - rpprof "runtime/pprof" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/event/core" - "golang.org/x/tools/internal/event/export" - "golang.org/x/tools/internal/event/export/metric" - "golang.org/x/tools/internal/event/export/ocagent" - "golang.org/x/tools/internal/event/export/prometheus" - "golang.org/x/tools/internal/event/keys" - "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug/log" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -type contextKeyType int - -const ( - instanceKey contextKeyType = iota - traceKey -) - -// An Instance holds all debug information associated with a gopls instance. -type Instance struct { - Logfile string - StartTime time.Time - ServerAddress string - Workdir string - OCAgentConfig string - - LogWriter io.Writer - - exporter event.Exporter - - ocagent *ocagent.Exporter - prometheus *prometheus.Exporter - rpcs *Rpcs - traces *traces - State *State - - serveMu sync.Mutex - debugAddress string - listenedDebugAddress string -} - -// State holds debugging information related to the server state. -type State struct { - mu sync.Mutex - clients []*Client - servers []*Server -} - -func (st *State) Bugs() []bug.Bug { - return bug.List() -} - -// Caches returns the set of Cache objects currently being served. -func (st *State) Caches() []*cache.Cache { - var caches []*cache.Cache - seen := make(map[string]struct{}) - for _, client := range st.Clients() { - cache, ok := client.Session.Cache().(*cache.Cache) - if !ok { - continue - } - if _, found := seen[cache.ID()]; found { - continue - } - seen[cache.ID()] = struct{}{} - caches = append(caches, cache) - } - return caches -} - -// Cache returns the Cache that matches the supplied id. -func (st *State) Cache(id string) *cache.Cache { - for _, c := range st.Caches() { - if c.ID() == id { - return c - } - } - return nil -} - -// Sessions returns the set of Session objects currently being served. -func (st *State) Sessions() []*cache.Session { - var sessions []*cache.Session - for _, client := range st.Clients() { - sessions = append(sessions, client.Session) - } - return sessions -} - -// Session returns the Session that matches the supplied id. -func (st *State) Session(id string) *cache.Session { - for _, s := range st.Sessions() { - if s.ID() == id { - return s - } - } - return nil -} - -// Views returns the set of View objects currently being served. -func (st *State) Views() []*cache.View { - var views []*cache.View - for _, s := range st.Sessions() { - for _, v := range s.Views() { - if cv, ok := v.(*cache.View); ok { - views = append(views, cv) - } - } - } - return views -} - -// View returns the View that matches the supplied id. -func (st *State) View(id string) *cache.View { - for _, v := range st.Views() { - if v.ID() == id { - return v - } - } - return nil -} - -// Clients returns the set of Clients currently being served. -func (st *State) Clients() []*Client { - st.mu.Lock() - defer st.mu.Unlock() - clients := make([]*Client, len(st.clients)) - copy(clients, st.clients) - return clients -} - -// Client returns the Client matching the supplied id. -func (st *State) Client(id string) *Client { - for _, c := range st.Clients() { - if c.Session.ID() == id { - return c - } - } - return nil -} - -// Servers returns the set of Servers the instance is currently connected to. -func (st *State) Servers() []*Server { - st.mu.Lock() - defer st.mu.Unlock() - servers := make([]*Server, len(st.servers)) - copy(servers, st.servers) - return servers -} - -// A Client is an incoming connection from a remote client. -type Client struct { - Session *cache.Session - DebugAddress string - Logfile string - GoplsPath string - ServerID string - Service protocol.Server -} - -// A Server is an outgoing connection to a remote LSP server. -type Server struct { - ID string - DebugAddress string - Logfile string - GoplsPath string - ClientID string -} - -// AddClient adds a client to the set being served. -func (st *State) addClient(session *cache.Session) { - st.mu.Lock() - defer st.mu.Unlock() - st.clients = append(st.clients, &Client{Session: session}) -} - -// DropClient removes a client from the set being served. -func (st *State) dropClient(session source.Session) { - st.mu.Lock() - defer st.mu.Unlock() - for i, c := range st.clients { - if c.Session == session { - copy(st.clients[i:], st.clients[i+1:]) - st.clients[len(st.clients)-1] = nil - st.clients = st.clients[:len(st.clients)-1] - return - } - } -} - -// AddServer adds a server to the set being queried. In practice, there should -// be at most one remote server. -func (st *State) updateServer(server *Server) { - st.mu.Lock() - defer st.mu.Unlock() - for i, existing := range st.servers { - if existing.ID == server.ID { - // Replace, rather than mutate, to avoid a race. - newServers := make([]*Server, len(st.servers)) - copy(newServers, st.servers[:i]) - newServers[i] = server - copy(newServers[i+1:], st.servers[i+1:]) - st.servers = newServers - return - } - } - st.servers = append(st.servers, server) -} - -// DropServer drops a server from the set being queried. -func (st *State) dropServer(id string) { - st.mu.Lock() - defer st.mu.Unlock() - for i, s := range st.servers { - if s.ID == id { - copy(st.servers[i:], st.servers[i+1:]) - st.servers[len(st.servers)-1] = nil - st.servers = st.servers[:len(st.servers)-1] - return - } - } -} - -// an http.ResponseWriter that filters writes -type filterResponse struct { - w http.ResponseWriter - edit func([]byte) []byte -} - -func (c filterResponse) Header() http.Header { - return c.w.Header() -} - -func (c filterResponse) Write(buf []byte) (int, error) { - ans := c.edit(buf) - return c.w.Write(ans) -} - -func (c filterResponse) WriteHeader(n int) { - c.w.WriteHeader(n) -} - -// replace annoying nuls by spaces -func cmdline(w http.ResponseWriter, r *http.Request) { - fake := filterResponse{ - w: w, - edit: func(buf []byte) []byte { - return bytes.ReplaceAll(buf, []byte{0}, []byte{' '}) - }, - } - pprof.Cmdline(fake, r) -} - -func (i *Instance) getCache(r *http.Request) interface{} { - return i.State.Cache(path.Base(r.URL.Path)) -} - -func (i *Instance) getSession(r *http.Request) interface{} { - return i.State.Session(path.Base(r.URL.Path)) -} - -func (i *Instance) getClient(r *http.Request) interface{} { - return i.State.Client(path.Base(r.URL.Path)) -} - -func (i *Instance) getServer(r *http.Request) interface{} { - i.State.mu.Lock() - defer i.State.mu.Unlock() - id := path.Base(r.URL.Path) - for _, s := range i.State.servers { - if s.ID == id { - return s - } - } - return nil -} - -func (i *Instance) getView(r *http.Request) interface{} { - return i.State.View(path.Base(r.URL.Path)) -} - -func (i *Instance) getFile(r *http.Request) interface{} { - identifier := path.Base(r.URL.Path) - sid := path.Base(path.Dir(r.URL.Path)) - s := i.State.Session(sid) - if s == nil { - return nil - } - for _, o := range s.Overlays() { - if o.FileIdentity().Hash == identifier { - return o - } - } - return nil -} - -func (i *Instance) getInfo(r *http.Request) interface{} { - buf := &bytes.Buffer{} - i.PrintServerInfo(r.Context(), buf) - return template.HTML(buf.String()) -} - -func (i *Instance) AddService(s protocol.Server, session *cache.Session) { - for _, c := range i.State.clients { - if c.Session == session { - c.Service = s - return - } - } - stdlog.Printf("unable to find a Client to add the protocol.Server to") -} - -func getMemory(_ *http.Request) interface{} { - var m runtime.MemStats - runtime.ReadMemStats(&m) - return m -} - -func init() { - event.SetExporter(makeGlobalExporter(os.Stderr)) -} - -func GetInstance(ctx context.Context) *Instance { - if ctx == nil { - return nil - } - v := ctx.Value(instanceKey) - if v == nil { - return nil - } - return v.(*Instance) -} - -// WithInstance creates debug instance ready for use using the supplied -// configuration and stores it in the returned context. -func WithInstance(ctx context.Context, workdir, agent string) context.Context { - i := &Instance{ - StartTime: time.Now(), - Workdir: workdir, - OCAgentConfig: agent, - } - i.LogWriter = os.Stderr - ocConfig := ocagent.Discover() - //TODO: we should not need to adjust the discovered configuration - ocConfig.Address = i.OCAgentConfig - i.ocagent = ocagent.Connect(ocConfig) - i.prometheus = prometheus.New() - i.rpcs = &Rpcs{} - i.traces = &traces{} - i.State = &State{} - i.exporter = makeInstanceExporter(i) - return context.WithValue(ctx, instanceKey, i) -} - -// SetLogFile sets the logfile for use with this instance. -func (i *Instance) SetLogFile(logfile string, isDaemon bool) (func(), error) { - // TODO: probably a better solution for deferring closure to the caller would - // be for the debug instance to itself be closed, but this fixes the - // immediate bug of logs not being captured. - closeLog := func() {} - if logfile != "" { - if logfile == "auto" { - if isDaemon { - logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-daemon-%d.log", os.Getpid())) - } else { - logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid())) - } - } - f, err := os.Create(logfile) - if err != nil { - return nil, fmt.Errorf("unable to create log file: %w", err) - } - closeLog = func() { - defer f.Close() - } - stdlog.SetOutput(io.MultiWriter(os.Stderr, f)) - i.LogWriter = f - } - i.Logfile = logfile - return closeLog, nil -} - -// Serve starts and runs a debug server in the background on the given addr. -// It also logs the port the server starts on, to allow for :0 auto assigned -// ports. -func (i *Instance) Serve(ctx context.Context, addr string) (string, error) { - stdlog.SetFlags(stdlog.Lshortfile) - if addr == "" { - return "", nil - } - i.serveMu.Lock() - defer i.serveMu.Unlock() - - if i.listenedDebugAddress != "" { - // Already serving. Return the bound address. - return i.listenedDebugAddress, nil - } - - i.debugAddress = addr - listener, err := net.Listen("tcp", i.debugAddress) - if err != nil { - return "", err - } - i.listenedDebugAddress = listener.Addr().String() - - port := listener.Addr().(*net.TCPAddr).Port - if strings.HasSuffix(i.debugAddress, ":0") { - stdlog.Printf("debug server listening at http://localhost:%d", port) - } - event.Log(ctx, "Debug serving", tag.Port.Of(port)) - go func() { - mux := http.NewServeMux() - mux.HandleFunc("/", render(MainTmpl, func(*http.Request) interface{} { return i })) - mux.HandleFunc("/debug/", render(DebugTmpl, nil)) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - if i.prometheus != nil { - mux.HandleFunc("/metrics/", i.prometheus.Serve) - } - if i.rpcs != nil { - mux.HandleFunc("/rpc/", render(RPCTmpl, i.rpcs.getData)) - } - if i.traces != nil { - mux.HandleFunc("/trace/", render(TraceTmpl, i.traces.getData)) - } - mux.HandleFunc("/cache/", render(CacheTmpl, i.getCache)) - mux.HandleFunc("/session/", render(SessionTmpl, i.getSession)) - mux.HandleFunc("/view/", render(ViewTmpl, i.getView)) - mux.HandleFunc("/client/", render(ClientTmpl, i.getClient)) - mux.HandleFunc("/server/", render(ServerTmpl, i.getServer)) - mux.HandleFunc("/file/", render(FileTmpl, i.getFile)) - mux.HandleFunc("/info", render(InfoTmpl, i.getInfo)) - mux.HandleFunc("/memory", render(MemoryTmpl, getMemory)) - - mux.HandleFunc("/_makeabug", func(w http.ResponseWriter, r *http.Request) { - bug.Report("bug here", nil) - http.Error(w, "made a bug", http.StatusOK) - }) - - if err := http.Serve(listener, mux); err != nil { - event.Error(ctx, "Debug server failed", err) - return - } - event.Log(ctx, "Debug server finished") - }() - return i.listenedDebugAddress, nil -} - -func (i *Instance) DebugAddress() string { - i.serveMu.Lock() - defer i.serveMu.Unlock() - return i.debugAddress -} - -func (i *Instance) ListenedDebugAddress() string { - i.serveMu.Lock() - defer i.serveMu.Unlock() - return i.listenedDebugAddress -} - -// MonitorMemory starts recording memory statistics each second. -func (i *Instance) MonitorMemory(ctx context.Context) { - tick := time.NewTicker(time.Second) - nextThresholdGiB := uint64(1) - go func() { - for { - <-tick.C - var mem runtime.MemStats - runtime.ReadMemStats(&mem) - if mem.HeapAlloc < nextThresholdGiB*1<<30 { - continue - } - if err := i.writeMemoryDebug(nextThresholdGiB, true); err != nil { - event.Error(ctx, "writing memory debug info", err) - } - if err := i.writeMemoryDebug(nextThresholdGiB, false); err != nil { - event.Error(ctx, "writing memory debug info", err) - } - event.Log(ctx, fmt.Sprintf("Wrote memory usage debug info to %v", os.TempDir())) - nextThresholdGiB++ - } - }() -} - -func (i *Instance) writeMemoryDebug(threshold uint64, withNames bool) error { - suffix := "withnames" - if !withNames { - suffix = "nonames" - } - - filename := fmt.Sprintf("gopls.%d-%dGiB-%s.zip", os.Getpid(), threshold, suffix) - zipf, err := os.OpenFile(filepath.Join(os.TempDir(), filename), os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return err - } - zipw := zip.NewWriter(zipf) - - f, err := zipw.Create("heap.pb.gz") - if err != nil { - return err - } - if err := rpprof.Lookup("heap").WriteTo(f, 0); err != nil { - return err - } - - f, err = zipw.Create("goroutines.txt") - if err != nil { - return err - } - if err := rpprof.Lookup("goroutine").WriteTo(f, 1); err != nil { - return err - } - - for _, cache := range i.State.Caches() { - cf, err := zipw.Create(fmt.Sprintf("cache-%v.html", cache.ID())) - if err != nil { - return err - } - if _, err := cf.Write([]byte(cache.PackageStats(withNames))); err != nil { - return err - } - } - - if err := zipw.Close(); err != nil { - return err - } - return zipf.Close() -} - -func makeGlobalExporter(stderr io.Writer) event.Exporter { - p := export.Printer{} - var pMu sync.Mutex - return func(ctx context.Context, ev core.Event, lm label.Map) context.Context { - i := GetInstance(ctx) - - if event.IsLog(ev) { - // Don't log context cancellation errors. - if err := keys.Err.Get(ev); errors.Is(err, context.Canceled) { - return ctx - } - // Make sure any log messages without an instance go to stderr. - if i == nil { - pMu.Lock() - p.WriteEvent(stderr, ev, lm) - pMu.Unlock() - } - level := log.LabeledLevel(lm) - // Exclude trace logs from LSP logs. - if level < log.Trace { - ctx = protocol.LogEvent(ctx, ev, lm, messageType(level)) - } - } - if i == nil { - return ctx - } - return i.exporter(ctx, ev, lm) - } -} - -func messageType(l log.Level) protocol.MessageType { - switch l { - case log.Error: - return protocol.Error - case log.Warning: - return protocol.Warning - case log.Debug: - return protocol.Log - } - return protocol.Info -} - -func makeInstanceExporter(i *Instance) event.Exporter { - exporter := func(ctx context.Context, ev core.Event, lm label.Map) context.Context { - if i.ocagent != nil { - ctx = i.ocagent.ProcessEvent(ctx, ev, lm) - } - if i.prometheus != nil { - ctx = i.prometheus.ProcessEvent(ctx, ev, lm) - } - if i.rpcs != nil { - ctx = i.rpcs.ProcessEvent(ctx, ev, lm) - } - if i.traces != nil { - ctx = i.traces.ProcessEvent(ctx, ev, lm) - } - if event.IsLog(ev) { - if s := cache.KeyCreateSession.Get(ev); s != nil { - i.State.addClient(s) - } - if sid := tag.NewServer.Get(ev); sid != "" { - i.State.updateServer(&Server{ - ID: sid, - Logfile: tag.Logfile.Get(ev), - DebugAddress: tag.DebugAddress.Get(ev), - GoplsPath: tag.GoplsPath.Get(ev), - ClientID: tag.ClientID.Get(ev), - }) - } - if s := cache.KeyShutdownSession.Get(ev); s != nil { - i.State.dropClient(s) - } - if sid := tag.EndServer.Get(ev); sid != "" { - i.State.dropServer(sid) - } - if s := cache.KeyUpdateSession.Get(ev); s != nil { - if c := i.State.Client(s.ID()); c != nil { - c.DebugAddress = tag.DebugAddress.Get(ev) - c.Logfile = tag.Logfile.Get(ev) - c.ServerID = tag.ServerID.Get(ev) - c.GoplsPath = tag.GoplsPath.Get(ev) - } - } - } - return ctx - } - // StdTrace must be above export.Spans below (by convention, export - // middleware applies its wrapped exporter last). - exporter = StdTrace(exporter) - metrics := metric.Config{} - registerMetrics(&metrics) - exporter = metrics.Exporter(exporter) - exporter = export.Spans(exporter) - exporter = export.Labels(exporter) - return exporter -} - -type dataFunc func(*http.Request) interface{} - -func render(tmpl *template.Template, fun dataFunc) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - var data interface{} - if fun != nil { - data = fun(r) - } - if err := tmpl.Execute(w, data); err != nil { - event.Error(context.Background(), "", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - } - } -} - -func commas(s string) string { - for i := len(s); i > 3; { - i -= 3 - s = s[:i] + "," + s[i:] - } - return s -} - -func fuint64(v uint64) string { - return commas(strconv.FormatUint(v, 10)) -} - -func fuint32(v uint32) string { - return commas(strconv.FormatUint(uint64(v), 10)) -} - -func fcontent(v []byte) string { - return string(v) -} - -var BaseTemplate = template.Must(template.New("").Parse(` - - -{{template "title" .}} - -{{block "head" .}}{{end}} - - -Main -Info -Memory -Metrics -RPC -Trace -
    -

    {{template "title" .}}

    -{{block "body" .}} -Unknown page -{{end}} - - - -{{define "cachelink"}}Cache {{.}}{{end}} -{{define "clientlink"}}Client {{.}}{{end}} -{{define "serverlink"}}Server {{.}}{{end}} -{{define "sessionlink"}}Session {{.}}{{end}} -{{define "viewlink"}}View {{.}}{{end}} -{{define "filelink"}}{{.FileIdentity.URI}}{{end}} -`)).Funcs(template.FuncMap{ - "fuint64": fuint64, - "fuint32": fuint32, - "fcontent": fcontent, - "localAddress": func(s string) string { - // Try to translate loopback addresses to localhost, both for cosmetics and - // because unspecified ipv6 addresses can break links on Windows. - // - // TODO(rfindley): In the future, it would be better not to assume the - // server is running on localhost, and instead construct this address using - // the remote host. - host, port, err := net.SplitHostPort(s) - if err != nil { - return s - } - ip := net.ParseIP(host) - if ip == nil { - return s - } - if ip.IsLoopback() || ip.IsUnspecified() { - return "localhost:" + port - } - return s - }, - "options": func(s *cache.Session) []sessionOption { - return showOptions(s.Options()) - }, -}) - -var MainTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}GoPls server information{{end}} -{{define "body"}} -

    Caches

    -
      {{range .State.Caches}}
    • {{template "cachelink" .ID}}
    • {{end}}
    -

    Sessions

    -
      {{range .State.Sessions}}
    • {{template "sessionlink" .ID}} from {{template "cachelink" .Cache.ID}}
    • {{end}}
    -

    Views

    -
      {{range .State.Views}}
    • {{.Name}} is {{template "viewlink" .ID}} from {{template "sessionlink" .Session.ID}} in {{.Folder}}
    • {{end}}
    -

    Clients

    -
      {{range .State.Clients}}
    • {{template "clientlink" .Session.ID}}
    • {{end}}
    -

    Servers

    -
      {{range .State.Servers}}
    • {{template "serverlink" .ID}}
    • {{end}}
    -

    Bug reports

    -
    {{range .State.Bugs}}
    {{.Key}}
    {{.Description}}
    {{end}}
    -{{end}} -`)) - -var InfoTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}GoPls version information{{end}} -{{define "body"}} -{{.}} -{{end}} -`)) - -var MemoryTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}GoPls memory usage{{end}} -{{define "head"}}{{end}} -{{define "body"}} -

    Stats

    - - - - - - - - - - - - - - - - -
    Allocated bytes{{fuint64 .HeapAlloc}}
    Total allocated bytes{{fuint64 .TotalAlloc}}
    System bytes{{fuint64 .Sys}}
    Heap system bytes{{fuint64 .HeapSys}}
    Malloc calls{{fuint64 .Mallocs}}
    Frees{{fuint64 .Frees}}
    Idle heap bytes{{fuint64 .HeapIdle}}
    In use bytes{{fuint64 .HeapInuse}}
    Released to system bytes{{fuint64 .HeapReleased}}
    Heap object count{{fuint64 .HeapObjects}}
    Stack in use bytes{{fuint64 .StackInuse}}
    Stack from system bytes{{fuint64 .StackSys}}
    Bucket hash bytes{{fuint64 .BuckHashSys}}
    GC metadata bytes{{fuint64 .GCSys}}
    Off heap bytes{{fuint64 .OtherSys}}
    -

    By size

    - - -{{range .BySize}}{{end}} -
    SizeMallocsFrees
    {{fuint32 .Size}}{{fuint64 .Mallocs}}{{fuint64 .Frees}}
    -{{end}} -`)) - -var DebugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}GoPls Debug pages{{end}} -{{define "body"}} -Profiling -{{end}} -`)) - -var CacheTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}Cache {{.ID}}{{end}} -{{define "body"}} -

    memoize.Store entries

    -
      {{range $k,$v := .MemStats}}
    • {{$k}} - {{$v}}
    • {{end}}
    -

    Per-package usage - not accurate, for guidance only

    -{{.PackageStats true}} -{{end}} -`)) - -var ClientTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}Client {{.Session.ID}}{{end}} -{{define "body"}} -Using session: {{template "sessionlink" .Session.ID}}
    -{{if .DebugAddress}}Debug this client at: {{localAddress .DebugAddress}}
    {{end}} -Logfile: {{.Logfile}}
    -Gopls Path: {{.GoplsPath}}
    -

    Diagnostics

    -{{/*Service: []protocol.Server; each server has map[uri]fileReports; - each fileReport: map[diagnosticSoure]diagnosticReport - diagnosticSource is one of 5 source - diagnosticReport: snapshotID and map[hash]*source.Diagnostic - sourceDiagnostic: struct { - Range protocol.Range - Message string - Source string - Code string - CodeHref string - Severity protocol.DiagnosticSeverity - Tags []protocol.DiagnosticTag - - Related []RelatedInformation - } - RelatedInformation: struct { - URI span.URI - Range protocol.Range - Message string - } - */}} -
      {{range $k, $v := .Service.Diagnostics}}
    • {{$k}}:
        {{range $v}}
      1. {{.}}
      2. {{end}}
    • {{end}}
    -{{end}} -`)) - -var ServerTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}Server {{.ID}}{{end}} -{{define "body"}} -{{if .DebugAddress}}Debug this server at: {{localAddress .DebugAddress}}
    {{end}} -Logfile: {{.Logfile}}
    -Gopls Path: {{.GoplsPath}}
    -{{end}} -`)) - -var SessionTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}Session {{.ID}}{{end}} -{{define "body"}} -From: {{template "cachelink" .Cache.ID}}
    -

    Views

    -
      {{range .Views}}
    • {{.Name}} is {{template "viewlink" .ID}} in {{.Folder}}
    • {{end}}
    -

    Overlays

    -
      {{range .Overlays}}
    • {{template "filelink" .}}
    • {{end}}
    -

    Options

    -{{range options .}} -

    {{.Name}} {{.Type}}

    -

    default: {{.Default}}

    -{{if ne .Default .Current}}

    current: {{.Current}}

    {{end}} -{{end}} -{{end}} -`)) - -var ViewTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}View {{.ID}}{{end}} -{{define "body"}} -Name: {{.Name}}
    -Folder: {{.Folder}}
    -From: {{template "sessionlink" .Session.ID}}
    -

    Environment

    -
      {{range .Options.Env}}
    • {{.}}
    • {{end}}
    -{{end}} -`)) - -var FileTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}Overlay {{.FileIdentity.Hash}}{{end}} -{{define "body"}} -{{with .}} - From: {{template "sessionlink" .Session}}
    - URI: {{.URI}}
    - Identifier: {{.FileIdentity.Hash}}
    - Version: {{.Version}}
    - Kind: {{.Kind}}
    -{{end}} -

    Contents

    -
    {{fcontent .Read}}
    -{{end}} -`)) diff --git a/internal/lsp/definition.go b/internal/lsp/definition.go deleted file mode 100644 index 9487c684327..00000000000 --- a/internal/lsp/definition.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" -) - -func (s *Server) definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - if snapshot.View().FileKind(fh) == source.Tmpl { - return template.Definition(snapshot, fh, params.Position) - } - ident, err := source.Identifier(ctx, snapshot, fh, params.Position) - if err != nil { - return nil, err - } - if ident.IsImport() && !snapshot.View().Options().ImportShortcut.ShowDefinition() { - return nil, nil - } - var locations []protocol.Location - for _, ref := range ident.Declaration.MappedRange { - decRange, err := ref.Range() - if err != nil { - return nil, err - } - - locations = append(locations, protocol.Location{ - URI: protocol.URIFromSpanURI(ref.URI()), - Range: decRange, - }) - } - - return locations, nil -} - -func (s *Server) typeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - ident, err := source.Identifier(ctx, snapshot, fh, params.Position) - if err != nil { - return nil, err - } - if ident.Type.Object == nil { - return nil, fmt.Errorf("no type definition for %s", ident.Name) - } - identRange, err := ident.Type.Range() - if err != nil { - return nil, err - } - return []protocol.Location{ - { - URI: protocol.URIFromSpanURI(ident.Type.URI()), - Range: identRange, - }, - }, nil -} diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go deleted file mode 100644 index 0837b22cc22..00000000000 --- a/internal/lsp/diagnostics.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/log" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" - "golang.org/x/tools/internal/lsp/work" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -// diagnosticSource differentiates different sources of diagnostics. -type diagnosticSource int - -const ( - modSource diagnosticSource = iota - gcDetailsSource - analysisSource - typeCheckSource - orphanedSource - workSource -) - -// A diagnosticReport holds results for a single diagnostic source. -type diagnosticReport struct { - snapshotID uint64 - publishedHash string - diags map[string]*source.Diagnostic -} - -// fileReports holds a collection of diagnostic reports for a single file, as -// well as the hash of the last published set of diagnostics. -type fileReports struct { - snapshotID uint64 - publishedHash string - reports map[diagnosticSource]diagnosticReport -} - -func (d diagnosticSource) String() string { - switch d { - case modSource: - return "FromSource" - case gcDetailsSource: - return "FromGCDetails" - case analysisSource: - return "FromAnalysis" - case typeCheckSource: - return "FromTypeChecking" - case orphanedSource: - return "FromOrphans" - default: - return fmt.Sprintf("From?%d?", d) - } -} - -// hashDiagnostics computes a hash to identify diags. -func hashDiagnostics(diags ...*source.Diagnostic) string { - source.SortDiagnostics(diags) - h := sha256.New() - for _, d := range diags { - for _, t := range d.Tags { - fmt.Fprintf(h, "%s", t) - } - for _, r := range d.Related { - fmt.Fprintf(h, "%s%s%s", r.URI, r.Message, r.Range) - } - fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source) - } - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func (s *Server) diagnoseDetached(snapshot source.Snapshot) { - ctx := snapshot.BackgroundContext() - ctx = xcontext.Detach(ctx) - s.diagnose(ctx, snapshot, false) - s.publishDiagnostics(ctx, true, snapshot) -} - -func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onDisk bool) { - var diagnosticWG sync.WaitGroup - for snapshot, uris := range snapshots { - diagnosticWG.Add(1) - go func(snapshot source.Snapshot, uris []span.URI) { - defer diagnosticWG.Done() - s.diagnoseSnapshot(snapshot, uris, onDisk) - }(snapshot, uris) - } - diagnosticWG.Wait() -} - -func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) { - ctx := snapshot.BackgroundContext() - ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", tag.Snapshot.Of(snapshot.ID())) - defer done() - - delay := snapshot.View().Options().DiagnosticsDelay - if delay > 0 { - // 2-phase diagnostics. - // - // The first phase just parses and checks packages that have been - // affected by file modifications (no analysis). - // - // The second phase does everything, and is debounced by the configured - // delay. - s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk) - s.publishDiagnostics(ctx, false, snapshot) - if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.ID(), time.After(delay)); ok { - s.diagnose(ctx, snapshot, false) - s.publishDiagnostics(ctx, true, snapshot) - } - return - } - - // Ignore possible workspace configuration warnings in the normal flow. - s.diagnose(ctx, snapshot, false) - s.publishDiagnostics(ctx, true, snapshot) -} - -func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) { - ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", tag.Snapshot.Of(snapshot.ID())) - defer done() - - packages := make(map[source.Package]struct{}) - for _, uri := range uris { - // If the change is only on-disk and the file is not open, don't - // directly request its package. It may not be a workspace package. - if onDisk && !snapshot.IsOpen(uri) { - continue - } - // If the file is not known to the snapshot (e.g., if it was deleted), - // don't diagnose it. - if snapshot.FindFile(uri) == nil { - continue - } - // Don't call PackagesForFile for builtin.go, as it results in a - // command-line-arguments load. - if snapshot.IsBuiltin(ctx, uri) { - continue - } - pkgs, err := snapshot.PackagesForFile(ctx, uri, source.TypecheckFull, false) - if err != nil { - // TODO (findleyr): we should probably do something with the error here, - // but as of now this can fail repeatedly if load fails, so can be too - // noisy to log (and we'll handle things later in the slow pass). - continue - } - for _, pkg := range pkgs { - packages[pkg] = struct{}{} - } - } - var wg sync.WaitGroup - for pkg := range packages { - wg.Add(1) - - go func(pkg source.Package) { - defer wg.Done() - - s.diagnosePkg(ctx, snapshot, pkg, false) - }(pkg) - } - wg.Wait() -} - -// diagnose is a helper function for running diagnostics with a given context. -// Do not call it directly. forceAnalysis is only true for testing purposes. -func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAnalysis bool) { - ctx, done := event.Start(ctx, "Server.diagnose", tag.Snapshot.Of(snapshot.ID())) - defer done() - - // Wait for a free diagnostics slot. - select { - case <-ctx.Done(): - return - case s.diagnosticsSema <- struct{}{}: - } - defer func() { - <-s.diagnosticsSema - }() - - // First, diagnose the go.mod file. - modReports, modErr := mod.Diagnostics(ctx, snapshot) - if ctx.Err() != nil { - log.Trace.Log(ctx, "diagnose cancelled") - return - } - if modErr != nil { - event.Error(ctx, "warning: diagnose go.mod", modErr, tag.Directory.Of(snapshot.View().Folder().Filename()), tag.Snapshot.Of(snapshot.ID())) - } - for id, diags := range modReports { - if id.URI == "" { - event.Error(ctx, "missing URI for module diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename())) - continue - } - s.storeDiagnostics(snapshot, id.URI, modSource, diags) - } - - // Diagnose the go.work file, if it exists. - workReports, workErr := work.Diagnostics(ctx, snapshot) - if ctx.Err() != nil { - log.Trace.Log(ctx, "diagnose cancelled") - return - } - if workErr != nil { - event.Error(ctx, "warning: diagnose go.work", workErr, tag.Directory.Of(snapshot.View().Folder().Filename()), tag.Snapshot.Of(snapshot.ID())) - } - for id, diags := range workReports { - if id.URI == "" { - event.Error(ctx, "missing URI for work file diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename())) - continue - } - s.storeDiagnostics(snapshot, id.URI, workSource, diags) - } - - // Diagnose all of the packages in the workspace. - wsPkgs, err := snapshot.ActivePackages(ctx) - if s.shouldIgnoreError(ctx, snapshot, err) { - return - } - criticalErr := snapshot.GetCriticalError(ctx) - - // Show the error as a progress error report so that it appears in the - // status bar. If a client doesn't support progress reports, the error - // will still be shown as a ShowMessage. If there is no error, any running - // error progress reports will be closed. - s.showCriticalErrorStatus(ctx, snapshot, criticalErr) - - // There may be .tmpl files. - for _, f := range snapshot.Templates() { - diags := template.Diagnose(f) - s.storeDiagnostics(snapshot, f.URI(), typeCheckSource, diags) - } - - // If there are no workspace packages, there is nothing to diagnose and - // there are no orphaned files. - if len(wsPkgs) == 0 { - return - } - - var ( - wg sync.WaitGroup - seen = map[span.URI]struct{}{} - ) - for _, pkg := range wsPkgs { - wg.Add(1) - - for _, pgf := range pkg.CompiledGoFiles() { - seen[pgf.URI] = struct{}{} - } - - go func(pkg source.Package) { - defer wg.Done() - - s.diagnosePkg(ctx, snapshot, pkg, forceAnalysis) - }(pkg) - } - wg.Wait() - - // Confirm that every opened file belongs to a package (if any exist in - // the workspace). Otherwise, add a diagnostic to the file. - for _, o := range s.session.Overlays() { - if _, ok := seen[o.URI()]; ok { - continue - } - diagnostic := s.checkForOrphanedFile(ctx, snapshot, o) - if diagnostic == nil { - continue - } - s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic}) - } -} - -func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg source.Package, alwaysAnalyze bool) { - ctx, done := event.Start(ctx, "Server.diagnosePkg", tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID())) - defer done() - enableDiagnostics := false - includeAnalysis := alwaysAnalyze // only run analyses for packages with open files - for _, pgf := range pkg.CompiledGoFiles() { - enableDiagnostics = enableDiagnostics || !snapshot.IgnoredFile(pgf.URI) - includeAnalysis = includeAnalysis || snapshot.IsOpen(pgf.URI) - } - // Don't show any diagnostics on ignored files. - if !enableDiagnostics { - return - } - - pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg) - if err != nil { - event.Error(ctx, "warning: diagnosing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID())) - return - } - for _, cgf := range pkg.CompiledGoFiles() { - // builtin.go exists only for documentation purposes, and is not valid Go code. - // Don't report distracting errors - if !snapshot.IsBuiltin(ctx, cgf.URI) { - s.storeDiagnostics(snapshot, cgf.URI, typeCheckSource, pkgDiagnostics[cgf.URI]) - } - } - if includeAnalysis && !pkg.HasListOrParseErrors() { - reports, err := source.Analyze(ctx, snapshot, pkg, false) - if err != nil { - event.Error(ctx, "warning: analyzing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID())) - return - } - for _, cgf := range pkg.CompiledGoFiles() { - s.storeDiagnostics(snapshot, cgf.URI, analysisSource, reports[cgf.URI]) - } - } - - // If gc optimization details are requested, add them to the - // diagnostic reports. - s.gcOptimizationDetailsMu.Lock() - _, enableGCDetails := s.gcOptimizationDetails[pkg.ID()] - s.gcOptimizationDetailsMu.Unlock() - if enableGCDetails { - gcReports, err := source.GCOptimizationDetails(ctx, snapshot, pkg) - if err != nil { - event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID())) - } - s.gcOptimizationDetailsMu.Lock() - _, enableGCDetails := s.gcOptimizationDetails[pkg.ID()] - - // NOTE(golang/go#44826): hold the gcOptimizationDetails lock, and re-check - // whether gc optimization details are enabled, while storing gc_details - // results. This ensures that the toggling of GC details and clearing of - // diagnostics does not race with storing the results here. - if enableGCDetails { - for id, diags := range gcReports { - fh := snapshot.FindFile(id.URI) - // Don't publish gc details for unsaved buffers, since the underlying - // logic operates on the file on disk. - if fh == nil || !fh.Saved() { - continue - } - s.storeDiagnostics(snapshot, id.URI, gcDetailsSource, diags) - } - } - s.gcOptimizationDetailsMu.Unlock() - } -} - -// storeDiagnostics stores results from a single diagnostic source. If merge is -// true, it merges results into any existing results for this snapshot. -func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic) { - // Safeguard: ensure that the file actually exists in the snapshot - // (see golang.org/issues/38602). - fh := snapshot.FindFile(uri) - if fh == nil { - return - } - s.diagnosticsMu.Lock() - defer s.diagnosticsMu.Unlock() - if s.diagnostics[uri] == nil { - s.diagnostics[uri] = &fileReports{ - publishedHash: hashDiagnostics(), // Hash for 0 diagnostics. - reports: map[diagnosticSource]diagnosticReport{}, - } - } - report := s.diagnostics[uri].reports[dsource] - // Don't set obsolete diagnostics. - if report.snapshotID > snapshot.ID() { - return - } - if report.diags == nil || report.snapshotID != snapshot.ID() { - report.diags = map[string]*source.Diagnostic{} - } - report.snapshotID = snapshot.ID() - for _, d := range diags { - report.diags[hashDiagnostics(d)] = d - } - s.diagnostics[uri].reports[dsource] = report -} - -// clearDiagnosticSource clears all diagnostics for a given source type. It is -// necessary for cases where diagnostics have been invalidated by something -// other than a snapshot change, for example when gc_details is toggled. -func (s *Server) clearDiagnosticSource(dsource diagnosticSource) { - s.diagnosticsMu.Lock() - defer s.diagnosticsMu.Unlock() - for _, reports := range s.diagnostics { - delete(reports.reports, dsource) - } -} - -const WorkspaceLoadFailure = "Error loading workspace" - -// showCriticalErrorStatus shows the error as a progress report. -// If the error is nil, it clears any existing error progress report. -func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Snapshot, err *source.CriticalError) { - s.criticalErrorStatusMu.Lock() - defer s.criticalErrorStatusMu.Unlock() - - // Remove all newlines so that the error message can be formatted in a - // status bar. - var errMsg string - if err != nil { - event.Error(ctx, "errors loading workspace", err.MainError, tag.Snapshot.Of(snapshot.ID()), tag.Directory.Of(snapshot.View().Folder())) - for _, d := range err.DiagList { - s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d}) - } - errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ") - } - - if s.criticalErrorStatus == nil { - if errMsg != "" { - s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil) - } - return - } - - // If an error is already shown to the user, update it or mark it as - // resolved. - if errMsg == "" { - s.criticalErrorStatus.End(ctx, "Done.") - s.criticalErrorStatus = nil - } else { - s.criticalErrorStatus.Report(ctx, errMsg, 0) - } -} - -// checkForOrphanedFile checks that the given URIs can be mapped to packages. -// If they cannot and the workspace is not otherwise unloaded, it also surfaces -// a warning, suggesting that the user check the file for build tags. -func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle) *source.Diagnostic { - // TODO(rfindley): this function may fail to produce a diagnostic for a - // variety of reasons, some of which should probably not be ignored. For - // example, should this function be tolerant of the case where fh does not - // exist, or does not have a package name? - // - // It would be better to panic or report a bug in several of the cases below, - // so that we can move toward guaranteeing we show the user a meaningful - // error whenever it makes sense. - if snapshot.View().FileKind(fh) != source.Go { - return nil - } - // builtin files won't have a package, but they are never orphaned. - if snapshot.IsBuiltin(ctx, fh.URI()) { - return nil - } - pkgs, err := snapshot.PackagesForFile(ctx, fh.URI(), source.TypecheckWorkspace, false) - if len(pkgs) > 0 || err == nil { - return nil - } - pgf, err := snapshot.ParseGo(ctx, fh, source.ParseHeader) - if err != nil { - return nil - } - if !pgf.File.Name.Pos().IsValid() { - return nil - } - spn, err := span.NewRange(snapshot.FileSet(), pgf.File.Name.Pos(), pgf.File.Name.End()).Span() - if err != nil { - return nil - } - rng, err := pgf.Mapper.Range(spn) - if err != nil { - return nil - } - // If the file no longer has a name ending in .go, this diagnostic is wrong - if filepath.Ext(fh.URI().Filename()) != ".go" { - return nil - } - // TODO(rstambler): We should be able to parse the build tags in the - // file and show a more specific error message. For now, put the diagnostic - // on the package declaration. - return &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityWarning, - Source: source.ListError, - Message: fmt.Sprintf(`No packages found for open file %s: %v. -If this file contains build tags, try adding "-tags=" to your gopls "buildFlags" configuration (see (https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string). -Otherwise, see the troubleshooting guidelines for help investigating (https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md). -`, fh.URI().Filename(), err), - } -} - -// publishDiagnostics collects and publishes any unpublished diagnostic reports. -func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) { - ctx, done := event.Start(ctx, "Server.publishDiagnostics", tag.Snapshot.Of(snapshot.ID())) - defer done() - s.diagnosticsMu.Lock() - defer s.diagnosticsMu.Unlock() - - published := 0 - defer func() { - log.Trace.Logf(ctx, "published %d diagnostics", published) - }() - - for uri, r := range s.diagnostics { - // Snapshot IDs are always increasing, so we use them instead of file - // versions to create the correct order for diagnostics. - - // If we've already delivered diagnostics for a future snapshot for this - // file, do not deliver them. - if r.snapshotID > snapshot.ID() { - continue - } - anyReportsChanged := false - reportHashes := map[diagnosticSource]string{} - var diags []*source.Diagnostic - for dsource, report := range r.reports { - if report.snapshotID != snapshot.ID() { - continue - } - var reportDiags []*source.Diagnostic - for _, d := range report.diags { - diags = append(diags, d) - reportDiags = append(reportDiags, d) - } - hash := hashDiagnostics(reportDiags...) - if hash != report.publishedHash { - anyReportsChanged = true - } - reportHashes[dsource] = hash - } - - if !final && !anyReportsChanged { - // Don't invalidate existing reports on the client if we haven't got any - // new information. - continue - } - source.SortDiagnostics(diags) - hash := hashDiagnostics(diags...) - if hash == r.publishedHash { - // Update snapshotID to be the latest snapshot for which this diagnostic - // hash is valid. - r.snapshotID = snapshot.ID() - continue - } - var version int32 - if fh := snapshot.FindFile(uri); fh != nil { // file may have been deleted - version = fh.Version() - } - if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{ - Diagnostics: toProtocolDiagnostics(diags), - URI: protocol.URIFromSpanURI(uri), - Version: version, - }); err == nil { - published++ - r.publishedHash = hash - r.snapshotID = snapshot.ID() - for dsource, hash := range reportHashes { - report := r.reports[dsource] - report.publishedHash = hash - r.reports[dsource] = report - } - } else { - if ctx.Err() != nil { - // Publish may have failed due to a cancelled context. - log.Trace.Log(ctx, "publish cancelled") - return - } - event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri)) - } - } -} - -func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnostic { - reports := []protocol.Diagnostic{} - for _, diag := range diagnostics { - related := make([]protocol.DiagnosticRelatedInformation, 0, len(diag.Related)) - for _, rel := range diag.Related { - related = append(related, protocol.DiagnosticRelatedInformation{ - Location: protocol.Location{ - URI: protocol.URIFromSpanURI(rel.URI), - Range: rel.Range, - }, - Message: rel.Message, - }) - } - pdiag := protocol.Diagnostic{ - // diag.Message might start with \n or \t - Message: strings.TrimSpace(diag.Message), - Range: diag.Range, - Severity: diag.Severity, - Source: string(diag.Source), - Tags: diag.Tags, - RelatedInformation: related, - } - if diag.Code != "" { - pdiag.Code = diag.Code - } - if diag.CodeHref != "" { - pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref} - } - reports = append(reports, pdiag) - } - return reports -} - -func (s *Server) shouldIgnoreError(ctx context.Context, snapshot source.Snapshot, err error) bool { - if err == nil { // if there is no error at all - return false - } - if errors.Is(err, context.Canceled) { - return true - } - // If the folder has no Go code in it, we shouldn't spam the user with a warning. - var hasGo bool - _ = filepath.Walk(snapshot.View().Folder().Filename(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !strings.HasSuffix(info.Name(), ".go") { - return nil - } - hasGo = true - return errors.New("done") - }) - return !hasGo -} - -// Diagnostics formattedfor the debug server -// (all the relevant fields of Server are private) -// (The alternative is to export them) -func (s *Server) Diagnostics() map[string][]string { - ans := make(map[string][]string) - s.diagnosticsMu.Lock() - defer s.diagnosticsMu.Unlock() - for k, v := range s.diagnostics { - fn := k.Filename() - for typ, d := range v.reports { - if len(d.diags) == 0 { - continue - } - for _, dx := range d.diags { - ans[fn] = append(ans[fn], auxStr(dx, d, typ)) - } - } - } - return ans -} - -func auxStr(v *source.Diagnostic, d diagnosticReport, typ diagnosticSource) string { - // Tags? RelatedInformation? - msg := fmt.Sprintf("(%s)%q(source:%q,code:%q,severity:%s,snapshot:%d,type:%s)", - v.Range, v.Message, v.Source, v.Code, v.Severity, d.snapshotID, typ) - for _, r := range v.Related { - msg += fmt.Sprintf(" [%s:%s,%q]", r.URI.Filename(), r.Range, r.Message) - } - return msg -} diff --git a/internal/lsp/diff/diff.go b/internal/lsp/diff/diff.go deleted file mode 100644 index 8fd6824e530..00000000000 --- a/internal/lsp/diff/diff.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package diff supports a pluggable diff algorithm. -package diff - -import ( - "sort" - "strings" - - "golang.org/x/tools/internal/span" -) - -// TextEdit represents a change to a section of a document. -// The text within the specified span should be replaced by the supplied new text. -type TextEdit struct { - Span span.Span - NewText string -} - -// ComputeEdits is the type for a function that produces a set of edits that -// convert from the before content to the after content. -type ComputeEdits func(uri span.URI, before, after string) ([]TextEdit, error) - -// SortTextEdits attempts to order all edits by their starting points. -// The sort is stable so that edits with the same starting point will not -// be reordered. -func SortTextEdits(d []TextEdit) { - // Use a stable sort to maintain the order of edits inserted at the same position. - sort.SliceStable(d, func(i int, j int) bool { - return span.Compare(d[i].Span, d[j].Span) < 0 - }) -} - -// ApplyEdits applies the set of edits to the before and returns the resulting -// content. -// It may panic or produce garbage if the edits are not valid for the provided -// before content. -func ApplyEdits(before string, edits []TextEdit) string { - // Preconditions: - // - all of the edits apply to before - // - and all the spans for each TextEdit have the same URI - if len(edits) == 0 { - return before - } - edits, _ = prepareEdits(before, edits) - after := strings.Builder{} - last := 0 - for _, edit := range edits { - start := edit.Span.Start().Offset() - if start > last { - after.WriteString(before[last:start]) - last = start - } - after.WriteString(edit.NewText) - last = edit.Span.End().Offset() - } - if last < len(before) { - after.WriteString(before[last:]) - } - return after.String() -} - -// LineEdits takes a set of edits and expands and merges them as necessary -// to ensure that there are only full line edits left when it is done. -func LineEdits(before string, edits []TextEdit) []TextEdit { - if len(edits) == 0 { - return nil - } - edits, partial := prepareEdits(before, edits) - if partial { - edits = lineEdits(before, edits) - } - return edits -} - -// prepareEdits returns a sorted copy of the edits -func prepareEdits(before string, edits []TextEdit) ([]TextEdit, bool) { - partial := false - tf := span.NewTokenFile("", []byte(before)) - copied := make([]TextEdit, len(edits)) - for i, edit := range edits { - edit.Span, _ = edit.Span.WithAll(tf) - copied[i] = edit - partial = partial || - edit.Span.Start().Offset() >= len(before) || - edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1 - } - SortTextEdits(copied) - return copied, partial -} - -// lineEdits rewrites the edits to always be full line edits -func lineEdits(before string, edits []TextEdit) []TextEdit { - adjusted := make([]TextEdit, 0, len(edits)) - current := TextEdit{Span: span.Invalid} - for _, edit := range edits { - if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() { - // overlaps with the current edit, need to combine - // first get the gap from the previous edit - gap := before[current.Span.End().Offset():edit.Span.Start().Offset()] - // now add the text of this edit - current.NewText += gap + edit.NewText - // and then adjust the end position - current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End()) - } else { - // does not overlap, add previous run (if there is one) - adjusted = addEdit(before, adjusted, current) - // and then remember this edit as the start of the next run - current = edit - } - } - // add the current pending run if there is one - return addEdit(before, adjusted, current) -} - -func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit { - if !edit.Span.IsValid() { - return edits - } - // if edit is partial, expand it to full line now - start := edit.Span.Start() - end := edit.Span.End() - if start.Column() > 1 { - // prepend the text and adjust to start of line - delta := start.Column() - 1 - start = span.NewPoint(start.Line(), 1, start.Offset()-delta) - edit.Span = span.New(edit.Span.URI(), start, end) - edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText - } - if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' { - // after end of file that does not end in eol, so join to last line of file - // to do this we need to know where the start of the last line was - eol := strings.LastIndex(before, "\n") - if eol < 0 { - // file is one non terminated line - eol = 0 - } - delta := len(before) - eol - start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta) - edit.Span = span.New(edit.Span.URI(), start, end) - edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText - } - if end.Column() > 1 { - remains := before[end.Offset():] - eol := strings.IndexRune(remains, '\n') - if eol < 0 { - eol = len(remains) - } else { - eol++ - } - end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol) - edit.Span = span.New(edit.Span.URI(), start, end) - edit.NewText = edit.NewText + remains[:eol] - } - edits = append(edits, edit) - return edits -} diff --git a/internal/lsp/diff/diff_test.go b/internal/lsp/diff/diff_test.go deleted file mode 100644 index dd9414e5d7a..00000000000 --- a/internal/lsp/diff/diff_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package diff_test - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/difftest" - "golang.org/x/tools/internal/span" -) - -func TestApplyEdits(t *testing.T) { - for _, tc := range difftest.TestCases { - t.Run(tc.Name, func(t *testing.T) { - t.Helper() - if got := diff.ApplyEdits(tc.In, tc.Edits); got != tc.Out { - t.Errorf("ApplyEdits edits got %q, want %q", got, tc.Out) - } - if tc.LineEdits != nil { - if got := diff.ApplyEdits(tc.In, tc.LineEdits); got != tc.Out { - t.Errorf("ApplyEdits lineEdits got %q, want %q", got, tc.Out) - } - } - }) - } -} - -func TestLineEdits(t *testing.T) { - for _, tc := range difftest.TestCases { - t.Run(tc.Name, func(t *testing.T) { - t.Helper() - // if line edits not specified, it is the same as edits - edits := tc.LineEdits - if edits == nil { - edits = tc.Edits - } - if got := diff.LineEdits(tc.In, tc.Edits); diffEdits(got, edits) { - t.Errorf("LineEdits got %q, want %q", got, edits) - } - }) - } -} - -func TestUnified(t *testing.T) { - for _, tc := range difftest.TestCases { - t.Run(tc.Name, func(t *testing.T) { - t.Helper() - unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.Edits)) - if unified != tc.Unified { - t.Errorf("edits got diff:\n%v\nexpected:\n%v", unified, tc.Unified) - } - if tc.LineEdits != nil { - unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.LineEdits)) - if unified != tc.Unified { - t.Errorf("lineEdits got diff:\n%v\nexpected:\n%v", unified, tc.Unified) - } - } - }) - } -} - -func diffEdits(got, want []diff.TextEdit) bool { - if len(got) != len(want) { - return true - } - for i, w := range want { - g := got[i] - if span.Compare(w.Span, g.Span) != 0 { - return true - } - if w.NewText != g.NewText { - return true - } - } - return false -} diff --git a/internal/lsp/diff/difftest/difftest.go b/internal/lsp/diff/difftest/difftest.go deleted file mode 100644 index a78e2674521..00000000000 --- a/internal/lsp/diff/difftest/difftest.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package difftest supplies a set of tests that will operate on any -// implementation of a diff algorithm as exposed by -// "golang.org/x/tools/internal/lsp/diff" -package difftest - -import ( - "fmt" - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/span" -) - -const ( - FileA = "from" - FileB = "to" - UnifiedPrefix = "--- " + FileA + "\n+++ " + FileB + "\n" -) - -var TestCases = []struct { - Name, In, Out, Unified string - Edits, LineEdits []diff.TextEdit - NoDiff bool -}{{ - Name: "empty", - In: "", - Out: "", -}, { - Name: "no_diff", - In: "gargantuan\n", - Out: "gargantuan\n", -}, { - Name: "replace_all", - In: "fruit\n", - Out: "cheese\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --fruit -+cheese -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "cheese"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "cheese\n"}}, -}, { - Name: "insert_rune", - In: "gord\n", - Out: "gourd\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --gord -+gourd -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(2, 2), NewText: "u"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "gourd\n"}}, -}, { - Name: "delete_rune", - In: "groat\n", - Out: "goat\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --groat -+goat -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(1, 2), NewText: ""}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "goat\n"}}, -}, { - Name: "replace_rune", - In: "loud\n", - Out: "lord\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --loud -+lord -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(2, 3), NewText: "r"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "lord\n"}}, -}, { - Name: "replace_partials", - In: "blanket\n", - Out: "bunker\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --blanket -+bunker -`[1:], - Edits: []diff.TextEdit{ - {Span: newSpan(1, 3), NewText: "u"}, - {Span: newSpan(6, 7), NewText: "r"}, - }, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 8), NewText: "bunker\n"}}, -}, { - Name: "insert_line", - In: "1: one\n3: three\n", - Out: "1: one\n2: two\n3: three\n", - Unified: UnifiedPrefix + ` -@@ -1,2 +1,3 @@ - 1: one -+2: two - 3: three -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(7, 7), NewText: "2: two\n"}}, -}, { - Name: "replace_no_newline", - In: "A", - Out: "B", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --A -\ No newline at end of file -+B -\ No newline at end of file -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "B"}}, -}, { - Name: "add_end", - In: "A", - Out: "AB", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --A -\ No newline at end of file -+AB -\ No newline at end of file -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(1, 1), NewText: "B"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "AB"}}, -}, { - Name: "add_newline", - In: "A", - Out: "A\n", - Unified: UnifiedPrefix + ` -@@ -1 +1 @@ --A -\ No newline at end of file -+A -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(1, 1), NewText: "\n"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "A\n"}}, -}, { - Name: "delete_front", - In: "A\nB\nC\nA\nB\nB\nA\n", - Out: "C\nB\nA\nB\nA\nC\n", - Unified: UnifiedPrefix + ` -@@ -1,7 +1,6 @@ --A --B - C -+B - A - B --B - A -+C -`[1:], - Edits: []diff.TextEdit{ - {Span: newSpan(0, 4), NewText: ""}, - {Span: newSpan(6, 6), NewText: "B\n"}, - {Span: newSpan(10, 12), NewText: ""}, - {Span: newSpan(14, 14), NewText: "C\n"}, - }, - NoDiff: true, // diff algorithm produces different delete/insert pattern -}, - { - Name: "replace_last_line", - In: "A\nB\n", - Out: "A\nC\n\n", - Unified: UnifiedPrefix + ` -@@ -1,2 +1,3 @@ - A --B -+C -+ -`[1:], - Edits: []diff.TextEdit{{Span: newSpan(2, 3), NewText: "C\n"}}, - LineEdits: []diff.TextEdit{{Span: newSpan(2, 4), NewText: "C\n\n"}}, - }, - { - Name: "multiple_replace", - In: "A\nB\nC\nD\nE\nF\nG\n", - Out: "A\nH\nI\nJ\nE\nF\nK\n", - Unified: UnifiedPrefix + ` -@@ -1,7 +1,7 @@ - A --B --C --D -+H -+I -+J - E - F --G -+K -`[1:], - Edits: []diff.TextEdit{ - {Span: newSpan(2, 8), NewText: "H\nI\nJ\n"}, - {Span: newSpan(12, 14), NewText: "K\n"}, - }, - NoDiff: true, // diff algorithm produces different delete/insert pattern - }, -} - -func init() { - // expand all the spans to full versions - // we need them all to have their line number and column - for _, tc := range TestCases { - tf := span.NewTokenFile("", []byte(tc.In)) - for i := range tc.Edits { - tc.Edits[i].Span, _ = tc.Edits[i].Span.WithAll(tf) - } - for i := range tc.LineEdits { - tc.LineEdits[i].Span, _ = tc.LineEdits[i].Span.WithAll(tf) - } - } -} - -func DiffTest(t *testing.T, compute diff.ComputeEdits) { - t.Helper() - for _, test := range TestCases { - t.Run(test.Name, func(t *testing.T) { - t.Helper() - edits, err := compute(span.URIFromPath("/"+test.Name), test.In, test.Out) - if err != nil { - t.Fatal(err) - } - got := diff.ApplyEdits(test.In, edits) - unified := fmt.Sprint(diff.ToUnified(FileA, FileB, test.In, edits)) - if got != test.Out { - t.Errorf("got patched:\n%v\nfrom diff:\n%v\nexpected:\n%v", got, unified, test.Out) - } - if !test.NoDiff && unified != test.Unified { - t.Errorf("got diff:\n%v\nexpected:\n%v", unified, test.Unified) - } - }) - } -} - -func newSpan(start, end int) span.Span { - return span.New("", span.NewPoint(0, 0, start), span.NewPoint(0, 0, end)) -} diff --git a/internal/lsp/diff/myers/diff.go b/internal/lsp/diff/myers/diff.go deleted file mode 100644 index a59475058a5..00000000000 --- a/internal/lsp/diff/myers/diff.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package myers implements the Myers diff algorithm. -package myers - -import ( - "strings" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/span" -) - -// Sources: -// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/ -// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2 - -func ComputeEdits(uri span.URI, before, after string) ([]diff.TextEdit, error) { - ops := operations(splitLines(before), splitLines(after)) - edits := make([]diff.TextEdit, 0, len(ops)) - for _, op := range ops { - s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0)) - switch op.Kind { - case diff.Delete: - // Delete: unformatted[i1:i2] is deleted. - edits = append(edits, diff.TextEdit{Span: s}) - case diff.Insert: - // Insert: formatted[j1:j2] is inserted at unformatted[i1:i1]. - if content := strings.Join(op.Content, ""); content != "" { - edits = append(edits, diff.TextEdit{Span: s, NewText: content}) - } - } - } - return edits, nil -} - -type operation struct { - Kind diff.OpKind - Content []string // content from b - I1, I2 int // indices of the line in a - J1 int // indices of the line in b, J2 implied by len(Content) -} - -// operations returns the list of operations to convert a into b, consolidating -// operations for multiple lines and not including equal lines. -func operations(a, b []string) []*operation { - if len(a) == 0 && len(b) == 0 { - return nil - } - - trace, offset := shortestEditSequence(a, b) - snakes := backtrack(trace, len(a), len(b), offset) - - M, N := len(a), len(b) - - var i int - solution := make([]*operation, len(a)+len(b)) - - add := func(op *operation, i2, j2 int) { - if op == nil { - return - } - op.I2 = i2 - if op.Kind == diff.Insert { - op.Content = b[op.J1:j2] - } - solution[i] = op - i++ - } - x, y := 0, 0 - for _, snake := range snakes { - if len(snake) < 2 { - continue - } - var op *operation - // delete (horizontal) - for snake[0]-snake[1] > x-y { - if op == nil { - op = &operation{ - Kind: diff.Delete, - I1: x, - J1: y, - } - } - x++ - if x == M { - break - } - } - add(op, x, y) - op = nil - // insert (vertical) - for snake[0]-snake[1] < x-y { - if op == nil { - op = &operation{ - Kind: diff.Insert, - I1: x, - J1: y, - } - } - y++ - } - add(op, x, y) - op = nil - // equal (diagonal) - for x < snake[0] { - x++ - y++ - } - if x >= M && y >= N { - break - } - } - return solution[:i] -} - -// backtrack uses the trace for the edit sequence computation and returns the -// "snakes" that make up the solution. A "snake" is a single deletion or -// insertion followed by zero or diagonals. -func backtrack(trace [][]int, x, y, offset int) [][]int { - snakes := make([][]int, len(trace)) - d := len(trace) - 1 - for ; x > 0 && y > 0 && d > 0; d-- { - V := trace[d] - if len(V) == 0 { - continue - } - snakes[d] = []int{x, y} - - k := x - y - - var kPrev int - if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { - kPrev = k + 1 - } else { - kPrev = k - 1 - } - - x = V[kPrev+offset] - y = x - kPrev - } - if x < 0 || y < 0 { - return snakes - } - snakes[d] = []int{x, y} - return snakes -} - -// shortestEditSequence returns the shortest edit sequence that converts a into b. -func shortestEditSequence(a, b []string) ([][]int, int) { - M, N := len(a), len(b) - V := make([]int, 2*(N+M)+1) - offset := N + M - trace := make([][]int, N+M+1) - - // Iterate through the maximum possible length of the SES (N+M). - for d := 0; d <= N+M; d++ { - copyV := make([]int, len(V)) - // k lines are represented by the equation y = x - k. We move in - // increments of 2 because end points for even d are on even k lines. - for k := -d; k <= d; k += 2 { - // At each point, we either go down or to the right. We go down if - // k == -d, and we go to the right if k == d. We also prioritize - // the maximum x value, because we prefer deletions to insertions. - var x int - if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { - x = V[k+1+offset] // down - } else { - x = V[k-1+offset] + 1 // right - } - - y := x - k - - // Diagonal moves while we have equal contents. - for x < M && y < N && a[x] == b[y] { - x++ - y++ - } - - V[k+offset] = x - - // Return if we've exceeded the maximum values. - if x == M && y == N { - // Makes sure to save the state of the array before returning. - copy(copyV, V) - trace[d] = copyV - return trace, offset - } - } - - // Save the state of the array. - copy(copyV, V) - trace[d] = copyV - } - return nil, 0 -} - -func splitLines(text string) []string { - lines := strings.SplitAfter(text, "\n") - if lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] - } - return lines -} diff --git a/internal/lsp/diff/myers/diff_test.go b/internal/lsp/diff/myers/diff_test.go deleted file mode 100644 index bce0399c58d..00000000000 --- a/internal/lsp/diff/myers/diff_test.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package myers_test - -import ( - "testing" - - "golang.org/x/tools/internal/lsp/diff/difftest" - "golang.org/x/tools/internal/lsp/diff/myers" -) - -func TestDiff(t *testing.T) { - difftest.DiffTest(t, myers.ComputeEdits) -} diff --git a/internal/lsp/diff/unified.go b/internal/lsp/diff/unified.go deleted file mode 100644 index 323471d2046..00000000000 --- a/internal/lsp/diff/unified.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package diff - -import ( - "fmt" - "strings" -) - -// Unified represents a set of edits as a unified diff. -type Unified struct { - // From is the name of the original file. - From string - // To is the name of the modified file. - To string - // Hunks is the set of edit hunks needed to transform the file content. - Hunks []*Hunk -} - -// Hunk represents a contiguous set of line edits to apply. -type Hunk struct { - // The line in the original source where the hunk starts. - FromLine int - // The line in the original source where the hunk finishes. - ToLine int - // The set of line based edits to apply. - Lines []Line -} - -// Line represents a single line operation to apply as part of a Hunk. -type Line struct { - // Kind is the type of line this represents, deletion, insertion or copy. - Kind OpKind - // Content is the content of this line. - // For deletion it is the line being removed, for all others it is the line - // to put in the output. - Content string -} - -// OpKind is used to denote the type of operation a line represents. -type OpKind int - -const ( - // Delete is the operation kind for a line that is present in the input - // but not in the output. - Delete OpKind = iota - // Insert is the operation kind for a line that is new in the output. - Insert - // Equal is the operation kind for a line that is the same in the input and - // output, often used to provide context around edited lines. - Equal -) - -// String returns a human readable representation of an OpKind. It is not -// intended for machine processing. -func (k OpKind) String() string { - switch k { - case Delete: - return "delete" - case Insert: - return "insert" - case Equal: - return "equal" - default: - panic("unknown operation kind") - } -} - -const ( - edge = 3 - gap = edge * 2 -) - -// ToUnified takes a file contents and a sequence of edits, and calculates -// a unified diff that represents those edits. -func ToUnified(from, to string, content string, edits []TextEdit) Unified { - u := Unified{ - From: from, - To: to, - } - if len(edits) == 0 { - return u - } - edits, partial := prepareEdits(content, edits) - if partial { - edits = lineEdits(content, edits) - } - lines := splitLines(content) - var h *Hunk - last := 0 - toLine := 0 - for _, edit := range edits { - start := edit.Span.Start().Line() - 1 - end := edit.Span.End().Line() - 1 - switch { - case h != nil && start == last: - //direct extension - case h != nil && start <= last+gap: - //within range of previous lines, add the joiners - addEqualLines(h, lines, last, start) - default: - //need to start a new hunk - if h != nil { - // add the edge to the previous hunk - addEqualLines(h, lines, last, last+edge) - u.Hunks = append(u.Hunks, h) - } - toLine += start - last - h = &Hunk{ - FromLine: start + 1, - ToLine: toLine + 1, - } - // add the edge to the new hunk - delta := addEqualLines(h, lines, start-edge, start) - h.FromLine -= delta - h.ToLine -= delta - } - last = start - for i := start; i < end; i++ { - h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]}) - last++ - } - if edit.NewText != "" { - for _, line := range splitLines(edit.NewText) { - h.Lines = append(h.Lines, Line{Kind: Insert, Content: line}) - toLine++ - } - } - } - if h != nil { - // add the edge to the final hunk - addEqualLines(h, lines, last, last+edge) - u.Hunks = append(u.Hunks, h) - } - return u -} - -func splitLines(text string) []string { - lines := strings.SplitAfter(text, "\n") - if lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] - } - return lines -} - -func addEqualLines(h *Hunk, lines []string, start, end int) int { - delta := 0 - for i := start; i < end; i++ { - if i < 0 { - continue - } - if i >= len(lines) { - return delta - } - h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]}) - delta++ - } - return delta -} - -// Format converts a unified diff to the standard textual form for that diff. -// The output of this function can be passed to tools like patch. -func (u Unified) Format(f fmt.State, r rune) { - if len(u.Hunks) == 0 { - return - } - fmt.Fprintf(f, "--- %s\n", u.From) - fmt.Fprintf(f, "+++ %s\n", u.To) - for _, hunk := range u.Hunks { - fromCount, toCount := 0, 0 - for _, l := range hunk.Lines { - switch l.Kind { - case Delete: - fromCount++ - case Insert: - toCount++ - default: - fromCount++ - toCount++ - } - } - fmt.Fprint(f, "@@") - if fromCount > 1 { - fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount) - } else { - fmt.Fprintf(f, " -%d", hunk.FromLine) - } - if toCount > 1 { - fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount) - } else { - fmt.Fprintf(f, " +%d", hunk.ToLine) - } - fmt.Fprint(f, " @@\n") - for _, l := range hunk.Lines { - switch l.Kind { - case Delete: - fmt.Fprintf(f, "-%s", l.Content) - case Insert: - fmt.Fprintf(f, "+%s", l.Content) - default: - fmt.Fprintf(f, " %s", l.Content) - } - if !strings.HasSuffix(l.Content, "\n") { - fmt.Fprintf(f, "\n\\ No newline at end of file\n") - } - } - } -} diff --git a/internal/lsp/fake/client.go b/internal/lsp/fake/client.go deleted file mode 100644 index fdc67a6cc64..00000000000 --- a/internal/lsp/fake/client.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "context" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" -) - -// ClientHooks are called to handle the corresponding client LSP method. -type ClientHooks struct { - OnLogMessage func(context.Context, *protocol.LogMessageParams) error - OnDiagnostics func(context.Context, *protocol.PublishDiagnosticsParams) error - OnWorkDoneProgressCreate func(context.Context, *protocol.WorkDoneProgressCreateParams) error - OnProgress func(context.Context, *protocol.ProgressParams) error - OnShowMessage func(context.Context, *protocol.ShowMessageParams) error - OnShowMessageRequest func(context.Context, *protocol.ShowMessageRequestParams) error - OnRegistration func(context.Context, *protocol.RegistrationParams) error - OnUnregistration func(context.Context, *protocol.UnregistrationParams) error -} - -// Client is an adapter that converts an *Editor into an LSP Client. It mosly -// delegates functionality to hooks that can be configured by tests. -type Client struct { - editor *Editor - hooks ClientHooks -} - -func (c *Client) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) error { - if c.hooks.OnShowMessage != nil { - return c.hooks.OnShowMessage(ctx, params) - } - return nil -} - -func (c *Client) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) { - if c.hooks.OnShowMessageRequest != nil { - if err := c.hooks.OnShowMessageRequest(ctx, params); err != nil { - return nil, err - } - } - if len(params.Actions) == 0 || len(params.Actions) > 1 { - return nil, fmt.Errorf("fake editor cannot handle multiple action items") - } - return ¶ms.Actions[0], nil -} - -func (c *Client) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error { - if c.hooks.OnLogMessage != nil { - return c.hooks.OnLogMessage(ctx, params) - } - return nil -} - -func (c *Client) Event(ctx context.Context, event *interface{}) error { - return nil -} - -func (c *Client) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) error { - if c.hooks.OnDiagnostics != nil { - return c.hooks.OnDiagnostics(ctx, params) - } - return nil -} - -func (c *Client) WorkspaceFolders(context.Context) ([]protocol.WorkspaceFolder, error) { - return []protocol.WorkspaceFolder{}, nil -} - -func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) { - results := make([]interface{}, len(p.Items)) - for i, item := range p.Items { - if item.Section != "gopls" { - continue - } - results[i] = c.editor.configuration() - } - return results, nil -} - -func (c *Client) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) error { - if c.hooks.OnRegistration != nil { - return c.hooks.OnRegistration(ctx, params) - } - return nil -} - -func (c *Client) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) error { - if c.hooks.OnUnregistration != nil { - return c.hooks.OnUnregistration(ctx, params) - } - return nil -} - -func (c *Client) Progress(ctx context.Context, params *protocol.ProgressParams) error { - if c.hooks.OnProgress != nil { - return c.hooks.OnProgress(ctx, params) - } - return nil -} - -func (c *Client) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error { - if c.hooks.OnWorkDoneProgressCreate != nil { - return c.hooks.OnWorkDoneProgressCreate(ctx, params) - } - return nil -} - -func (c *Client) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) { - return nil, nil -} - -// ApplyEdit applies edits sent from the server. -func (c *Client) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) { - if len(params.Edit.Changes) != 0 { - return &protocol.ApplyWorkspaceEditResult{FailureReason: "Edit.Changes is unsupported"}, nil - } - for _, change := range params.Edit.DocumentChanges { - if err := c.editor.applyProtocolEdit(ctx, change); err != nil { - return nil, err - } - } - return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil -} diff --git a/internal/lsp/fake/edit.go b/internal/lsp/fake/edit.go deleted file mode 100644 index 8b04c390fc5..00000000000 --- a/internal/lsp/fake/edit.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "fmt" - "sort" - "strings" - - "golang.org/x/tools/internal/lsp/protocol" -) - -// Pos represents a position in a text buffer. Both Line and Column are -// 0-indexed. -type Pos struct { - Line, Column int -} - -func (p Pos) String() string { - return fmt.Sprintf("%v:%v", p.Line, p.Column) -} - -// Range corresponds to protocol.Range, but uses the editor friend Pos -// instead of UTF-16 oriented protocol.Position -type Range struct { - Start Pos - End Pos -} - -func (p Pos) ToProtocolPosition() protocol.Position { - return protocol.Position{ - Line: uint32(p.Line), - Character: uint32(p.Column), - } -} - -func fromProtocolPosition(pos protocol.Position) Pos { - return Pos{ - Line: int(pos.Line), - Column: int(pos.Character), - } -} - -// Edit represents a single (contiguous) buffer edit. -type Edit struct { - Start, End Pos - Text string -} - -// Location is the editor friendly equivalent of protocol.Location -type Location struct { - Path string - Range Range -} - -// SymbolInformation is an editor friendly version of -// protocol.SymbolInformation, with location information transformed to byte -// offsets. Field names correspond to the protocol type. -type SymbolInformation struct { - Name string - Kind protocol.SymbolKind - Location Location -} - -// NewEdit creates an edit replacing all content between -// (startLine, startColumn) and (endLine, endColumn) with text. -func NewEdit(startLine, startColumn, endLine, endColumn int, text string) Edit { - return Edit{ - Start: Pos{Line: startLine, Column: startColumn}, - End: Pos{Line: endLine, Column: endColumn}, - Text: text, - } -} - -func (e Edit) toProtocolChangeEvent() protocol.TextDocumentContentChangeEvent { - return protocol.TextDocumentContentChangeEvent{ - Range: &protocol.Range{ - Start: e.Start.ToProtocolPosition(), - End: e.End.ToProtocolPosition(), - }, - Text: e.Text, - } -} - -func fromProtocolTextEdit(textEdit protocol.TextEdit) Edit { - return Edit{ - Start: fromProtocolPosition(textEdit.Range.Start), - End: fromProtocolPosition(textEdit.Range.End), - Text: textEdit.NewText, - } -} - -// inText reports whether p is a valid position in the text buffer. -func inText(p Pos, content []string) bool { - if p.Line < 0 || p.Line >= len(content) { - return false - } - // Note the strict right bound: the column indexes character _separators_, - // not characters. - if p.Column < 0 || p.Column > len([]rune(content[p.Line])) { - return false - } - return true -} - -// editContent implements a simplistic, inefficient algorithm for applying text -// edits to our buffer representation. It returns an error if the edit is -// invalid for the current content. -func editContent(content []string, edits []Edit) ([]string, error) { - newEdits := make([]Edit, len(edits)) - copy(newEdits, edits) - sort.Slice(newEdits, func(i, j int) bool { - if newEdits[i].Start.Line < newEdits[j].Start.Line { - return true - } - if newEdits[i].Start.Line > newEdits[j].Start.Line { - return false - } - return newEdits[i].Start.Column < newEdits[j].Start.Column - }) - - // Validate edits. - for _, edit := range newEdits { - if edit.End.Line < edit.Start.Line || (edit.End.Line == edit.Start.Line && edit.End.Column < edit.Start.Column) { - return nil, fmt.Errorf("invalid edit: end %v before start %v", edit.End, edit.Start) - } - if !inText(edit.Start, content) { - return nil, fmt.Errorf("start position %v is out of bounds", edit.Start) - } - if !inText(edit.End, content) { - return nil, fmt.Errorf("end position %v is out of bounds", edit.End) - } - } - - var ( - b strings.Builder - line, column int - ) - advance := func(toLine, toColumn int) { - for ; line < toLine; line++ { - b.WriteString(string([]rune(content[line])[column:]) + "\n") - column = 0 - } - b.WriteString(string([]rune(content[line])[column:toColumn])) - column = toColumn - } - for _, edit := range newEdits { - advance(edit.Start.Line, edit.Start.Column) - b.WriteString(edit.Text) - line = edit.End.Line - column = edit.End.Column - } - advance(len(content)-1, len([]rune(content[len(content)-1]))) - return strings.Split(b.String(), "\n"), nil -} diff --git a/internal/lsp/fake/edit_test.go b/internal/lsp/fake/edit_test.go deleted file mode 100644 index 4fa23bdb74a..00000000000 --- a/internal/lsp/fake/edit_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "strings" - "testing" -) - -func TestApplyEdit(t *testing.T) { - tests := []struct { - label string - content string - edits []Edit - want string - wantErr bool - }{ - { - label: "empty content", - }, - { - label: "empty edit", - content: "hello", - edits: []Edit{}, - want: "hello", - }, - { - label: "unicode edit", - content: "hello, ę—„ęœ¬čŖž", - edits: []Edit{{ - Start: Pos{Line: 0, Column: 7}, - End: Pos{Line: 0, Column: 10}, - Text: "world", - }}, - want: "hello, world", - }, - { - label: "range edit", - content: "ABC\nDEF\nGHI\nJKL", - edits: []Edit{{ - Start: Pos{Line: 1, Column: 1}, - End: Pos{Line: 2, Column: 3}, - Text: "12\n345", - }}, - want: "ABC\nD12\n345\nJKL", - }, - { - label: "end before start", - content: "ABC\nDEF\nGHI\nJKL", - edits: []Edit{{ - End: Pos{Line: 1, Column: 1}, - Start: Pos{Line: 2, Column: 3}, - Text: "12\n345", - }}, - wantErr: true, - }, - { - label: "out of bounds line", - content: "ABC\nDEF\nGHI\nJKL", - edits: []Edit{{ - Start: Pos{Line: 1, Column: 1}, - End: Pos{Line: 4, Column: 3}, - Text: "12\n345", - }}, - wantErr: true, - }, - { - label: "out of bounds column", - content: "ABC\nDEF\nGHI\nJKL", - edits: []Edit{{ - Start: Pos{Line: 1, Column: 4}, - End: Pos{Line: 2, Column: 3}, - Text: "12\n345", - }}, - wantErr: true, - }, - } - - for _, test := range tests { - test := test - t.Run(test.label, func(t *testing.T) { - lines := strings.Split(test.content, "\n") - newLines, err := editContent(lines, test.edits) - if (err != nil) != test.wantErr { - t.Errorf("got err %v, want error: %t", err, test.wantErr) - } - if err != nil { - return - } - if got := strings.Join(newLines, "\n"); got != test.want { - t.Errorf("got %q, want %q", got, test.want) - } - }) - } -} diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go deleted file mode 100644 index 06b90bb84e5..00000000000 --- a/internal/lsp/fake/editor.go +++ /dev/null @@ -1,1254 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "bufio" - "context" - "errors" - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "strings" - "sync" - - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// Editor is a fake editor client. It keeps track of client state and can be -// used for writing LSP tests. -type Editor struct { - Config EditorConfig - - // Server, client, and sandbox are concurrency safe and written only - // at construction time, so do not require synchronization. - Server protocol.Server - serverConn jsonrpc2.Conn - client *Client - sandbox *Sandbox - defaultEnv map[string]string - - // Since this editor is intended just for testing, we use very coarse - // locking. - mu sync.Mutex - // Editor state. - buffers map[string]buffer - // Capabilities / Options - serverCapabilities protocol.ServerCapabilities - - // Call metrics for the purpose of expectations. This is done in an ad-hoc - // manner for now. Perhaps in the future we should do something more - // systematic. Guarded with a separate mutex as calls may need to be accessed - // asynchronously via callbacks into the Editor. - callsMu sync.Mutex - calls CallCounts -} - -type CallCounts struct { - DidOpen, DidChange, DidSave, DidChangeWatchedFiles, DidClose uint64 -} - -type buffer struct { - windowsLineEndings bool - version int - path string - lines []string - dirty bool -} - -func (b buffer) text() string { - eol := "\n" - if b.windowsLineEndings { - eol = "\r\n" - } - return strings.Join(b.lines, eol) -} - -// EditorConfig configures the editor's LSP session. This is similar to -// source.UserOptions, but we use a separate type here so that we expose only -// that configuration which we support. -// -// The zero value for EditorConfig should correspond to its defaults. -type EditorConfig struct { - Env map[string]string - BuildFlags []string - - // CodeLenses is a map defining whether codelens are enabled, keyed by the - // codeLens command. CodeLenses which are not present in this map are left in - // their default state. - CodeLenses map[string]bool - - // SymbolMatcher is the config associated with the "symbolMatcher" gopls - // config option. - SymbolMatcher, SymbolStyle *string - - // LimitWorkspaceScope is true if the user does not want to expand their - // workspace scope to the entire module. - LimitWorkspaceScope bool - - // WorkspaceFolders is the workspace folders to configure on the LSP server, - // relative to the sandbox workdir. - // - // As a special case, if WorkspaceFolders is nil the editor defaults to - // configuring a single workspace folder corresponding to the workdir root. - // To explicitly send no workspace folders, use an empty (non-nil) slice. - WorkspaceFolders []string - - // AllExperiments sets the "allExperiments" configuration, which enables - // all of gopls's opt-in settings. - AllExperiments bool - - // Whether to send the current process ID, for testing data that is joined to - // the PID. This can only be set by one test. - SendPID bool - - // Whether to edit files with windows line endings. - WindowsLineEndings bool - - // Map of language ID -> regexp to match, used to set the file type of new - // buffers. Applied as an overlay on top of the following defaults: - // "go" -> ".*\.go" - // "go.mod" -> "go\.mod" - // "go.sum" -> "go\.sum" - // "gotmpl" -> ".*tmpl" - FileAssociations map[string]string - - // Settings holds arbitrary additional settings to apply to the gopls config. - // TODO(rfindley): replace existing EditorConfig fields with Settings. - Settings map[string]interface{} - - ImportShortcut string - DirectoryFilters []string - VerboseOutput bool - ExperimentalUseInvalidMetadata bool -} - -// NewEditor Creates a new Editor. -func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor { - return &Editor{ - buffers: make(map[string]buffer), - sandbox: sandbox, - defaultEnv: sandbox.GoEnv(), - Config: config, - } -} - -// Connect configures the editor to communicate with an LSP server on conn. It -// is not concurrency safe, and should be called at most once, before using the -// editor. -// -// It returns the editor, so that it may be called as follows: -// -// editor, err := NewEditor(s).Connect(ctx, conn) -func (e *Editor) Connect(ctx context.Context, conn jsonrpc2.Conn, hooks ClientHooks) (*Editor, error) { - e.serverConn = conn - e.Server = protocol.ServerDispatcher(conn) - e.client = &Client{editor: e, hooks: hooks} - conn.Go(ctx, - protocol.Handlers( - protocol.ClientHandler(e.client, - jsonrpc2.MethodNotFound))) - if err := e.initialize(ctx, e.Config.WorkspaceFolders); err != nil { - return nil, err - } - e.sandbox.Workdir.AddWatcher(e.onFileChanges) - return e, nil -} - -func (e *Editor) Stats() CallCounts { - e.callsMu.Lock() - defer e.callsMu.Unlock() - return e.calls -} - -// Shutdown issues the 'shutdown' LSP notification. -func (e *Editor) Shutdown(ctx context.Context) error { - if e.Server != nil { - if err := e.Server.Shutdown(ctx); err != nil { - return fmt.Errorf("Shutdown: %w", err) - } - } - return nil -} - -// Exit issues the 'exit' LSP notification. -func (e *Editor) Exit(ctx context.Context) error { - if e.Server != nil { - // Not all LSP clients issue the exit RPC, but we do so here to ensure that - // we gracefully handle it on multi-session servers. - if err := e.Server.Exit(ctx); err != nil { - return fmt.Errorf("Exit: %w", err) - } - } - return nil -} - -// Close issues the shutdown and exit sequence an editor should. -func (e *Editor) Close(ctx context.Context) error { - if err := e.Shutdown(ctx); err != nil { - return err - } - if err := e.Exit(ctx); err != nil { - return err - } - // called close on the editor should result in the connection closing - select { - case <-e.serverConn.Done(): - // connection closed itself - return nil - case <-ctx.Done(): - return fmt.Errorf("connection not closed: %w", ctx.Err()) - } -} - -// Client returns the LSP client for this editor. -func (e *Editor) Client() *Client { - return e.client -} - -func (e *Editor) overlayEnv() map[string]string { - env := make(map[string]string) - for k, v := range e.defaultEnv { - v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) - env[k] = v - } - for k, v := range e.Config.Env { - v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) - env[k] = v - } - return env -} - -func (e *Editor) configuration() map[string]interface{} { - config := map[string]interface{}{ - "verboseWorkDoneProgress": true, - "env": e.overlayEnv(), - "expandWorkspaceToModule": !e.Config.LimitWorkspaceScope, - "completionBudget": "10s", - } - - for k, v := range e.Config.Settings { - config[k] = v - } - - if e.Config.BuildFlags != nil { - config["buildFlags"] = e.Config.BuildFlags - } - if e.Config.DirectoryFilters != nil { - config["directoryFilters"] = e.Config.DirectoryFilters - } - if e.Config.ExperimentalUseInvalidMetadata { - config["experimentalUseInvalidMetadata"] = true - } - if e.Config.CodeLenses != nil { - config["codelenses"] = e.Config.CodeLenses - } - if e.Config.SymbolMatcher != nil { - config["symbolMatcher"] = *e.Config.SymbolMatcher - } - if e.Config.SymbolStyle != nil { - config["symbolStyle"] = *e.Config.SymbolStyle - } - if e.Config.AllExperiments { - config["allExperiments"] = true - } - - if e.Config.VerboseOutput { - config["verboseOutput"] = true - } - - if e.Config.ImportShortcut != "" { - config["importShortcut"] = e.Config.ImportShortcut - } - - config["diagnosticsDelay"] = "10ms" - - // ExperimentalWorkspaceModule is only set as a mode, not a configuration. - return config -} - -func (e *Editor) initialize(ctx context.Context, workspaceFolders []string) error { - params := &protocol.ParamInitialize{} - params.ClientInfo.Name = "fakeclient" - params.ClientInfo.Version = "v1.0.0" - - if workspaceFolders == nil { - workspaceFolders = []string{string(e.sandbox.Workdir.RelativeTo)} - } - for _, folder := range workspaceFolders { - params.WorkspaceFolders = append(params.WorkspaceFolders, protocol.WorkspaceFolder{ - URI: string(e.sandbox.Workdir.URI(folder)), - Name: filepath.Base(folder), - }) - } - - params.Capabilities.Workspace.Configuration = true - params.Capabilities.Window.WorkDoneProgress = true - // TODO: set client capabilities - params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated} - params.InitializationOptions = e.configuration() - if e.Config.SendPID { - params.ProcessID = int32(os.Getpid()) - } - - params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true - params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true - // copied from lsp/semantic.go to avoid import cycle in tests - params.Capabilities.TextDocument.SemanticTokens.TokenTypes = []string{ - "namespace", "type", "class", "enum", "interface", - "struct", "typeParameter", "parameter", "variable", "property", "enumMember", - "event", "function", "method", "macro", "keyword", "modifier", "comment", - "string", "number", "regexp", "operator", - } - - // This is a bit of a hack, since the fake editor doesn't actually support - // watching changed files that match a specific glob pattern. However, the - // editor does send didChangeWatchedFiles notifications, so set this to - // true. - params.Capabilities.Workspace.DidChangeWatchedFiles.DynamicRegistration = true - - params.Trace = "messages" - // TODO: support workspace folders. - if e.Server != nil { - resp, err := e.Server.Initialize(ctx, params) - if err != nil { - return fmt.Errorf("initialize: %w", err) - } - e.mu.Lock() - e.serverCapabilities = resp.Capabilities - e.mu.Unlock() - - if err := e.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil { - return fmt.Errorf("initialized: %w", err) - } - } - // TODO: await initial configuration here, or expect gopls to manage that? - return nil -} - -// onFileChanges is registered to be called by the Workdir on any writes that -// go through the Workdir API. It is called synchronously by the Workdir. -func (e *Editor) onFileChanges(ctx context.Context, evts []FileEvent) { - if e.Server == nil { - return - } - - // e may be locked when onFileChanges is called, but it is important that we - // synchronously increment this counter so that we can subsequently assert on - // the number of expected DidChangeWatchedFiles calls. - e.callsMu.Lock() - e.calls.DidChangeWatchedFiles++ - e.callsMu.Unlock() - - // Since e may be locked, we must run this mutation asynchronously. - go func() { - e.mu.Lock() - defer e.mu.Unlock() - var lspevts []protocol.FileEvent - for _, evt := range evts { - // Always send an on-disk change, even for events that seem useless - // because they're shadowed by an open buffer. - lspevts = append(lspevts, evt.ProtocolEvent) - - if buf, ok := e.buffers[evt.Path]; ok { - // Following VS Code, don't honor deletions or changes to dirty buffers. - if buf.dirty || evt.ProtocolEvent.Type == protocol.Deleted { - continue - } - - content, err := e.sandbox.Workdir.ReadFile(evt.Path) - if err != nil { - continue // A race with some other operation. - } - // No need to update if the buffer content hasn't changed. - if content == buf.text() { - continue - } - // During shutdown, this call will fail. Ignore the error. - _ = e.setBufferContentLocked(ctx, evt.Path, false, lines(content), nil) - } - } - e.Server.DidChangeWatchedFiles(ctx, &protocol.DidChangeWatchedFilesParams{ - Changes: lspevts, - }) - }() -} - -// OpenFile creates a buffer for the given workdir-relative file. -func (e *Editor) OpenFile(ctx context.Context, path string) error { - content, err := e.sandbox.Workdir.ReadFile(path) - if err != nil { - return err - } - return e.createBuffer(ctx, path, false, content) -} - -// CreateBuffer creates a new unsaved buffer corresponding to the workdir path, -// containing the given textual content. -func (e *Editor) CreateBuffer(ctx context.Context, path, content string) error { - return e.createBuffer(ctx, path, true, content) -} - -func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content string) error { - buf := buffer{ - windowsLineEndings: e.Config.WindowsLineEndings, - version: 1, - path: path, - lines: lines(content), - dirty: dirty, - } - e.mu.Lock() - defer e.mu.Unlock() - e.buffers[path] = buf - - item := protocol.TextDocumentItem{ - URI: e.sandbox.Workdir.URI(buf.path), - LanguageID: e.languageID(buf.path), - Version: int32(buf.version), - Text: buf.text(), - } - - if e.Server != nil { - if err := e.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{ - TextDocument: item, - }); err != nil { - return fmt.Errorf("DidOpen: %w", err) - } - e.callsMu.Lock() - e.calls.DidOpen++ - e.callsMu.Unlock() - } - return nil -} - -var defaultFileAssociations = map[string]*regexp.Regexp{ - "go": regexp.MustCompile(`^.*\.go$`), // '$' is important: don't match .gotmpl! - "go.mod": regexp.MustCompile(`^go\.mod$`), - "go.sum": regexp.MustCompile(`^go(\.work)?\.sum$`), - "go.work": regexp.MustCompile(`^go\.work$`), - "gotmpl": regexp.MustCompile(`^.*tmpl$`), -} - -func (e *Editor) languageID(p string) string { - base := path.Base(p) - for lang, re := range e.Config.FileAssociations { - re := regexp.MustCompile(re) - if re.MatchString(base) { - return lang - } - } - for lang, re := range defaultFileAssociations { - if re.MatchString(base) { - return lang - } - } - return "" -} - -// lines returns line-ending agnostic line representation of content. -func lines(content string) []string { - lines := strings.Split(content, "\n") - for i, l := range lines { - lines[i] = strings.TrimSuffix(l, "\r") - } - return lines -} - -// CloseBuffer removes the current buffer (regardless of whether it is saved). -func (e *Editor) CloseBuffer(ctx context.Context, path string) error { - e.mu.Lock() - _, ok := e.buffers[path] - if !ok { - e.mu.Unlock() - return ErrUnknownBuffer - } - delete(e.buffers, path) - e.mu.Unlock() - - if e.Server != nil { - if err := e.Server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{ - TextDocument: e.textDocumentIdentifier(path), - }); err != nil { - return fmt.Errorf("DidClose: %w", err) - } - e.callsMu.Lock() - e.calls.DidClose++ - e.callsMu.Unlock() - } - return nil -} - -func (e *Editor) textDocumentIdentifier(path string) protocol.TextDocumentIdentifier { - return protocol.TextDocumentIdentifier{ - URI: e.sandbox.Workdir.URI(path), - } -} - -// SaveBuffer writes the content of the buffer specified by the given path to -// the filesystem. -func (e *Editor) SaveBuffer(ctx context.Context, path string) error { - if err := e.OrganizeImports(ctx, path); err != nil { - return fmt.Errorf("organizing imports before save: %w", err) - } - if err := e.FormatBuffer(ctx, path); err != nil { - return fmt.Errorf("formatting before save: %w", err) - } - return e.SaveBufferWithoutActions(ctx, path) -} - -func (e *Editor) SaveBufferWithoutActions(ctx context.Context, path string) error { - e.mu.Lock() - defer e.mu.Unlock() - buf, ok := e.buffers[path] - if !ok { - return fmt.Errorf(fmt.Sprintf("unknown buffer: %q", path)) - } - content := buf.text() - includeText := false - syncOptions, ok := e.serverCapabilities.TextDocumentSync.(protocol.TextDocumentSyncOptions) - if ok { - includeText = syncOptions.Save.IncludeText - } - - docID := e.textDocumentIdentifier(buf.path) - if e.Server != nil { - if err := e.Server.WillSave(ctx, &protocol.WillSaveTextDocumentParams{ - TextDocument: docID, - Reason: protocol.Manual, - }); err != nil { - return fmt.Errorf("WillSave: %w", err) - } - } - if err := e.sandbox.Workdir.WriteFile(ctx, path, content); err != nil { - return fmt.Errorf("writing %q: %w", path, err) - } - - buf.dirty = false - e.buffers[path] = buf - - if e.Server != nil { - params := &protocol.DidSaveTextDocumentParams{ - TextDocument: docID, - } - if includeText { - params.Text = &content - } - if err := e.Server.DidSave(ctx, params); err != nil { - return fmt.Errorf("DidSave: %w", err) - } - e.callsMu.Lock() - e.calls.DidSave++ - e.callsMu.Unlock() - } - return nil -} - -// contentPosition returns the (Line, Column) position corresponding to offset -// in the buffer referenced by path. -func contentPosition(content string, offset int) (Pos, error) { - scanner := bufio.NewScanner(strings.NewReader(content)) - start := 0 - line := 0 - for scanner.Scan() { - end := start + len([]rune(scanner.Text())) + 1 - if offset < end { - return Pos{Line: line, Column: offset - start}, nil - } - start = end - line++ - } - if err := scanner.Err(); err != nil { - return Pos{}, fmt.Errorf("scanning content: %w", err) - } - // Scan() will drop the last line if it is empty. Correct for this. - if (strings.HasSuffix(content, "\n") || content == "") && offset == start { - return Pos{Line: line, Column: 0}, nil - } - return Pos{}, fmt.Errorf("position %d out of bounds in %q (line = %d, start = %d)", offset, content, line, start) -} - -// ErrNoMatch is returned if a regexp search fails. -var ( - ErrNoMatch = errors.New("no match") - ErrUnknownBuffer = errors.New("unknown buffer") -) - -// regexpRange returns the start and end of the first occurrence of either re -// or its singular subgroup. It returns ErrNoMatch if the regexp doesn't match. -func regexpRange(content, re string) (Pos, Pos, error) { - content = normalizeEOL(content) - var start, end int - rec, err := regexp.Compile(re) - if err != nil { - return Pos{}, Pos{}, err - } - indexes := rec.FindStringSubmatchIndex(content) - if indexes == nil { - return Pos{}, Pos{}, ErrNoMatch - } - switch len(indexes) { - case 2: - // no subgroups: return the range of the regexp expression - start, end = indexes[0], indexes[1] - case 4: - // one subgroup: return its range - start, end = indexes[2], indexes[3] - default: - return Pos{}, Pos{}, fmt.Errorf("invalid search regexp %q: expect either 0 or 1 subgroups, got %d", re, len(indexes)/2-1) - } - startPos, err := contentPosition(content, start) - if err != nil { - return Pos{}, Pos{}, err - } - endPos, err := contentPosition(content, end) - if err != nil { - return Pos{}, Pos{}, err - } - return startPos, endPos, nil -} - -func normalizeEOL(content string) string { - return strings.Join(lines(content), "\n") -} - -// RegexpRange returns the first range in the buffer bufName matching re. See -// RegexpSearch for more information on matching. -func (e *Editor) RegexpRange(bufName, re string) (Pos, Pos, error) { - e.mu.Lock() - defer e.mu.Unlock() - buf, ok := e.buffers[bufName] - if !ok { - return Pos{}, Pos{}, ErrUnknownBuffer - } - return regexpRange(buf.text(), re) -} - -// RegexpSearch returns the position of the first match for re in the buffer -// bufName. For convenience, RegexpSearch supports the following two modes: -// 1. If re has no subgroups, return the position of the match for re itself. -// 2. If re has one subgroup, return the position of the first subgroup. -// -// It returns an error re is invalid, has more than one subgroup, or doesn't -// match the buffer. -func (e *Editor) RegexpSearch(bufName, re string) (Pos, error) { - start, _, err := e.RegexpRange(bufName, re) - return start, err -} - -// RegexpReplace edits the buffer corresponding to path by replacing the first -// instance of re, or its first subgroup, with the replace text. See -// RegexpSearch for more explanation of these two modes. -// It returns an error if re is invalid, has more than one subgroup, or doesn't -// match the buffer. -func (e *Editor) RegexpReplace(ctx context.Context, path, re, replace string) error { - e.mu.Lock() - defer e.mu.Unlock() - buf, ok := e.buffers[path] - if !ok { - return ErrUnknownBuffer - } - content := buf.text() - start, end, err := regexpRange(content, re) - if err != nil { - return err - } - return e.editBufferLocked(ctx, path, []Edit{{ - Start: start, - End: end, - Text: replace, - }}) -} - -// EditBuffer applies the given test edits to the buffer identified by path. -func (e *Editor) EditBuffer(ctx context.Context, path string, edits []Edit) error { - e.mu.Lock() - defer e.mu.Unlock() - return e.editBufferLocked(ctx, path, edits) -} - -func (e *Editor) SetBufferContent(ctx context.Context, path, content string) error { - e.mu.Lock() - defer e.mu.Unlock() - lines := lines(content) - return e.setBufferContentLocked(ctx, path, true, lines, nil) -} - -// HasBuffer reports whether the file name is open in the editor. -func (e *Editor) HasBuffer(name string) bool { - e.mu.Lock() - defer e.mu.Unlock() - _, ok := e.buffers[name] - return ok -} - -// BufferText returns the content of the buffer with the given name. -func (e *Editor) BufferText(name string) string { - e.mu.Lock() - defer e.mu.Unlock() - return e.buffers[name].text() -} - -// BufferVersion returns the current version of the buffer corresponding to -// name (or 0 if it is not being edited). -func (e *Editor) BufferVersion(name string) int { - e.mu.Lock() - defer e.mu.Unlock() - return e.buffers[name].version -} - -func (e *Editor) editBufferLocked(ctx context.Context, path string, edits []Edit) error { - buf, ok := e.buffers[path] - if !ok { - return fmt.Errorf("unknown buffer %q", path) - } - content := make([]string, len(buf.lines)) - copy(content, buf.lines) - content, err := editContent(content, edits) - if err != nil { - return err - } - return e.setBufferContentLocked(ctx, path, true, content, edits) -} - -func (e *Editor) setBufferContentLocked(ctx context.Context, path string, dirty bool, content []string, fromEdits []Edit) error { - buf, ok := e.buffers[path] - if !ok { - return fmt.Errorf("unknown buffer %q", path) - } - buf.lines = content - buf.version++ - buf.dirty = dirty - e.buffers[path] = buf - // A simple heuristic: if there is only one edit, send it incrementally. - // Otherwise, send the entire content. - var evts []protocol.TextDocumentContentChangeEvent - if len(fromEdits) == 1 { - evts = append(evts, fromEdits[0].toProtocolChangeEvent()) - } else { - evts = append(evts, protocol.TextDocumentContentChangeEvent{ - Text: buf.text(), - }) - } - params := &protocol.DidChangeTextDocumentParams{ - TextDocument: protocol.VersionedTextDocumentIdentifier{ - Version: int32(buf.version), - TextDocumentIdentifier: e.textDocumentIdentifier(buf.path), - }, - ContentChanges: evts, - } - if e.Server != nil { - if err := e.Server.DidChange(ctx, params); err != nil { - return fmt.Errorf("DidChange: %w", err) - } - e.callsMu.Lock() - e.calls.DidChange++ - e.callsMu.Unlock() - } - return nil -} - -// GoToDefinition jumps to the definition of the symbol at the given position -// in an open buffer. It returns the path and position of the resulting jump. -func (e *Editor) GoToDefinition(ctx context.Context, path string, pos Pos) (string, Pos, error) { - if err := e.checkBufferPosition(path, pos); err != nil { - return "", Pos{}, err - } - params := &protocol.DefinitionParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - params.Position = pos.ToProtocolPosition() - - resp, err := e.Server.Definition(ctx, params) - if err != nil { - return "", Pos{}, fmt.Errorf("definition: %w", err) - } - return e.extractFirstPathAndPos(ctx, resp) -} - -// GoToTypeDefinition jumps to the type definition of the symbol at the given position -// in an open buffer. -func (e *Editor) GoToTypeDefinition(ctx context.Context, path string, pos Pos) (string, Pos, error) { - if err := e.checkBufferPosition(path, pos); err != nil { - return "", Pos{}, err - } - params := &protocol.TypeDefinitionParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - params.Position = pos.ToProtocolPosition() - - resp, err := e.Server.TypeDefinition(ctx, params) - if err != nil { - return "", Pos{}, fmt.Errorf("type definition: %w", err) - } - return e.extractFirstPathAndPos(ctx, resp) -} - -// extractFirstPathAndPos returns the path and the position of the first location. -// It opens the file if needed. -func (e *Editor) extractFirstPathAndPos(ctx context.Context, locs []protocol.Location) (string, Pos, error) { - if len(locs) == 0 { - return "", Pos{}, nil - } - - newPath := e.sandbox.Workdir.URIToPath(locs[0].URI) - newPos := fromProtocolPosition(locs[0].Range.Start) - if !e.HasBuffer(newPath) { - if err := e.OpenFile(ctx, newPath); err != nil { - return "", Pos{}, fmt.Errorf("OpenFile: %w", err) - } - } - return newPath, newPos, nil -} - -// Symbol performs a workspace symbol search using query -func (e *Editor) Symbol(ctx context.Context, query string) ([]SymbolInformation, error) { - params := &protocol.WorkspaceSymbolParams{} - params.Query = query - - resp, err := e.Server.Symbol(ctx, params) - if err != nil { - return nil, fmt.Errorf("symbol: %w", err) - } - var res []SymbolInformation - for _, si := range resp { - ploc := si.Location - path := e.sandbox.Workdir.URIToPath(ploc.URI) - start := fromProtocolPosition(ploc.Range.Start) - end := fromProtocolPosition(ploc.Range.End) - rnge := Range{ - Start: start, - End: end, - } - loc := Location{ - Path: path, - Range: rnge, - } - res = append(res, SymbolInformation{ - Name: si.Name, - Kind: si.Kind, - Location: loc, - }) - } - return res, nil -} - -// OrganizeImports requests and performs the source.organizeImports codeAction. -func (e *Editor) OrganizeImports(ctx context.Context, path string) error { - _, err := e.applyCodeActions(ctx, path, nil, nil, protocol.SourceOrganizeImports) - return err -} - -// RefactorRewrite requests and performs the source.refactorRewrite codeAction. -func (e *Editor) RefactorRewrite(ctx context.Context, path string, rng *protocol.Range) error { - applied, err := e.applyCodeActions(ctx, path, rng, nil, protocol.RefactorRewrite) - if applied == 0 { - return fmt.Errorf("no refactorings were applied") - } - return err -} - -// ApplyQuickFixes requests and performs the quickfix codeAction. -func (e *Editor) ApplyQuickFixes(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) error { - applied, err := e.applyCodeActions(ctx, path, rng, diagnostics, protocol.SourceFixAll, protocol.QuickFix) - if applied == 0 { - return fmt.Errorf("no quick fixes were applied") - } - return err -} - -// ApplyCodeAction applies the given code action. -func (e *Editor) ApplyCodeAction(ctx context.Context, action protocol.CodeAction) error { - for _, change := range action.Edit.DocumentChanges { - path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI) - if int32(e.buffers[path].version) != change.TextDocument.Version { - // Skip edits for old versions. - continue - } - edits := convertEdits(change.Edits) - if err := e.EditBuffer(ctx, path, edits); err != nil { - return fmt.Errorf("editing buffer %q: %w", path, err) - } - } - // Execute any commands. The specification says that commands are - // executed after edits are applied. - if action.Command != nil { - if _, err := e.ExecuteCommand(ctx, &protocol.ExecuteCommandParams{ - Command: action.Command.Command, - Arguments: action.Command.Arguments, - }); err != nil { - return err - } - } - // Some commands may edit files on disk. - return e.sandbox.Workdir.CheckForFileChanges(ctx) -} - -// GetQuickFixes returns the available quick fix code actions. -func (e *Editor) GetQuickFixes(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) { - return e.getCodeActions(ctx, path, rng, diagnostics, protocol.QuickFix, protocol.SourceFixAll) -} - -func (e *Editor) applyCodeActions(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) (int, error) { - actions, err := e.getCodeActions(ctx, path, rng, diagnostics, only...) - if err != nil { - return 0, err - } - applied := 0 - for _, action := range actions { - if action.Title == "" { - return 0, fmt.Errorf("empty title for code action") - } - var match bool - for _, o := range only { - if action.Kind == o { - match = true - break - } - } - if !match { - continue - } - applied++ - if err := e.ApplyCodeAction(ctx, action); err != nil { - return 0, err - } - } - return applied, nil -} - -func (e *Editor) getCodeActions(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) ([]protocol.CodeAction, error) { - if e.Server == nil { - return nil, nil - } - params := &protocol.CodeActionParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - params.Context.Only = only - if diagnostics != nil { - params.Context.Diagnostics = diagnostics - } - if rng != nil { - params.Range = *rng - } - return e.Server.CodeAction(ctx, params) -} - -func (e *Editor) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) { - if e.Server == nil { - return nil, nil - } - var match bool - // Ensure that this command was actually listed as a supported command. - for _, command := range e.serverCapabilities.ExecuteCommandProvider.Commands { - if command == params.Command { - match = true - break - } - } - if !match { - return nil, fmt.Errorf("unsupported command %q", params.Command) - } - result, err := e.Server.ExecuteCommand(ctx, params) - if err != nil { - return nil, err - } - // Some commands use the go command, which writes directly to disk. - // For convenience, check for those changes. - if err := e.sandbox.Workdir.CheckForFileChanges(ctx); err != nil { - return nil, err - } - return result, nil -} - -func convertEdits(protocolEdits []protocol.TextEdit) []Edit { - var edits []Edit - for _, lspEdit := range protocolEdits { - edits = append(edits, fromProtocolTextEdit(lspEdit)) - } - return edits -} - -// FormatBuffer gofmts a Go file. -func (e *Editor) FormatBuffer(ctx context.Context, path string) error { - if e.Server == nil { - return nil - } - e.mu.Lock() - version := e.buffers[path].version - e.mu.Unlock() - params := &protocol.DocumentFormattingParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - resp, err := e.Server.Formatting(ctx, params) - if err != nil { - return fmt.Errorf("textDocument/formatting: %w", err) - } - e.mu.Lock() - defer e.mu.Unlock() - if versionAfter := e.buffers[path].version; versionAfter != version { - return fmt.Errorf("before receipt of formatting edits, buffer version changed from %d to %d", version, versionAfter) - } - edits := convertEdits(resp) - if len(edits) == 0 { - return nil - } - return e.editBufferLocked(ctx, path, edits) -} - -func (e *Editor) checkBufferPosition(path string, pos Pos) error { - e.mu.Lock() - defer e.mu.Unlock() - buf, ok := e.buffers[path] - if !ok { - return fmt.Errorf("buffer %q is not open", path) - } - if !inText(pos, buf.lines) { - return fmt.Errorf("position %v is invalid in buffer %q", pos, path) - } - return nil -} - -// RunGenerate runs `go generate` non-recursively in the workdir-relative dir -// path. It does not report any resulting file changes as a watched file -// change, so must be followed by a call to Workdir.CheckForFileChanges once -// the generate command has completed. -// TODO(rFindley): this shouldn't be necessary anymore. Delete it. -func (e *Editor) RunGenerate(ctx context.Context, dir string) error { - if e.Server == nil { - return nil - } - absDir := e.sandbox.Workdir.AbsPath(dir) - cmd, err := command.NewGenerateCommand("", command.GenerateArgs{ - Dir: protocol.URIFromSpanURI(span.URIFromPath(absDir)), - Recursive: false, - }) - if err != nil { - return err - } - params := &protocol.ExecuteCommandParams{ - Command: cmd.Command, - Arguments: cmd.Arguments, - } - if _, err := e.ExecuteCommand(ctx, params); err != nil { - return fmt.Errorf("running generate: %v", err) - } - // Unfortunately we can't simply poll the workdir for file changes here, - // because server-side command may not have completed. In regtests, we can - // Await this state change, but here we must delegate that responsibility to - // the caller. - return nil -} - -// CodeLens executes a codelens request on the server. -func (e *Editor) CodeLens(ctx context.Context, path string) ([]protocol.CodeLens, error) { - if e.Server == nil { - return nil, nil - } - e.mu.Lock() - _, ok := e.buffers[path] - e.mu.Unlock() - if !ok { - return nil, fmt.Errorf("buffer %q is not open", path) - } - params := &protocol.CodeLensParams{ - TextDocument: e.textDocumentIdentifier(path), - } - lens, err := e.Server.CodeLens(ctx, params) - if err != nil { - return nil, err - } - return lens, nil -} - -// Completion executes a completion request on the server. -func (e *Editor) Completion(ctx context.Context, path string, pos Pos) (*protocol.CompletionList, error) { - if e.Server == nil { - return nil, nil - } - e.mu.Lock() - _, ok := e.buffers[path] - e.mu.Unlock() - if !ok { - return nil, fmt.Errorf("buffer %q is not open", path) - } - params := &protocol.CompletionParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: e.textDocumentIdentifier(path), - Position: pos.ToProtocolPosition(), - }, - } - completions, err := e.Server.Completion(ctx, params) - if err != nil { - return nil, err - } - return completions, nil -} - -// AcceptCompletion accepts a completion for the given item at the given -// position. -func (e *Editor) AcceptCompletion(ctx context.Context, path string, pos Pos, item protocol.CompletionItem) error { - if e.Server == nil { - return nil - } - e.mu.Lock() - defer e.mu.Unlock() - _, ok := e.buffers[path] - if !ok { - return fmt.Errorf("buffer %q is not open", path) - } - return e.editBufferLocked(ctx, path, convertEdits(append([]protocol.TextEdit{ - *item.TextEdit, - }, item.AdditionalTextEdits...))) -} - -// Symbols executes a workspace/symbols request on the server. -func (e *Editor) Symbols(ctx context.Context, sym string) ([]protocol.SymbolInformation, error) { - if e.Server == nil { - return nil, nil - } - params := &protocol.WorkspaceSymbolParams{Query: sym} - ans, err := e.Server.Symbol(ctx, params) - return ans, err -} - -// References executes a reference request on the server. -func (e *Editor) References(ctx context.Context, path string, pos Pos) ([]protocol.Location, error) { - if e.Server == nil { - return nil, nil - } - e.mu.Lock() - _, ok := e.buffers[path] - e.mu.Unlock() - if !ok { - return nil, fmt.Errorf("buffer %q is not open", path) - } - params := &protocol.ReferenceParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: e.textDocumentIdentifier(path), - Position: pos.ToProtocolPosition(), - }, - Context: protocol.ReferenceContext{ - IncludeDeclaration: true, - }, - } - locations, err := e.Server.References(ctx, params) - if err != nil { - return nil, err - } - return locations, nil -} - -func (e *Editor) Rename(ctx context.Context, path string, pos Pos, newName string) error { - if e.Server == nil { - return nil - } - params := &protocol.RenameParams{ - TextDocument: e.textDocumentIdentifier(path), - Position: pos.ToProtocolPosition(), - NewName: newName, - } - wsEdits, err := e.Server.Rename(ctx, params) - if err != nil { - return err - } - for _, change := range wsEdits.DocumentChanges { - if err := e.applyProtocolEdit(ctx, change); err != nil { - return err - } - } - return nil -} - -func (e *Editor) applyProtocolEdit(ctx context.Context, change protocol.TextDocumentEdit) error { - path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI) - if ver := int32(e.BufferVersion(path)); ver != change.TextDocument.Version { - return fmt.Errorf("buffer versions for %q do not match: have %d, editing %d", path, ver, change.TextDocument.Version) - } - if !e.HasBuffer(path) { - err := e.OpenFile(ctx, path) - if os.IsNotExist(err) { - // TODO: it's unclear if this is correct. Here we create the buffer (with - // version 1), then apply edits. Perhaps we should apply the edits before - // sending the didOpen notification. - e.CreateBuffer(ctx, path, "") - err = nil - } - if err != nil { - return err - } - } - fakeEdits := convertEdits(change.Edits) - return e.EditBuffer(ctx, path, fakeEdits) -} - -// CodeAction executes a codeAction request on the server. -func (e *Editor) CodeAction(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) { - if e.Server == nil { - return nil, nil - } - e.mu.Lock() - _, ok := e.buffers[path] - e.mu.Unlock() - if !ok { - return nil, fmt.Errorf("buffer %q is not open", path) - } - params := &protocol.CodeActionParams{ - TextDocument: e.textDocumentIdentifier(path), - Context: protocol.CodeActionContext{ - Diagnostics: diagnostics, - }, - } - if rng != nil { - params.Range = *rng - } - lens, err := e.Server.CodeAction(ctx, params) - if err != nil { - return nil, err - } - return lens, nil -} - -// Hover triggers a hover at the given position in an open buffer. -func (e *Editor) Hover(ctx context.Context, path string, pos Pos) (*protocol.MarkupContent, Pos, error) { - if err := e.checkBufferPosition(path, pos); err != nil { - return nil, Pos{}, err - } - params := &protocol.HoverParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - params.Position = pos.ToProtocolPosition() - - resp, err := e.Server.Hover(ctx, params) - if err != nil { - return nil, Pos{}, fmt.Errorf("hover: %w", err) - } - if resp == nil { - return nil, Pos{}, nil - } - return &resp.Contents, fromProtocolPosition(resp.Range.Start), nil -} - -func (e *Editor) DocumentLink(ctx context.Context, path string) ([]protocol.DocumentLink, error) { - if e.Server == nil { - return nil, nil - } - params := &protocol.DocumentLinkParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - return e.Server.DocumentLink(ctx, params) -} - -func (e *Editor) DocumentHighlight(ctx context.Context, path string, pos Pos) ([]protocol.DocumentHighlight, error) { - if e.Server == nil { - return nil, nil - } - if err := e.checkBufferPosition(path, pos); err != nil { - return nil, err - } - params := &protocol.DocumentHighlightParams{} - params.TextDocument.URI = e.sandbox.Workdir.URI(path) - params.Position = pos.ToProtocolPosition() - - return e.Server.DocumentHighlight(ctx, params) -} diff --git a/internal/lsp/fake/editor_test.go b/internal/lsp/fake/editor_test.go deleted file mode 100644 index 3ce5df6e08f..00000000000 --- a/internal/lsp/fake/editor_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "context" - "testing" -) - -func TestContentPosition(t *testing.T) { - content := "foo\nšŸ˜€\nbar" - tests := []struct { - offset, wantLine, wantColumn int - }{ - {0, 0, 0}, - {3, 0, 3}, - {4, 1, 0}, - {5, 1, 1}, - {6, 2, 0}, - } - for _, test := range tests { - pos, err := contentPosition(content, test.offset) - if err != nil { - t.Fatal(err) - } - if pos.Line != test.wantLine { - t.Errorf("contentPosition(%q, %d): Line = %d, want %d", content, test.offset, pos.Line, test.wantLine) - } - if pos.Column != test.wantColumn { - t.Errorf("contentPosition(%q, %d): Column = %d, want %d", content, test.offset, pos.Column, test.wantColumn) - } - } -} - -const exampleProgram = ` --- go.mod -- -go 1.12 --- main.go -- -package main - -import "fmt" - -func main() { - fmt.Println("Hello World.") -} -` - -func TestClientEditing(t *testing.T) { - ws, err := NewSandbox(&SandboxConfig{Files: UnpackTxt(exampleProgram)}) - if err != nil { - t.Fatal(err) - } - defer ws.Close() - ctx := context.Background() - editor := NewEditor(ws, EditorConfig{}) - if err := editor.OpenFile(ctx, "main.go"); err != nil { - t.Fatal(err) - } - if err := editor.EditBuffer(ctx, "main.go", []Edit{ - { - Start: Pos{5, 14}, - End: Pos{5, 26}, - Text: "Hola, mundo.", - }, - }); err != nil { - t.Fatal(err) - } - got := editor.buffers["main.go"].text() - want := `package main - -import "fmt" - -func main() { - fmt.Println("Hola, mundo.") -} -` - if got != want { - t.Errorf("got text %q, want %q", got, want) - } -} diff --git a/internal/lsp/fake/workdir.go b/internal/lsp/fake/workdir.go deleted file mode 100644 index 734f5fd8197..00000000000 --- a/internal/lsp/fake/workdir.go +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "bytes" - "context" - "crypto/sha256" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// FileEvent wraps the protocol.FileEvent so that it can be associated with a -// workdir-relative path. -type FileEvent struct { - Path, Content string - ProtocolEvent protocol.FileEvent -} - -// RelativeTo is a helper for operations relative to a given directory. -type RelativeTo string - -// AbsPath returns an absolute filesystem path for the workdir-relative path. -func (r RelativeTo) AbsPath(path string) string { - fp := filepath.FromSlash(path) - if filepath.IsAbs(fp) { - return fp - } - return filepath.Join(string(r), filepath.FromSlash(path)) -} - -// RelPath returns a '/'-encoded path relative to the working directory (or an -// absolute path if the file is outside of workdir) -func (r RelativeTo) RelPath(fp string) string { - root := string(r) - if rel, err := filepath.Rel(root, fp); err == nil && !strings.HasPrefix(rel, "..") { - return filepath.ToSlash(rel) - } - return filepath.ToSlash(fp) -} - -// WriteFileData writes content to the relative path, replacing the special -// token $SANDBOX_WORKDIR with the relative root given by rel. -func WriteFileData(path string, content []byte, rel RelativeTo) error { - content = bytes.ReplaceAll(content, []byte("$SANDBOX_WORKDIR"), []byte(rel)) - fp := rel.AbsPath(path) - if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil { - return fmt.Errorf("creating nested directory: %w", err) - } - backoff := 1 * time.Millisecond - for { - err := ioutil.WriteFile(fp, []byte(content), 0644) - if err != nil { - if isWindowsErrLockViolation(err) { - time.Sleep(backoff) - backoff *= 2 - continue - } - return fmt.Errorf("writing %q: %w", path, err) - } - return nil - } -} - -// isWindowsErrLockViolation reports whether err is ERROR_LOCK_VIOLATION -// on Windows. -var isWindowsErrLockViolation = func(err error) bool { return false } - -// Workdir is a temporary working directory for tests. It exposes file -// operations in terms of relative paths, and fakes file watching by triggering -// events on file operations. -type Workdir struct { - RelativeTo - - watcherMu sync.Mutex - watchers []func(context.Context, []FileEvent) - - fileMu sync.Mutex - // File identities we know about, for the purpose of detecting changes. - // - // Since files is only used for detecting _changes_, we are tolerant of - // fileIDs that may have hash and mtime coming from different states of the - // file: if either are out of sync, then the next poll should detect a - // discrepancy. It is OK if we detect too many changes, but not OK if we miss - // changes. - // - // For that matter, this mechanism for detecting changes can still be flaky - // on platforms where mtime is very coarse (such as older versions of WSL). - // It would be much better to use a proper fs event library, but we can't - // currently import those into x/tools. - // - // TODO(golang/go#52284): replace this polling mechanism with a - // cross-platform library for filesystem notifications. - files map[string]fileID -} - -// fileID is a file identity for the purposes of detecting on-disk -// modifications. -type fileID struct { - hash string - mtime time.Time -} - -// NewWorkdir writes the txtar-encoded file data in txt to dir, and returns a -// Workir for operating on these files using -func NewWorkdir(dir string) *Workdir { - return &Workdir{RelativeTo: RelativeTo(dir)} -} - -func hashFile(data []byte) string { - return fmt.Sprintf("%x", sha256.Sum256(data)) -} - -func (w *Workdir) writeInitialFiles(files map[string][]byte) error { - w.files = map[string]fileID{} - for name, data := range files { - if err := WriteFileData(name, data, w.RelativeTo); err != nil { - return fmt.Errorf("writing to workdir: %w", err) - } - fp := w.AbsPath(name) - - // We need the mtime of the file just written for the purposes of tracking - // file identity. Calling Stat here could theoretically return an mtime - // that is inconsistent with the file contents represented by the hash, but - // since we "own" this file we assume that the mtime is correct. - // - // Furthermore, see the documentation for Workdir.files for why mismatches - // between identifiers are considered to be benign. - fi, err := os.Stat(fp) - if err != nil { - return fmt.Errorf("reading file info: %v", err) - } - - w.files[name] = fileID{ - hash: hashFile(data), - mtime: fi.ModTime(), - } - } - return nil -} - -// RootURI returns the root URI for this working directory of this scratch -// environment. -func (w *Workdir) RootURI() protocol.DocumentURI { - return toURI(string(w.RelativeTo)) -} - -// AddWatcher registers the given func to be called on any file change. -func (w *Workdir) AddWatcher(watcher func(context.Context, []FileEvent)) { - w.watcherMu.Lock() - w.watchers = append(w.watchers, watcher) - w.watcherMu.Unlock() -} - -// URI returns the URI to a the workdir-relative path. -func (w *Workdir) URI(path string) protocol.DocumentURI { - return toURI(w.AbsPath(path)) -} - -// URIToPath converts a uri to a workdir-relative path (or an absolute path, -// if the uri is outside of the workdir). -func (w *Workdir) URIToPath(uri protocol.DocumentURI) string { - fp := uri.SpanURI().Filename() - return w.RelPath(fp) -} - -func toURI(fp string) protocol.DocumentURI { - return protocol.DocumentURI(span.URIFromPath(fp)) -} - -// ReadFile reads a text file specified by a workdir-relative path. -func (w *Workdir) ReadFile(path string) (string, error) { - backoff := 1 * time.Millisecond - for { - b, err := ioutil.ReadFile(w.AbsPath(path)) - if err != nil { - if runtime.GOOS == "plan9" && strings.HasSuffix(err.Error(), " exclusive use file already open") { - // Plan 9 enforces exclusive access to locked files. - // Give the owner time to unlock it and retry. - time.Sleep(backoff) - backoff *= 2 - continue - } - return "", err - } - return string(b), nil - } -} - -func (w *Workdir) RegexpRange(path, re string) (Pos, Pos, error) { - content, err := w.ReadFile(path) - if err != nil { - return Pos{}, Pos{}, err - } - return regexpRange(content, re) -} - -// RegexpSearch searches the file corresponding to path for the first position -// matching re. -func (w *Workdir) RegexpSearch(path string, re string) (Pos, error) { - content, err := w.ReadFile(path) - if err != nil { - return Pos{}, err - } - start, _, err := regexpRange(content, re) - return start, err -} - -// ChangeFilesOnDisk executes the given on-disk file changes in a batch, -// simulating the action of changing branches outside of an editor. -func (w *Workdir) ChangeFilesOnDisk(ctx context.Context, events []FileEvent) error { - for _, e := range events { - switch e.ProtocolEvent.Type { - case protocol.Deleted: - fp := w.AbsPath(e.Path) - if err := os.Remove(fp); err != nil { - return fmt.Errorf("removing %q: %w", e.Path, err) - } - case protocol.Changed, protocol.Created: - if _, err := w.writeFile(ctx, e.Path, e.Content); err != nil { - return err - } - } - } - w.sendEvents(ctx, events) - return nil -} - -// RemoveFile removes a workdir-relative file path. -func (w *Workdir) RemoveFile(ctx context.Context, path string) error { - fp := w.AbsPath(path) - if err := os.RemoveAll(fp); err != nil { - return fmt.Errorf("removing %q: %w", path, err) - } - w.fileMu.Lock() - defer w.fileMu.Unlock() - - evts := []FileEvent{{ - Path: path, - ProtocolEvent: protocol.FileEvent{ - URI: w.URI(path), - Type: protocol.Deleted, - }, - }} - w.sendEvents(ctx, evts) - delete(w.files, path) - return nil -} - -func (w *Workdir) sendEvents(ctx context.Context, evts []FileEvent) { - if len(evts) == 0 { - return - } - w.watcherMu.Lock() - watchers := make([]func(context.Context, []FileEvent), len(w.watchers)) - copy(watchers, w.watchers) - w.watcherMu.Unlock() - for _, w := range watchers { - w(ctx, evts) - } -} - -// WriteFiles writes the text file content to workdir-relative paths. -// It batches notifications rather than sending them consecutively. -func (w *Workdir) WriteFiles(ctx context.Context, files map[string]string) error { - var evts []FileEvent - for filename, content := range files { - evt, err := w.writeFile(ctx, filename, content) - if err != nil { - return err - } - evts = append(evts, evt) - } - w.sendEvents(ctx, evts) - return nil -} - -// WriteFile writes text file content to a workdir-relative path. -func (w *Workdir) WriteFile(ctx context.Context, path, content string) error { - evt, err := w.writeFile(ctx, path, content) - if err != nil { - return err - } - w.sendEvents(ctx, []FileEvent{evt}) - return nil -} - -func (w *Workdir) writeFile(ctx context.Context, path, content string) (FileEvent, error) { - fp := w.AbsPath(path) - _, err := os.Stat(fp) - if err != nil && !os.IsNotExist(err) { - return FileEvent{}, fmt.Errorf("checking if %q exists: %w", path, err) - } - var changeType protocol.FileChangeType - if os.IsNotExist(err) { - changeType = protocol.Created - } else { - changeType = protocol.Changed - } - if err := WriteFileData(path, []byte(content), w.RelativeTo); err != nil { - return FileEvent{}, err - } - return FileEvent{ - Path: path, - ProtocolEvent: protocol.FileEvent{ - URI: w.URI(path), - Type: changeType, - }, - }, nil -} - -// listFiles lists files in the given directory, returning a map of relative -// path to contents and modification time. -func (w *Workdir) listFiles(dir string) (map[string]fileID, error) { - files := make(map[string]fileID) - absDir := w.AbsPath(dir) - if err := filepath.Walk(absDir, func(fp string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - path := w.RelPath(fp) - - data, err := ioutil.ReadFile(fp) - if err != nil { - return err - } - // The content returned by ioutil.ReadFile could be inconsistent with - // info.ModTime(), due to a subsequent modification. See the documentation - // for w.files for why we consider this to be benign. - files[path] = fileID{ - hash: hashFile(data), - mtime: info.ModTime(), - } - return nil - }); err != nil { - return nil, err - } - return files, nil -} - -// CheckForFileChanges walks the working directory and checks for any files -// that have changed since the last poll. -func (w *Workdir) CheckForFileChanges(ctx context.Context) error { - evts, err := w.pollFiles() - if err != nil { - return err - } - w.sendEvents(ctx, evts) - return nil -} - -// pollFiles updates w.files and calculates FileEvents corresponding to file -// state changes since the last poll. It does not call sendEvents. -func (w *Workdir) pollFiles() ([]FileEvent, error) { - w.fileMu.Lock() - defer w.fileMu.Unlock() - - files, err := w.listFiles(".") - if err != nil { - return nil, err - } - var evts []FileEvent - // Check which files have been added or modified. - for path, id := range files { - oldID, ok := w.files[path] - delete(w.files, path) - var typ protocol.FileChangeType - switch { - case !ok: - typ = protocol.Created - case oldID != id: - typ = protocol.Changed - default: - continue - } - evts = append(evts, FileEvent{ - Path: path, - ProtocolEvent: protocol.FileEvent{ - URI: w.URI(path), - Type: typ, - }, - }) - } - // Any remaining files must have been deleted. - for path := range w.files { - evts = append(evts, FileEvent{ - Path: path, - ProtocolEvent: protocol.FileEvent{ - URI: w.URI(path), - Type: protocol.Deleted, - }, - }) - } - w.files = files - return evts, nil -} diff --git a/internal/lsp/fake/workdir_test.go b/internal/lsp/fake/workdir_test.go deleted file mode 100644 index 33fbb9fa1d5..00000000000 --- a/internal/lsp/fake/workdir_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "context" - "io/ioutil" - "os" - "sort" - "testing" - "time" - - "golang.org/x/tools/internal/lsp/protocol" -) - -const data = ` --- go.mod -- -go 1.12 --- nested/README.md -- -Hello World! -` - -func newWorkdir(t *testing.T) (*Workdir, <-chan []FileEvent, func()) { - t.Helper() - - tmpdir, err := ioutil.TempDir("", "goplstest-workdir-") - if err != nil { - t.Fatal(err) - } - wd := NewWorkdir(tmpdir) - if err := wd.writeInitialFiles(UnpackTxt(data)); err != nil { - t.Fatal(err) - } - cleanup := func() { - if err := os.RemoveAll(tmpdir); err != nil { - t.Error(err) - } - } - - fileEvents := make(chan []FileEvent) - watch := func(_ context.Context, events []FileEvent) { - go func() { - fileEvents <- events - }() - } - wd.AddWatcher(watch) - return wd, fileEvents, cleanup -} - -func TestWorkdir_ReadFile(t *testing.T) { - wd, _, cleanup := newWorkdir(t) - defer cleanup() - - got, err := wd.ReadFile("nested/README.md") - if err != nil { - t.Fatal(err) - } - want := "Hello World!\n" - if got != want { - t.Errorf("reading workdir file, got %q, want %q", got, want) - } -} - -func TestWorkdir_WriteFile(t *testing.T) { - wd, events, cleanup := newWorkdir(t) - defer cleanup() - ctx := context.Background() - - tests := []struct { - path string - wantType protocol.FileChangeType - }{ - {"data.txt", protocol.Created}, - {"nested/README.md", protocol.Changed}, - } - - for _, test := range tests { - if err := wd.WriteFile(ctx, test.path, "42"); err != nil { - t.Fatal(err) - } - es := <-events - if got := len(es); got != 1 { - t.Fatalf("len(events) = %d, want 1", got) - } - if es[0].Path != test.path { - t.Errorf("event.Path = %q, want %q", es[0].Path, test.path) - } - if es[0].ProtocolEvent.Type != test.wantType { - t.Errorf("event type = %v, want %v", es[0].ProtocolEvent.Type, test.wantType) - } - got, err := wd.ReadFile(test.path) - if err != nil { - t.Fatal(err) - } - want := "42" - if got != want { - t.Errorf("ws.ReadFile(%q) = %q, want %q", test.path, got, want) - } - } -} - -func TestWorkdir_ListFiles(t *testing.T) { - wd, _, cleanup := newWorkdir(t) - defer cleanup() - - checkFiles := func(dir string, want []string) { - files, err := wd.listFiles(dir) - if err != nil { - t.Fatal(err) - } - sort.Strings(want) - var got []string - for p := range files { - got = append(got, p) - } - sort.Strings(got) - if len(got) != len(want) { - t.Fatalf("ListFiles(): len = %d, want %d; got=%v; want=%v", len(got), len(want), got, want) - } - for i, f := range got { - if f != want[i] { - t.Errorf("ListFiles()[%d] = %s, want %s", i, f, want[i]) - } - } - } - - checkFiles(".", []string{"go.mod", "nested/README.md"}) - checkFiles("nested", []string{"nested/README.md"}) -} - -func TestWorkdir_CheckForFileChanges(t *testing.T) { - t.Skip("broken on darwin-amd64-10_12") - wd, events, cleanup := newWorkdir(t) - defer cleanup() - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - checkChange := func(path string, typ protocol.FileChangeType) { - if err := wd.CheckForFileChanges(ctx); err != nil { - t.Fatal(err) - } - var gotEvt FileEvent - select { - case <-ctx.Done(): - t.Fatal(ctx.Err()) - case ev := <-events: - gotEvt = ev[0] - } - // Only check relative path and Type - if gotEvt.Path != path || gotEvt.ProtocolEvent.Type != typ { - t.Errorf("file events: got %v, want {Path: %s, Type: %v}", gotEvt, path, typ) - } - } - // Sleep some positive amount of time to ensure a distinct mtime. - time.Sleep(100 * time.Millisecond) - if err := WriteFileData("go.mod", []byte("module foo.test\n"), wd.RelativeTo); err != nil { - t.Fatal(err) - } - checkChange("go.mod", protocol.Changed) - if err := WriteFileData("newFile", []byte("something"), wd.RelativeTo); err != nil { - t.Fatal(err) - } - checkChange("newFile", protocol.Created) - fp := wd.AbsPath("newFile") - if err := os.Remove(fp); err != nil { - t.Fatal(err) - } - checkChange("newFile", protocol.Deleted) -} - -func TestSplitModuleVersionPath(t *testing.T) { - tests := []struct { - path string - wantModule, wantVersion, wantSuffix string - }{ - {"foo.com@v1.2.3/bar", "foo.com", "v1.2.3", "bar"}, - {"foo.com/module@v1.2.3/bar", "foo.com/module", "v1.2.3", "bar"}, - {"foo.com@v1.2.3", "foo.com", "v1.2.3", ""}, - {"std@v1.14.0", "std", "v1.14.0", ""}, - {"another/module/path", "another/module/path", "", ""}, - } - - for _, test := range tests { - module, version, suffix := splitModuleVersionPath(test.path) - if module != test.wantModule || version != test.wantVersion || suffix != test.wantSuffix { - t.Errorf("splitModuleVersionPath(%q) =\n\t(%q, %q, %q)\nwant\n\t(%q, %q, %q)", - test.path, module, version, suffix, test.wantModule, test.wantVersion, test.wantSuffix) - } - } -} diff --git a/internal/lsp/fake/workdir_windows.go b/internal/lsp/fake/workdir_windows.go deleted file mode 100644 index bcd18b7a226..00000000000 --- a/internal/lsp/fake/workdir_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fake - -import ( - "errors" - "syscall" -) - -func init() { - // from https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499- - const ERROR_LOCK_VIOLATION syscall.Errno = 33 - - isWindowsErrLockViolation = func(err error) bool { - return errors.Is(err, ERROR_LOCK_VIOLATION) - } -} diff --git a/internal/lsp/folding_range.go b/internal/lsp/folding_range.go deleted file mode 100644 index 75f48a4498f..00000000000 --- a/internal/lsp/folding_range.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) foldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - - ranges, err := source.FoldingRange(ctx, snapshot, fh, snapshot.View().Options().LineFoldingOnly) - if err != nil { - return nil, err - } - return toProtocolFoldingRanges(ranges) -} - -func toProtocolFoldingRanges(ranges []*source.FoldingRangeInfo) ([]protocol.FoldingRange, error) { - result := make([]protocol.FoldingRange, 0, len(ranges)) - for _, info := range ranges { - rng, err := info.Range() - if err != nil { - return nil, err - } - result = append(result, protocol.FoldingRange{ - StartLine: rng.Start.Line, - StartCharacter: rng.Start.Character, - EndLine: rng.End.Line, - EndCharacter: rng.End.Character, - Kind: string(info.Kind), - }) - } - return result, nil -} diff --git a/internal/lsp/format.go b/internal/lsp/format.go deleted file mode 100644 index 19736af38bc..00000000000 --- a/internal/lsp/format.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/work" -) - -func (s *Server) formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - switch snapshot.View().FileKind(fh) { - case source.Mod: - return mod.Format(ctx, snapshot, fh) - case source.Go: - return source.Format(ctx, snapshot, fh) - case source.Work: - return work.Format(ctx, snapshot, fh) - } - return nil, nil -} diff --git a/internal/lsp/general.go b/internal/lsp/general.go deleted file mode 100644 index 478152bdf9a..00000000000 --- a/internal/lsp/general.go +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "os" - "path" - "path/filepath" - "sync" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) { - s.stateMu.Lock() - if s.state >= serverInitializing { - defer s.stateMu.Unlock() - return nil, fmt.Errorf("%w: initialize called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state) - } - s.state = serverInitializing - s.stateMu.Unlock() - - // For uniqueness, use the gopls PID rather than params.ProcessID (the client - // pid). Some clients might start multiple gopls servers, though they - // probably shouldn't. - pid := os.Getpid() - s.tempDir = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.%s", pid, s.session.ID())) - err := os.Mkdir(s.tempDir, 0700) - if err != nil { - // MkdirTemp could fail due to permissions issues. This is a problem with - // the user's environment, but should not block gopls otherwise behaving. - // All usage of s.tempDir should be predicated on having a non-empty - // s.tempDir. - event.Error(ctx, "creating temp dir", err) - s.tempDir = "" - } - s.progress.SetSupportsWorkDoneProgress(params.Capabilities.Window.WorkDoneProgress) - - options := s.session.Options() - defer func() { s.session.SetOptions(options) }() - - if err := s.handleOptionResults(ctx, source.SetOptions(options, params.InitializationOptions)); err != nil { - return nil, err - } - options.ForClientCapabilities(params.Capabilities) - - if options.ShowBugReports { - // Report the next bug that occurs on the server. - bugCh := bug.Notify() - go func() { - b := <-bugCh - msg := &protocol.ShowMessageParams{ - Type: protocol.Error, - Message: fmt.Sprintf("A bug occurred on the server: %s\nLocation:%s", b.Description, b.Key), - } - if err := s.eventuallyShowMessage(context.Background(), msg); err != nil { - log.Printf("error showing bug: %v", err) - } - }() - } - - folders := params.WorkspaceFolders - if len(folders) == 0 { - if params.RootURI != "" { - folders = []protocol.WorkspaceFolder{{ - URI: string(params.RootURI), - Name: path.Base(params.RootURI.SpanURI().Filename()), - }} - } - } - for _, folder := range folders { - uri := span.URIFromURI(folder.URI) - if !uri.IsFile() { - continue - } - s.pendingFolders = append(s.pendingFolders, folder) - } - // gopls only supports URIs with a file:// scheme, so if we have no - // workspace folders with a supported scheme, fail to initialize. - if len(folders) > 0 && len(s.pendingFolders) == 0 { - return nil, fmt.Errorf("unsupported URI schemes: %v (gopls only supports file URIs)", folders) - } - - var codeActionProvider interface{} = true - if ca := params.Capabilities.TextDocument.CodeAction; len(ca.CodeActionLiteralSupport.CodeActionKind.ValueSet) > 0 { - // If the client has specified CodeActionLiteralSupport, - // send the code actions we support. - // - // Using CodeActionOptions is only valid if codeActionLiteralSupport is set. - codeActionProvider = &protocol.CodeActionOptions{ - CodeActionKinds: s.getSupportedCodeActions(), - } - } - var renameOpts interface{} = true - if r := params.Capabilities.TextDocument.Rename; r.PrepareSupport { - renameOpts = protocol.RenameOptions{ - PrepareProvider: r.PrepareSupport, - } - } - - versionInfo := debug.VersionInfo() - - // golang/go#45732: Warn users who've installed sergi/go-diff@v1.2.0, since - // it will corrupt the formatting of their files. - for _, dep := range versionInfo.Deps { - if dep.Path == "github.com/sergi/go-diff" && dep.Version == "v1.2.0" { - if err := s.eventuallyShowMessage(ctx, &protocol.ShowMessageParams{ - Message: `It looks like you have a bad gopls installation. -Please reinstall gopls by running 'GO111MODULE=on go install golang.org/x/tools/gopls@latest'. -See https://github.com/golang/go/issues/45732 for more information.`, - Type: protocol.Error, - }); err != nil { - return nil, err - } - } - } - - goplsVersion, err := json.Marshal(versionInfo) - if err != nil { - return nil, err - } - - return &protocol.InitializeResult{ - Capabilities: protocol.ServerCapabilities{ - CallHierarchyProvider: true, - CodeActionProvider: codeActionProvider, - CompletionProvider: protocol.CompletionOptions{ - TriggerCharacters: []string{"."}, - }, - DefinitionProvider: true, - TypeDefinitionProvider: true, - ImplementationProvider: true, - DocumentFormattingProvider: true, - DocumentSymbolProvider: true, - WorkspaceSymbolProvider: true, - ExecuteCommandProvider: protocol.ExecuteCommandOptions{ - Commands: options.SupportedCommands, - }, - FoldingRangeProvider: true, - HoverProvider: true, - DocumentHighlightProvider: true, - DocumentLinkProvider: protocol.DocumentLinkOptions{}, - ReferencesProvider: true, - RenameProvider: renameOpts, - SignatureHelpProvider: protocol.SignatureHelpOptions{ - TriggerCharacters: []string{"(", ","}, - }, - TextDocumentSync: &protocol.TextDocumentSyncOptions{ - Change: protocol.Incremental, - OpenClose: true, - Save: protocol.SaveOptions{ - IncludeText: false, - }, - }, - Workspace: protocol.Workspace6Gn{ - WorkspaceFolders: protocol.WorkspaceFolders5Gn{ - Supported: true, - ChangeNotifications: "workspace/didChangeWorkspaceFolders", - }, - }, - }, - ServerInfo: struct { - Name string `json:"name"` - Version string `json:"version,omitempty"` - }{ - Name: "gopls", - Version: string(goplsVersion), - }, - }, nil -} - -func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error { - s.stateMu.Lock() - if s.state >= serverInitialized { - defer s.stateMu.Unlock() - return fmt.Errorf("%w: initialized called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state) - } - s.state = serverInitialized - s.stateMu.Unlock() - - for _, not := range s.notifications { - s.client.ShowMessage(ctx, not) - } - s.notifications = nil - - options := s.session.Options() - defer func() { s.session.SetOptions(options) }() - - if err := s.addFolders(ctx, s.pendingFolders); err != nil { - return err - } - s.pendingFolders = nil - - var registrations []protocol.Registration - if options.ConfigurationSupported && options.DynamicConfigurationSupported { - registrations = append(registrations, protocol.Registration{ - ID: "workspace/didChangeConfiguration", - Method: "workspace/didChangeConfiguration", - }) - } - if options.SemanticTokens && options.DynamicRegistrationSemanticTokensSupported { - registrations = append(registrations, semanticTokenRegistration(options.SemanticTypes, options.SemanticMods)) - } - if len(registrations) > 0 { - if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ - Registrations: registrations, - }); err != nil { - return err - } - } - return nil -} - -func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error { - originalViews := len(s.session.Views()) - viewErrors := make(map[span.URI]error) - - var wg sync.WaitGroup - if s.session.Options().VerboseWorkDoneProgress { - work := s.progress.Start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil) - defer func() { - go func() { - wg.Wait() - work.End(ctx, "Done.") - }() - }() - } - // Only one view gets to have a workspace. - var allFoldersWg sync.WaitGroup - for _, folder := range folders { - uri := span.URIFromURI(folder.URI) - // Ignore non-file URIs. - if !uri.IsFile() { - continue - } - work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil) - snapshot, release, err := s.addView(ctx, folder.Name, uri) - if err == source.ErrViewExists { - continue - } - if err != nil { - viewErrors[uri] = err - work.End(ctx, fmt.Sprintf("Error loading packages: %s", err)) - continue - } - var swg sync.WaitGroup - swg.Add(1) - allFoldersWg.Add(1) - go func() { - defer swg.Done() - defer allFoldersWg.Done() - snapshot.AwaitInitialized(ctx) - work.End(ctx, "Finished loading packages.") - }() - - // Print each view's environment. - buf := &bytes.Buffer{} - if err := snapshot.WriteEnv(ctx, buf); err != nil { - viewErrors[uri] = err - continue - } - event.Log(ctx, buf.String()) - - // Diagnose the newly created view. - wg.Add(1) - go func() { - s.diagnoseDetached(snapshot) - swg.Wait() - release() - wg.Done() - }() - } - - // Register for file watching notifications, if they are supported. - // Wait for all snapshots to be initialized first, since all files might - // not yet be known to the snapshots. - allFoldersWg.Wait() - if err := s.updateWatchedDirectories(ctx); err != nil { - event.Error(ctx, "failed to register for file watching notifications", err) - } - - if len(viewErrors) > 0 { - errMsg := fmt.Sprintf("Error loading workspace folders (expected %v, got %v)\n", len(folders), len(s.session.Views())-originalViews) - for uri, err := range viewErrors { - errMsg += fmt.Sprintf("failed to load view for %s: %v\n", uri, err) - } - return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ - Type: protocol.Error, - Message: errMsg, - }) - } - return nil -} - -// updateWatchedDirectories compares the current set of directories to watch -// with the previously registered set of directories. If the set of directories -// has changed, we unregister and re-register for file watching notifications. -// updatedSnapshots is the set of snapshots that have been updated. -func (s *Server) updateWatchedDirectories(ctx context.Context) error { - patterns := s.session.FileWatchingGlobPatterns(ctx) - - s.watchedGlobPatternsMu.Lock() - defer s.watchedGlobPatternsMu.Unlock() - - // Nothing to do if the set of workspace directories is unchanged. - if equalURISet(s.watchedGlobPatterns, patterns) { - return nil - } - - // If the set of directories to watch has changed, register the updates and - // unregister the previously watched directories. This ordering avoids a - // period where no files are being watched. Still, if a user makes on-disk - // changes before these updates are complete, we may miss them for the new - // directories. - prevID := s.watchRegistrationCount - 1 - if err := s.registerWatchedDirectoriesLocked(ctx, patterns); err != nil { - return err - } - if prevID >= 0 { - return s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{ - Unregisterations: []protocol.Unregistration{{ - ID: watchedFilesCapabilityID(prevID), - Method: "workspace/didChangeWatchedFiles", - }}, - }) - } - return nil -} - -func watchedFilesCapabilityID(id int) string { - return fmt.Sprintf("workspace/didChangeWatchedFiles-%d", id) -} - -func equalURISet(m1, m2 map[string]struct{}) bool { - if len(m1) != len(m2) { - return false - } - for k := range m1 { - _, ok := m2[k] - if !ok { - return false - } - } - return true -} - -// registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles -// registrations to the client and updates s.watchedDirectories. -func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns map[string]struct{}) error { - if !s.session.Options().DynamicWatchedFilesSupported { - return nil - } - for k := range s.watchedGlobPatterns { - delete(s.watchedGlobPatterns, k) - } - var watchers []protocol.FileSystemWatcher - for pattern := range patterns { - watchers = append(watchers, protocol.FileSystemWatcher{ - GlobPattern: pattern, - Kind: uint32(protocol.WatchChange + protocol.WatchDelete + protocol.WatchCreate), - }) - } - - if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ - Registrations: []protocol.Registration{{ - ID: watchedFilesCapabilityID(s.watchRegistrationCount), - Method: "workspace/didChangeWatchedFiles", - RegisterOptions: protocol.DidChangeWatchedFilesRegistrationOptions{ - Watchers: watchers, - }, - }}, - }); err != nil { - return err - } - s.watchRegistrationCount++ - - for k, v := range patterns { - s.watchedGlobPatterns[k] = v - } - return nil -} - -func (s *Server) fetchConfig(ctx context.Context, name string, folder span.URI, o *source.Options) error { - if !s.session.Options().ConfigurationSupported { - return nil - } - configs, err := s.client.Configuration(ctx, &protocol.ParamConfiguration{ - ConfigurationParams: protocol.ConfigurationParams{ - Items: []protocol.ConfigurationItem{{ - ScopeURI: string(folder), - Section: "gopls", - }}, - }, - }) - if err != nil { - return fmt.Errorf("failed to get workspace configuration from client (%s): %v", folder, err) - } - for _, config := range configs { - if err := s.handleOptionResults(ctx, source.SetOptions(o, config)); err != nil { - return err - } - } - return nil -} - -func (s *Server) eventuallyShowMessage(ctx context.Context, msg *protocol.ShowMessageParams) error { - s.stateMu.Lock() - defer s.stateMu.Unlock() - if s.state == serverInitialized { - return s.client.ShowMessage(ctx, msg) - } - s.notifications = append(s.notifications, msg) - return nil -} - -func (s *Server) handleOptionResults(ctx context.Context, results source.OptionResults) error { - for _, result := range results { - var msg *protocol.ShowMessageParams - switch result.Error.(type) { - case nil: - // nothing to do - case *source.SoftError: - msg = &protocol.ShowMessageParams{ - Type: protocol.Warning, - Message: result.Error.Error(), - } - default: - msg = &protocol.ShowMessageParams{ - Type: protocol.Error, - Message: result.Error.Error(), - } - } - if msg != nil { - if err := s.eventuallyShowMessage(ctx, msg); err != nil { - return err - } - } - } - return nil -} - -// beginFileRequest checks preconditions for a file-oriented request and routes -// it to a snapshot. -// We don't want to return errors for benign conditions like wrong file type, -// so callers should do if !ok { return err } rather than if err != nil. -// The returned cleanup function is non-nil even in case of false/error result. -func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI, expectKind source.FileKind) (source.Snapshot, source.VersionedFileHandle, bool, func(), error) { - uri := pURI.SpanURI() - if !uri.IsFile() { - // Not a file URI. Stop processing the request, but don't return an error. - return nil, nil, false, func() {}, nil - } - view, err := s.session.ViewOf(uri) - if err != nil { - return nil, nil, false, func() {}, err - } - snapshot, release := view.Snapshot(ctx) - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - release() - return nil, nil, false, func() {}, err - } - kind := snapshot.View().FileKind(fh) - if expectKind != source.UnknownKind && kind != expectKind { - // Wrong kind of file. Nothing to do. - release() - return nil, nil, false, func() {}, nil - } - return snapshot, fh, true, release, nil -} - -func (s *Server) shutdown(ctx context.Context) error { - s.stateMu.Lock() - defer s.stateMu.Unlock() - if s.state < serverInitialized { - event.Log(ctx, "server shutdown without initialization") - } - if s.state != serverShutDown { - // drop all the active views - s.session.Shutdown(ctx) - s.state = serverShutDown - if s.tempDir != "" { - if err := os.RemoveAll(s.tempDir); err != nil { - event.Error(ctx, "removing temp dir", err) - } - } - } - return nil -} - -func (s *Server) exit(ctx context.Context) error { - s.stateMu.Lock() - defer s.stateMu.Unlock() - - s.client.Close() - - if s.state != serverShutDown { - // TODO: We should be able to do better than this. - os.Exit(1) - } - // we don't terminate the process on a normal exit, we just allow it to - // close naturally if needed after the connection is closed. - return nil -} diff --git a/internal/lsp/highlight.go b/internal/lsp/highlight.go deleted file mode 100644 index 5dc636eb58a..00000000000 --- a/internal/lsp/highlight.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" -) - -func (s *Server) documentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - - if snapshot.View().FileKind(fh) == source.Tmpl { - return template.Highlight(ctx, snapshot, fh, params.Position) - } - - rngs, err := source.Highlight(ctx, snapshot, fh, params.Position) - if err != nil { - event.Error(ctx, "no highlight", err, tag.URI.Of(params.TextDocument.URI)) - } - return toProtocolHighlight(rngs), nil -} - -func toProtocolHighlight(rngs []protocol.Range) []protocol.DocumentHighlight { - result := make([]protocol.DocumentHighlight, 0, len(rngs)) - kind := protocol.Text - for _, rng := range rngs { - result = append(result, protocol.DocumentHighlight{ - Kind: kind, - Range: rng, - }) - } - return result -} diff --git a/internal/lsp/hover.go b/internal/lsp/hover.go deleted file mode 100644 index d59f5dbdb3b..00000000000 --- a/internal/lsp/hover.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/mod" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" - "golang.org/x/tools/internal/lsp/work" -) - -func (s *Server) hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - switch snapshot.View().FileKind(fh) { - case source.Mod: - return mod.Hover(ctx, snapshot, fh, params.Position) - case source.Go: - return source.Hover(ctx, snapshot, fh, params.Position) - case source.Tmpl: - return template.Hover(ctx, snapshot, fh, params.Position) - case source.Work: - return work.Hover(ctx, snapshot, fh, params.Position) - } - return nil, nil -} diff --git a/internal/lsp/implementation.go b/internal/lsp/implementation.go deleted file mode 100644 index 49992b9113a..00000000000 --- a/internal/lsp/implementation.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - return source.Implementation(ctx, snapshot, fh, params.Position) -} diff --git a/internal/lsp/link.go b/internal/lsp/link.go deleted file mode 100644 index 7bb09b40355..00000000000 --- a/internal/lsp/link.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "bytes" - "context" - "fmt" - "go/ast" - "go/token" - "net/url" - "regexp" - "strconv" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLinkParams) (links []protocol.DocumentLink, err error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - switch snapshot.View().FileKind(fh) { - case source.Mod: - links, err = modLinks(ctx, snapshot, fh) - case source.Go: - links, err = goLinks(ctx, snapshot, fh) - } - // Don't return errors for document links. - if err != nil { - event.Error(ctx, "failed to compute document links", err, tag.URI.Of(fh.URI())) - return nil, nil - } - return links, nil -} - -func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) { - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - return nil, err - } - var links []protocol.DocumentLink - for _, req := range pm.File.Require { - if req.Syntax == nil { - continue - } - // See golang/go#36998: don't link to modules matching GOPRIVATE. - if snapshot.View().IsGoPrivatePath(req.Mod.Path) { - continue - } - dep := []byte(req.Mod.Path) - s, e := req.Syntax.Start.Byte, req.Syntax.End.Byte - i := bytes.Index(pm.Mapper.Content[s:e], dep) - if i == -1 { - continue - } - // Shift the start position to the location of the - // dependency within the require statement. - start, end := token.Pos(s+i), token.Pos(s+i+len(dep)) - target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "") - l, err := toProtocolLink(snapshot, pm.Mapper, target, start, end, source.Mod) - if err != nil { - return nil, err - } - links = append(links, l) - } - // TODO(ridersofrohan): handle links for replace and exclude directives. - if syntax := pm.File.Syntax; syntax == nil { - return links, nil - } - // Get all the links that are contained in the comments of the file. - for _, expr := range pm.File.Syntax.Stmt { - comments := expr.Comment() - if comments == nil { - continue - } - for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} { - for _, comment := range section { - l, err := findLinksInString(ctx, snapshot, comment.Token, token.Pos(comment.Start.Byte), pm.Mapper, source.Mod) - if err != nil { - return nil, err - } - links = append(links, l...) - } - } - } - return links, nil -} - -func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) { - view := snapshot.View() - // We don't actually need type information, so any typecheck mode is fine. - pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckWorkspace, source.WidestPackage) - if err != nil { - return nil, err - } - pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) - if err != nil { - return nil, err - } - var imports []*ast.ImportSpec - var str []*ast.BasicLit - ast.Inspect(pgf.File, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.ImportSpec: - imports = append(imports, n) - return false - case *ast.BasicLit: - // Look for links in string literals. - if n.Kind == token.STRING { - str = append(str, n) - } - return false - } - return true - }) - var links []protocol.DocumentLink - // For import specs, provide a link to a documentation website, like - // https://pkg.go.dev. - if view.Options().ImportShortcut.ShowLinks() { - for _, imp := range imports { - target, err := strconv.Unquote(imp.Path.Value) - if err != nil { - continue - } - // See golang/go#36998: don't link to modules matching GOPRIVATE. - if view.IsGoPrivatePath(target) { - continue - } - if mod, version, ok := moduleAtVersion(target, pkg); ok && strings.ToLower(view.Options().LinkTarget) == "pkg.go.dev" { - target = strings.Replace(target, mod, mod+"@"+version, 1) - } - // Account for the quotation marks in the positions. - start := imp.Path.Pos() + 1 - end := imp.Path.End() - 1 - target = source.BuildLink(view.Options().LinkTarget, target, "") - l, err := toProtocolLink(snapshot, pgf.Mapper, target, start, end, source.Go) - if err != nil { - return nil, err - } - links = append(links, l) - } - } - for _, s := range str { - l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Mapper, source.Go) - if err != nil { - return nil, err - } - links = append(links, l...) - } - for _, commentGroup := range pgf.File.Comments { - for _, comment := range commentGroup.List { - l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Mapper, source.Go) - if err != nil { - return nil, err - } - links = append(links, l...) - } - } - return links, nil -} - -func moduleAtVersion(target string, pkg source.Package) (string, string, bool) { - impPkg, err := pkg.GetImport(target) - if err != nil { - return "", "", false - } - if impPkg.Version() == nil { - return "", "", false - } - version, modpath := impPkg.Version().Version, impPkg.Version().Path - if modpath == "" || version == "" { - return "", "", false - } - return modpath, version, true -} - -// acceptedSchemes controls the schemes that URLs must have to be shown to the -// user. Other schemes can't be opened by LSP clients, so linkifying them is -// distracting. See golang/go#43990. -var acceptedSchemes = map[string]bool{ - "http": true, - "https": true, -} - -func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) { - var links []protocol.DocumentLink - for _, index := range snapshot.View().Options().URLRegexp.FindAllIndex([]byte(src), -1) { - start, end := index[0], index[1] - startPos := token.Pos(int(pos) + start) - endPos := token.Pos(int(pos) + end) - link := src[start:end] - linkURL, err := url.Parse(link) - // Fallback: Linkify IP addresses as suggested in golang/go#18824. - if err != nil { - linkURL, err = url.Parse("//" + link) - // Not all potential links will be valid, so don't return this error. - if err != nil { - continue - } - } - // If the URL has no scheme, use https. - if linkURL.Scheme == "" { - linkURL.Scheme = "https" - } - if !acceptedSchemes[linkURL.Scheme] { - continue - } - l, err := toProtocolLink(snapshot, m, linkURL.String(), startPos, endPos, fileKind) - if err != nil { - return nil, err - } - links = append(links, l) - } - // Handle golang/go#1234-style links. - r := getIssueRegexp() - for _, index := range r.FindAllIndex([]byte(src), -1) { - start, end := index[0], index[1] - startPos := token.Pos(int(pos) + start) - endPos := token.Pos(int(pos) + end) - matches := r.FindStringSubmatch(src) - if len(matches) < 4 { - continue - } - org, repo, number := matches[1], matches[2], matches[3] - target := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number) - l, err := toProtocolLink(snapshot, m, target, startPos, endPos, fileKind) - if err != nil { - return nil, err - } - links = append(links, l) - } - return links, nil -} - -func getIssueRegexp() *regexp.Regexp { - once.Do(func() { - issueRegexp = regexp.MustCompile(`(\w+)/([\w-]+)#([0-9]+)`) - }) - return issueRegexp -} - -var ( - once sync.Once - issueRegexp *regexp.Regexp -) - -func toProtocolLink(snapshot source.Snapshot, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) { - var rng protocol.Range - switch fileKind { - case source.Go: - spn, err := span.NewRange(snapshot.FileSet(), start, end).Span() - if err != nil { - return protocol.DocumentLink{}, err - } - rng, err = m.Range(spn) - if err != nil { - return protocol.DocumentLink{}, err - } - case source.Mod: - s, e := int(start), int(end) - line, col, err := span.ToPosition(m.TokFile, s) - if err != nil { - return protocol.DocumentLink{}, err - } - start := span.NewPoint(line, col, s) - line, col, err = span.ToPosition(m.TokFile, e) - if err != nil { - return protocol.DocumentLink{}, err - } - end := span.NewPoint(line, col, e) - rng, err = m.Range(span.New(m.URI, start, end)) - if err != nil { - return protocol.DocumentLink{}, err - } - } - return protocol.DocumentLink{ - Range: rng, - Target: target, - }, nil -} diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go deleted file mode 100644 index ee364b8b034..00000000000 --- a/internal/lsp/lsp_test.go +++ /dev/null @@ -1,1311 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "fmt" - "go/token" - "os" - "os/exec" - "path/filepath" - "sort" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/testenv" -) - -func TestMain(m *testing.M) { - bug.PanicOnBugs = true - testenv.ExitIfSmallMachine() - os.Exit(m.Run()) -} - -func TestLSP(t *testing.T) { - tests.RunTests(t, "testdata", true, testLSP) -} - -type runner struct { - server *Server - data *tests.Data - diagnostics map[span.URI][]*source.Diagnostic - ctx context.Context - normalizers []tests.Normalizer - editRecv chan map[span.URI]string -} - -func testLSP(t *testing.T, datum *tests.Data) { - ctx := tests.Context(t) - - cache := cache.New(nil) - session := cache.NewSession(ctx) - options := source.DefaultOptions().Clone() - tests.DefaultOptions(options) - session.SetOptions(options) - options.SetEnvSlice(datum.Config.Env) - view, snapshot, release, err := session.NewView(ctx, datum.Config.Dir, span.URIFromPath(datum.Config.Dir), options) - if err != nil { - t.Fatal(err) - } - - defer view.Shutdown(ctx) - - // Enable type error analyses for tests. - // TODO(golang/go#38212): Delete this once they are enabled by default. - tests.EnableAllAnalyzers(view, options) - view.SetOptions(ctx, options) - - // Only run the -modfile specific tests in module mode with Go 1.14 or above. - datum.ModfileFlagAvailable = len(snapshot.ModFiles()) > 0 && testenv.Go1Point() >= 14 - release() - - var modifications []source.FileModification - for filename, content := range datum.Config.Overlay { - if filepath.Ext(filename) != ".go" { - continue - } - modifications = append(modifications, source.FileModification{ - URI: span.URIFromPath(filename), - Action: source.Open, - Version: -1, - Text: content, - LanguageID: "go", - }) - } - if err := session.ModifyFiles(ctx, modifications); err != nil { - t.Fatal(err) - } - r := &runner{ - data: datum, - ctx: ctx, - normalizers: tests.CollectNormalizers(datum.Exported), - editRecv: make(chan map[span.URI]string, 1), - } - - r.server = NewServer(session, testClient{runner: r}) - tests.Run(t, r, datum) -} - -// testClient stubs any client functions that may be called by LSP functions. -type testClient struct { - protocol.Client - runner *runner -} - -func (c testClient) Close() error { - return nil -} - -// Trivially implement PublishDiagnostics so that we can call -// server.publishReports below to de-dup sent diagnostics. -func (c testClient) PublishDiagnostics(context.Context, *protocol.PublishDiagnosticsParams) error { - return nil -} - -func (c testClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error { - return nil -} - -func (c testClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) { - res, err := applyTextDocumentEdits(c.runner, params.Edit.DocumentChanges) - if err != nil { - return nil, err - } - c.runner.editRecv <- res - return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil -} - -func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) { - mapper, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := mapper.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - - params := &protocol.CallHierarchyPrepareParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - } - - items, err := r.server.PrepareCallHierarchy(r.ctx, params) - if err != nil { - t.Fatal(err) - } - if len(items) == 0 { - t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range) - } - - callLocation := protocol.Location{ - URI: items[0].URI, - Range: items[0].Range, - } - if callLocation != loc { - t.Fatalf("expected server.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation) - } - - incomingCalls, err := r.server.IncomingCalls(r.ctx, &protocol.CallHierarchyIncomingCallsParams{Item: items[0]}) - if err != nil { - t.Error(err) - } - var incomingCallItems []protocol.CallHierarchyItem - for _, item := range incomingCalls { - incomingCallItems = append(incomingCallItems, item.From) - } - msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls) - if msg != "" { - t.Error(fmt.Sprintf("incoming calls: %s", msg)) - } - - outgoingCalls, err := r.server.OutgoingCalls(r.ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: items[0]}) - if err != nil { - t.Error(err) - } - var outgoingCallItems []protocol.CallHierarchyItem - for _, item := range outgoingCalls { - outgoingCallItems = append(outgoingCallItems, item.To) - } - msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls) - if msg != "" { - t.Error(fmt.Sprintf("outgoing calls: %s", msg)) - } -} - -func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) { - if !strings.HasSuffix(uri.Filename(), "go.mod") { - return - } - got, err := r.server.codeLens(r.ctx, &protocol.CodeLensParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.DocumentURI(uri), - }, - }) - if err != nil { - t.Fatal(err) - } - if diff := tests.DiffCodeLens(uri, want, got); diff != "" { - t.Errorf("%s: %s", uri, diff) - } -} - -func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) { - // Get the diagnostics for this view if we have not done it before. - v := r.server.session.View(r.data.Config.Dir) - r.collectDiagnostics(v) - d := r.diagnostics[uri] - got := make([]*source.Diagnostic, len(d)) - copy(got, d) - // A special case to test that there are no diagnostics for a file. - if len(want) == 1 && want[0].Source == "no_diagnostics" { - if len(got) != 0 { - t.Errorf("expected no diagnostics for %s, got %v", uri, got) - } - return - } - if diff := tests.DiffDiagnostics(uri, want, got); diff != "" { - t.Error(diff) - } -} - -func (r *runner) FoldingRanges(t *testing.T, spn span.Span) { - uri := spn.URI() - view, err := r.server.session.ViewOf(uri) - if err != nil { - t.Fatal(err) - } - original := view.Options() - modified := original - - // Test all folding ranges. - modified.LineFoldingOnly = false - view, err = view.SetOptions(r.ctx, modified) - if err != nil { - t.Error(err) - return - } - ranges, err := r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - t.Error(err) - return - } - r.foldingRanges(t, "foldingRange", uri, ranges) - - // Test folding ranges with lineFoldingOnly = true. - modified.LineFoldingOnly = true - view, err = view.SetOptions(r.ctx, modified) - if err != nil { - t.Error(err) - return - } - ranges, err = r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - t.Error(err) - return - } - r.foldingRanges(t, "foldingRange-lineFolding", uri, ranges) - view.SetOptions(r.ctx, original) -} - -func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, ranges []protocol.FoldingRange) { - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - // Fold all ranges. - nonOverlapping := nonOverlappingRanges(ranges) - for i, rngs := range nonOverlapping { - got, err := foldRanges(m, string(m.Content), rngs) - if err != nil { - t.Error(err) - continue - } - tag := fmt.Sprintf("%s-%d", prefix, i) - want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if want != got { - t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got) - } - } - - // Filter by kind. - kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment} - for _, kind := range kinds { - var kindOnly []protocol.FoldingRange - for _, fRng := range ranges { - if fRng.Kind == string(kind) { - kindOnly = append(kindOnly, fRng) - } - } - - nonOverlapping := nonOverlappingRanges(kindOnly) - for i, rngs := range nonOverlapping { - got, err := foldRanges(m, string(m.Content), rngs) - if err != nil { - t.Error(err) - continue - } - tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i) - want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if want != got { - t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got) - } - } - - } -} - -func nonOverlappingRanges(ranges []protocol.FoldingRange) (res [][]protocol.FoldingRange) { - for _, fRng := range ranges { - setNum := len(res) - for i := 0; i < len(res); i++ { - canInsert := true - for _, rng := range res[i] { - if conflict(rng, fRng) { - canInsert = false - break - } - } - if canInsert { - setNum = i - break - } - } - if setNum == len(res) { - res = append(res, []protocol.FoldingRange{}) - } - res[setNum] = append(res[setNum], fRng) - } - return res -} - -func conflict(a, b protocol.FoldingRange) bool { - // a start position is <= b start positions - return (a.StartLine < b.StartLine || (a.StartLine == b.StartLine && a.StartCharacter <= b.StartCharacter)) && - (a.EndLine > b.StartLine || (a.EndLine == b.StartLine && a.EndCharacter > b.StartCharacter)) -} - -func foldRanges(m *protocol.ColumnMapper, contents string, ranges []protocol.FoldingRange) (string, error) { - foldedText := "<>" - res := contents - // Apply the edits from the end of the file forward - // to preserve the offsets - for i := len(ranges) - 1; i >= 0; i-- { - fRange := ranges[i] - spn, err := m.RangeSpan(protocol.Range{ - Start: protocol.Position{ - Line: fRange.StartLine, - Character: fRange.StartCharacter, - }, - End: protocol.Position{ - Line: fRange.EndLine, - Character: fRange.EndCharacter, - }, - }) - if err != nil { - return "", err - } - start := spn.Start().Offset() - end := spn.End().Offset() - - tmp := res[0:start] + foldedText - res = tmp + res[end:] - } - return res, nil -} - -func (r *runner) Format(t *testing.T, spn span.Span) { - uri := spn.URI() - filename := uri.Filename() - gofmted := string(r.data.Golden("gofmt", filename, func() ([]byte, error) { - cmd := exec.Command("gofmt", filename) - out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files - return out, nil - })) - - edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - if gofmted != "" { - t.Error(err) - } - return - } - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - sedits, err := source.FromProtocolEdits(m, edits) - if err != nil { - t.Error(err) - } - got := diff.ApplyEdits(string(m.Content), sedits) - if gofmted != got { - t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", filename, gofmted, got) - } -} - -func (r *runner) SemanticTokens(t *testing.T, spn span.Span) { - uri := spn.URI() - filename := uri.Filename() - // this is called solely for coverage in semantic.go - _, err := r.server.semanticTokensFull(r.ctx, &protocol.SemanticTokensParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - t.Errorf("%v for %s", err, filename) - } - _, err = r.server.semanticTokensRange(r.ctx, &protocol.SemanticTokensRangeParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - // any legal range. Just to exercise the call. - Range: protocol.Range{ - Start: protocol.Position{ - Line: 0, - Character: 0, - }, - End: protocol.Position{ - Line: 2, - Character: 0, - }, - }, - }) - if err != nil { - t.Errorf("%v for Range %s", err, filename) - } -} - -func (r *runner) Import(t *testing.T, spn span.Span) { - uri := spn.URI() - filename := uri.Filename() - actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - t.Fatal(err) - } - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - got := string(m.Content) - if len(actions) > 0 { - res, err := applyTextDocumentEdits(r, actions[0].Edit.DocumentChanges) - if err != nil { - t.Fatal(err) - } - got = res[uri] - } - want := string(r.data.Golden("goimports", filename, func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - d, err := myers.ComputeEdits(uri, want, got) - if err != nil { - t.Fatal(err) - } - t.Errorf("import failed for %s: %s", filename, diff.ToUnified("want", "got", want, d)) - } -} - -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { - uri := spn.URI() - view, err := r.server.session.ViewOf(uri) - if err != nil { - t.Fatal(err) - } - - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - rng, err := m.Range(spn) - if err != nil { - t.Fatal(err) - } - // Get the diagnostics for this view if we have not done it before. - r.collectDiagnostics(view) - var diagnostics []protocol.Diagnostic - for _, d := range r.diagnostics[uri] { - // Compare the start positions rather than the entire range because - // some diagnostics have a range with the same start and end position (8:1-8:1). - // The current marker functionality prevents us from having a range of 0 length. - if protocol.ComparePosition(d.Range.Start, rng.Start) == 0 { - diagnostics = append(diagnostics, toProtocolDiagnostics([]*source.Diagnostic{d})...) - break - } - } - codeActionKinds := []protocol.CodeActionKind{} - for _, k := range actionKinds { - codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k)) - } - actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - Range: rng, - Context: protocol.CodeActionContext{ - Only: codeActionKinds, - Diagnostics: diagnostics, - }, - }) - if err != nil { - t.Fatalf("CodeAction %s failed: %v", spn, err) - } - if len(actions) != expectedActions { - // Hack: We assume that we only get one code action per range. - var cmds []string - for _, a := range actions { - cmds = append(cmds, fmt.Sprintf("%s (%s)", a.Command, a.Title)) - } - t.Fatalf("unexpected number of code actions, want %d, got %d: %v", expectedActions, len(actions), cmds) - } - action := actions[0] - var match bool - for _, k := range codeActionKinds { - if action.Kind == k { - match = true - break - } - } - if !match { - t.Fatalf("unexpected kind for code action %s, expected one of %v, got %v", action.Title, codeActionKinds, action.Kind) - } - var res map[span.URI]string - if cmd := action.Command; cmd != nil { - _, err := r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ - Command: action.Command.Command, - Arguments: action.Command.Arguments, - }) - if err != nil { - t.Fatalf("error converting command %q to edits: %v", action.Command.Command, err) - } - res = <-r.editRecv - } else { - res, err = applyTextDocumentEdits(r, action.Edit.DocumentChanges) - if err != nil { - t.Fatal(err) - } - } - for u, got := range res { - want := string(r.data.Golden("suggestedfix_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - t.Errorf("suggested fixes failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got)) - } - } -} - -func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) { - uri := start.URI() - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - spn := span.New(start.URI(), start.Start(), end.End()) - rng, err := m.Range(spn) - if err != nil { - t.Fatal(err) - } - actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - Range: rng, - Context: protocol.CodeActionContext{ - Only: []protocol.CodeActionKind{"refactor.extract"}, - }, - }) - if err != nil { - t.Fatal(err) - } - var actions []protocol.CodeAction - for _, action := range actionsRaw { - if action.Command.Title == "Extract function" { - actions = append(actions, action) - } - } - // Hack: We assume that we only get one code action per range. - // TODO(rstambler): Support multiple code actions per test. - if len(actions) == 0 || len(actions) > 1 { - t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions)) - } - _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ - Command: actions[0].Command.Command, - Arguments: actions[0].Command.Arguments, - }) - if err != nil { - t.Fatal(err) - } - res := <-r.editRecv - for u, got := range res { - want := string(r.data.Golden("functionextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - t.Errorf("function extraction failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got)) - } - } -} - -func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) { - uri := start.URI() - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - spn := span.New(start.URI(), start.Start(), end.End()) - rng, err := m.Range(spn) - if err != nil { - t.Fatal(err) - } - actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - Range: rng, - Context: protocol.CodeActionContext{ - Only: []protocol.CodeActionKind{"refactor.extract"}, - }, - }) - if err != nil { - t.Fatal(err) - } - var actions []protocol.CodeAction - for _, action := range actionsRaw { - if action.Command.Title == "Extract method" { - actions = append(actions, action) - } - } - // Hack: We assume that we only get one matching code action per range. - // TODO(rstambler): Support multiple code actions per test. - if len(actions) == 0 || len(actions) > 1 { - t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions)) - } - _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{ - Command: actions[0].Command.Command, - Arguments: actions[0].Command.Arguments, - }) - if err != nil { - t.Fatal(err) - } - res := <-r.editRecv - for u, got := range res { - want := string(r.data.Golden("methodextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - t.Errorf("method extraction failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got)) - } - } -} - -func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) { - sm, err := r.data.Mapper(d.Src.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := sm.Location(d.Src) - if err != nil { - t.Fatalf("failed for %v: %v", d.Src, err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - var locs []protocol.Location - var hover *protocol.Hover - if d.IsType { - params := &protocol.TypeDefinitionParams{ - TextDocumentPositionParams: tdpp, - } - locs, err = r.server.TypeDefinition(r.ctx, params) - } else { - params := &protocol.DefinitionParams{ - TextDocumentPositionParams: tdpp, - } - locs, err = r.server.Definition(r.ctx, params) - if err != nil { - t.Fatalf("failed for %v: %+v", d.Src, err) - } - v := &protocol.HoverParams{ - TextDocumentPositionParams: tdpp, - } - hover, err = r.server.Hover(r.ctx, v) - } - if err != nil { - t.Fatalf("failed for %v: %v", d.Src, err) - } - if len(locs) != 1 { - t.Errorf("got %d locations for definition, expected 1", len(locs)) - } - didSomething := false - if hover != nil { - didSomething = true - tag := fmt.Sprintf("%s-hoverdef", d.Name) - expectHover := string(r.data.Golden(tag, d.Src.URI().Filename(), func() ([]byte, error) { - return []byte(hover.Contents.Value), nil - })) - got := tests.StripSubscripts(hover.Contents.Value) - expectHover = tests.StripSubscripts(expectHover) - if got != expectHover { - t.Errorf("%s:\n%s", d.Src, tests.Diff(t, expectHover, got)) - } - } - if !d.OnlyHover { - didSomething = true - locURI := locs[0].URI.SpanURI() - lm, err := r.data.Mapper(locURI) - if err != nil { - t.Fatal(err) - } - if def, err := lm.Span(locs[0]); err != nil { - t.Fatalf("failed for %v: %v", locs[0], err) - } else if def != d.Def { - t.Errorf("for %v got %v want %v", d.Src, def, d.Def) - } - } - if !didSomething { - t.Errorf("no tests ran for %s", d.Src.URI()) - } -} - -func (r *runner) Implementation(t *testing.T, spn span.Span, impls []span.Span) { - sm, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := sm.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - var locs []protocol.Location - params := &protocol.ImplementationParams{ - TextDocumentPositionParams: tdpp, - } - locs, err = r.server.Implementation(r.ctx, params) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - if len(locs) != len(impls) { - t.Fatalf("got %d locations for implementation, expected %d", len(locs), len(impls)) - } - - var results []span.Span - for i := range locs { - locURI := locs[i].URI.SpanURI() - lm, err := r.data.Mapper(locURI) - if err != nil { - t.Fatal(err) - } - imp, err := lm.Span(locs[i]) - if err != nil { - t.Fatalf("failed for %v: %v", locs[i], err) - } - results = append(results, imp) - } - // Sort results and expected to make tests deterministic. - sort.SliceStable(results, func(i, j int) bool { - return span.Compare(results[i], results[j]) == -1 - }) - sort.SliceStable(impls, func(i, j int) bool { - return span.Compare(impls[i], impls[j]) == -1 - }) - for i := range results { - if results[i] != impls[i] { - t.Errorf("for %dth implementation of %v got %v want %v", i, spn, results[i], impls[i]) - } - } -} - -func (r *runner) Highlight(t *testing.T, src span.Span, locations []span.Span) { - m, err := r.data.Mapper(src.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := m.Location(src) - if err != nil { - t.Fatalf("failed for %v: %v", locations[0], err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - params := &protocol.DocumentHighlightParams{ - TextDocumentPositionParams: tdpp, - } - highlights, err := r.server.DocumentHighlight(r.ctx, params) - if err != nil { - t.Fatal(err) - } - if len(highlights) != len(locations) { - t.Fatalf("got %d highlights for highlight at %v:%v:%v, expected %d", len(highlights), src.URI().Filename(), src.Start().Line(), src.Start().Column(), len(locations)) - } - // Check to make sure highlights have a valid range. - var results []span.Span - for i := range highlights { - h, err := m.RangeSpan(highlights[i].Range) - if err != nil { - t.Fatalf("failed for %v: %v", highlights[i], err) - } - results = append(results, h) - } - // Sort results to make tests deterministic since DocumentHighlight uses a map. - sort.SliceStable(results, func(i, j int) bool { - return span.Compare(results[i], results[j]) == -1 - }) - // Check to make sure all the expected highlights are found. - for i := range results { - if results[i] != locations[i] { - t.Errorf("want %v, got %v\n", locations[i], results[i]) - } - } -} - -func (r *runner) Hover(t *testing.T, src span.Span, text string) { - m, err := r.data.Mapper(src.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := m.Location(src) - if err != nil { - t.Fatalf("failed for %v", err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - params := &protocol.HoverParams{ - TextDocumentPositionParams: tdpp, - } - hover, err := r.server.Hover(r.ctx, params) - if err != nil { - t.Fatal(err) - } - if text == "" { - if hover != nil { - t.Errorf("want nil, got %v\n", hover) - } - } else { - if hover == nil { - t.Fatalf("want hover result to include %s, but got nil", text) - } - if got := hover.Contents.Value; got != text { - t.Errorf("want %v, got %v\n", text, got) - } - if want, got := loc.Range, hover.Range; want != got { - t.Errorf("want range %v, got %v instead", want, got) - } - } -} - -func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) { - sm, err := r.data.Mapper(src.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := sm.Location(src) - if err != nil { - t.Fatalf("failed for %v: %v", src, err) - } - for _, includeDeclaration := range []bool{true, false} { - t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) { - want := make(map[protocol.Location]bool) - for i, pos := range itemList { - // We don't want the first result if we aren't including the declaration. - if i == 0 && !includeDeclaration { - continue - } - m, err := r.data.Mapper(pos.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := m.Location(pos) - if err != nil { - t.Fatalf("failed for %v: %v", src, err) - } - want[loc] = true - } - params := &protocol.ReferenceParams{ - TextDocumentPositionParams: protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - }, - Context: protocol.ReferenceContext{ - IncludeDeclaration: includeDeclaration, - }, - } - got, err := r.server.References(r.ctx, params) - if err != nil { - t.Fatalf("failed for %v: %v", src, err) - } - if len(got) != len(want) { - t.Errorf("references failed: different lengths got %v want %v", len(got), len(want)) - } - for _, loc := range got { - if !want[loc] { - t.Errorf("references failed: incorrect references got %v want %v", loc, want) - } - } - }) - } -} - -func (r *runner) Rename(t *testing.T, spn span.Span, newText string) { - tag := fmt.Sprintf("%s-rename", newText) - - uri := spn.URI() - filename := uri.Filename() - sm, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - loc, err := sm.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - - wedit, err := r.server.Rename(r.ctx, &protocol.RenameParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - Position: loc.Range.Start, - NewName: newText, - }) - if err != nil { - renamed := string(r.data.Golden(tag, filename, func() ([]byte, error) { - return []byte(err.Error()), nil - })) - if err.Error() != renamed { - t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v\n", newText, renamed, err) - } - return - } - res, err := applyTextDocumentEdits(r, wedit.DocumentChanges) - if err != nil { - t.Fatal(err) - } - var orderedURIs []string - for uri := range res { - orderedURIs = append(orderedURIs, string(uri)) - } - sort.Strings(orderedURIs) - - var got string - for i := 0; i < len(res); i++ { - if i != 0 { - got += "\n" - } - uri := span.URIFromURI(orderedURIs[i]) - if len(res) > 1 { - got += filepath.Base(uri.Filename()) + ":\n" - } - val := res[uri] - got += val - } - want := string(r.data.Golden(tag, filename, func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - t.Errorf("rename failed for %s:\n%s", newText, tests.Diff(t, want, got)) - } -} - -func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) { - m, err := r.data.Mapper(src.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := m.Location(src) - if err != nil { - t.Fatalf("failed for %v: %v", src, err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, - Position: loc.Range.Start, - } - params := &protocol.PrepareRenameParams{ - TextDocumentPositionParams: tdpp, - } - got, err := r.server.PrepareRename(context.Background(), params) - if err != nil { - t.Errorf("prepare rename failed for %v: got error: %v", src, err) - return - } - // we all love typed nils - if got == nil { - if want.Text != "" { // expected an ident. - t.Errorf("prepare rename failed for %v: got nil", src) - } - return - } - if got.Range.Start == got.Range.End { - // Special case for 0-length ranges. Marks can't specify a 0-length range, - // so just compare the start. - if got.Range.Start != want.Range.Start { - t.Errorf("prepare rename failed: incorrect point, got %v want %v", got.Range.Start, want.Range.Start) - } - } else { - if protocol.CompareRange(got.Range, want.Range) != 0 { - t.Errorf("prepare rename failed: incorrect range got %v want %v", got.Range, want.Range) - } - } - if got.Placeholder != want.Text { - t.Errorf("prepare rename failed: incorrect text got %v want %v", got.Placeholder, want.Text) - } -} - -func applyTextDocumentEdits(r *runner, edits []protocol.TextDocumentEdit) (map[span.URI]string, error) { - res := map[span.URI]string{} - for _, docEdits := range edits { - uri := docEdits.TextDocument.URI.SpanURI() - var m *protocol.ColumnMapper - // If we have already edited this file, we use the edited version (rather than the - // file in its original state) so that we preserve our initial changes. - if content, ok := res[uri]; ok { - m = protocol.NewColumnMapper(uri, []byte(content)) - } else { - var err error - if m, err = r.data.Mapper(uri); err != nil { - return nil, err - } - } - res[uri] = string(m.Content) - sedits, err := source.FromProtocolEdits(m, docEdits.Edits) - if err != nil { - return nil, err - } - res[uri] = applyEdits(res[uri], sedits) - } - return res, nil -} - -func applyEdits(contents string, edits []diff.TextEdit) string { - res := contents - - // Apply the edits from the end of the file forward - // to preserve the offsets - for i := len(edits) - 1; i >= 0; i-- { - edit := edits[i] - start := edit.Span.Start().Offset() - end := edit.Span.End().Offset() - tmp := res[0:start] + edit.NewText - res = tmp + res[end:] - } - return res -} - -func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) { - params := &protocol.DocumentSymbolParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - } - got, err := r.server.DocumentSymbol(r.ctx, params) - if err != nil { - t.Fatal(err) - } - if len(got) != len(expectedSymbols) { - t.Errorf("want %d top-level symbols in %v, got %d", len(expectedSymbols), uri, len(got)) - return - } - symbols := make([]protocol.DocumentSymbol, len(got)) - for i, s := range got { - s, ok := s.(protocol.DocumentSymbol) - if !ok { - t.Fatalf("%v: wanted []DocumentSymbols but got %v", uri, got) - } - symbols[i] = s - } - if diff := tests.DiffSymbols(t, uri, expectedSymbols, symbols); diff != "" { - t.Error(diff) - } -} - -func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { - r.callWorkspaceSymbols(t, uri, query, typ) -} - -func (r *runner) callWorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { - t.Helper() - - matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ) - - original := r.server.session.Options() - modified := original - modified.SymbolMatcher = matcher - r.server.session.SetOptions(modified) - defer r.server.session.SetOptions(original) - - params := &protocol.WorkspaceSymbolParams{ - Query: query, - } - gotSymbols, err := r.server.Symbol(r.ctx, params) - if err != nil { - t.Fatal(err) - } - got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols) - if err != nil { - t.Fatal(err) - } - got = filepath.ToSlash(tests.Normalize(got, r.normalizers)) - want := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if diff := tests.Diff(t, want, got); diff != "" { - t.Error(diff) - } -} - -func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) { - m, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := m.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", loc, err) - } - tdpp := protocol.TextDocumentPositionParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(spn.URI()), - }, - Position: loc.Range.Start, - } - params := &protocol.SignatureHelpParams{ - TextDocumentPositionParams: tdpp, - } - got, err := r.server.SignatureHelp(r.ctx, params) - if err != nil { - // Only fail if we got an error we did not expect. - if want != nil { - t.Fatal(err) - } - return - } - if want == nil { - if got != nil { - t.Errorf("expected no signature, got %v", got) - } - return - } - if got == nil { - t.Fatalf("expected %v, got nil", want) - } - diff, err := tests.DiffSignatures(spn, want, got) - if err != nil { - t.Fatal(err) - } - if diff != "" { - t.Error(diff) - } -} - -func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) { - m, err := r.data.Mapper(uri) - if err != nil { - t.Fatal(err) - } - got, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{ - TextDocument: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(uri), - }, - }) - if err != nil { - t.Fatal(err) - } - if diff := tests.DiffLinks(m, wantLinks, got); diff != "" { - t.Error(diff) - } -} - -func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) { - cmd, err := command.NewListKnownPackagesCommand("List Known Packages", command.URIArg{ - URI: protocol.URIFromSpanURI(uri), - }) - if err != nil { - t.Fatal(err) - } - resp, err := r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{ - Command: cmd.Command, - Arguments: cmd.Arguments, - }) - if err != nil { - t.Fatal(err) - } - res := resp.(command.ListKnownPackagesResult) - var hasPkg bool - for _, p := range res.Packages { - if p == expectedImport { - hasPkg = true - break - } - } - if !hasPkg { - t.Fatalf("%s: got %v packages\nwant contains %q", command.ListKnownPackages, res.Packages, expectedImport) - } - cmd, err = command.NewAddImportCommand("Add Imports", command.AddImportArgs{ - URI: protocol.URIFromSpanURI(uri), - ImportPath: expectedImport, - }) - if err != nil { - t.Fatal(err) - } - _, err = r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{ - Command: cmd.Command, - Arguments: cmd.Arguments, - }) - if err != nil { - t.Fatal(err) - } - got := (<-r.editRecv)[uri] - want := r.data.Golden("addimport", uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - }) - if want == nil { - t.Fatalf("golden file %q not found", uri.Filename()) - } - if diff := tests.Diff(t, got, string(want)); diff != "" { - t.Errorf("%s mismatch\n%s", command.AddImport, diff) - } -} - -func TestBytesOffset(t *testing.T) { - tests := []struct { - text string - pos protocol.Position - want int - }{ - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 0}, want: 0}, - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 1}, want: 1}, - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 2}, want: 1}, - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 3}, want: 5}, - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 4}, want: 6}, - {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 5}, want: -1}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 3}, want: 3}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 4}, want: 3}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 0}, want: 4}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 3}, want: 7}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 4}, want: 7}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8}, - {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 1}, want: -1}, - {text: "aaa\nbbb\n\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8}, - } - - for i, test := range tests { - fname := fmt.Sprintf("test %d", i) - fset := token.NewFileSet() - f := fset.AddFile(fname, -1, len(test.text)) - f.SetLinesForContent([]byte(test.text)) - uri := span.URIFromPath(fname) - mapper := protocol.NewColumnMapper(uri, []byte(test.text)) - got, err := mapper.Point(test.pos) - if err != nil && test.want != -1 { - t.Errorf("unexpected error: %v", err) - } - if err == nil && got.Offset() != test.want { - t.Errorf("want %d for %q(Line:%d,Character:%d), but got %d", test.want, test.text, int(test.pos.Line), int(test.pos.Character), got.Offset()) - } - } -} - -func (r *runner) collectDiagnostics(view source.View) { - if r.diagnostics != nil { - return - } - r.diagnostics = make(map[span.URI][]*source.Diagnostic) - - snapshot, release := view.Snapshot(r.ctx) - defer release() - - // Always run diagnostics with analysis. - r.server.diagnose(r.ctx, snapshot, true) - for uri, reports := range r.server.diagnostics { - for _, report := range reports.reports { - for _, d := range report.diags { - r.diagnostics[uri] = append(r.diagnostics[uri], d) - } - } - } -} diff --git a/internal/lsp/lsppos/lsppos.go b/internal/lsp/lsppos/lsppos.go deleted file mode 100644 index 35f6f134854..00000000000 --- a/internal/lsp/lsppos/lsppos.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lsppos provides utilities for working with LSP positions. Much of -// this functionality is duplicated from the internal/span package, but this -// package is simpler and more accurate with respect to newline terminated -// content. -// -// See https://microsoft.github.io/language-server-protocol/specification#textDocuments -// for a description of LSP positions. Notably: -// - Positions are specified by a 0-based line count and 0-based utf-16 -// character offset. -// - Positions are line-ending agnostic: there is no way to specify \r|\n or -// \n|. Instead the former maps to the end of the current line, and the -// latter to the start of the next line. -package lsppos - -import ( - "errors" - "sort" - "unicode/utf8" - - "golang.org/x/tools/internal/lsp/protocol" -) - -// Mapper maps utf-8 byte offsets to LSP positions for a single file. -type Mapper struct { - nonASCII bool - content []byte - - // Start-of-line positions. If src is newline-terminated, the final entry - // will be len(content). - lines []int -} - -// NewMapper creates a new Mapper for the given content. -func NewMapper(content []byte) *Mapper { - m := &Mapper{ - content: content, - lines: []int{0}, - } - for offset, b := range content { - if b == '\n' { - m.lines = append(m.lines, offset+1) - } - if b >= utf8.RuneSelf { - m.nonASCII = true - } - } - return m -} - -// LineColUTF16 returns the 0-based UTF-16 line and character index for the -// given offset. It returns -1, -1 if offset is out of bounds for the file -// being mapped. -func (m *Mapper) LineColUTF16(offset int) (line, char int) { - if offset < 0 || offset > len(m.content) { - return -1, -1 - } - nextLine := sort.Search(len(m.lines), func(i int) bool { - return offset < m.lines[i] - }) - if nextLine == 0 { - return -1, -1 - } - line = nextLine - 1 - start := m.lines[line] - var charOffset int - if m.nonASCII { - charOffset = UTF16len(m.content[start:offset]) - } else { - charOffset = offset - start - } - - var eol int - if line == len(m.lines)-1 { - eol = len(m.content) - } else { - eol = m.lines[line+1] - 1 - } - - // Adjustment for line-endings: \r|\n is the same as |\r\n. - if offset == eol && offset > 0 && m.content[offset-1] == '\r' { - charOffset-- - } - - return line, charOffset -} - -// Position returns the protocol position corresponding to the given offset. It -// returns false if offset is out of bounds for the file being mapped. -func (m *Mapper) Position(offset int) (protocol.Position, bool) { - l, c := m.LineColUTF16(offset) - if l < 0 { - return protocol.Position{}, false - } - return protocol.Position{ - Line: uint32(l), - Character: uint32(c), - }, true -} - -// Range returns the protocol range corresponding to the given start and end -// offsets. -func (m *Mapper) Range(start, end int) (protocol.Range, error) { - startPos, ok := m.Position(start) - if !ok { - return protocol.Range{}, errors.New("invalid start position") - } - endPos, ok := m.Position(end) - if !ok { - return protocol.Range{}, errors.New("invalid end position") - } - - return protocol.Range{Start: startPos, End: endPos}, nil -} - -// UTF16Len returns the UTF-16 length of the UTF-8 encoded content, were it to -// be re-encoded as UTF-16. -func UTF16len(buf []byte) int { - // This function copies buf, but microbenchmarks showed it to be faster than - // using utf8.DecodeRune due to inlining and avoiding bounds checks. - cnt := 0 - for _, r := range string(buf) { - cnt++ - if r >= 1<<16 { - cnt++ - } - } - return cnt -} diff --git a/internal/lsp/lsppos/lsppos_test.go b/internal/lsp/lsppos/lsppos_test.go deleted file mode 100644 index 8353f927681..00000000000 --- a/internal/lsp/lsppos/lsppos_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsppos_test - -import ( - "fmt" - "strings" - "testing" - - . "golang.org/x/tools/internal/lsp/lsppos" - "golang.org/x/tools/internal/lsp/protocol" -) - -type testCase struct { - content string // input text - substrOrOffset interface{} // explicit integer offset, or a substring - wantLine, wantChar int // expected LSP position information -} - -// offset returns the test case byte offset -func (c testCase) offset() int { - switch x := c.substrOrOffset.(type) { - case int: - return x - case string: - i := strings.Index(c.content, x) - if i < 0 { - panic(fmt.Sprintf("%q does not contain substring %q", c.content, x)) - } - return i - } - panic("substrOrIndex must be an integer or string") -} - -var tests = []testCase{ - {"a𐐀b", "a", 0, 0}, - {"a𐐀b", "𐐀", 0, 1}, - {"a𐐀b", "b", 0, 3}, - {"a𐐀b\n", "\n", 0, 4}, - {"a𐐀b\r\n", "\n", 0, 4}, // \r|\n is not a valid position, so we move back to the end of the first line. - {"a𐐀b\r\nx", "x", 1, 0}, - {"a𐐀b\r\nx\ny", "y", 2, 0}, - - // Testing EOL and EOF positions - {"", 0, 0, 0}, // 0th position of an empty buffer is (0, 0) - {"abc", "c", 0, 2}, - {"abc", 3, 0, 3}, - {"abc\n", "\n", 0, 3}, - {"abc\n", 4, 1, 0}, // position after a newline is on the next line -} - -func TestLineChar(t *testing.T) { - for _, test := range tests { - m := NewMapper([]byte(test.content)) - offset := test.offset() - gotLine, gotChar := m.LineColUTF16(offset) - if gotLine != test.wantLine || gotChar != test.wantChar { - t.Errorf("LineChar(%d) = (%d,%d), want (%d,%d)", offset, gotLine, gotChar, test.wantLine, test.wantChar) - } - } -} - -func TestInvalidOffset(t *testing.T) { - content := []byte("a𐐀b\r\nx\ny") - m := NewMapper(content) - for _, offset := range []int{-1, 100} { - gotLine, gotChar := m.LineColUTF16(offset) - if gotLine != -1 { - t.Errorf("LineChar(%d) = (%d,%d), want (-1,-1)", offset, gotLine, gotChar) - } - } -} - -func TestPosition(t *testing.T) { - for _, test := range tests { - m := NewMapper([]byte(test.content)) - offset := test.offset() - got, ok := m.Position(offset) - if !ok { - t.Error("invalid position for", test.substrOrOffset) - continue - } - want := protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)} - if got != want { - t.Errorf("Position(%d) = %v, want %v", offset, got, want) - } - } -} - -func TestRange(t *testing.T) { - for _, test := range tests { - m := NewMapper([]byte(test.content)) - offset := test.offset() - got, err := m.Range(0, offset) - if err != nil { - t.Fatal(err) - } - want := protocol.Range{ - End: protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)}, - } - if got != want { - t.Errorf("Range(%d) = %v, want %v", offset, got, want) - } - } -} diff --git a/internal/lsp/lsppos/token.go b/internal/lsp/lsppos/token.go deleted file mode 100644 index 0f1f2b24c7b..00000000000 --- a/internal/lsp/lsppos/token.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsppos - -import ( - "errors" - "go/token" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" -) - -// TokenMapper maps token.Pos to LSP positions for a single file. -type TokenMapper struct { - // file is used for computing offsets. - file *token.File - - // For now, just delegate to a Mapper for position calculation. As an - // optimization we could avoid building the mapper and just use the file, but - // then have to correctly adjust for newline-terminated files. It is easier - // to just delegate unless performance becomes a concern. - mapper *Mapper -} - -// NewMapper creates a new TokenMapper for the given content, using the -// provided file to compute offsets. -func NewTokenMapper(content []byte, file *token.File) *TokenMapper { - return &TokenMapper{ - file: file, - mapper: NewMapper(content), - } -} - -// Position returns the protocol position corresponding to the given pos. It -// returns false if pos is out of bounds for the file being mapped. -func (m *TokenMapper) Position(pos token.Pos) (protocol.Position, bool) { - offset, err := safetoken.Offset(m.file, pos) - if err != nil { - return protocol.Position{}, false - } - return m.mapper.Position(offset) -} - -// Range returns the protocol range corresponding to the given start and end -// positions. It returns an error if start or end is out of bounds for the file -// being mapped. -func (m *TokenMapper) Range(start, end token.Pos) (protocol.Range, error) { - startPos, ok := m.Position(start) - if !ok { - return protocol.Range{}, errors.New("invalid start position") - } - endPos, ok := m.Position(end) - if !ok { - return protocol.Range{}, errors.New("invalid end position") - } - - return protocol.Range{Start: startPos, End: endPos}, nil -} diff --git a/internal/lsp/lsppos/token_test.go b/internal/lsp/lsppos/token_test.go deleted file mode 100644 index c12d15026c7..00000000000 --- a/internal/lsp/lsppos/token_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsppos_test - -import ( - "go/token" - "testing" - - . "golang.org/x/tools/internal/lsp/lsppos" - "golang.org/x/tools/internal/lsp/protocol" -) - -func makeTokenMapper(content []byte) (*TokenMapper, *token.File) { - file := token.NewFileSet().AddFile("p.go", -1, len(content)) - file.SetLinesForContent(content) - return NewTokenMapper(content, file), file -} - -func TestInvalidPosition(t *testing.T) { - content := []byte("a𐐀b\r\nx\ny") - m, _ := makeTokenMapper(content) - - for _, pos := range []token.Pos{-1, 100} { - posn, ok := m.Position(pos) - if ok { - t.Errorf("Position(%d) = %v, want error", pos, posn) - } - } -} - -func TestTokenPosition(t *testing.T) { - for _, test := range tests { - m, f := makeTokenMapper([]byte(test.content)) - pos := token.Pos(f.Base() + test.offset()) - got, ok := m.Position(pos) - if !ok { - t.Error("invalid position for", test.substrOrOffset) - continue - } - want := protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)} - if got != want { - t.Errorf("Position(%d) = %v, want %v", pos, got, want) - } - gotRange, err := m.Range(token.Pos(f.Base()), pos) - if err != nil { - t.Fatal(err) - } - wantRange := protocol.Range{ - End: want, - } - if gotRange != wantRange { - t.Errorf("Range(%d) = %v, want %v", pos, got, want) - } - } -} diff --git a/internal/lsp/mod/code_lens.go b/internal/lsp/mod/code_lens.go deleted file mode 100644 index b26bae75c47..00000000000 --- a/internal/lsp/mod/code_lens.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mod - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -// LensFuncs returns the supported lensFuncs for go.mod files. -func LensFuncs() map[command.Command]source.LensFunc { - return map[command.Command]source.LensFunc{ - command.UpgradeDependency: upgradeLenses, - command.Tidy: tidyLens, - command.Vendor: vendorLens, - } -} - -func upgradeLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil || pm.File == nil { - return nil, err - } - if len(pm.File.Require) == 0 { - // Nothing to upgrade. - return nil, nil - } - var requires []string - for _, req := range pm.File.Require { - requires = append(requires, req.Mod.Path) - } - uri := protocol.URIFromSpanURI(fh.URI()) - checkUpgrade, err := command.NewCheckUpgradesCommand("Check for upgrades", command.CheckUpgradesArgs{ - URI: uri, - Modules: requires, - }) - if err != nil { - return nil, err - } - upgradeTransitive, err := command.NewUpgradeDependencyCommand("Upgrade transitive dependencies", command.DependencyArgs{ - URI: uri, - AddRequire: false, - GoCmdArgs: []string{"-d", "-u", "-t", "./..."}, - }) - if err != nil { - return nil, err - } - upgradeDirect, err := command.NewUpgradeDependencyCommand("Upgrade direct dependencies", command.DependencyArgs{ - URI: uri, - AddRequire: false, - GoCmdArgs: append([]string{"-d"}, requires...), - }) - if err != nil { - return nil, err - } - // Put the upgrade code lenses above the first require block or statement. - rng, err := firstRequireRange(fh, pm) - if err != nil { - return nil, err - } - - return []protocol.CodeLens{ - {Range: rng, Command: checkUpgrade}, - {Range: rng, Command: upgradeTransitive}, - {Range: rng, Command: upgradeDirect}, - }, nil -} - -func tidyLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil || pm.File == nil { - return nil, err - } - uri := protocol.URIFromSpanURI(fh.URI()) - cmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: []protocol.DocumentURI{uri}}) - if err != nil { - return nil, err - } - rng, err := moduleStmtRange(fh, pm) - if err != nil { - return nil, err - } - return []protocol.CodeLens{{ - Range: rng, - Command: cmd, - }}, nil -} - -func vendorLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) { - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil || pm.File == nil { - return nil, err - } - if len(pm.File.Require) == 0 { - // Nothing to vendor. - return nil, nil - } - rng, err := moduleStmtRange(fh, pm) - if err != nil { - return nil, err - } - title := "Create vendor directory" - uri := protocol.URIFromSpanURI(fh.URI()) - cmd, err := command.NewVendorCommand(title, command.URIArg{URI: uri}) - if err != nil { - return nil, err - } - // Change the message depending on whether or not the module already has a - // vendor directory. - vendorDir := filepath.Join(filepath.Dir(fh.URI().Filename()), "vendor") - if info, _ := os.Stat(vendorDir); info != nil && info.IsDir() { - title = "Sync vendor directory" - } - return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil -} - -func moduleStmtRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) { - if pm.File == nil || pm.File.Module == nil || pm.File.Module.Syntax == nil { - return protocol.Range{}, fmt.Errorf("no module statement in %s", fh.URI()) - } - syntax := pm.File.Module.Syntax - return source.LineToRange(pm.Mapper, fh.URI(), syntax.Start, syntax.End) -} - -// firstRequireRange returns the range for the first "require" in the given -// go.mod file. This is either a require block or an individual require line. -func firstRequireRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) { - if len(pm.File.Require) == 0 { - return protocol.Range{}, fmt.Errorf("no requires in the file %s", fh.URI()) - } - var start, end modfile.Position - for _, stmt := range pm.File.Syntax.Stmt { - if b, ok := stmt.(*modfile.LineBlock); ok && len(b.Token) == 1 && b.Token[0] == "require" { - start, end = b.Span() - break - } - } - - firstRequire := pm.File.Require[0].Syntax - if start.Byte == 0 || firstRequire.Start.Byte < start.Byte { - start, end = firstRequire.Start, firstRequire.End - } - return source.LineToRange(pm.Mapper, fh.URI(), start, end) -} diff --git a/internal/lsp/mod/diagnostics.go b/internal/lsp/mod/diagnostics.go deleted file mode 100644 index 9c49d8b36b1..00000000000 --- a/internal/lsp/mod/diagnostics.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mod provides core features related to go.mod file -// handling for use by Go editors and tools. -package mod - -import ( - "context" - "fmt" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) { - ctx, done := event.Start(ctx, "mod.Diagnostics", tag.Snapshot.Of(snapshot.ID())) - defer done() - - reports := map[source.VersionedFileIdentity][]*source.Diagnostic{} - for _, uri := range snapshot.ModFiles() { - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - return nil, err - } - reports[fh.VersionedFileIdentity()] = []*source.Diagnostic{} - diagnostics, err := DiagnosticsForMod(ctx, snapshot, fh) - if err != nil { - return nil, err - } - for _, d := range diagnostics { - fh, err := snapshot.GetVersionedFile(ctx, d.URI) - if err != nil { - return nil, err - } - reports[fh.VersionedFileIdentity()] = append(reports[fh.VersionedFileIdentity()], d) - } - } - return reports, nil -} - -func DiagnosticsForMod(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) { - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - if pm == nil || len(pm.ParseErrors) == 0 { - return nil, err - } - return pm.ParseErrors, nil - } - - var diagnostics []*source.Diagnostic - - // Add upgrade quick fixes for individual modules if we know about them. - upgrades := snapshot.View().ModuleUpgrades() - for _, req := range pm.File.Require { - ver, ok := upgrades[req.Mod.Path] - if !ok || req.Mod.Version == ver { - continue - } - rng, err := source.LineToRange(pm.Mapper, fh.URI(), req.Syntax.Start, req.Syntax.End) - if err != nil { - return nil, err - } - // Upgrade to the exact version we offer the user, not the most recent. - title := fmt.Sprintf("Upgrade to %v", ver) - cmd, err := command.NewUpgradeDependencyCommand(title, command.DependencyArgs{ - URI: protocol.URIFromSpanURI(fh.URI()), - AddRequire: false, - GoCmdArgs: []string{req.Mod.Path + "@" + ver}, - }) - if err != nil { - return nil, err - } - diagnostics = append(diagnostics, &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityInformation, - Source: source.UpgradeNotification, - Message: fmt.Sprintf("%v can be upgraded", req.Mod.Path), - SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, - }) - } - - // Packages in the workspace can contribute diagnostics to go.mod files. - wspkgs, err := snapshot.ActivePackages(ctx) - if err != nil && !source.IsNonFatalGoModError(err) { - event.Error(ctx, fmt.Sprintf("workspace packages: diagnosing %s", pm.URI), err) - } - if err == nil { - for _, pkg := range wspkgs { - pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg) - if err != nil { - return nil, err - } - diagnostics = append(diagnostics, pkgDiagnostics[fh.URI()]...) - } - } - - tidied, err := snapshot.ModTidy(ctx, pm) - if err != nil && !source.IsNonFatalGoModError(err) { - event.Error(ctx, fmt.Sprintf("tidy: diagnosing %s", pm.URI), err) - } - if err == nil { - for _, d := range tidied.Diagnostics { - if d.URI != fh.URI() { - continue - } - diagnostics = append(diagnostics, d) - } - } - return diagnostics, nil -} diff --git a/internal/lsp/mod/format.go b/internal/lsp/mod/format.go deleted file mode 100644 index c3557663272..00000000000 --- a/internal/lsp/mod/format.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mod - -import ( - "context" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) { - ctx, done := event.Start(ctx, "mod.Format") - defer done() - - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - return nil, err - } - formatted, err := pm.File.Format() - if err != nil { - return nil, err - } - // Calculate the edits to be made due to the change. - diff, err := snapshot.View().Options().ComputeEdits(fh.URI(), string(pm.Mapper.Content), string(formatted)) - if err != nil { - return nil, err - } - return source.ToProtocolEdits(pm.Mapper, diff) -} diff --git a/internal/lsp/mod/hover.go b/internal/lsp/mod/hover.go deleted file mode 100644 index 1461d52edbd..00000000000 --- a/internal/lsp/mod/hover.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mod - -import ( - "bytes" - "context" - "fmt" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) { - var found bool - for _, uri := range snapshot.ModFiles() { - if fh.URI() == uri { - found = true - break - } - } - - // We only provide hover information for the view's go.mod files. - if !found { - return nil, nil - } - - ctx, done := event.Start(ctx, "mod.Hover") - defer done() - - // Get the position of the cursor. - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - return nil, fmt.Errorf("getting modfile handle: %w", err) - } - offset, err := pm.Mapper.Offset(position) - if err != nil { - return nil, fmt.Errorf("computing cursor position: %w", err) - } - - // Confirm that the cursor is at the position of a require statement. - var req *modfile.Require - var startPos, endPos int - for _, r := range pm.File.Require { - dep := []byte(r.Mod.Path) - s, e := r.Syntax.Start.Byte, r.Syntax.End.Byte - i := bytes.Index(pm.Mapper.Content[s:e], dep) - if i == -1 { - continue - } - // Shift the start position to the location of the - // dependency within the require statement. - startPos, endPos = s+i, s+i+len(dep) - if startPos <= offset && offset <= endPos { - req = r - break - } - } - - // The cursor position is not on a require statement. - if req == nil { - return nil, nil - } - - // Get the `go mod why` results for the given file. - why, err := snapshot.ModWhy(ctx, fh) - if err != nil { - return nil, err - } - explanation, ok := why[req.Mod.Path] - if !ok { - return nil, nil - } - - // Get the range to highlight for the hover. - rng, err := source.ByteOffsetsToRange(pm.Mapper, fh.URI(), startPos, endPos) - if err != nil { - return nil, err - } - if err != nil { - return nil, err - } - options := snapshot.View().Options() - isPrivate := snapshot.View().IsGoPrivatePath(req.Mod.Path) - explanation = formatExplanation(explanation, req, options, isPrivate) - return &protocol.Hover{ - Contents: protocol.MarkupContent{ - Kind: options.PreferredContentFormat, - Value: explanation, - }, - Range: rng, - }, nil -} - -func formatExplanation(text string, req *modfile.Require, options *source.Options, isPrivate bool) string { - text = strings.TrimSuffix(text, "\n") - splt := strings.Split(text, "\n") - length := len(splt) - - var b strings.Builder - // Write the heading as an H3. - b.WriteString("##" + splt[0]) - if options.PreferredContentFormat == protocol.Markdown { - b.WriteString("\n\n") - } else { - b.WriteRune('\n') - } - - // If the explanation is 2 lines, then it is of the form: - // # golang.org/x/text/encoding - // (main module does not need package golang.org/x/text/encoding) - if length == 2 { - b.WriteString(splt[1]) - return b.String() - } - - imp := splt[length-1] // import path - reference := imp - // See golang/go#36998: don't link to modules matching GOPRIVATE. - if !isPrivate && options.PreferredContentFormat == protocol.Markdown { - target := imp - if strings.ToLower(options.LinkTarget) == "pkg.go.dev" { - target = strings.Replace(target, req.Mod.Path, req.Mod.String(), 1) - } - reference = fmt.Sprintf("[%s](%s)", imp, source.BuildLink(options.LinkTarget, target, "")) - } - b.WriteString("This module is necessary because " + reference + " is imported in") - - // If the explanation is 3 lines, then it is of the form: - // # golang.org/x/tools - // modtest - // golang.org/x/tools/go/packages - if length == 3 { - msg := fmt.Sprintf(" `%s`.", splt[1]) - b.WriteString(msg) - return b.String() - } - - // If the explanation is more than 3 lines, then it is of the form: - // # golang.org/x/text/language - // rsc.io/quote - // rsc.io/sampler - // golang.org/x/text/language - b.WriteString(":\n```text") - dash := "" - for _, imp := range splt[1 : length-1] { - dash += "-" - b.WriteString("\n" + dash + " " + imp) - } - b.WriteString("\n```") - return b.String() -} diff --git a/internal/lsp/protocol/doc.go b/internal/lsp/protocol/doc.go deleted file mode 100644 index 2ffdf51287e..00000000000 --- a/internal/lsp/protocol/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protocol contains the structs that map directly to the wire format -// of the "Language Server Protocol". -// -// It is a literal transcription, with unmodified comments, and only the changes -// required to make it go code. -// Names are uppercased to export them. -// All fields have JSON tags added to correct the names. -// Fields marked with a ? are also marked as "omitempty" -// Fields that are "|| null" are made pointers -// Fields that are string or number are left as string -// Fields that are type "number" are made float64 -package protocol diff --git a/internal/lsp/protocol/span.go b/internal/lsp/protocol/span.go deleted file mode 100644 index 744746d3538..00000000000 --- a/internal/lsp/protocol/span.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// this file contains protocol<->span converters - -package protocol - -import ( - "fmt" - "go/token" - - "golang.org/x/tools/internal/span" -) - -// A ColumnMapper maps between UTF-8 oriented positions (e.g. token.Pos, -// span.Span) and the UTF-16 oriented positions used by the LSP. -type ColumnMapper struct { - URI span.URI - TokFile *token.File - Content []byte -} - -// NewColumnMapper creates a new column mapper for the given uri and content. -func NewColumnMapper(uri span.URI, content []byte) *ColumnMapper { - tf := span.NewTokenFile(uri.Filename(), content) - return &ColumnMapper{ - URI: uri, - TokFile: tf, - Content: content, - } -} - -func URIFromSpanURI(uri span.URI) DocumentURI { - return DocumentURI(uri) -} - -func URIFromPath(path string) DocumentURI { - return URIFromSpanURI(span.URIFromPath(path)) -} - -func (u DocumentURI) SpanURI() span.URI { - return span.URIFromURI(string(u)) -} - -func (m *ColumnMapper) Location(s span.Span) (Location, error) { - rng, err := m.Range(s) - if err != nil { - return Location{}, err - } - return Location{URI: URIFromSpanURI(s.URI()), Range: rng}, nil -} - -func (m *ColumnMapper) Range(s span.Span) (Range, error) { - if span.CompareURI(m.URI, s.URI()) != 0 { - return Range{}, fmt.Errorf("column mapper is for file %q instead of %q", m.URI, s.URI()) - } - s, err := s.WithAll(m.TokFile) - if err != nil { - return Range{}, err - } - start, err := m.Position(s.Start()) - if err != nil { - return Range{}, err - } - end, err := m.Position(s.End()) - if err != nil { - return Range{}, err - } - return Range{Start: start, End: end}, nil -} - -func (m *ColumnMapper) Position(p span.Point) (Position, error) { - chr, err := span.ToUTF16Column(p, m.Content) - if err != nil { - return Position{}, err - } - return Position{ - Line: uint32(p.Line() - 1), - Character: uint32(chr - 1), - }, nil -} - -func (m *ColumnMapper) Span(l Location) (span.Span, error) { - return m.RangeSpan(l.Range) -} - -func (m *ColumnMapper) RangeSpan(r Range) (span.Span, error) { - start, err := m.Point(r.Start) - if err != nil { - return span.Span{}, err - } - end, err := m.Point(r.End) - if err != nil { - return span.Span{}, err - } - return span.New(m.URI, start, end).WithAll(m.TokFile) -} - -func (m *ColumnMapper) RangeToSpanRange(r Range) (span.Range, error) { - spn, err := m.RangeSpan(r) - if err != nil { - return span.Range{}, err - } - return spn.Range(m.TokFile) -} - -// Pos returns the token.Pos of p within the mapped file. -func (m *ColumnMapper) Pos(p Position) (token.Pos, error) { - start, err := m.Point(p) - if err != nil { - return token.NoPos, err - } - // TODO: refactor the span package to avoid creating this unnecessary end position. - spn, err := span.New(m.URI, start, start).WithAll(m.TokFile) - if err != nil { - return token.NoPos, err - } - rng, err := spn.Range(m.TokFile) - if err != nil { - return token.NoPos, err - } - return rng.Start, nil -} - -// Offset returns the utf-8 byte offset of p within the mapped file. -func (m *ColumnMapper) Offset(p Position) (int, error) { - start, err := m.Point(p) - if err != nil { - return 0, err - } - return start.Offset(), nil -} - -// Point returns a span.Point for p within the mapped file. The resulting point -// always has an Offset. -func (m *ColumnMapper) Point(p Position) (span.Point, error) { - line := int(p.Line) + 1 - offset, err := span.ToOffset(m.TokFile, line, 1) - if err != nil { - return span.Point{}, err - } - lineStart := span.NewPoint(line, 1, offset) - return span.FromUTF16Column(lineStart, int(p.Character)+1, m.Content) -} - -func IsPoint(r Range) bool { - return r.Start.Line == r.End.Line && r.Start.Character == r.End.Character -} - -func CompareRange(a, b Range) int { - if r := ComparePosition(a.Start, b.Start); r != 0 { - return r - } - return ComparePosition(a.End, b.End) -} - -func ComparePosition(a, b Position) int { - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Character < b.Character { - return -1 - } - if a.Character > b.Character { - return 1 - } - return 0 -} - -func Intersect(a, b Range) bool { - if a.Start.Line > b.End.Line || a.End.Line < b.Start.Line { - return false - } - return !((a.Start.Line == b.End.Line) && a.Start.Character > b.End.Character || - (a.End.Line == b.Start.Line) && a.End.Character < b.Start.Character) -} - -func (r Range) Format(f fmt.State, _ rune) { - fmt.Fprintf(f, "%v:%v-%v:%v", r.Start.Line, r.Start.Character, r.End.Line, r.End.Character) -} diff --git a/internal/lsp/protocol/tsclient.go b/internal/lsp/protocol/tsclient.go deleted file mode 100644 index 971a2df72b1..00000000000 --- a/internal/lsp/protocol/tsclient.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated (see typescript/README.md) DO NOT EDIT. - -package protocol - -// Package protocol contains data types and code for LSP json rpcs -// generated automatically from vscode-languageserver-node -// commit: 696f9285bf849b73745682fdb1c1feac73eb8772 -// last fetched Fri Apr 01 2022 10:53:41 GMT-0400 (Eastern Daylight Time) - -import ( - "context" - "encoding/json" - "fmt" - - "golang.org/x/tools/internal/jsonrpc2" -) - -type Client interface { - ShowMessage(context.Context, *ShowMessageParams) error - LogMessage(context.Context, *LogMessageParams) error - Event(context.Context, *interface{}) error - PublishDiagnostics(context.Context, *PublishDiagnosticsParams) error - Progress(context.Context, *ProgressParams) error - WorkspaceFolders(context.Context) ([]WorkspaceFolder /*WorkspaceFolder[] | null*/, error) - Configuration(context.Context, *ParamConfiguration) ([]LSPAny, error) - WorkDoneProgressCreate(context.Context, *WorkDoneProgressCreateParams) error - ShowDocument(context.Context, *ShowDocumentParams) (*ShowDocumentResult, error) - RegisterCapability(context.Context, *RegistrationParams) error - UnregisterCapability(context.Context, *UnregistrationParams) error - ShowMessageRequest(context.Context, *ShowMessageRequestParams) (*MessageActionItem /*MessageActionItem | null*/, error) - ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) -} - -func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { - switch r.Method() { - case "window/showMessage": // notif - var params ShowMessageParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.ShowMessage(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "window/logMessage": // notif - var params LogMessageParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.LogMessage(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "telemetry/event": // notif - var params interface{} - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.Event(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/publishDiagnostics": // notif - var params PublishDiagnosticsParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.PublishDiagnostics(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "$/progress": // notif - var params ProgressParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.Progress(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "workspace/workspaceFolders": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - resp, err := client.WorkspaceFolders(ctx) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/configuration": // req - var params ParamConfiguration - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := client.Configuration(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "window/workDoneProgress/create": // req - var params WorkDoneProgressCreateParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.WorkDoneProgressCreate(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "window/showDocument": // req - var params ShowDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := client.ShowDocument(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "client/registerCapability": // req - var params RegistrationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.RegisterCapability(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "client/unregisterCapability": // req - var params UnregistrationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := client.UnregisterCapability(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "window/showMessageRequest": // req - var params ShowMessageRequestParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := client.ShowMessageRequest(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/applyEdit": // req - var params ApplyWorkspaceEditParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := client.ApplyEdit(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - - default: - return false, nil - } -} - -func (s *clientDispatcher) ShowMessage(ctx context.Context, params *ShowMessageParams) error { - return s.sender.Notify(ctx, "window/showMessage", params) -} - -func (s *clientDispatcher) LogMessage(ctx context.Context, params *LogMessageParams) error { - return s.sender.Notify(ctx, "window/logMessage", params) -} - -func (s *clientDispatcher) Event(ctx context.Context, params *interface{}) error { - return s.sender.Notify(ctx, "telemetry/event", params) -} - -func (s *clientDispatcher) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) error { - return s.sender.Notify(ctx, "textDocument/publishDiagnostics", params) -} - -func (s *clientDispatcher) Progress(ctx context.Context, params *ProgressParams) error { - return s.sender.Notify(ctx, "$/progress", params) -} -func (s *clientDispatcher) WorkspaceFolders(ctx context.Context) ([]WorkspaceFolder /*WorkspaceFolder[] | null*/, error) { - var result []WorkspaceFolder /*WorkspaceFolder[] | null*/ - if err := s.sender.Call(ctx, "workspace/workspaceFolders", nil, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *clientDispatcher) Configuration(ctx context.Context, params *ParamConfiguration) ([]LSPAny, error) { - var result []LSPAny - if err := s.sender.Call(ctx, "workspace/configuration", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *clientDispatcher) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) error { - return s.sender.Call(ctx, "window/workDoneProgress/create", params, nil) // Call, not Notify -} - -func (s *clientDispatcher) ShowDocument(ctx context.Context, params *ShowDocumentParams) (*ShowDocumentResult, error) { - var result *ShowDocumentResult - if err := s.sender.Call(ctx, "window/showDocument", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *clientDispatcher) RegisterCapability(ctx context.Context, params *RegistrationParams) error { - return s.sender.Call(ctx, "client/registerCapability", params, nil) // Call, not Notify -} - -func (s *clientDispatcher) UnregisterCapability(ctx context.Context, params *UnregistrationParams) error { - return s.sender.Call(ctx, "client/unregisterCapability", params, nil) // Call, not Notify -} - -func (s *clientDispatcher) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (*MessageActionItem /*MessageActionItem | null*/, error) { - var result *MessageActionItem /*MessageActionItem | null*/ - if err := s.sender.Call(ctx, "window/showMessageRequest", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *clientDispatcher) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) { - var result *ApplyWorkspaceEditResult - if err := s.sender.Call(ctx, "workspace/applyEdit", params, &result); err != nil { - return nil, err - } - return result, nil -} diff --git a/internal/lsp/protocol/tsprotocol.go b/internal/lsp/protocol/tsprotocol.go deleted file mode 100644 index 647aabc2ee1..00000000000 --- a/internal/lsp/protocol/tsprotocol.go +++ /dev/null @@ -1,6750 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated (see typescript/README.md) DO NOT EDIT. - -// Package protocol contains data types and code for LSP json rpcs -// generated automatically from vscode-languageserver-node -// commit: 696f9285bf849b73745682fdb1c1feac73eb8772 -// last fetched Fri Apr 01 2022 10:53:41 GMT-0400 (Eastern Daylight Time) -package protocol - -import "encoding/json" - -/** - * A special text edit with an additional change annotation. - * - * @since 3.16.0. - */ -type AnnotatedTextEdit struct { - /** - * The actual identifier of the change annotation - */ - AnnotationID ChangeAnnotationIdentifier `json:"annotationId"` - TextEdit -} - -/** - * The parameters passed via a apply workspace edit request. - */ -type ApplyWorkspaceEditParams struct { - /** - * An optional label of the workspace edit. This label is - * presented in the user interface for example on an undo - * stack to undo the workspace edit. - */ - Label string `json:"label,omitempty"` - /** - * The edits to apply. - */ - Edit WorkspaceEdit `json:"edit"` -} - -/** - * The result returned from the apply workspace edit request. - * - * @since 3.17 renamed from ApplyWorkspaceEditResponse - */ -type ApplyWorkspaceEditResult struct { - /** - * Indicates whether the edit was applied or not. - */ - Applied bool `json:"applied"` - /** - * An optional textual description for why the edit was not applied. - * This may be used by the server for diagnostic logging or to provide - * a suitable error for a request that triggered the edit. - */ - FailureReason string `json:"failureReason,omitempty"` - /** - * Depending on the client's failure handling strategy `failedChange` might - * contain the index of the change that failed. This property is only available - * if the client signals a `failureHandlingStrategy` in its client capabilities. - */ - FailedChange uint32 `json:"failedChange,omitempty"` -} - -/** - * @since 3.16.0 - */ -type CallHierarchyClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` - * return value for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Represents an incoming call, e.g. a caller of a method or constructor. - * - * @since 3.16.0 - */ -type CallHierarchyIncomingCall struct { - /** - * The item that makes the call. - */ - From CallHierarchyItem `json:"from"` - /** - * The ranges at which the calls appear. This is relative to the caller - * denoted by [`this.from`](#CallHierarchyIncomingCall.from). - */ - FromRanges []Range `json:"fromRanges"` -} - -/** - * The parameter of a `callHierarchy/incomingCalls` request. - * - * @since 3.16.0 - */ -type CallHierarchyIncomingCallsParams struct { - Item CallHierarchyItem `json:"item"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * Represents programming constructs like functions or constructors in the context - * of call hierarchy. - * - * @since 3.16.0 - */ -type CallHierarchyItem struct { - /** - * The name of this item. - */ - Name string `json:"name"` - /** - * The kind of this item. - */ - Kind SymbolKind `json:"kind"` - /** - * Tags for this item. - */ - Tags []SymbolTag `json:"tags,omitempty"` - /** - * More detail for this item, e.g. the signature of a function. - */ - Detail string `json:"detail,omitempty"` - /** - * The resource identifier of this item. - */ - URI DocumentURI `json:"uri"` - /** - * The range enclosing this symbol not including leading/trailing whitespace but everything else, e.g. comments and code. - */ - Range Range `json:"range"` - /** - * The range that should be selected and revealed when this symbol is being picked, e.g. the name of a function. - * Must be contained by the [`range`](#CallHierarchyItem.range). - */ - SelectionRange Range `json:"selectionRange"` - /** - * A data entry field that is preserved between a call hierarchy prepare and - * incoming calls or outgoing calls requests. - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * Call hierarchy options used during static registration. - * - * @since 3.16.0 - */ -type CallHierarchyOptions struct { - WorkDoneProgressOptions -} - -/** - * Represents an outgoing call, e.g. calling a getter from a method or a method from a constructor etc. - * - * @since 3.16.0 - */ -type CallHierarchyOutgoingCall struct { - /** - * The item that is called. - */ - To CallHierarchyItem `json:"to"` - /** - * The range at which this item is called. This is the range relative to the caller, e.g the item - * passed to [`provideCallHierarchyOutgoingCalls`](#CallHierarchyItemProvider.provideCallHierarchyOutgoingCalls) - * and not [`this.to`](#CallHierarchyOutgoingCall.to). - */ - FromRanges []Range `json:"fromRanges"` -} - -/** - * The parameter of a `callHierarchy/outgoingCalls` request. - * - * @since 3.16.0 - */ -type CallHierarchyOutgoingCallsParams struct { - Item CallHierarchyItem `json:"item"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * The parameter of a `textDocument/prepareCallHierarchy` request. - * - * @since 3.16.0 - */ -type CallHierarchyPrepareParams struct { - TextDocumentPositionParams - WorkDoneProgressParams -} - -/** - * Call hierarchy options used during static or dynamic registration. - * - * @since 3.16.0 - */ -type CallHierarchyRegistrationOptions struct { - TextDocumentRegistrationOptions - CallHierarchyOptions - StaticRegistrationOptions -} - -type CancelParams struct { - /** - * The request id to cancel. - */ - ID interface{} /*number | string*/ `json:"id"` -} - -/** - * Additional information that describes document changes. - * - * @since 3.16.0 - */ -type ChangeAnnotation struct { - /** - * A human-readable string describing the actual change. The string - * is rendered prominent in the user interface. - */ - Label string `json:"label"` - /** - * A flag which indicates that user confirmation is needed - * before applying the change. - */ - NeedsConfirmation bool `json:"needsConfirmation,omitempty"` - /** - * A human-readable string which is rendered less prominent in - * the user interface. - */ - Description string `json:"description,omitempty"` -} - -/** - * An identifier to refer to a change annotation stored with a workspace edit. - */ -type ChangeAnnotationIdentifier = string - -type ClientCapabilities struct { - /** - * The workspace client capabilities - */ - Workspace Workspace3Gn `json:"workspace,omitempty"` - /** - * Text document specific client capabilities. - */ - TextDocument TextDocumentClientCapabilities `json:"textDocument,omitempty"` - /** - * Window specific client capabilities. - */ - Window struct { - /** - * Whether client supports server initiated progress using the - * `window/workDoneProgress/create` request. - * - * Since 3.15.0 - */ - WorkDoneProgress bool `json:"workDoneProgress,omitempty"` - /** - * Capabilities specific to the showMessage request. - * - * @since 3.16.0 - */ - ShowMessage ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"` - /** - * Capabilities specific to the showDocument request. - * - * @since 3.16.0 - */ - ShowDocument ShowDocumentClientCapabilities `json:"showDocument,omitempty"` - } `json:"window,omitempty"` - /** - * General client capabilities. - * - * @since 3.16.0 - */ - General GeneralClientCapabilities `json:"general,omitempty"` - /** - * Experimental client capabilities. - */ - Experimental interface{} `json:"experimental,omitempty"` -} - -/** - * A code action represents a change that can be performed in code, e.g. to fix a problem or - * to refactor code. - * - * A CodeAction must set either `edit` and/or a `command`. If both are supplied, the `edit` is applied first, then the `command` is executed. - */ -type CodeAction struct { - /** - * A short, human-readable, title for this code action. - */ - Title string `json:"title"` - /** - * The kind of the code action. - * - * Used to filter code actions. - */ - Kind CodeActionKind `json:"kind,omitempty"` - /** - * The diagnostics that this code action resolves. - */ - Diagnostics []Diagnostic `json:"diagnostics,omitempty"` - /** - * Marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted - * by keybindings. - * - * A quick fix should be marked preferred if it properly addresses the underlying error. - * A refactoring should be marked preferred if it is the most reasonable choice of actions to take. - * - * @since 3.15.0 - */ - IsPreferred bool `json:"isPreferred,omitempty"` - /** - * Marks that the code action cannot currently be applied. - * - * Clients should follow the following guidelines regarding disabled code actions: - * - * - Disabled code actions are not shown in automatic [lightbulb](https://code.visualstudio.com/docs/editor/editingevolved#_code-action) - * code action menu. - * - * - Disabled actions are shown as faded out in the code action menu when the user request a more specific type - * of code action, such as refactorings. - * - * - If the user has a [keybinding](https://code.visualstudio.com/docs/editor/refactoring#_keybindings-for-code-actions) - * that auto applies a code action and only a disabled code actions are returned, the client should show the user an - * error message with `reason` in the editor. - * - * @since 3.16.0 - */ - Disabled *struct { - /** - * Human readable description of why the code action is currently disabled. - * - * This is displayed in the code actions UI. - */ - Reason string `json:"reason"` - } `json:"disabled,omitempty"` - /** - * The workspace edit this code action performs. - */ - Edit WorkspaceEdit `json:"edit,omitempty"` - /** - * A command this code action executes. If a code action - * provides a edit and a command, first the edit is - * executed and then the command. - */ - Command *Command `json:"command,omitempty"` - /** - * A data entry field that is preserved on a code action between - * a `textDocument/codeAction` and a `codeAction/resolve` request. - * - * @since 3.16.0 - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * The Client Capabilities of a [CodeActionRequest](#CodeActionRequest). - */ -type CodeActionClientCapabilities struct { - /** - * Whether code action supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client support code action literals of type `CodeAction` as a valid - * response of the `textDocument/codeAction` request. If the property is not - * set the request can only return `Command` literals. - * - * @since 3.8.0 - */ - CodeActionLiteralSupport struct { - /** - * The code action kind is support with the following value - * set. - */ - CodeActionKind struct { - /** - * The code action kind values the client supports. When this - * property exists the client also guarantees that it will - * handle values outside its set gracefully and falls back - * to a default value when unknown. - */ - ValueSet []CodeActionKind `json:"valueSet"` - } `json:"codeActionKind"` - } `json:"codeActionLiteralSupport,omitempty"` - /** - * Whether code action supports the `isPreferred` property. - * - * @since 3.15.0 - */ - IsPreferredSupport bool `json:"isPreferredSupport,omitempty"` - /** - * Whether code action supports the `disabled` property. - * - * @since 3.16.0 - */ - DisabledSupport bool `json:"disabledSupport,omitempty"` - /** - * Whether code action supports the `data` property which is - * preserved between a `textDocument/codeAction` and a - * `codeAction/resolve` request. - * - * @since 3.16.0 - */ - DataSupport bool `json:"dataSupport,omitempty"` - /** - * Whether the client support resolving additional code action - * properties via a separate `codeAction/resolve` request. - * - * @since 3.16.0 - */ - ResolveSupport struct { - /** - * The properties that a client can resolve lazily. - */ - Properties []string `json:"properties"` - } `json:"resolveSupport,omitempty"` - /** - * Whether th client honors the change annotations in - * text edits and resource operations returned via the - * `CodeAction#edit` property by for example presenting - * the workspace edit in the user interface and asking - * for confirmation. - * - * @since 3.16.0 - */ - HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"` -} - -/** - * Contains additional diagnostic information about the context in which - * a [code action](#CodeActionProvider.provideCodeActions) is run. - */ -type CodeActionContext struct { - /** - * An array of diagnostics known on the client side overlapping the range provided to the - * `textDocument/codeAction` request. They are provided so that the server knows which - * errors are currently presented to the user for the given range. There is no guarantee - * that these accurately reflect the error state of the resource. The primary parameter - * to compute code actions is the provided range. - */ - Diagnostics []Diagnostic `json:"diagnostics"` - /** - * Requested kind of actions to return. - * - * Actions not of this kind are filtered out by the client before being shown. So servers - * can omit computing them. - */ - Only []CodeActionKind `json:"only,omitempty"` - /** - * The reason why code actions were requested. - * - * @since 3.17.0 - */ - TriggerKind CodeActionTriggerKind `json:"triggerKind,omitempty"` -} - -/** - * A set of predefined code action kinds - */ -type CodeActionKind string - -/** - * Provider options for a [CodeActionRequest](#CodeActionRequest). - */ -type CodeActionOptions struct { - /** - * CodeActionKinds that this server may return. - * - * The list of kinds may be generic, such as `CodeActionKind.Refactor`, or the server - * may list out every specific kind they provide. - */ - CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"` - /** - * The server provides support to resolve additional - * information for a code action. - * - * @since 3.16.0 - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [CodeActionRequest](#CodeActionRequest). - */ -type CodeActionParams struct { - /** - * The document in which the command was invoked. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The range for which the command was invoked. - */ - Range Range `json:"range"` - /** - * Context carrying additional information. - */ - Context CodeActionContext `json:"context"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * The reason why code actions were requested. - * - * @since 3.17.0 - proposed state - */ -type CodeActionTriggerKind float64 - -/** - * Structure to capture a description for an error code. - * - * @since 3.16.0 - */ -type CodeDescription struct { - /** - * An URI to open with more information about the diagnostic error. - */ - Href URI `json:"href"` -} - -/** - * A code lens represents a [command](#Command) that should be shown along with - * source text, like the number of references, a way to run tests, etc. - * - * A code lens is _unresolved_ when no command is associated to it. For performance - * reasons the creation of a code lens and resolving should be done to two stages. - */ -type CodeLens struct { - /** - * The range in which this code lens is valid. Should only span a single line. - */ - Range Range `json:"range"` - /** - * The command this code lens represents. - */ - Command Command `json:"command,omitempty"` - /** - * A data entry field that is preserved on a code lens item between - * a [CodeLensRequest](#CodeLensRequest) and a [CodeLensResolveRequest] - * (#CodeLensResolveRequest) - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * The client capabilities of a [CodeLensRequest](#CodeLensRequest). - */ -type CodeLensClientCapabilities struct { - /** - * Whether code lens supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Code Lens provider options of a [CodeLensRequest](#CodeLensRequest). - */ -type CodeLensOptions struct { - /** - * Code lens has a resolve provider as well. - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [CodeLensRequest](#CodeLensRequest). - */ -type CodeLensParams struct { - /** - * The document to request code lens for. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * @since 3.16.0 - */ -type CodeLensWorkspaceClientCapabilities struct { - /** - * Whether the client implementation supports a refresh request sent from the - * server to the client. - * - * Note that this event is global and will force the client to refresh all - * code lenses currently shown. It should be used with absolute care and is - * useful for situation where a server for example detect a project wide - * change that requires such a calculation. - */ - RefreshSupport bool `json:"refreshSupport,omitempty"` -} - -/** - * Represents a color in RGBA space. - */ -type Color struct { - /** - * The red component of this color in the range [0-1]. - */ - Red Decimal `json:"red"` - /** - * The green component of this color in the range [0-1]. - */ - Green Decimal `json:"green"` - /** - * The blue component of this color in the range [0-1]. - */ - Blue Decimal `json:"blue"` - /** - * The alpha component of this color in the range [0-1]. - */ - Alpha Decimal `json:"alpha"` -} - -/** - * Represents a color range from a document. - */ -type ColorInformation struct { - /** - * The range in the document where this color appears. - */ - Range Range `json:"range"` - /** - * The actual color value for this color range. - */ - Color Color `json:"color"` -} - -type ColorPresentation struct { - /** - * The label of this color presentation. It will be shown on the color - * picker header. By default this is also the text that is inserted when selecting - * this color presentation. - */ - Label string `json:"label"` - /** - * An [edit](#TextEdit) which is applied to a document when selecting - * this presentation for the color. When `falsy` the [label](#ColorPresentation.label) - * is used. - */ - TextEdit TextEdit `json:"textEdit,omitempty"` - /** - * An optional array of additional [text edits](#TextEdit) that are applied when - * selecting this color presentation. Edits must not overlap with the main [edit](#ColorPresentation.textEdit) nor with themselves. - */ - AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"` -} - -/** - * Parameters for a [ColorPresentationRequest](#ColorPresentationRequest). - */ -type ColorPresentationParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The color to request presentations for. - */ - Color Color `json:"color"` - /** - * The range where the color would be inserted. Serves as a context. - */ - Range Range `json:"range"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * Represents a reference to a command. Provides a title which - * will be used to represent a command in the UI and, optionally, - * an array of arguments which will be passed to the command handler - * function when invoked. - */ -type Command struct { - /** - * Title of the command, like `save`. - */ - Title string `json:"title"` - /** - * The identifier of the actual command handler. - */ - Command string `json:"command"` - /** - * Arguments that the command handler should be - * invoked with. - */ - Arguments []json.RawMessage `json:"arguments,omitempty"` -} - -/** - * Completion client capabilities - */ -type CompletionClientCapabilities struct { - /** - * Whether completion supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports the following `CompletionItem` specific - * capabilities. - */ - CompletionItem struct { - /** - * Client supports snippets as insert text. - * - * A snippet can define tab stops and placeholders with `$1`, `$2` - * and `${3:foo}`. `$0` defines the final tab stop, it defaults to - * the end of the snippet. Placeholders with equal identifiers are linked, - * that is typing in one will update others too. - */ - SnippetSupport bool `json:"snippetSupport,omitempty"` - /** - * Client supports commit characters on a completion item. - */ - CommitCharactersSupport bool `json:"commitCharactersSupport,omitempty"` - /** - * Client supports the follow content formats for the documentation - * property. The order describes the preferred format of the client. - */ - DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` - /** - * Client supports the deprecated property on a completion item. - */ - DeprecatedSupport bool `json:"deprecatedSupport,omitempty"` - /** - * Client supports the preselect property on a completion item. - */ - PreselectSupport bool `json:"preselectSupport,omitempty"` - /** - * Client supports the tag property on a completion item. Clients supporting - * tags have to handle unknown tags gracefully. Clients especially need to - * preserve unknown tags when sending a completion item back to the server in - * a resolve call. - * - * @since 3.15.0 - */ - TagSupport struct { - /** - * The tags supported by the client. - */ - ValueSet []CompletionItemTag `json:"valueSet"` - } `json:"tagSupport,omitempty"` - /** - * Client support insert replace edit to control different behavior if a - * completion item is inserted in the text or should replace text. - * - * @since 3.16.0 - */ - InsertReplaceSupport bool `json:"insertReplaceSupport,omitempty"` - /** - * Indicates which properties a client can resolve lazily on a completion - * item. Before version 3.16.0 only the predefined properties `documentation` - * and `details` could be resolved lazily. - * - * @since 3.16.0 - */ - ResolveSupport struct { - /** - * The properties that a client can resolve lazily. - */ - Properties []string `json:"properties"` - } `json:"resolveSupport,omitempty"` - /** - * The client supports the `insertTextMode` property on - * a completion item to override the whitespace handling mode - * as defined by the client (see `insertTextMode`). - * - * @since 3.16.0 - */ - InsertTextModeSupport struct { - ValueSet []InsertTextMode `json:"valueSet"` - } `json:"insertTextModeSupport,omitempty"` - /** - * The client has support for completion item label - * details (see also `CompletionItemLabelDetails`). - * - * @since 3.17.0 - proposed state - */ - LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"` - } `json:"completionItem,omitempty"` - CompletionItemKind struct { - /** - * The completion item kind values the client supports. When this - * property exists the client also guarantees that it will - * handle values outside its set gracefully and falls back - * to a default value when unknown. - * - * If this property is not present the client only supports - * the completion items kinds from `Text` to `Reference` as defined in - * the initial version of the protocol. - */ - ValueSet []CompletionItemKind `json:"valueSet,omitempty"` - } `json:"completionItemKind,omitempty"` - /** - * Defines how the client handles whitespace and indentation - * when accepting a completion item that uses multi line - * text in either `insertText` or `textEdit`. - * - * @since 3.17.0 - proposed state - */ - InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` - /** - * The client supports to send additional context information for a - * `textDocument/completion` request. - */ - ContextSupport bool `json:"contextSupport,omitempty"` - /** - * The client supports the following `CompletionList` specific - * capabilities. - * - * @since 3.17.0 - proposed state - */ - CompletionList struct { - /** - * The client supports the the following itemDefaults on - * a completion list. - * - * The value lists the supported property names of the - * `CompletionList.itemDefaults` object. If omitted - * no properties are supported. - * - * @since 3.17.0 - proposed state - */ - ItemDefaults []string `json:"itemDefaults,omitempty"` - } `json:"completionList,omitempty"` -} - -/** - * Contains additional information about the context in which a completion request is triggered. - */ -type CompletionContext struct { - /** - * How the completion was triggered. - */ - TriggerKind CompletionTriggerKind `json:"triggerKind"` - /** - * The trigger character (a single character) that has trigger code complete. - * Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter` - */ - TriggerCharacter string `json:"triggerCharacter,omitempty"` -} - -/** - * A completion item represents a text snippet that is - * proposed to complete text that is being typed. - */ -type CompletionItem struct { - /** - * The label of this completion item. - * - * The label property is also by default the text that - * is inserted when selecting this completion. - * - * If label details are provided the label itself should - * be an unqualified name of the completion item. - */ - Label string `json:"label"` - /** - * Additional details for the label - * - * @since 3.17.0 - proposed state - */ - LabelDetails CompletionItemLabelDetails `json:"labelDetails,omitempty"` - /** - * The kind of this completion item. Based of the kind - * an icon is chosen by the editor. - */ - Kind CompletionItemKind `json:"kind,omitempty"` - /** - * Tags for this completion item. - * - * @since 3.15.0 - */ - Tags []CompletionItemTag `json:"tags,omitempty"` - /** - * A human-readable string with additional information - * about this item, like type or symbol information. - */ - Detail string `json:"detail,omitempty"` - /** - * A human-readable string that represents a doc-comment. - */ - Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"` - /** - * Indicates if this item is deprecated. - * @deprecated Use `tags` instead. - */ - Deprecated bool `json:"deprecated,omitempty"` - /** - * Select this item when showing. - * - * *Note* that only one completion item can be selected and that the - * tool / client decides which item that is. The rule is that the *first* - * item of those that match best is selected. - */ - Preselect bool `json:"preselect,omitempty"` - /** - * A string that should be used when comparing this item - * with other items. When `falsy` the [label](#CompletionItem.label) - * is used. - */ - SortText string `json:"sortText,omitempty"` - /** - * A string that should be used when filtering a set of - * completion items. When `falsy` the [label](#CompletionItem.label) - * is used. - */ - FilterText string `json:"filterText,omitempty"` - /** - * A string that should be inserted into a document when selecting - * this completion. When `falsy` the [label](#CompletionItem.label) - * is used. - * - * The `insertText` is subject to interpretation by the client side. - * Some tools might not take the string literally. For example - * VS Code when code complete is requested in this example `con` - * and a completion item with an `insertText` of `console` is provided it - * will only insert `sole`. Therefore it is recommended to use `textEdit` instead - * since it avoids additional client side interpretation. - */ - InsertText string `json:"insertText,omitempty"` - /** - * The format of the insert text. The format applies to both the `insertText` property - * and the `newText` property of a provided `textEdit`. If omitted defaults to - * `InsertTextFormat.PlainText`. - * - * Please note that the insertTextFormat doesn't apply to `additionalTextEdits`. - */ - InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"` - /** - * How whitespace and indentation is handled during completion - * item insertion. If ignored the clients default value depends on - * the `textDocument.completion.insertTextMode` client capability. - * - * @since 3.16.0 - */ - InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` - /** - * An [edit](#TextEdit) which is applied to a document when selecting - * this completion. When an edit is provided the value of - * [insertText](#CompletionItem.insertText) is ignored. - * - * Most editors support two different operation when accepting a completion item. One is to insert a - * completion text and the other is to replace an existing text with a completion text. Since this can - * usually not predetermined by a server it can report both ranges. Clients need to signal support for - * `InsertReplaceEdits` via the `textDocument.completion.insertReplaceSupport` client capability - * property. - * - * *Note 1:* The text edit's range as well as both ranges from a insert replace edit must be a - * [single line] and they must contain the position at which completion has been requested. - * *Note 2:* If an `InsertReplaceEdit` is returned the edit's insert range must be a prefix of - * the edit's replace range, that means it must be contained and starting at the same position. - * - * @since 3.16.0 additional type `InsertReplaceEdit` - */ - TextEdit *TextEdit/*TextEdit | InsertReplaceEdit*/ `json:"textEdit,omitempty"` - /** - * An optional array of additional [text edits](#TextEdit) that are applied when - * selecting this completion. Edits must not overlap (including the same insert position) - * with the main [edit](#CompletionItem.textEdit) nor with themselves. - * - * Additional text edits should be used to change text unrelated to the current cursor position - * (for example adding an import statement at the top of the file if the completion item will - * insert an unqualified type). - */ - AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"` - /** - * An optional set of characters that when pressed while this completion is active will accept it first and - * then type that character. *Note* that all commit characters should have `length=1` and that superfluous - * characters will be ignored. - */ - CommitCharacters []string `json:"commitCharacters,omitempty"` - /** - * An optional [command](#Command) that is executed *after* inserting this completion. *Note* that - * additional modifications to the current document should be described with the - * [additionalTextEdits](#CompletionItem.additionalTextEdits)-property. - */ - Command *Command `json:"command,omitempty"` - /** - * A data entry field that is preserved on a completion item between a - * [CompletionRequest](#CompletionRequest) and a [CompletionResolveRequest](#CompletionResolveRequest). - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * The kind of a completion entry. - */ -type CompletionItemKind float64 - -/** - * Additional details for a completion item label. - * - * @since 3.17.0 - proposed state - */ -type CompletionItemLabelDetails struct { - /** - * An optional string which is rendered less prominently directly after {@link CompletionItem.label label}, - * without any spacing. Should be used for function signatures or type annotations. - */ - Detail string `json:"detail,omitempty"` - /** - * An optional string which is rendered less prominently after {@link CompletionItem.detail}. Should be used - * for fully qualified names or file path. - */ - Description string `json:"description,omitempty"` -} - -/** - * Completion item tags are extra annotations that tweak the rendering of a completion - * item. - * - * @since 3.15.0 - */ -type CompletionItemTag float64 - -/** - * Represents a collection of [completion items](#CompletionItem) to be presented - * in the editor. - */ -type CompletionList struct { - /** - * This list it not complete. Further typing results in recomputing this list. - */ - IsIncomplete bool `json:"isIncomplete"` - /** - * In many cases the items of an actual completion result share the same - * value for properties like `commitCharacters` or the range of a text - * edit. A completion list can therefore define item defaults which will - * be used if a completion item itself doesn't specify the value. - * - * If a completion list specifies a default value and a completion item - * also specifies a corresponding value the one from the item is used. - * - * Servers are only allowed to return default values if the client - * signals support for this via the `completionList.itemDefaults` - * capability. - * - * @since 3.17.0 - proposed state - */ - ItemDefaults struct { - /** - * A default commit character set. - * - * @since 3.17.0 - proposed state - */ - CommitCharacters []string `json:"commitCharacters,omitempty"` - /** - * A default edit range - * - * @since 3.17.0 - proposed state - */ - EditRange Range/*Range | { insert: Range; replace: Range; }*/ `json:"editRange,omitempty"` - /** - * A default insert text format - * - * @since 3.17.0 - proposed state - */ - InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"` - /** - * A default insert text mode - * - * @since 3.17.0 - proposed state - */ - InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` - } `json:"itemDefaults,omitempty"` - /** - * The completion items. - */ - Items []CompletionItem `json:"items"` -} - -/** - * Completion options. - */ -type CompletionOptions struct { - /** - * Most tools trigger completion request automatically without explicitly requesting - * it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user - * starts to type an identifier. For example if the user types `c` in a JavaScript file - * code complete will automatically pop up present `console` besides others as a - * completion item. Characters that make up identifiers don't need to be listed here. - * - * If code complete should automatically be trigger on characters not being valid inside - * an identifier (for example `.` in JavaScript) list them in `triggerCharacters`. - */ - TriggerCharacters []string `json:"triggerCharacters,omitempty"` - /** - * The list of all possible characters that commit a completion. This field can be used - * if clients don't support individual commit characters per completion item. See - * `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport` - * - * If a server provides both `allCommitCharacters` and commit characters on an individual - * completion item the ones on the completion item win. - * - * @since 3.2.0 - */ - AllCommitCharacters []string `json:"allCommitCharacters,omitempty"` - /** - * The server provides support to resolve additional - * information for a completion item. - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - /** - * The server supports the following `CompletionItem` specific - * capabilities. - * - * @since 3.17.0 - proposed state - */ - CompletionItem struct { - /** - * The server has support for completion item label - * details (see also `CompletionItemLabelDetails`) when - * receiving a completion item in a resolve call. - * - * @since 3.17.0 - proposed state - */ - LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"` - } `json:"completionItem,omitempty"` - WorkDoneProgressOptions -} - -/** - * Completion parameters - */ -type CompletionParams struct { - /** - * The completion context. This is only available it the client specifies - * to send this using the client capability `textDocument.completion.contextSupport === true` - */ - Context CompletionContext `json:"context,omitempty"` - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -/** - * How a completion was triggered - */ -type CompletionTriggerKind float64 - -type ConfigurationClientCapabilities struct { - /** - * The workspace client capabilities - */ - Workspace Workspace4Gn `json:"workspace,omitempty"` -} - -type ConfigurationItem struct { - /** - * The scope to get the configuration section for. - */ - ScopeURI string `json:"scopeUri,omitempty"` - /** - * The configuration section asked for. - */ - Section string `json:"section,omitempty"` -} - -/** - * The parameters of a configuration request. - */ -type ConfigurationParams struct { - Items []ConfigurationItem `json:"items"` -} - -/** - * Create file operation. - */ -type CreateFile struct { - /** - * A create - */ - Kind string `json:"kind"` - /** - * The resource to create. - */ - URI DocumentURI `json:"uri"` - /** - * Additional options - */ - Options CreateFileOptions `json:"options,omitempty"` - ResourceOperation -} - -/** - * Options to create a file. - */ -type CreateFileOptions struct { - /** - * Overwrite existing file. Overwrite wins over `ignoreIfExists` - */ - Overwrite bool `json:"overwrite,omitempty"` - /** - * Ignore if exists. - */ - IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` -} - -/** - * The parameters sent in file create requests/notifications. - * - * @since 3.16.0 - */ -type CreateFilesParams struct { - /** - * An array of all files/folders created in this operation. - */ - Files []FileCreate `json:"files"` -} - -/** - * Defines a decimal number. Since decimal numbers are very - * rare in the language server specification we denote the - * exact range with every decimal using the mathematics - * interval notations (e.g. [0, 1] denotes all decimals d with - * 0 <= d <= 1. - */ -type Decimal = float64 - -/** - * The declaration of a symbol representation as one or many [locations](#Location). - */ -type Declaration = []Location /*Location | Location[]*/ - -/** - * @since 3.14.0 - */ -type DeclarationClientCapabilities struct { - /** - * Whether declaration supports dynamic registration. If this is set to `true` - * the client supports the new `DeclarationRegistrationOptions` return value - * for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports additional metadata in the form of declaration links. - */ - LinkSupport bool `json:"linkSupport,omitempty"` -} - -/** - * Information about where a symbol is declared. - * - * Provides additional metadata over normal [location](#Location) declarations, including the range of - * the declaring symbol. - * - * Servers should prefer returning `DeclarationLink` over `Declaration` if supported - * by the client. - */ -type DeclarationLink = LocationLink - -type DeclarationOptions struct { - WorkDoneProgressOptions -} - -type DeclarationParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -type DeclarationRegistrationOptions struct { - DeclarationOptions - TextDocumentRegistrationOptions - StaticRegistrationOptions -} - -/** - * The definition of a symbol represented as one or many [locations](#Location). - * For most programming languages there is only one location at which a symbol is - * defined. - * - * Servers should prefer returning `DefinitionLink` over `Definition` if supported - * by the client. - */ -type Definition = []Location /*Location | Location[]*/ - -/** - * Client Capabilities for a [DefinitionRequest](#DefinitionRequest). - */ -type DefinitionClientCapabilities struct { - /** - * Whether definition supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports additional metadata in the form of definition links. - * - * @since 3.14.0 - */ - LinkSupport bool `json:"linkSupport,omitempty"` -} - -/** - * Information about where a symbol is defined. - * - * Provides additional metadata over normal [location](#Location) definitions, including the range of - * the defining symbol - */ -type DefinitionLink = LocationLink - -/** - * Server Capabilities for a [DefinitionRequest](#DefinitionRequest). - */ -type DefinitionOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [DefinitionRequest](#DefinitionRequest). - */ -type DefinitionParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -/** - * Delete file operation - */ -type DeleteFile struct { - /** - * A delete - */ - Kind string `json:"kind"` - /** - * The file to delete. - */ - URI DocumentURI `json:"uri"` - /** - * Delete options. - */ - Options DeleteFileOptions `json:"options,omitempty"` - ResourceOperation -} - -/** - * Delete file options - */ -type DeleteFileOptions struct { - /** - * Delete the content recursively if a folder is denoted. - */ - Recursive bool `json:"recursive,omitempty"` - /** - * Ignore the operation if the file doesn't exist. - */ - IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"` -} - -/** - * The parameters sent in file delete requests/notifications. - * - * @since 3.16.0 - */ -type DeleteFilesParams struct { - /** - * An array of all files/folders deleted in this operation. - */ - Files []FileDelete `json:"files"` -} - -/** - * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects - * are only valid in the scope of a resource. - */ -type Diagnostic struct { - /** - * The range at which the message applies - */ - Range Range `json:"range"` - /** - * The diagnostic's severity. Can be omitted. If omitted it is up to the - * client to interpret diagnostics as error, warning, info or hint. - */ - Severity DiagnosticSeverity `json:"severity,omitempty"` - /** - * The diagnostic's code, which usually appear in the user interface. - */ - Code interface{}/*integer | string*/ `json:"code,omitempty"` - /** - * An optional property to describe the error code. - * Requires the code field (above) to be present/not null. - * - * @since 3.16.0 - */ - CodeDescription *CodeDescription `json:"codeDescription,omitempty"` - /** - * A human-readable string describing the source of this - * diagnostic, e.g. 'typescript' or 'super lint'. It usually - * appears in the user interface. - */ - Source string `json:"source,omitempty"` - /** - * The diagnostic's message. It usually appears in the user interface - */ - Message string `json:"message"` - /** - * Additional metadata about the diagnostic. - * - * @since 3.15.0 - */ - Tags []DiagnosticTag `json:"tags,omitempty"` - /** - * An array of related diagnostic information, e.g. when symbol-names within - * a scope collide all definitions can be marked via this property. - */ - RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"` - /** - * A data entry field that is preserved between a `textDocument/publishDiagnostics` - * notification and `textDocument/codeAction` request. - * - * @since 3.16.0 - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * Represents a related message and source code location for a diagnostic. This should be - * used to point to code locations that cause or related to a diagnostics, e.g when duplicating - * a symbol in a scope. - */ -type DiagnosticRelatedInformation struct { - /** - * The location of this related diagnostic information. - */ - Location Location `json:"location"` - /** - * The message of this related diagnostic information. - */ - Message string `json:"message"` -} - -/** - * The diagnostic's severity. - */ -type DiagnosticSeverity float64 - -/** - * The diagnostic tags. - * - * @since 3.15.0 - */ -type DiagnosticTag float64 - -type DidChangeConfigurationClientCapabilities struct { - /** - * Did change configuration notification supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * The parameters of a change configuration notification. - */ -type DidChangeConfigurationParams struct { - /** - * The actual changed settings - */ - Settings LSPAny `json:"settings"` -} - -/** - * The params sent in a change notebook document notification. - * - * @since 3.17.0 - proposed state - */ -type DidChangeNotebookDocumentParams = struct { - /** - * The notebook document that did change. The version number points - * to the version after all provided changes have been applied. If - * only the text document content of a cell changes the notebook version - * doesn't necessarily have to change. - */ - NotebookDocument VersionedNotebookDocumentIdentifier `json:"notebookDocument"` - /** - * The actual changes to the notebook document. - * - * The changes describe single state changes to the notebook document. - * So if there are two changes c1 (at array index 0) and c2 (at array - * index 1) for a notebook in state S then c1 moves the notebook from - * S to S' and c2 from S' to S''. So c1 is computed on the state S and - * c2 is computed on the state S'. - * - * To mirror the content of a notebook using change events use the following approach: - * - start with the same initial content - * - apply the 'notebookDocument/didChange' notifications in the order you receive them. - * - apply the `NotebookChangeEvent`s in a single notification in the order - * you receive them. - */ - Change NotebookDocumentChangeEvent `json:"change"` -} - -/** - * The change text document notification's parameters. - */ -type DidChangeTextDocumentParams struct { - /** - * The document that did change. The version number points - * to the version after all provided content changes have - * been applied. - */ - TextDocument VersionedTextDocumentIdentifier `json:"textDocument"` - /** - * The actual content changes. The content changes describe single state changes - * to the document. So if there are two content changes c1 (at array index 0) and - * c2 (at array index 1) for a document in state S then c1 moves the document from - * S to S' and c2 from S' to S''. So c1 is computed on the state S and c2 is computed - * on the state S'. - * - * To mirror the content of a document using change events use the following approach: - * - start with the same initial content - * - apply the 'textDocument/didChange' notifications in the order you receive them. - * - apply the `TextDocumentContentChangeEvent`s in a single notification in the order - * you receive them. - */ - ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"` -} - -type DidChangeWatchedFilesClientCapabilities struct { - /** - * Did change watched files notification supports dynamic registration. Please note - * that the current protocol doesn't support static configuration for file changes - * from the server side. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * The watched files change notification's parameters. - */ -type DidChangeWatchedFilesParams struct { - /** - * The actual file events. - */ - Changes []FileEvent `json:"changes"` -} - -/** - * Describe options to be used when registered for text document change events. - */ -type DidChangeWatchedFilesRegistrationOptions struct { - /** - * The watchers to register. - */ - Watchers []FileSystemWatcher `json:"watchers"` -} - -/** - * The parameters of a `workspace/didChangeWorkspaceFolders` notification. - */ -type DidChangeWorkspaceFoldersParams struct { - /** - * The actual workspace folder change event. - */ - Event WorkspaceFoldersChangeEvent `json:"event"` -} - -/** - * The params sent in a close notebook document notification. - * - * @since 3.17.0 - proposed state - */ -type DidCloseNotebookDocumentParams = struct { - /** - * The notebook document that got closed. - */ - NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"` - /** - * The text documents that represent the content - * of a notebook cell that got closed. - */ - CellTextDocuments []TextDocumentIdentifier `json:"cellTextDocuments"` -} - -/** - * The parameters send in a close text document notification - */ -type DidCloseTextDocumentParams struct { - /** - * The document that was closed. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` -} - -/** - * The params sent in a open notebook document notification. - * - * @since 3.17.0 - proposed state - */ -type DidOpenNotebookDocumentParams = struct { - /** - * The notebook document that got opened. - */ - NotebookDocument NotebookDocument `json:"notebookDocument"` - /** - * The text documents that represent the content - * of a notebook cell. - */ - CellTextDocuments []TextDocumentItem `json:"cellTextDocuments"` -} - -/** - * The parameters send in a open text document notification - */ -type DidOpenTextDocumentParams struct { - /** - * The document that was opened. - */ - TextDocument TextDocumentItem `json:"textDocument"` -} - -/** - * The params sent in a save notebook document notification. - * - * @since 3.17.0 - proposed state - */ -type DidSaveNotebookDocumentParams = struct { - /** - * The notebook document that got saved. - */ - NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"` -} - -/** - * The parameters send in a save text document notification - */ -type DidSaveTextDocumentParams struct { - /** - * The document that was closed. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * Optional the content when saved. Depends on the includeText value - * when the save notification was requested. - */ - Text *string `json:"text,omitempty"` -} - -type DocumentColorClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `DocumentColorRegistrationOptions` return value - * for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -type DocumentColorOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [DocumentColorRequest](#DocumentColorRequest). - */ -type DocumentColorParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -type DocumentColorRegistrationOptions struct { - TextDocumentRegistrationOptions - StaticRegistrationOptions - DocumentColorOptions -} - -/** - * Parameters of the document diagnostic request. - * - * @since 3.17.0 - proposed state - */ -type DocumentDiagnosticParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * An optional token that a server can use to report partial results (e.g. streaming) to - * the client. - */ - PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The additional identifier provided during registration. - */ - Identifier string `json:"identifier,omitempty"` - /** - * The result id of a previous response if provided. - */ - PreviousResultID string `json:"previousResultId,omitempty"` -} - -/** - * The result of a document diagnostic pull request. A report can - * either be a full report containing all diagnostics for the - * requested document or a unchanged report indicating that nothing - * has changed in terms of diagnostics in comparison to the last - * pull request. - * - * @since 3.17.0 - proposed state - */ -type DocumentDiagnosticReport = interface{} /*RelatedFullDocumentDiagnosticReport | RelatedUnchangedDocumentDiagnosticReport*/ - -/** - * A document filter describes a top level text document or - * a notebook cell document. - * - * @since 3.17.0 - proposed support for NotebookCellTextDocumentFilter. - */ -type DocumentFilter = interface{} /*TextDocumentFilter | NotebookCellTextDocumentFilter*/ - -/** - * Client capabilities of a [DocumentFormattingRequest](#DocumentFormattingRequest). - */ -type DocumentFormattingClientCapabilities struct { - /** - * Whether formatting supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Provider options for a [DocumentFormattingRequest](#DocumentFormattingRequest). - */ -type DocumentFormattingOptions struct { - WorkDoneProgressOptions -} - -/** - * The parameters of a [DocumentFormattingRequest](#DocumentFormattingRequest). - */ -type DocumentFormattingParams struct { - /** - * The document to format. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The format options - */ - Options FormattingOptions `json:"options"` - WorkDoneProgressParams -} - -/** - * A document highlight is a range inside a text document which deserves - * special attention. Usually a document highlight is visualized by changing - * the background color of its range. - */ -type DocumentHighlight struct { - /** - * The range this highlight applies to. - */ - Range Range `json:"range"` - /** - * The highlight kind, default is [text](#DocumentHighlightKind.Text). - */ - Kind DocumentHighlightKind `json:"kind,omitempty"` -} - -/** - * Client Capabilities for a [DocumentHighlightRequest](#DocumentHighlightRequest). - */ -type DocumentHighlightClientCapabilities struct { - /** - * Whether document highlight supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * A document highlight kind. - */ -type DocumentHighlightKind float64 - -/** - * Provider options for a [DocumentHighlightRequest](#DocumentHighlightRequest). - */ -type DocumentHighlightOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [DocumentHighlightRequest](#DocumentHighlightRequest). - */ -type DocumentHighlightParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -/** - * A document link is a range in a text document that links to an internal or external resource, like another - * text document or a web site. - */ -type DocumentLink struct { - /** - * The range this link applies to. - */ - Range Range `json:"range"` - /** - * The uri this link points to. - */ - Target string `json:"target,omitempty"` - /** - * The tooltip text when you hover over this link. - * - * If a tooltip is provided, is will be displayed in a string that includes instructions on how to - * trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS, - * user settings, and localization. - * - * @since 3.15.0 - */ - Tooltip string `json:"tooltip,omitempty"` - /** - * A data entry field that is preserved on a document link between a - * DocumentLinkRequest and a DocumentLinkResolveRequest. - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * The client capabilities of a [DocumentLinkRequest](#DocumentLinkRequest). - */ -type DocumentLinkClientCapabilities struct { - /** - * Whether document link supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Whether the client support the `tooltip` property on `DocumentLink`. - * - * @since 3.15.0 - */ - TooltipSupport bool `json:"tooltipSupport,omitempty"` -} - -/** - * Provider options for a [DocumentLinkRequest](#DocumentLinkRequest). - */ -type DocumentLinkOptions struct { - /** - * Document links have a resolve provider as well. - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [DocumentLinkRequest](#DocumentLinkRequest). - */ -type DocumentLinkParams struct { - /** - * The document to provide document links for. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * Client capabilities of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest). - */ -type DocumentOnTypeFormattingClientCapabilities struct { - /** - * Whether on type formatting supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Provider options for a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest). - */ -type DocumentOnTypeFormattingOptions struct { - /** - * A character on which formatting should be triggered, like `}`. - */ - FirstTriggerCharacter string `json:"firstTriggerCharacter"` - /** - * More trigger characters. - */ - MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"` -} - -/** - * The parameters of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest). - */ -type DocumentOnTypeFormattingParams struct { - /** - * The document to format. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The position at which this request was send. - */ - Position Position `json:"position"` - /** - * The character that has been typed. - */ - Ch string `json:"ch"` - /** - * The format options. - */ - Options FormattingOptions `json:"options"` -} - -/** - * Client capabilities of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest). - */ -type DocumentRangeFormattingClientCapabilities struct { - /** - * Whether range formatting supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Provider options for a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest). - */ -type DocumentRangeFormattingOptions struct { - WorkDoneProgressOptions -} - -/** - * The parameters of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest). - */ -type DocumentRangeFormattingParams struct { - /** - * The document to format. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The range to format - */ - Range Range `json:"range"` - /** - * The format options - */ - Options FormattingOptions `json:"options"` - WorkDoneProgressParams -} - -/** - * A document selector is the combination of one or many document filters. - * - * @sample `let sel:DocumentSelector = [{ language: 'typescript' }, { language: 'json', pattern: '**āˆ•tsconfig.json' }]`; - * - * The use of a string as a document filter is deprecated @since 3.16.0. - */ -type DocumentSelector = []string /*string | DocumentFilter*/ - -/** - * Represents programming constructs like variables, classes, interfaces etc. - * that appear in a document. Document symbols can be hierarchical and they - * have two ranges: one that encloses its definition and one that points to - * its most interesting range, e.g. the range of an identifier. - */ -type DocumentSymbol struct { - /** - * The name of this symbol. Will be displayed in the user interface and therefore must not be - * an empty string or a string only consisting of white spaces. - */ - Name string `json:"name"` - /** - * More detail for this symbol, e.g the signature of a function. - */ - Detail string `json:"detail,omitempty"` - /** - * The kind of this symbol. - */ - Kind SymbolKind `json:"kind"` - /** - * Tags for this document symbol. - * - * @since 3.16.0 - */ - Tags []SymbolTag `json:"tags,omitempty"` - /** - * Indicates if this symbol is deprecated. - * - * @deprecated Use tags instead - */ - Deprecated bool `json:"deprecated,omitempty"` - /** - * The range enclosing this symbol not including leading/trailing whitespace but everything else - * like comments. This information is typically used to determine if the the clients cursor is - * inside the symbol to reveal in the symbol in the UI. - */ - Range Range `json:"range"` - /** - * The range that should be selected and revealed when this symbol is being picked, e.g the name of a function. - * Must be contained by the the `range`. - */ - SelectionRange Range `json:"selectionRange"` - /** - * Children of this symbol, e.g. properties of a class. - */ - Children []DocumentSymbol `json:"children,omitempty"` -} - -/** - * Client Capabilities for a [DocumentSymbolRequest](#DocumentSymbolRequest). - */ -type DocumentSymbolClientCapabilities struct { - /** - * Whether document symbol supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Specific capabilities for the `SymbolKind`. - */ - SymbolKind struct { - /** - * The symbol kind values the client supports. When this - * property exists the client also guarantees that it will - * handle values outside its set gracefully and falls back - * to a default value when unknown. - * - * If this property is not present the client only supports - * the symbol kinds from `File` to `Array` as defined in - * the initial version of the protocol. - */ - ValueSet []SymbolKind `json:"valueSet,omitempty"` - } `json:"symbolKind,omitempty"` - /** - * The client support hierarchical document symbols. - */ - HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"` - /** - * The client supports tags on `SymbolInformation`. Tags are supported on - * `DocumentSymbol` if `hierarchicalDocumentSymbolSupport` is set to true. - * Clients supporting tags have to handle unknown tags gracefully. - * - * @since 3.16.0 - */ - TagSupport struct { - /** - * The tags supported by the client. - */ - ValueSet []SymbolTag `json:"valueSet"` - } `json:"tagSupport,omitempty"` - /** - * The client supports an additional label presented in the UI when - * registering a document symbol provider. - * - * @since 3.16.0 - */ - LabelSupport bool `json:"labelSupport,omitempty"` -} - -/** - * Provider options for a [DocumentSymbolRequest](#DocumentSymbolRequest). - */ -type DocumentSymbolOptions struct { - /** - * A human-readable string that is shown when multiple outlines trees - * are shown for the same document. - * - * @since 3.16.0 - */ - Label string `json:"label,omitempty"` - WorkDoneProgressOptions -} - -/** - * Parameters for a [DocumentSymbolRequest](#DocumentSymbolRequest). - */ -type DocumentSymbolParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * A tagging type for string properties that are actually document URIs. - */ -type DocumentURI string - -/** - * The client capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest). - */ -type ExecuteCommandClientCapabilities struct { - /** - * Execute command supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * The server capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest). - */ -type ExecuteCommandOptions struct { - /** - * The commands to be executed on the server - */ - Commands []string `json:"commands"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [ExecuteCommandRequest](#ExecuteCommandRequest). - */ -type ExecuteCommandParams struct { - /** - * The identifier of the actual command handler. - */ - Command string `json:"command"` - /** - * Arguments that the command should be invoked with. - */ - Arguments []json.RawMessage `json:"arguments,omitempty"` - WorkDoneProgressParams -} - -type ExecutionSummary = struct { - /** - * A strict monotonically increasing value - * indicating the execution order of a cell - * inside a notebook. - */ - ExecutionOrder uint32 `json:"executionOrder"` - /** - * Whether the execution was successful or - * not if known by the client. - */ - Success bool `json:"success,omitempty"` -} - -type FailureHandlingKind string - -/** - * The file event type - */ -type FileChangeType float64 - -/** - * Represents information on a file/folder create. - * - * @since 3.16.0 - */ -type FileCreate struct { - /** - * A file:// URI for the location of the file/folder being created. - */ - URI string `json:"uri"` -} - -/** - * Represents information on a file/folder delete. - * - * @since 3.16.0 - */ -type FileDelete struct { - /** - * A file:// URI for the location of the file/folder being deleted. - */ - URI string `json:"uri"` -} - -/** - * An event describing a file change. - */ -type FileEvent struct { - /** - * The file's uri. - */ - URI DocumentURI `json:"uri"` - /** - * The change type. - */ - Type FileChangeType `json:"type"` -} - -/** - * Capabilities relating to events from file operations by the user in the client. - * - * These events do not come from the file system, they come from user operations - * like renaming a file in the UI. - * - * @since 3.16.0 - */ -type FileOperationClientCapabilities struct { - /** - * Whether the client supports dynamic registration for file requests/notifications. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client has support for sending didCreateFiles notifications. - */ - DidCreate bool `json:"didCreate,omitempty"` - /** - * The client has support for willCreateFiles requests. - */ - WillCreate bool `json:"willCreate,omitempty"` - /** - * The client has support for sending didRenameFiles notifications. - */ - DidRename bool `json:"didRename,omitempty"` - /** - * The client has support for willRenameFiles requests. - */ - WillRename bool `json:"willRename,omitempty"` - /** - * The client has support for sending didDeleteFiles notifications. - */ - DidDelete bool `json:"didDelete,omitempty"` - /** - * The client has support for willDeleteFiles requests. - */ - WillDelete bool `json:"willDelete,omitempty"` -} - -/** - * A filter to describe in which file operation requests or notifications - * the server is interested in. - * - * @since 3.16.0 - */ -type FileOperationFilter struct { - /** - * A Uri like `file` or `untitled`. - */ - Scheme string `json:"scheme,omitempty"` - /** - * The actual file operation pattern. - */ - Pattern FileOperationPattern `json:"pattern"` -} - -/** - * Options for notifications/requests for user operations on files. - * - * @since 3.16.0 - */ -type FileOperationOptions struct { - /** - * The server is interested in didCreateFiles notifications. - */ - DidCreate FileOperationRegistrationOptions `json:"didCreate,omitempty"` - /** - * The server is interested in willCreateFiles requests. - */ - WillCreate FileOperationRegistrationOptions `json:"willCreate,omitempty"` - /** - * The server is interested in didRenameFiles notifications. - */ - DidRename FileOperationRegistrationOptions `json:"didRename,omitempty"` - /** - * The server is interested in willRenameFiles requests. - */ - WillRename FileOperationRegistrationOptions `json:"willRename,omitempty"` - /** - * The server is interested in didDeleteFiles file notifications. - */ - DidDelete FileOperationRegistrationOptions `json:"didDelete,omitempty"` - /** - * The server is interested in willDeleteFiles file requests. - */ - WillDelete FileOperationRegistrationOptions `json:"willDelete,omitempty"` -} - -/** - * A pattern to describe in which file operation requests or notifications - * the server is interested in. - * - * @since 3.16.0 - */ -type FileOperationPattern struct { - /** - * The glob pattern to match. Glob patterns can have the following syntax: - * - `*` to match one or more characters in a path segment - * - `?` to match on one character in a path segment - * - `**` to match any number of path segments, including none - * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) - * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) - * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) - */ - Glob string `json:"glob"` - /** - * Whether to match files or folders with this pattern. - * - * Matches both if undefined. - */ - Matches FileOperationPatternKind `json:"matches,omitempty"` - /** - * Additional options used during matching. - */ - Options FileOperationPatternOptions `json:"options,omitempty"` -} - -/** - * A pattern kind describing if a glob pattern matches a file a folder or - * both. - * - * @since 3.16.0 - */ -type FileOperationPatternKind string - -/** - * Matching options for the file operation pattern. - * - * @since 3.16.0 - */ -type FileOperationPatternOptions struct { - /** - * The pattern should be matched ignoring casing. - */ - IgnoreCase bool `json:"ignoreCase,omitempty"` -} - -/** - * The options to register for file operations. - * - * @since 3.16.0 - */ -type FileOperationRegistrationOptions struct { - /** - * The actual filters. - */ - Filters []FileOperationFilter `json:"filters"` -} - -/** - * Represents information on a file/folder rename. - * - * @since 3.16.0 - */ -type FileRename struct { - /** - * A file:// URI for the original location of the file/folder being renamed. - */ - OldURI string `json:"oldUri"` - /** - * A file:// URI for the new location of the file/folder being renamed. - */ - NewURI string `json:"newUri"` -} - -type FileSystemWatcher struct { - /** - * The glob pattern to watch. Glob patterns can have the following syntax: - * - `*` to match one or more characters in a path segment - * - `?` to match on one character in a path segment - * - `**` to match any number of path segments, including none - * - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) - * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) - * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) - */ - GlobPattern string `json:"globPattern"` - /** - * The kind of events of interest. If omitted it defaults - * to WatchKind.Create | WatchKind.Change | WatchKind.Delete - * which is 7. - */ - Kind uint32 `json:"kind,omitempty"` -} - -/** - * Represents a folding range. To be valid, start and end line must be bigger than zero and smaller - * than the number of lines in the document. Clients are free to ignore invalid ranges. - */ -type FoldingRange struct { - /** - * The zero-based start line of the range to fold. The folded area starts after the line's last character. - * To be valid, the end must be zero or larger and smaller than the number of lines in the document. - */ - StartLine uint32 `json:"startLine"` - /** - * The zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line. - */ - StartCharacter uint32 `json:"startCharacter,omitempty"` - /** - * The zero-based end line of the range to fold. The folded area ends with the line's last character. - * To be valid, the end must be zero or larger and smaller than the number of lines in the document. - */ - EndLine uint32 `json:"endLine"` - /** - * The zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line. - */ - EndCharacter uint32 `json:"endCharacter,omitempty"` - /** - * Describes the kind of the folding range such as `comment' or 'region'. The kind - * is used to categorize folding ranges and used by commands like 'Fold all comments'. See - * [FoldingRangeKind](#FoldingRangeKind) for an enumeration of standardized kinds. - */ - Kind string `json:"kind,omitempty"` -} - -type FoldingRangeClientCapabilities struct { - /** - * Whether implementation supports dynamic registration for folding range providers. If this is set to `true` - * the client supports the new `FoldingRangeRegistrationOptions` return value for the corresponding server - * capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The maximum number of folding ranges that the client prefers to receive per document. The value serves as a - * hint, servers are free to follow the limit. - */ - RangeLimit uint32 `json:"rangeLimit,omitempty"` - /** - * If set, the client signals that it only supports folding complete lines. If set, client will - * ignore specified `startCharacter` and `endCharacter` properties in a FoldingRange. - */ - LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"` -} - -/** - * Enum of known range kinds - */ -type FoldingRangeKind string - -type FoldingRangeOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [FoldingRangeRequest](#FoldingRangeRequest). - */ -type FoldingRangeParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -type FoldingRangeRegistrationOptions struct { - TextDocumentRegistrationOptions - FoldingRangeOptions - StaticRegistrationOptions -} - -/** - * Value-object describing what options formatting should use. - */ -type FormattingOptions struct { - /** - * Size of a tab in spaces. - */ - TabSize uint32 `json:"tabSize"` - /** - * Prefer spaces over tabs. - */ - InsertSpaces bool `json:"insertSpaces"` - /** - * Trim trailing whitespaces on a line. - * - * @since 3.15.0 - */ - TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"` - /** - * Insert a newline character at the end of the file if one does not exist. - * - * @since 3.15.0 - */ - InsertFinalNewline bool `json:"insertFinalNewline,omitempty"` - /** - * Trim all newlines after the final newline at the end of the file. - * - * @since 3.15.0 - */ - TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"` -} - -/** - * A diagnostic report with a full set of problems. - * - * @since 3.17.0 - proposed state - */ -type FullDocumentDiagnosticReport = struct { - /** - * A full document diagnostic report. - */ - Kind string `json:"kind"` - /** - * An optional result id. If provided it will - * be sent on the next diagnostic request for the - * same document. - */ - ResultID string `json:"resultId,omitempty"` - /** - * The actual items. - */ - Items []Diagnostic `json:"items"` -} - -/** - * General client capabilities. - * - * @since 3.16.0 - */ -type GeneralClientCapabilities struct { - /** - * Client capability that signals how the client - * handles stale requests (e.g. a request - * for which the client will not process the response - * anymore since the information is outdated). - * - * @since 3.17.0 - */ - StaleRequestSupport struct { - /** - * The client will actively cancel the request. - */ - Cancel bool `json:"cancel"` - /** - * The list of requests for which the client - * will retry the request if it receives a - * response with error code `ContentModified` - */ - RetryOnContentModified []string `json:"retryOnContentModified"` - } `json:"staleRequestSupport,omitempty"` - /** - * Client capabilities specific to regular expressions. - * - * @since 3.16.0 - */ - RegularExpressions RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"` - /** - * Client capabilities specific to the client's markdown parser. - * - * @since 3.16.0 - */ - Markdown MarkdownClientCapabilities `json:"markdown,omitempty"` -} - -/** - * The result of a hover request. - */ -type Hover struct { - /** - * The hover's content - */ - Contents MarkupContent/*MarkupContent | MarkedString | MarkedString[]*/ `json:"contents"` - /** - * An optional range - */ - Range Range `json:"range,omitempty"` -} - -type HoverClientCapabilities struct { - /** - * Whether hover supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Client supports the follow content formats for the content - * property. The order describes the preferred format of the client. - */ - ContentFormat []MarkupKind `json:"contentFormat,omitempty"` -} - -/** - * Hover options. - */ -type HoverOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [HoverRequest](#HoverRequest). - */ -type HoverParams struct { - TextDocumentPositionParams - WorkDoneProgressParams -} - -/** - * @since 3.6.0 - */ -type ImplementationClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `ImplementationRegistrationOptions` return value - * for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports additional metadata in the form of definition links. - * - * @since 3.14.0 - */ - LinkSupport bool `json:"linkSupport,omitempty"` -} - -type ImplementationOptions struct { - WorkDoneProgressOptions -} - -type ImplementationParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -type ImplementationRegistrationOptions struct { - TextDocumentRegistrationOptions - ImplementationOptions - StaticRegistrationOptions -} - -/** - * Known error codes for an `InitializeError`; - */ -type InitializeError float64 - -type InitializeParams struct { - /** - * The process Id of the parent process that started - * the server. - */ - ProcessID int32/*integer | null*/ `json:"processId"` - /** - * Information about the client - * - * @since 3.15.0 - */ - ClientInfo struct { - /** - * The name of the client as defined by the client. - */ - Name string `json:"name"` - /** - * The client's version as defined by the client. - */ - Version string `json:"version,omitempty"` - } `json:"clientInfo,omitempty"` - /** - * The locale the client is currently showing the user interface - * in. This must not necessarily be the locale of the operating - * system. - * - * Uses IETF language tags as the value's syntax - * (See https://en.wikipedia.org/wiki/IETF_language_tag) - * - * @since 3.16.0 - */ - Locale string `json:"locale,omitempty"` - /** - * The rootPath of the workspace. Is null - * if no folder is open. - * - * @deprecated in favour of rootUri. - */ - RootPath string/*string | null*/ `json:"rootPath,omitempty"` - /** - * The rootUri of the workspace. Is null if no - * folder is open. If both `rootPath` and `rootUri` are set - * `rootUri` wins. - * - * @deprecated in favour of workspaceFolders. - */ - RootURI DocumentURI/*DocumentUri | null*/ `json:"rootUri"` - /** - * The capabilities provided by the client (editor or tool) - */ - Capabilities ClientCapabilities `json:"capabilities"` - /** - * User provided initialization options. - */ - InitializationOptions LSPAny `json:"initializationOptions,omitempty"` - /** - * The initial trace setting. If omitted trace is disabled ('off'). - */ - Trace string/* 'off' | 'messages' | 'compact' | 'verbose' */ `json:"trace,omitempty"` - /** - * The actual configured workspace folders. - */ - WorkspaceFolders []WorkspaceFolder/*WorkspaceFolder[] | null*/ `json:"workspaceFolders"` -} - -/** - * The result returned from an initialize request. - */ -type InitializeResult struct { - /** - * The capabilities the language server provides. - */ - Capabilities ServerCapabilities `json:"capabilities"` - /** - * Information about the server. - * - * @since 3.15.0 - */ - ServerInfo struct { - /** - * The name of the server as defined by the server. - */ - Name string `json:"name"` - /** - * The server's version as defined by the server. - */ - Version string `json:"version,omitempty"` - } `json:"serverInfo,omitempty"` -} - -type InitializedParams struct { -} - -/** - * Inlay hint information. - * - * @since 3.17.0 - proposed state - */ -type InlayHint = struct { - /** - * The position of this hint. - */ - Position *Position `json:"position"` - /** - * The label of this hint. A human readable string or an array of - * InlayHintLabelPart label parts. - * - * *Note* that neither the string nor the label part can be empty. - */ - Label []InlayHintLabelPart/*string | InlayHintLabelPart[]*/ `json:"label"` - /** - * The kind of this hint. Can be omitted in which case the client - * should fall back to a reasonable default. - */ - Kind InlayHintKind `json:"kind,omitempty"` - /** - * The tooltip text when you hover over this item. - */ - Tooltip string/*string | MarkupContent*/ `json:"tooltip,omitempty"` - /** - * Render padding before the hint. - * - * Note: Padding should use the editor's background color, not the - * background color of the hint itself. That means padding can be used - * to visually align/separate an inlay hint. - */ - PaddingLeft bool `json:"paddingLeft,omitempty"` - /** - * Render padding after the hint. - * - * Note: Padding should use the editor's background color, not the - * background color of the hint itself. That means padding can be used - * to visually align/separate an inlay hint. - */ - PaddingRight bool `json:"paddingRight,omitempty"` -} - -/** - * Inlay hint client capabilities - * - * @since 3.17.0 - proposed state - */ -type InlayHintClientCapabilities = struct { - /** - * Whether inlay hints support dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Indicates which properties a client can resolve lazily on a inlay - * hint. - */ - ResolveSupport struct { - /** - * The properties that a client can resolve lazily. - */ - Properties []string `json:"properties"` - } `json:"resolveSupport,omitempty"` -} - -/** - * Inlay hint kinds. - * - * @since 3.17.0 - proposed state - */ -type InlayHintKind float64 - -/** - * An inlay hint label part allows for interactive and composite labels - * of inlay hints. - * - * @since 3.17.0 - proposed state - */ -type InlayHintLabelPart = struct { - /** - * The value of this label part. - */ - Value string `json:"value"` - /** - * The tooltip text when you hover over this label part. Depending on - * the client capability `inlayHint.resolveSupport` clients might resolve - * this property late using the resolve request. - */ - Tooltip string/*string | MarkupContent*/ `json:"tooltip,omitempty"` - /** - * An optional source code location that represents this - * label part. - * - * The editor will use this location for the hover and for code navigation - * features: This part will become a clickable link that resolves to the - * definition of the symbol at the given location (not necessarily the - * location itself), it shows the hover that shows at the given location, - * and it shows a context menu with further code navigation commands. - * - * Depending on the client capability `inlayHint.resolveSupport` clients - * might resolve this property late using the resolve request. - */ - Location *Location `json:"location,omitempty"` - /** - * An optional command for this label part. - * - * Depending on the client capability `inlayHint.resolveSupport` clients - * might resolve this property late using the resolve request. - */ - Command *Command `json:"command,omitempty"` -} - -/** - * Inlay hint options used during static registration. - * - * @since 3.17.0 - proposed state - */ -type InlayHintOptions struct { - WorkDoneProgress bool `json:"workDoneProgress,omitempty"` - /** - * The server provides support to resolve additional - * information for an inlay hint item. - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` -} - -/** - * A parameter literal used in inlay hints requests. - * - * @since 3.17.0 - proposed state - */ -type InlayHintParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The visible document range for which inlay hints should be computed. - */ - ViewPort Range `json:"viewPort"` -} - -/** - * Inlay hint options used during static or dynamic registration. - * - * @since 3.17.0 - proposed state - */ -type InlayHintRegistrationOptions struct { - WorkDoneProgress bool `json:"workDoneProgress,omitempty"` - /** - * The server provides support to resolve additional - * information for an inlay hint item. - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - /** - * A document selector to identify the scope of the registration. If set to null - * the document selector provided on the client side will be used. - */ - DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"` - /** - * The id used to register the request. The id can be used to deregister - * the request again. See also Registration#id. - */ - ID string `json:"id,omitempty"` -} - -/** - * Client workspace capabilities specific to inlay hints. - * - * @since 3.17.0 - proposed state - */ -type InlayHintWorkspaceClientCapabilities = struct { - /** - * Whether the client implementation supports a refresh request sent from - * the server to the client. - * - * Note that this event is global and will force the client to refresh all - * inlay hints currently shown. It should be used with absolute care and - * is useful for situation where a server for example detects a project wide - * change that requires such a calculation. - */ - RefreshSupport bool `json:"refreshSupport,omitempty"` -} - -/** - * Inline value information can be provided by different means: - * - directly as a text value (class InlineValueText). - * - as a name to use for a variable lookup (class InlineValueVariableLookup) - * - as an evaluatable expression (class InlineValueEvaluatableExpression) - * The InlineValue types combines all inline value types into one type. - * - * @since 3.17.0 - proposed state - */ -type InlineValue = interface{} /* InlineValueText | InlineValueVariableLookup | InlineValueEvaluatableExpression*/ - -/** - * Client capabilities specific to inline values. - * - * @since 3.17.0 - proposed state - */ -type InlineValueClientCapabilities = struct { - /** - * Whether implementation supports dynamic registration for inline value providers. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * @since 3.17.0 - proposed state - */ -type InlineValueContext = struct { - /** - * The document range where execution has stopped. - * Typically the end position of the range denotes the line where the inline values are shown. - */ - StoppedLocation *Range `json:"stoppedLocation"` -} - -/** - * Provide an inline value through an expression evaluation. - * If only a range is specified, the expression will be extracted from the underlying document. - * An optional expression can be used to override the extracted expression. - * - * @since 3.17.0 - proposed state - */ -type InlineValueEvaluatableExpression = struct { - /** - * The document range for which the inline value applies. - * The range is used to extract the evaluatable expression from the underlying document. - */ - Range *Range `json:"range"` - /** - * If specified the expression overrides the extracted expression. - */ - Expression string `json:"expression,omitempty"` -} - -/** - * Inline value options used during static registration. - * - * @since 3.17.0 - proposed state - */ -type InlineValueOptions = WorkDoneProgressOptions - -/** - * A parameter literal used in inline value requests. - * - * @since 3.17.0 - proposed state - */ -type InlineValueParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The visible document range for which inline values should be computed. - */ - ViewPort Range `json:"viewPort"` - /** - * Additional information about the context in which inline values were - * requested. - */ - Context InlineValueContext `json:"context"` -} - -/** - * Inline value options used during static or dynamic registration. - * - * @since 3.17.0 - proposed state - */ -type InlineValueRegistrationOptions struct { - /** - * A document selector to identify the scope of the registration. If set to null - * the document selector provided on the client side will be used. - */ - DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"` - /** - * The id used to register the request. The id can be used to deregister - * the request again. See also Registration#id. - */ - ID string `json:"id,omitempty"` -} - -/** - * Provide inline value as text. - * - * @since 3.17.0 - proposed state - */ -type InlineValueText = struct { - /** - * The document range for which the inline value applies. - */ - Range *Range `json:"range"` - /** - * The text of the inline value. - */ - Text string `json:"text"` -} - -/** - * Provide inline value through a variable lookup. - * If only a range is specified, the variable name will be extracted from the underlying document. - * An optional variable name can be used to override the extracted name. - * - * @since 3.17.0 - proposed state - */ -type InlineValueVariableLookup = struct { - /** - * The document range for which the inline value applies. - * The range is used to extract the variable name from the underlying document. - */ - Range *Range `json:"range"` - /** - * If specified the name of the variable to look up. - */ - VariableName string `json:"variableName,omitempty"` - /** - * How to perform the lookup. - */ - CaseSensitiveLookup bool `json:"caseSensitiveLookup"` -} - -/** - * Client workspace capabilities specific to inline values. - * - * @since 3.17.0 - proposed state - */ -type InlineValueWorkspaceClientCapabilities = struct { - /** - * Whether the client implementation supports a refresh request sent from the - * server to the client. - * - * Note that this event is global and will force the client to refresh all - * inline values currently shown. It should be used with absolute care and is - * useful for situation where a server for example detects a project wide - * change that requires such a calculation. - */ - RefreshSupport bool `json:"refreshSupport,omitempty"` -} - -/** - * A special text edit to provide an insert and a replace operation. - * - * @since 3.16.0 - */ -type InsertReplaceEdit struct { - /** - * The string to be inserted. - */ - NewText string `json:"newText"` - /** - * The range if the insert is requested - */ - Insert Range `json:"insert"` - /** - * The range if the replace is requested. - */ - Replace Range `json:"replace"` -} - -/** - * Defines whether the insert text in a completion item should be interpreted as - * plain text or a snippet. - */ -type InsertTextFormat float64 - -/** - * How whitespace and indentation is handled during completion - * item insertion. - * - * @since 3.16.0 - */ -type InsertTextMode float64 - -/** - * The LSP any type - * - * @since 3.17.0 - */ -type LSPAny = interface{} /* LSPObject | LSPArray | string | int32 | uint32 | Decimal | bool | float64*/ - -/** - * LSP arrays. - * - * @since 3.17.0 - */ -type LSPArray = []LSPAny - -/** - * LSP object definition. - * - * @since 3.17.0 - */ -type LSPObject = map[string]interface{} /*[key: string]: LSPAny*/ - -/** - * Client capabilities for the linked editing range request. - * - * @since 3.16.0 - */ -type LinkedEditingRangeClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` - * return value for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -type LinkedEditingRangeOptions struct { - WorkDoneProgressOptions -} - -type LinkedEditingRangeParams struct { - TextDocumentPositionParams - WorkDoneProgressParams -} - -type LinkedEditingRangeRegistrationOptions struct { - TextDocumentRegistrationOptions - LinkedEditingRangeOptions - StaticRegistrationOptions -} - -/** - * The result of a linked editing range request. - * - * @since 3.16.0 - */ -type LinkedEditingRanges struct { - /** - * A list of ranges that can be edited together. The ranges must have - * identical length and contain identical text content. The ranges cannot overlap. - */ - Ranges []Range `json:"ranges"` - /** - * An optional word pattern (regular expression) that describes valid contents for - * the given ranges. If no pattern is provided, the client configuration's word - * pattern will be used. - */ - WordPattern string `json:"wordPattern,omitempty"` -} - -/** - * Represents a location inside a resource, such as a line - * inside a text file. - */ -type Location struct { - URI DocumentURI `json:"uri"` - Range Range `json:"range"` -} - -/** - * Represents the connection of two locations. Provides additional metadata over normal [locations](#Location), - * including an origin range. - */ -type LocationLink struct { - /** - * Span of the origin of this link. - * - * Used as the underlined span for mouse definition hover. Defaults to the word range at - * the definition position. - */ - OriginSelectionRange Range `json:"originSelectionRange,omitempty"` - /** - * The target resource identifier of this link. - */ - TargetURI DocumentURI `json:"targetUri"` - /** - * The full target range of this link. If the target for example is a symbol then target range is the - * range enclosing this symbol not including leading/trailing whitespace but everything else - * like comments. This information is typically used to highlight the range in the editor. - */ - TargetRange Range `json:"targetRange"` - /** - * The range that should be selected and revealed when this link is being followed, e.g the name of a function. - * Must be contained by the the `targetRange`. See also `DocumentSymbol#range` - */ - TargetSelectionRange Range `json:"targetSelectionRange"` -} - -/** - * The log message parameters. - */ -type LogMessageParams struct { - /** - * The message type. See {@link MessageType} - */ - Type MessageType `json:"type"` - /** - * The actual message - */ - Message string `json:"message"` -} - -type LogTraceParams struct { - Message string `json:"message"` - Verbose string `json:"verbose,omitempty"` -} - -/** - * Client capabilities specific to the used markdown parser. - * - * @since 3.16.0 - */ -type MarkdownClientCapabilities struct { - /** - * The name of the parser. - */ - Parser string `json:"parser"` - /** - * The version of the parser. - */ - Version string `json:"version,omitempty"` - /** - * A list of HTML tags that the client allows / supports in - * Markdown. - * - * @since 3.17.0 - */ - AllowedTags []string `json:"allowedTags,omitempty"` -} - -/** - * MarkedString can be used to render human readable text. It is either a markdown string - * or a code-block that provides a language and a code snippet. The language identifier - * is semantically equal to the optional language identifier in fenced code blocks in GitHub - * issues. See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting - * - * The pair of a language and a value is an equivalent to markdown: - * ```${language} - * ${value} - * ``` - * - * Note that markdown strings will be sanitized - that means html will be escaped. - * @deprecated use MarkupContent instead. - */ -type MarkedString = string /*string | { language: string; value: string }*/ - -/** - * A `MarkupContent` literal represents a string value which content is interpreted base on its - * kind flag. Currently the protocol supports `plaintext` and `markdown` as markup kinds. - * - * If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues. - * See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting - * - * Here is an example how such a string can be constructed using JavaScript / TypeScript: - * ```ts - * let markdown: MarkdownContent = { - * kind: MarkupKind.Markdown, - * value: [ - * '# Header', - * 'Some text', - * '```typescript', - * 'someCode();', - * '```' - * ].join('\n') - * }; - * ``` - * - * *Please Note* that clients might sanitize the return markdown. A client could decide to - * remove HTML from the markdown to avoid script execution. - */ -type MarkupContent struct { - /** - * The type of the Markup - */ - Kind MarkupKind `json:"kind"` - /** - * The content itself - */ - Value string `json:"value"` -} - -/** - * Describes the content type that a client supports in various - * result literals like `Hover`, `ParameterInfo` or `CompletionItem`. - * - * Please note that `MarkupKinds` must not start with a `$`. This kinds - * are reserved for internal usage. - */ -type MarkupKind string - -type MessageActionItem struct { - /** - * A short title like 'Retry', 'Open Log' etc. - */ - Title string `json:"title"` -} - -/** - * The message type - */ -type MessageType float64 - -/** - * Moniker definition to match LSIF 0.5 moniker definition. - * - * @since 3.16.0 - */ -type Moniker struct { - /** - * The scheme of the moniker. For example tsc or .Net - */ - Scheme string `json:"scheme"` - /** - * The identifier of the moniker. The value is opaque in LSIF however - * schema owners are allowed to define the structure if they want. - */ - Identifier string `json:"identifier"` - /** - * The scope in which the moniker is unique - */ - Unique UniquenessLevel `json:"unique"` - /** - * The moniker kind if known. - */ - Kind MonikerKind `json:"kind,omitempty"` -} - -/** - * Client capabilities specific to the moniker request. - * - * @since 3.16.0 - */ -type MonikerClientCapabilities struct { - /** - * Whether moniker supports dynamic registration. If this is set to `true` - * the client supports the new `MonikerRegistrationOptions` return value - * for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * The moniker kind. - * - * @since 3.16.0 - */ -type MonikerKind string - -type MonikerOptions struct { - WorkDoneProgressOptions -} - -type MonikerParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -type MonikerRegistrationOptions struct { - TextDocumentRegistrationOptions - MonikerOptions -} - -/** - * A notebook cell. - * - * A cell's document URI must be unique across ALL notebook - * cells and can therefore be used to uniquely identify a - * notebook cell or the cell's text document. - * - * @since 3.17.0 - proposed state - */ -type NotebookCell = struct { - /** - * The cell's kind - */ - Kind NotebookCellKind `json:"kind"` - /** - * The URI of the cell's text document - * content. - */ - Document DocumentURI `json:"document"` - /** - * Additional metadata stored with the cell. - */ - Metadata LSPObject `json:"metadata,omitempty"` - /** - * Additional execution summary information - * if supported by the client. - */ - ExecutionSummary ExecutionSummary `json:"executionSummary,omitempty"` -} - -/** - * A change describing how to move a `NotebookCell` - * array from state S to S'. - * - * @since 3.17.0 - proposed state - */ -type NotebookCellArrayChange = struct { - /** - * The start oftest of the cell that changed. - */ - Start uint32 `json:"start"` - /** - * The deleted cells - */ - DeleteCount uint32 `json:"deleteCount"` - /** - * The new cells, if any - */ - Cells []NotebookCell `json:"cells,omitempty"` -} - -/** - * A notebook cell kind. - * - * @since 3.17.0 - proposed state - */ -type NotebookCellKind float64 - -/** - * A notebook cell text document filter denotes a cell text - * document by different properties. - * - * @since 3.17.0 - proposed state. - */ -type NotebookCellTextDocumentFilter = struct { - /** - * A filter that matches against the notebook - * containing the notebook cell. - */ - NotebookDocument NotebookDocumentFilter `json:"notebookDocument"` - /** - * A language id like `python`. - * - * Will be matched against the language id of the - * notebook cell document. - */ - CellLanguage string `json:"cellLanguage,omitempty"` -} - -/** - * A notebook document. - * - * @since 3.17.0 - proposed state - */ -type NotebookDocument = struct { - /** - * The notebook document's uri. - */ - URI URI `json:"uri"` - /** - * The type of the notebook. - */ - NotebookType string `json:"notebookType"` - /** - * The version number of this document (it will increase after each - * change, including undo/redo). - */ - Version int32 `json:"version"` - /** - * Additional metadata stored with the notebook - * document. - */ - Metadata LSPObject `json:"metadata,omitempty"` - /** - * The cells of a notebook. - */ - Cells []NotebookCell `json:"cells"` -} - -/** - * A change event for a notebook document. - * - * @since 3.17.0 - proposed state - */ -type NotebookDocumentChangeEvent = struct { - /** - * The changed meta data if any. - */ - Metadata LSPObject `json:"metadata,omitempty"` - /** - * Changes to cells - */ - Cells struct { - /** - * Changes to the cell structure to add or - * remove cells. - */ - Structure struct { - /** - * The change to the cell array. - */ - Array NotebookCellArrayChange `json:"array"` - /** - * Additional opened cell text documents. - */ - DidOpen []TextDocumentItem `json:"didOpen,omitempty"` - /** - * Additional closed cell text documents. - */ - DidClose []TextDocumentIdentifier `json:"didClose,omitempty"` - } `json:"structure,omitempty"` - /** - * Changes to notebook cells properties like its - * kind, execution summary or metadata. - */ - Data []NotebookCell `json:"data,omitempty"` - /** - * Changes to the text content of notebook cells. - */ - TextContent []struct { - Document VersionedTextDocumentIdentifier `json:"document"` - Changes []TextDocumentContentChangeEvent `json:"changes"` - } `json:"textContent,omitempty"` - } `json:"cells,omitempty"` -} - -/** - * A notebook document filter denotes a notebook document by - * different properties. - * - * @since 3.17.0 - proposed state. - */ -type NotebookDocumentFilter = struct { - /** The type of the enclosing notebook. */ - NotebookType string `json:"notebookType"` - /** A Uri [scheme](#Uri.scheme), like `file` or `untitled`. - * Will be matched against the URI of the notebook. */ - Scheme string `json:"scheme,omitempty"` - /** A glob pattern, like `*.ipynb`. - * Will be matched against the notebooks` URI path section.*/ - Pattern string `json:"pattern,omitempty"` -} - -/** - * A literal to identify a notebook document in the client. - * - * @since 3.17.0 - proposed state - */ -type NotebookDocumentIdentifier = struct { - /** - * The notebook document's uri. - */ - URI URI `json:"uri"` -} - -/** - * A text document identifier to optionally denote a specific version of a text document. - */ -type OptionalVersionedTextDocumentIdentifier struct { - /** - * The version number of this document. If a versioned text document identifier - * is sent from the server to the client and the file is not open in the editor - * (the server has not received an open notification before) the server can send - * `null` to indicate that the version is unknown and the content on disk is the - * truth (as specified with document content ownership). - */ - Version int32/*integer | null*/ `json:"version"` - TextDocumentIdentifier -} - -/** - * Represents a parameter of a callable-signature. A parameter can - * have a label and a doc-comment. - */ -type ParameterInformation struct { - /** - * The label of this parameter information. - * - * Either a string or an inclusive start and exclusive end offsets within its containing - * signature label. (see SignatureInformation.label). The offsets are based on a UTF-16 - * string representation as `Position` and `Range` does. - * - * *Note*: a label of type string should be a substring of its containing signature label. - * Its intended use case is to highlight the parameter label part in the `SignatureInformation.label`. - */ - Label string/*string | [uinteger, uinteger]*/ `json:"label"` - /** - * The human-readable doc-comment of this signature. Will be shown - * in the UI but can be omitted. - */ - Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"` -} - -type PartialResultParams struct { - /** - * An optional token that a server can use to report partial results (e.g. streaming) to - * the client. - */ - PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` -} - -/** - * Position in a text document expressed as zero-based line and character offset. - * The offsets are based on a UTF-16 string representation. So a string of the form - * `a𐐀b` the character offset of the character `a` is 0, the character offset of `𐐀` - * is 1 and the character offset of b is 3 since `𐐀` is represented using two code - * units in UTF-16. - * - * Positions are line end character agnostic. So you can not specify a position that - * denotes `\r|\n` or `\n|` where `|` represents the character offset. - */ -type Position struct { - /** - * Line position in a document (zero-based). - */ - Line uint32 `json:"line"` - /** - * Character offset on a line in a document (zero-based). Assuming that the line is - * represented as a string, the `character` value represents the gap between the - * `character` and `character + 1`. - * - * If the character value is greater than the line length it defaults back to the - * line length. - */ - Character uint32 `json:"character"` -} - -type PrepareRenameParams struct { - TextDocumentPositionParams - WorkDoneProgressParams -} - -type PrepareSupportDefaultBehavior = interface{} - -/** - * A previous result id in a workspace pull request. - * - * @since 3.17.0 - proposed state - */ -type PreviousResultID = struct { - /** - * The URI for which the client knowns a - * result id. - */ - URI DocumentURI `json:"uri"` - /** - * The value of the previous result id. - */ - Value string `json:"value"` -} - -type ProgressParams struct { - /** - * The progress token provided by the client or server. - */ - Token ProgressToken `json:"token"` - /** - * The progress data. - */ - Value interface{} `json:"value"` -} - -type ProgressToken = interface{} /*number | string*/ - -/** - * The publish diagnostic client capabilities. - */ -type PublishDiagnosticsClientCapabilities struct { - /** - * Whether the clients accepts diagnostics with related information. - */ - RelatedInformation bool `json:"relatedInformation,omitempty"` - /** - * Client supports the tag property to provide meta data about a diagnostic. - * Clients supporting tags have to handle unknown tags gracefully. - * - * @since 3.15.0 - */ - TagSupport struct { - /** - * The tags supported by the client. - */ - ValueSet []DiagnosticTag `json:"valueSet"` - } `json:"tagSupport,omitempty"` - /** - * Whether the client interprets the version property of the - * `textDocument/publishDiagnostics` notification`s parameter. - * - * @since 3.15.0 - */ - VersionSupport bool `json:"versionSupport,omitempty"` - /** - * Client supports a codeDescription property - * - * @since 3.16.0 - */ - CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"` - /** - * Whether code action supports the `data` property which is - * preserved between a `textDocument/publishDiagnostics` and - * `textDocument/codeAction` request. - * - * @since 3.16.0 - */ - DataSupport bool `json:"dataSupport,omitempty"` -} - -/** - * The publish diagnostic notification's parameters. - */ -type PublishDiagnosticsParams struct { - /** - * The URI for which diagnostic information is reported. - */ - URI DocumentURI `json:"uri"` - /** - * Optional the version number of the document the diagnostics are published for. - * - * @since 3.15.0 - */ - Version int32 `json:"version,omitempty"` - /** - * An array of diagnostic information items. - */ - Diagnostics []Diagnostic `json:"diagnostics"` -} - -/** - * A range in a text document expressed as (zero-based) start and end positions. - * - * If you want to specify a range that contains a line including the line ending - * character(s) then use an end position denoting the start of the next line. - * For example: - * ```ts - * { - * start: { line: 5, character: 23 } - * end : { line 6, character : 0 } - * } - * ``` - */ -type Range struct { - /** - * The range's start position - */ - Start Position `json:"start"` - /** - * The range's end position. - */ - End Position `json:"end"` -} - -/** - * Client Capabilities for a [ReferencesRequest](#ReferencesRequest). - */ -type ReferenceClientCapabilities struct { - /** - * Whether references supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * Value-object that contains additional information when - * requesting references. - */ -type ReferenceContext struct { - /** - * Include the declaration of the current symbol. - */ - IncludeDeclaration bool `json:"includeDeclaration"` -} - -/** - * Reference options. - */ -type ReferenceOptions struct { - WorkDoneProgressOptions -} - -/** - * Parameters for a [ReferencesRequest](#ReferencesRequest). - */ -type ReferenceParams struct { - Context ReferenceContext `json:"context"` - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -/** - * General parameters to to register for an notification or to register a provider. - */ -type Registration struct { - /** - * The id used to register the request. The id can be used to deregister - * the request again. - */ - ID string `json:"id"` - /** - * The method to register for. - */ - Method string `json:"method"` - /** - * Options necessary for the registration. - */ - RegisterOptions LSPAny `json:"registerOptions,omitempty"` -} - -type RegistrationParams struct { - Registrations []Registration `json:"registrations"` -} - -/** - * Client capabilities specific to regular expressions. - * - * @since 3.16.0 - */ -type RegularExpressionsClientCapabilities struct { - /** - * The engine's name. - */ - Engine string `json:"engine"` - /** - * The engine's version. - */ - Version string `json:"version,omitempty"` -} - -/** - * A full diagnostic report with a set of related documents. - * - * @since 3.17.0 - proposed state - */ -type RelatedFullDocumentDiagnosticReport struct { - /** - * Diagnostics of related documents. This information is useful - * in programming languages where code in a file A can generate - * diagnostics in a file B which A depends on. An example of - * such a language is C/C++ where marco definitions in a file - * a.cpp and result in errors in a header file b.hpp. - * - * @since 3.17.0 - proposed state - */ - RelatedDocuments map[string]interface{} /*[uri: string ** DocumentUri *]: FullDocumentDiagnosticReport | UnchangedDocumentDiagnosticReport;*/ `json:"relatedDocuments,omitempty"` -} - -/** - * An unchanged diagnostic report with a set of related documents. - * - * @since 3.17.0 - proposed state - */ -type RelatedUnchangedDocumentDiagnosticReport struct { - /** - * Diagnostics of related documents. This information is useful - * in programming languages where code in a file A can generate - * diagnostics in a file B which A depends on. An example of - * such a language is C/C++ where marco definitions in a file - * a.cpp and result in errors in a header file b.hpp. - * - * @since 3.17.0 - proposed state - */ - RelatedDocuments map[string]interface{} /*[uri: string ** DocumentUri *]: FullDocumentDiagnosticReport | UnchangedDocumentDiagnosticReport;*/ `json:"relatedDocuments,omitempty"` -} - -type RenameClientCapabilities struct { - /** - * Whether rename supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Client supports testing for validity of rename operations - * before execution. - * - * @since 3.12.0 - */ - PrepareSupport bool `json:"prepareSupport,omitempty"` - /** - * Client supports the default behavior result. - * - * The value indicates the default behavior used by the - * client. - * - * @since 3.16.0 - */ - PrepareSupportDefaultBehavior PrepareSupportDefaultBehavior `json:"prepareSupportDefaultBehavior,omitempty"` - /** - * Whether th client honors the change annotations in - * text edits and resource operations returned via the - * rename request's workspace edit by for example presenting - * the workspace edit in the user interface and asking - * for confirmation. - * - * @since 3.16.0 - */ - HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"` -} - -/** - * Rename file operation - */ -type RenameFile struct { - /** - * A rename - */ - Kind string `json:"kind"` - /** - * The old (existing) location. - */ - OldURI DocumentURI `json:"oldUri"` - /** - * The new location. - */ - NewURI DocumentURI `json:"newUri"` - /** - * Rename options. - */ - Options RenameFileOptions `json:"options,omitempty"` - ResourceOperation -} - -/** - * Rename file options - */ -type RenameFileOptions struct { - /** - * Overwrite target if existing. Overwrite wins over `ignoreIfExists` - */ - Overwrite bool `json:"overwrite,omitempty"` - /** - * Ignores if target exists. - */ - IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` -} - -/** - * The parameters sent in file rename requests/notifications. - * - * @since 3.16.0 - */ -type RenameFilesParams struct { - /** - * An array of all files/folders renamed in this operation. When a folder is renamed, only - * the folder will be included, and not its children. - */ - Files []FileRename `json:"files"` -} - -/** - * Provider options for a [RenameRequest](#RenameRequest). - */ -type RenameOptions struct { - /** - * Renames should be checked and tested before being executed. - * - * @since version 3.12.0 - */ - PrepareProvider bool `json:"prepareProvider,omitempty"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [RenameRequest](#RenameRequest). - */ -type RenameParams struct { - /** - * The document to rename. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The position at which this request was sent. - */ - Position Position `json:"position"` - /** - * The new name of the symbol. If the given name is not valid the - * request must return a [ResponseError](#ResponseError) with an - * appropriate message set. - */ - NewName string `json:"newName"` - WorkDoneProgressParams -} - -/** - * A generic resource operation. - */ -type ResourceOperation struct { - /** - * The resource operation kind. - */ - Kind string `json:"kind"` - /** - * An optional annotation identifier describing the operation. - * - * @since 3.16.0 - */ - AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"` -} - -type ResourceOperationKind string - -/** - * Save options. - */ -type SaveOptions struct { - /** - * The client is supposed to include the content on save. - */ - IncludeText bool `json:"includeText,omitempty"` -} - -/** - * A selection range represents a part of a selection hierarchy. A selection range - * may have a parent selection range that contains it. - */ -type SelectionRange struct { - /** - * The [range](#Range) of this selection range. - */ - Range Range `json:"range"` - /** - * The parent selection range containing this range. Therefore `parent.range` must contain `this.range`. - */ - Parent *SelectionRange `json:"parent,omitempty"` -} - -type SelectionRangeClientCapabilities struct { - /** - * Whether implementation supports dynamic registration for selection range providers. If this is set to `true` - * the client supports the new `SelectionRangeRegistrationOptions` return value for the corresponding server - * capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -type SelectionRangeOptions struct { - WorkDoneProgressOptions -} - -/** - * A parameter literal used in selection range requests. - */ -type SelectionRangeParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The positions inside the text document. - */ - Positions []Position `json:"positions"` - WorkDoneProgressParams - PartialResultParams -} - -type SelectionRangeRegistrationOptions struct { - SelectionRangeOptions - TextDocumentRegistrationOptions - StaticRegistrationOptions -} - -/** - * @since 3.16.0 - */ -type SemanticTokens struct { - /** - * An optional result id. If provided and clients support delta updating - * the client will include the result id in the next semantic token request. - * A server can then instead of computing all semantic tokens again simply - * send a delta. - */ - ResultID string `json:"resultId,omitempty"` - /** - * The actual tokens. - */ - Data []uint32 `json:"data"` -} - -/** - * @since 3.16.0 - */ -type SemanticTokensClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` - * return value for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Which requests the client supports and might send to the server - * depending on the server's capability. Please note that clients might not - * show semantic tokens or degrade some of the user experience if a range - * or full request is advertised by the client but not provided by the - * server. If for example the client capability `requests.full` and - * `request.range` are both set to true but the server only provides a - * range provider the client might not render a minimap correctly or might - * even decide to not show any semantic tokens at all. - */ - Requests struct { - /** - * The client will send the `textDocument/semanticTokens/range` request if - * the server provides a corresponding handler. - */ - Range bool/*boolean | { }*/ `json:"range,omitempty"` - /** - * The client will send the `textDocument/semanticTokens/full` request if - * the server provides a corresponding handler. - */ - Full interface{}/*boolean | */ `json:"full,omitempty"` - } `json:"requests"` - /** - * The token types that the client supports. - */ - TokenTypes []string `json:"tokenTypes"` - /** - * The token modifiers that the client supports. - */ - TokenModifiers []string `json:"tokenModifiers"` - /** - * The token formats the clients supports. - */ - Formats []TokenFormat `json:"formats"` - /** - * Whether the client supports tokens that can overlap each other. - */ - OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"` - /** - * Whether the client supports tokens that can span multiple lines. - */ - MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"` - /** - * Whether the client allows the server to actively cancel a - * semantic token request, e.g. supports returning - * LSPErrorCodes.ServerCancelled. If a server does the client - * needs to retrigger the request. - * - * @since 3.17.0 - */ - ServerCancelSupport bool `json:"serverCancelSupport,omitempty"` - /** - * Whether the client uses semantic tokens to augment existing - * syntax tokens. If set to `true` client side created syntax - * tokens and semantic tokens are both used for colorization. If - * set to `false` the client only uses the returned semantic tokens - * for colorization. - * - * If the value is `undefined` then the client behavior is not - * specified. - * - * @since 3.17.0 - */ - AugmentsSyntaxTokens bool `json:"augmentsSyntaxTokens,omitempty"` -} - -/** - * @since 3.16.0 - */ -type SemanticTokensDelta struct { - ResultID string `json:"resultId,omitempty"` - /** - * The semantic token edits to transform a previous result into a new result. - */ - Edits []SemanticTokensEdit `json:"edits"` -} - -/** - * @since 3.16.0 - */ -type SemanticTokensDeltaParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The result id of a previous response. The result Id can either point to a full response - * or a delta response depending on what was received last. - */ - PreviousResultID string `json:"previousResultId"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * @since 3.16.0 - */ -type SemanticTokensEdit struct { - /** - * The start offset of the edit. - */ - Start uint32 `json:"start"` - /** - * The count of elements to remove. - */ - DeleteCount uint32 `json:"deleteCount"` - /** - * The elements to insert. - */ - Data []uint32 `json:"data,omitempty"` -} - -/** - * @since 3.16.0 - */ -type SemanticTokensLegend struct { - /** - * The token types a server uses. - */ - TokenTypes []string `json:"tokenTypes"` - /** - * The token modifiers a server uses. - */ - TokenModifiers []string `json:"tokenModifiers"` -} - -/** - * @since 3.16.0 - */ -type SemanticTokensOptions struct { - /** - * The legend used by the server - */ - Legend SemanticTokensLegend `json:"legend"` - /** - * Server supports providing semantic tokens for a specific range - * of a document. - */ - Range bool/*boolean | { }*/ `json:"range,omitempty"` - /** - * Server supports providing semantic tokens for a full document. - */ - Full interface{}/*boolean | */ `json:"full,omitempty"` - WorkDoneProgressOptions -} - -/** - * @since 3.16.0 - */ -type SemanticTokensParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * @since 3.16.0 - */ -type SemanticTokensRangeParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The range the semantic tokens are requested for. - */ - Range Range `json:"range"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * @since 3.16.0 - */ -type SemanticTokensRegistrationOptions struct { - TextDocumentRegistrationOptions - SemanticTokensOptions - StaticRegistrationOptions -} - -/** - * @since 3.16.0 - */ -type SemanticTokensWorkspaceClientCapabilities struct { - /** - * Whether the client implementation supports a refresh request sent from - * the server to the client. - * - * Note that this event is global and will force the client to refresh all - * semantic tokens currently shown. It should be used with absolute care - * and is useful for situation where a server for example detects a project - * wide change that requires such a calculation. - */ - RefreshSupport bool `json:"refreshSupport,omitempty"` -} - -type ServerCapabilities struct { - /** - * Defines how text documents are synced. Is either a detailed structure defining each notification or - * for backwards compatibility the TextDocumentSyncKind number. - */ - TextDocumentSync interface{}/*TextDocumentSyncOptions | TextDocumentSyncKind*/ `json:"textDocumentSync,omitempty"` - /** - * The server provides completion support. - */ - CompletionProvider CompletionOptions `json:"completionProvider,omitempty"` - /** - * The server provides hover support. - */ - HoverProvider bool/*boolean | HoverOptions*/ `json:"hoverProvider,omitempty"` - /** - * The server provides signature help support. - */ - SignatureHelpProvider SignatureHelpOptions `json:"signatureHelpProvider,omitempty"` - /** - * The server provides Goto Declaration support. - */ - DeclarationProvider interface{}/* bool | DeclarationOptions | DeclarationRegistrationOptions*/ `json:"declarationProvider,omitempty"` - /** - * The server provides goto definition support. - */ - DefinitionProvider bool/*boolean | DefinitionOptions*/ `json:"definitionProvider,omitempty"` - /** - * The server provides Goto Type Definition support. - */ - TypeDefinitionProvider interface{}/* bool | TypeDefinitionOptions | TypeDefinitionRegistrationOptions*/ `json:"typeDefinitionProvider,omitempty"` - /** - * The server provides Goto Implementation support. - */ - ImplementationProvider interface{}/* bool | ImplementationOptions | ImplementationRegistrationOptions*/ `json:"implementationProvider,omitempty"` - /** - * The server provides find references support. - */ - ReferencesProvider bool/*boolean | ReferenceOptions*/ `json:"referencesProvider,omitempty"` - /** - * The server provides document highlight support. - */ - DocumentHighlightProvider bool/*boolean | DocumentHighlightOptions*/ `json:"documentHighlightProvider,omitempty"` - /** - * The server provides document symbol support. - */ - DocumentSymbolProvider bool/*boolean | DocumentSymbolOptions*/ `json:"documentSymbolProvider,omitempty"` - /** - * The server provides code actions. CodeActionOptions may only be - * specified if the client states that it supports - * `codeActionLiteralSupport` in its initial `initialize` request. - */ - CodeActionProvider interface{}/*boolean | CodeActionOptions*/ `json:"codeActionProvider,omitempty"` - /** - * The server provides code lens. - */ - CodeLensProvider CodeLensOptions `json:"codeLensProvider,omitempty"` - /** - * The server provides document link support. - */ - DocumentLinkProvider DocumentLinkOptions `json:"documentLinkProvider,omitempty"` - /** - * The server provides color provider support. - */ - ColorProvider interface{}/* bool | DocumentColorOptions | DocumentColorRegistrationOptions*/ `json:"colorProvider,omitempty"` - /** - * The server provides workspace symbol support. - */ - WorkspaceSymbolProvider bool/*boolean | WorkspaceSymbolOptions*/ `json:"workspaceSymbolProvider,omitempty"` - /** - * The server provides document formatting. - */ - DocumentFormattingProvider bool/*boolean | DocumentFormattingOptions*/ `json:"documentFormattingProvider,omitempty"` - /** - * The server provides document range formatting. - */ - DocumentRangeFormattingProvider bool/*boolean | DocumentRangeFormattingOptions*/ `json:"documentRangeFormattingProvider,omitempty"` - /** - * The server provides document formatting on typing. - */ - DocumentOnTypeFormattingProvider DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"` - /** - * The server provides rename support. RenameOptions may only be - * specified if the client states that it supports - * `prepareSupport` in its initial `initialize` request. - */ - RenameProvider interface{}/*boolean | RenameOptions*/ `json:"renameProvider,omitempty"` - /** - * The server provides folding provider support. - */ - FoldingRangeProvider interface{}/* bool | FoldingRangeOptions | FoldingRangeRegistrationOptions*/ `json:"foldingRangeProvider,omitempty"` - /** - * The server provides selection range support. - */ - SelectionRangeProvider interface{}/* bool | SelectionRangeOptions | SelectionRangeRegistrationOptions*/ `json:"selectionRangeProvider,omitempty"` - /** - * The server provides execute command support. - */ - ExecuteCommandProvider ExecuteCommandOptions `json:"executeCommandProvider,omitempty"` - /** - * The server provides call hierarchy support. - * - * @since 3.16.0 - */ - CallHierarchyProvider interface{}/* bool | CallHierarchyOptions | CallHierarchyRegistrationOptions*/ `json:"callHierarchyProvider,omitempty"` - /** - * The server provides linked editing range support. - * - * @since 3.16.0 - */ - LinkedEditingRangeProvider interface{}/* bool | LinkedEditingRangeOptions | LinkedEditingRangeRegistrationOptions*/ `json:"linkedEditingRangeProvider,omitempty"` - /** - * The server provides semantic tokens support. - * - * @since 3.16.0 - */ - SemanticTokensProvider interface{}/*SemanticTokensOptions | SemanticTokensRegistrationOptions*/ `json:"semanticTokensProvider,omitempty"` - /** - * The workspace server capabilities - */ - Workspace Workspace6Gn `json:"workspace,omitempty"` - /** - * The server provides moniker support. - * - * @since 3.16.0 - */ - MonikerProvider interface{}/* bool | MonikerOptions | MonikerRegistrationOptions*/ `json:"monikerProvider,omitempty"` - /** - * The server provides type hierarchy support. - * - * @since 3.17.0 - proposed state - */ - TypeHierarchyProvider interface{}/* bool | TypeHierarchyOptions | TypeHierarchyRegistrationOptions*/ `json:"typeHierarchyProvider,omitempty"` - /** - * The server provides inline values. - * - * @since 3.17.0 - proposed state - */ - InlineValueProvider interface{}/* bool | InlineValueOptions | InlineValueRegistrationOptions*/ `json:"inlineValueProvider,omitempty"` - /** - * The server provides inlay hints. - * - * @since 3.17.0 - proposed state - */ - InlayHintProvider interface{}/* bool | InlayHintOptions | InlayHintRegistrationOptions*/ `json:"inlayHintProvider,omitempty"` - /** - * Experimental server capabilities. - */ - Experimental interface{} `json:"experimental,omitempty"` -} - -type SetTraceParams struct { - Value TraceValues `json:"value"` -} - -/** - * Client capabilities for the show document request. - * - * @since 3.16.0 - */ -type ShowDocumentClientCapabilities struct { - /** - * The client has support for the show document - * request. - */ - Support bool `json:"support"` -} - -/** - * Params to show a document. - * - * @since 3.16.0 - */ -type ShowDocumentParams struct { - /** - * The document uri to show. - */ - URI URI `json:"uri"` - /** - * Indicates to show the resource in an external program. - * To show for example `https://code.visualstudio.com/` - * in the default WEB browser set `external` to `true`. - */ - External bool `json:"external,omitempty"` - /** - * An optional property to indicate whether the editor - * showing the document should take focus or not. - * Clients might ignore this property if an external - * program in started. - */ - TakeFocus bool `json:"takeFocus,omitempty"` - /** - * An optional selection range if the document is a text - * document. Clients might ignore the property if an - * external program is started or the file is not a text - * file. - */ - Selection Range `json:"selection,omitempty"` -} - -/** - * The result of an show document request. - * - * @since 3.16.0 - */ -type ShowDocumentResult struct { - /** - * A boolean indicating if the show was successful. - */ - Success bool `json:"success"` -} - -/** - * The parameters of a notification message. - */ -type ShowMessageParams struct { - /** - * The message type. See {@link MessageType} - */ - Type MessageType `json:"type"` - /** - * The actual message - */ - Message string `json:"message"` -} - -/** - * Show message request client capabilities - */ -type ShowMessageRequestClientCapabilities struct { - /** - * Capabilities specific to the `MessageActionItem` type. - */ - MessageActionItem struct { - /** - * Whether the client supports additional attributes which - * are preserved and send back to the server in the - * request's response. - */ - AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport,omitempty"` - } `json:"messageActionItem,omitempty"` -} - -type ShowMessageRequestParams struct { - /** - * The message type. See {@link MessageType} - */ - Type MessageType `json:"type"` - /** - * The actual message - */ - Message string `json:"message"` - /** - * The message action items to present. - */ - Actions []MessageActionItem `json:"actions,omitempty"` -} - -/** - * Signature help represents the signature of something - * callable. There can be multiple signature but only one - * active and only one active parameter. - */ -type SignatureHelp struct { - /** - * One or more signatures. - */ - Signatures []SignatureInformation `json:"signatures"` - /** - * The active signature. If omitted or the value lies outside the - * range of `signatures` the value defaults to zero or is ignored if - * the `SignatureHelp` has no signatures. - * - * Whenever possible implementors should make an active decision about - * the active signature and shouldn't rely on a default value. - * - * In future version of the protocol this property might become - * mandatory to better express this. - */ - ActiveSignature uint32 `json:"activeSignature,omitempty"` - /** - * The active parameter of the active signature. If omitted or the value - * lies outside the range of `signatures[activeSignature].parameters` - * defaults to 0 if the active signature has parameters. If - * the active signature has no parameters it is ignored. - * In future version of the protocol this property might become - * mandatory to better express the active parameter if the - * active signature does have any. - */ - ActiveParameter uint32 `json:"activeParameter,omitempty"` -} - -/** - * Client Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest). - */ -type SignatureHelpClientCapabilities struct { - /** - * Whether signature help supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports the following `SignatureInformation` - * specific properties. - */ - SignatureInformation struct { - /** - * Client supports the follow content formats for the documentation - * property. The order describes the preferred format of the client. - */ - DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` - /** - * Client capabilities specific to parameter information. - */ - ParameterInformation struct { - /** - * The client supports processing label offsets instead of a - * simple label string. - * - * @since 3.14.0 - */ - LabelOffsetSupport bool `json:"labelOffsetSupport,omitempty"` - } `json:"parameterInformation,omitempty"` - /** - * The client support the `activeParameter` property on `SignatureInformation` - * literal. - * - * @since 3.16.0 - */ - ActiveParameterSupport bool `json:"activeParameterSupport,omitempty"` - } `json:"signatureInformation,omitempty"` - /** - * The client supports to send additional context information for a - * `textDocument/signatureHelp` request. A client that opts into - * contextSupport will also support the `retriggerCharacters` on - * `SignatureHelpOptions`. - * - * @since 3.15.0 - */ - ContextSupport bool `json:"contextSupport,omitempty"` -} - -/** - * Additional information about the context in which a signature help request was triggered. - * - * @since 3.15.0 - */ -type SignatureHelpContext struct { - /** - * Action that caused signature help to be triggered. - */ - TriggerKind SignatureHelpTriggerKind `json:"triggerKind"` - /** - * Character that caused signature help to be triggered. - * - * This is undefined when `triggerKind !== SignatureHelpTriggerKind.TriggerCharacter` - */ - TriggerCharacter string `json:"triggerCharacter,omitempty"` - /** - * `true` if signature help was already showing when it was triggered. - * - * Retrigger occurs when the signature help is already active and can be caused by actions such as - * typing a trigger character, a cursor move, or document content changes. - */ - IsRetrigger bool `json:"isRetrigger"` - /** - * The currently active `SignatureHelp`. - * - * The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field updated based on - * the user navigating through available signatures. - */ - ActiveSignatureHelp SignatureHelp `json:"activeSignatureHelp,omitempty"` -} - -/** - * Server Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest). - */ -type SignatureHelpOptions struct { - /** - * List of characters that trigger signature help. - */ - TriggerCharacters []string `json:"triggerCharacters,omitempty"` - /** - * List of characters that re-trigger signature help. - * - * These trigger characters are only active when signature help is already showing. All trigger characters - * are also counted as re-trigger characters. - * - * @since 3.15.0 - */ - RetriggerCharacters []string `json:"retriggerCharacters,omitempty"` - WorkDoneProgressOptions -} - -/** - * Parameters for a [SignatureHelpRequest](#SignatureHelpRequest). - */ -type SignatureHelpParams struct { - /** - * The signature help context. This is only available if the client specifies - * to send this using the client capability `textDocument.signatureHelp.contextSupport === true` - * - * @since 3.15.0 - */ - Context SignatureHelpContext `json:"context,omitempty"` - TextDocumentPositionParams - WorkDoneProgressParams -} - -/** - * How a signature help was triggered. - * - * @since 3.15.0 - */ -type SignatureHelpTriggerKind float64 - -/** - * Represents the signature of something callable. A signature - * can have a label, like a function-name, a doc-comment, and - * a set of parameters. - */ -type SignatureInformation struct { - /** - * The label of this signature. Will be shown in - * the UI. - */ - Label string `json:"label"` - /** - * The human-readable doc-comment of this signature. Will be shown - * in the UI but can be omitted. - */ - Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"` - /** - * The parameters of this signature. - */ - Parameters []ParameterInformation `json:"parameters,omitempty"` - /** - * The index of the active parameter. - * - * If provided, this is used in place of `SignatureHelp.activeParameter`. - * - * @since 3.16.0 - */ - ActiveParameter uint32 `json:"activeParameter,omitempty"` -} - -/** - * Static registration options to be returned in the initialize - * request. - */ -type StaticRegistrationOptions struct { - /** - * The id used to register the request. The id can be used to deregister - * the request again. See also Registration#id. - */ - ID string `json:"id,omitempty"` -} - -/** - * Represents information about programming constructs like variables, classes, - * interfaces etc. - */ -type SymbolInformation struct { - /** - * The name of this symbol. - */ - Name string `json:"name"` - /** - * The kind of this symbol. - */ - Kind SymbolKind `json:"kind"` - /** - * Tags for this completion item. - * - * @since 3.16.0 - */ - Tags []SymbolTag `json:"tags,omitempty"` - /** - * Indicates if this symbol is deprecated. - * - * @deprecated Use tags instead - */ - Deprecated bool `json:"deprecated,omitempty"` - /** - * The location of this symbol. The location's range is used by a tool - * to reveal the location in the editor. If the symbol is selected in the - * tool the range's start information is used to position the cursor. So - * the range usually spans more than the actual symbol's name and does - * normally include thinks like visibility modifiers. - * - * The range doesn't have to denote a node range in the sense of a abstract - * syntax tree. It can therefore not be used to re-construct a hierarchy of - * the symbols. - */ - Location Location `json:"location"` - /** - * The name of the symbol containing this symbol. This information is for - * user interface purposes (e.g. to render a qualifier in the user interface - * if necessary). It can't be used to re-infer a hierarchy for the document - * symbols. - */ - ContainerName string `json:"containerName,omitempty"` -} - -/** - * A symbol kind. - */ -type SymbolKind float64 - -/** - * Symbol tags are extra annotations that tweak the rendering of a symbol. - * @since 3.16 - */ -type SymbolTag float64 - -/** - * Text document specific client capabilities. - */ -type TextDocumentClientCapabilities struct { - /** - * Defines which synchronization capabilities the client supports. - */ - Synchronization TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"` - /** - * Capabilities specific to the `textDocument/completion` - */ - Completion CompletionClientCapabilities `json:"completion,omitempty"` - /** - * Capabilities specific to the `textDocument/hover` - */ - Hover HoverClientCapabilities `json:"hover,omitempty"` - /** - * Capabilities specific to the `textDocument/signatureHelp` - */ - SignatureHelp SignatureHelpClientCapabilities `json:"signatureHelp,omitempty"` - /** - * Capabilities specific to the `textDocument/declaration` - * - * @since 3.14.0 - */ - Declaration DeclarationClientCapabilities `json:"declaration,omitempty"` - /** - * Capabilities specific to the `textDocument/definition` - */ - Definition DefinitionClientCapabilities `json:"definition,omitempty"` - /** - * Capabilities specific to the `textDocument/typeDefinition` - * - * @since 3.6.0 - */ - TypeDefinition TypeDefinitionClientCapabilities `json:"typeDefinition,omitempty"` - /** - * Capabilities specific to the `textDocument/implementation` - * - * @since 3.6.0 - */ - Implementation ImplementationClientCapabilities `json:"implementation,omitempty"` - /** - * Capabilities specific to the `textDocument/references` - */ - References ReferenceClientCapabilities `json:"references,omitempty"` - /** - * Capabilities specific to the `textDocument/documentHighlight` - */ - DocumentHighlight DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"` - /** - * Capabilities specific to the `textDocument/documentSymbol` - */ - DocumentSymbol DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"` - /** - * Capabilities specific to the `textDocument/codeAction` - */ - CodeAction CodeActionClientCapabilities `json:"codeAction,omitempty"` - /** - * Capabilities specific to the `textDocument/codeLens` - */ - CodeLens CodeLensClientCapabilities `json:"codeLens,omitempty"` - /** - * Capabilities specific to the `textDocument/documentLink` - */ - DocumentLink DocumentLinkClientCapabilities `json:"documentLink,omitempty"` - /** - * Capabilities specific to the `textDocument/documentColor` - */ - ColorProvider DocumentColorClientCapabilities `json:"colorProvider,omitempty"` - /** - * Capabilities specific to the `textDocument/formatting` - */ - Formatting DocumentFormattingClientCapabilities `json:"formatting,omitempty"` - /** - * Capabilities specific to the `textDocument/rangeFormatting` - */ - RangeFormatting DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"` - /** - * Capabilities specific to the `textDocument/onTypeFormatting` - */ - OnTypeFormatting DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"` - /** - * Capabilities specific to the `textDocument/rename` - */ - Rename RenameClientCapabilities `json:"rename,omitempty"` - /** - * Capabilities specific to `textDocument/foldingRange` request. - * - * @since 3.10.0 - */ - FoldingRange FoldingRangeClientCapabilities `json:"foldingRange,omitempty"` - /** - * Capabilities specific to `textDocument/selectionRange` request. - * - * @since 3.15.0 - */ - SelectionRange SelectionRangeClientCapabilities `json:"selectionRange,omitempty"` - /** - * Capabilities specific to `textDocument/publishDiagnostics` notification. - */ - PublishDiagnostics PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"` - /** - * Capabilities specific to the various call hierarchy request. - * - * @since 3.16.0 - */ - CallHierarchy CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"` - /** - * Capabilities specific to the various semantic token request. - * - * @since 3.16.0 - */ - SemanticTokens SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"` - /** - * Capabilities specific to the linked editing range request. - * - * @since 3.16.0 - */ - LinkedEditingRange LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"` - /** - * Client capabilities specific to the moniker request. - * - * @since 3.16.0 - */ - Moniker MonikerClientCapabilities `json:"moniker,omitempty"` - /** - * Capabilities specific to the various type hierarchy requests. - * - * @since 3.17.0 - proposed state - */ - TypeHierarchy TypeHierarchyClientCapabilities `json:"typeHierarchy,omitempty"` - /** - * Capabilities specific to the `textDocument/inlineValue` request. - * - * @since 3.17.0 - proposed state - */ - InlineValue InlineValueClientCapabilities `json:"inlineValue,omitempty"` - /** - * Capabilities specific to the `textDocument/inlayHint` request. - * - * @since 3.17.0 - proposed state - */ - InlayHint InlayHintClientCapabilities `json:"inlayHint,omitempty"` -} - -/** - * An event describing a change to a text document. If range and rangeLength are omitted - * the new text is considered to be the full content of the document. - */ -type TextDocumentContentChangeEvent = struct { - /** - * The range of the document that changed. - */ - Range *Range `json:"range,omitempty"` - /** - * The optional length of the range that got replaced. - * - * @deprecated use range instead. - */ - RangeLength uint32 `json:"rangeLength,omitempty"` - /** - * The new text for the provided range. - */ - Text string `json:"text"` -} - -/** - * Describes textual changes on a text document. A TextDocumentEdit describes all changes - * on a document version Si and after they are applied move the document to version Si+1. - * So the creator of a TextDocumentEdit doesn't need to sort the array of edits or do any - * kind of ordering. However the edits must be non overlapping. - */ -type TextDocumentEdit struct { - /** - * The text document to change. - */ - TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"` - /** - * The edits to be applied. - * - * @since 3.16.0 - support for AnnotatedTextEdit. This is guarded using a - * client capability. - */ - Edits []TextEdit/*TextEdit | AnnotatedTextEdit*/ `json:"edits"` -} - -/** - * A document filter denotes a document by different properties like - * the [language](#TextDocument.languageId), the [scheme](#Uri.scheme) of - * its resource, or a glob-pattern that is applied to the [path](#TextDocument.fileName). - * - * Glob patterns can have the following syntax: - * - `*` to match one or more characters in a path segment - * - `?` to match on one character in a path segment - * - `**` to match any number of path segments, including none - * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) - * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) - * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) - * - * @sample A language filter that applies to typescript files on disk: `{ language: 'typescript', scheme: 'file' }` - * @sample A language filter that applies to all package.json paths: `{ language: 'json', pattern: '**package.json' }` - * - * @since 3.17.0 - proposed state. - */ -type TextDocumentFilter = struct { - /** A language id, like `typescript`. */ - Language string `json:"language"` - /** A Uri [scheme](#Uri.scheme), like `file` or `untitled`. */ - Scheme string `json:"scheme,omitempty"` - /** A glob pattern, like `*.{ts,js}`. */ - Pattern string `json:"pattern,omitempty"` -} - -/** - * A literal to identify a text document in the client. - */ -type TextDocumentIdentifier struct { - /** - * The text document's uri. - */ - URI DocumentURI `json:"uri"` -} - -/** - * An item to transfer a text document from the client to the - * server. - */ -type TextDocumentItem struct { - /** - * The text document's uri. - */ - URI DocumentURI `json:"uri"` - /** - * The text document's language identifier - */ - LanguageID string `json:"languageId"` - /** - * The version number of this document (it will increase after each - * change, including undo/redo). - */ - Version int32 `json:"version"` - /** - * The content of the opened text document. - */ - Text string `json:"text"` -} - -/** - * A parameter literal used in requests to pass a text document and a position inside that - * document. - */ -type TextDocumentPositionParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The position inside the text document. - */ - Position Position `json:"position"` -} - -/** - * General text document registration options. - */ -type TextDocumentRegistrationOptions struct { - /** - * A document selector to identify the scope of the registration. If set to null - * the document selector provided on the client side will be used. - */ - DocumentSelector DocumentSelector /*DocumentSelector | null*/ `json:"documentSelector"` -} - -/** - * Represents reasons why a text document is saved. - */ -type TextDocumentSaveReason float64 - -type TextDocumentSyncClientCapabilities struct { - /** - * Whether text document synchronization supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports sending will save notifications. - */ - WillSave bool `json:"willSave,omitempty"` - /** - * The client supports sending a will save request and - * waits for a response providing text edits which will - * be applied to the document before it is saved. - */ - WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` - /** - * The client supports did save notifications. - */ - DidSave bool `json:"didSave,omitempty"` -} - -/** - * Defines how the host (editor) should sync - * document changes to the language server. - */ -type TextDocumentSyncKind float64 - -type TextDocumentSyncOptions struct { - /** - * Open and close notifications are sent to the server. If omitted open close notification should not - * be sent. - */ - OpenClose bool `json:"openClose,omitempty"` - /** - * Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full - * and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None. - */ - Change TextDocumentSyncKind `json:"change,omitempty"` - /** - * If present will save notifications are sent to the server. If omitted the notification should not be - * sent. - */ - WillSave bool `json:"willSave,omitempty"` - /** - * If present will save wait until requests are sent to the server. If omitted the request should not be - * sent. - */ - WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` - /** - * If present save notifications are sent to the server. If omitted the notification should not be - * sent. - */ - Save SaveOptions/*boolean | SaveOptions*/ `json:"save,omitempty"` -} - -/** - * A text edit applicable to a text document. - */ -type TextEdit struct { - /** - * The range of the text document to be manipulated. To insert - * text into a document create a range where start === end. - */ - Range Range `json:"range"` - /** - * The string to be inserted. For delete operations use an - * empty string. - */ - NewText string `json:"newText"` -} - -type TokenFormat = string - -type TraceValues = string /* 'off' | 'messages' | 'compact' | 'verbose' */ - -/** - * Since 3.6.0 - */ -type TypeDefinitionClientCapabilities struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `TypeDefinitionRegistrationOptions` return value - * for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * The client supports additional metadata in the form of definition links. - * - * Since 3.14.0 - */ - LinkSupport bool `json:"linkSupport,omitempty"` -} - -type TypeDefinitionOptions struct { - WorkDoneProgressOptions -} - -type TypeDefinitionParams struct { - TextDocumentPositionParams - WorkDoneProgressParams - PartialResultParams -} - -type TypeDefinitionRegistrationOptions struct { - TextDocumentRegistrationOptions - TypeDefinitionOptions - StaticRegistrationOptions -} - -/** - * @since 3.17.0 - proposed state - */ -type TypeHierarchyClientCapabilities = struct { - /** - * Whether implementation supports dynamic registration. If this is set to `true` - * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` - * return value for the corresponding server capability as well. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` -} - -/** - * @since 3.17.0 - proposed state - */ -type TypeHierarchyItem = struct { - /** - * The name of this item. - */ - Name string `json:"name"` - /** - * The kind of this item. - */ - Kind SymbolKind `json:"kind"` - /** - * Tags for this item. - */ - Tags []SymbolTag `json:"tags,omitempty"` - /** - * More detail for this item, e.g. the signature of a function. - */ - Detail string `json:"detail,omitempty"` - /** - * The resource identifier of this item. - */ - URI DocumentURI `json:"uri"` - /** - * The range enclosing this symbol not including leading/trailing whitespace - * but everything else, e.g. comments and code. - */ - Range *Range `json:"range"` - /** - * The range that should be selected and revealed when this symbol is being - * picked, e.g. the name of a function. Must be contained by the - * [`range`](#TypeHierarchyItem.range). - */ - SelectionRange *Range `json:"selectionRange"` - /** - * A data entry field that is preserved between a type hierarchy prepare and - * supertypes or subtypes requests. It could also be used to identify the - * type hierarchy in the server, helping improve the performance on - * resolving supertypes and subtypes. - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * Type hierarchy options used during static registration. - * - * @since 3.17.0 - proposed state - */ -type TypeHierarchyOptions = WorkDoneProgressOptions - -/** - * The parameter of a `textDocument/prepareTypeHierarchy` request. - * - * @since 3.17.0 - proposed state - */ -type TypeHierarchyPrepareParams struct { - /** - * The text document. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The position inside the text document. - */ - Position Position `json:"position"` - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` -} - -/** - * Type hierarchy options used during static or dynamic registration. - * - * @since 3.17.0 - proposed state - */ -type TypeHierarchyRegistrationOptions struct { - /** - * A document selector to identify the scope of the registration. If set to null - * the document selector provided on the client side will be used. - */ - DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"` - /** - * The id used to register the request. The id can be used to deregister - * the request again. See also Registration#id. - */ - ID string `json:"id,omitempty"` -} - -/** - * The parameter of a `typeHierarchy/subtypes` request. - * - * @since 3.17.0 - proposed state - */ -type TypeHierarchySubtypesParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * An optional token that a server can use to report partial results (e.g. streaming) to - * the client. - */ - PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` - Item TypeHierarchyItem `json:"item"` -} - -/** - * The parameter of a `typeHierarchy/supertypes` request. - * - * @since 3.17.0 - proposed state - */ -type TypeHierarchySupertypesParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * An optional token that a server can use to report partial results (e.g. streaming) to - * the client. - */ - PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` - Item TypeHierarchyItem `json:"item"` -} - -/** - * A tagging type for string properties that are actually URIs - * - * @since 3.16.0 - */ -type URI = string - -/** - * A diagnostic report indicating that the last returned - * report is still accurate. - * - * @since 3.17.0 - proposed state - */ -type UnchangedDocumentDiagnosticReport = struct { - /** - * A document diagnostic report indicating - * no changes to the last result. A server can - * only return `unchanged` if result ids are - * provided. - */ - Kind string `json:"kind"` - /** - * A result id which will be sent on the next - * diagnostic request for the same document. - */ - ResultID string `json:"resultId"` -} - -/** - * Moniker uniqueness level to define scope of the moniker. - * - * @since 3.16.0 - */ -type UniquenessLevel string - -/** - * General parameters to unregister a request or notification. - */ -type Unregistration struct { - /** - * The id used to unregister the request or notification. Usually an id - * provided during the register request. - */ - ID string `json:"id"` - /** - * The method to unregister for. - */ - Method string `json:"method"` -} - -type UnregistrationParams struct { - Unregisterations []Unregistration `json:"unregisterations"` -} - -/** - * A versioned notebook document identifier. - * - * @since 3.17.0 - proposed state - */ -type VersionedNotebookDocumentIdentifier = struct { - /** - * The version number of this notebook document. - */ - Version int32 `json:"version"` - /** - * The notebook document's uri. - */ - URI URI `json:"uri"` -} - -/** - * A text document identifier to denote a specific version of a text document. - */ -type VersionedTextDocumentIdentifier struct { - /** - * The version number of this document. - */ - Version int32 `json:"version"` - TextDocumentIdentifier -} - -type WatchKind float64 - -/** - * The parameters send in a will save text document notification. - */ -type WillSaveTextDocumentParams struct { - /** - * The document that will be saved. - */ - TextDocument TextDocumentIdentifier `json:"textDocument"` - /** - * The 'TextDocumentSaveReason'. - */ - Reason TextDocumentSaveReason `json:"reason"` -} - -type WorkDoneProgressBegin struct { - Kind string `json:"kind"` - /** - * Mandatory title of the progress operation. Used to briefly inform about - * the kind of operation being performed. - * - * Examples: "Indexing" or "Linking dependencies". - */ - Title string `json:"title"` - /** - * Controls if a cancel button should show to allow the user to cancel the - * long running operation. Clients that don't support cancellation are allowed - * to ignore the setting. - */ - Cancellable bool `json:"cancellable,omitempty"` - /** - * Optional, more detailed associated progress message. Contains - * complementary information to the `title`. - * - * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep". - * If unset, the previous progress message (if any) is still valid. - */ - Message string `json:"message,omitempty"` - /** - * Optional progress percentage to display (value 100 is considered 100%). - * If not provided infinite progress is assumed and clients are allowed - * to ignore the `percentage` value in subsequent in report notifications. - * - * The value should be steadily rising. Clients are free to ignore values - * that are not following this rule. The value range is [0, 100]. - */ - Percentage uint32 `json:"percentage,omitempty"` -} - -type WorkDoneProgressCancelParams struct { - /** - * The token to be used to report progress. - */ - Token ProgressToken `json:"token"` -} - -type WorkDoneProgressClientCapabilities struct { - /** - * Window specific client capabilities. - */ - Window struct { - /** - * Whether client supports server initiated progress using the - * `window/workDoneProgress/create` request. - * - * Since 3.15.0 - */ - WorkDoneProgress bool `json:"workDoneProgress,omitempty"` - /** - * Capabilities specific to the showMessage request. - * - * @since 3.16.0 - */ - ShowMessage ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"` - /** - * Capabilities specific to the showDocument request. - * - * @since 3.16.0 - */ - ShowDocument ShowDocumentClientCapabilities `json:"showDocument,omitempty"` - } `json:"window,omitempty"` -} - -type WorkDoneProgressCreateParams struct { - /** - * The token to be used to report progress. - */ - Token ProgressToken `json:"token"` -} - -type WorkDoneProgressEnd struct { - Kind string `json:"kind"` - /** - * Optional, a final message indicating to for example indicate the outcome - * of the operation. - */ - Message string `json:"message,omitempty"` -} - -type WorkDoneProgressOptions struct { - WorkDoneProgress bool `json:"workDoneProgress,omitempty"` -} - -type WorkDoneProgressParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` -} - -type WorkDoneProgressReport struct { - Kind string `json:"kind"` - /** - * Controls enablement state of a cancel button. - * - * Clients that don't support cancellation or don't support controlling the button's - * enablement state are allowed to ignore the property. - */ - Cancellable bool `json:"cancellable,omitempty"` - /** - * Optional, more detailed associated progress message. Contains - * complementary information to the `title`. - * - * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep". - * If unset, the previous progress message (if any) is still valid. - */ - Message string `json:"message,omitempty"` - /** - * Optional progress percentage to display (value 100 is considered 100%). - * If not provided infinite progress is assumed and clients are allowed - * to ignore the `percentage` value in subsequent in report notifications. - * - * The value should be steadily rising. Clients are free to ignore values - * that are not following this rule. The value range is [0, 100] - */ - Percentage uint32 `json:"percentage,omitempty"` -} - -/** - * Workspace specific client capabilities. - */ -type WorkspaceClientCapabilities struct { - /** - * The client supports applying batch edits - * to the workspace by supporting the request - * 'workspace/applyEdit' - */ - ApplyEdit bool `json:"applyEdit,omitempty"` - /** - * Capabilities specific to `WorkspaceEdit`s - */ - WorkspaceEdit WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"` - /** - * Capabilities specific to the `workspace/didChangeConfiguration` notification. - */ - DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"` - /** - * Capabilities specific to the `workspace/didChangeWatchedFiles` notification. - */ - DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"` - /** - * Capabilities specific to the `workspace/symbol` request. - */ - Symbol WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` - /** - * Capabilities specific to the `workspace/executeCommand` request. - */ - ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` - /** - * Capabilities specific to the semantic token requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` - /** - * Capabilities specific to the code lens requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` - /** - * The client has support for file notifications/requests for user operations on files. - * - * Since 3.16.0 - */ - FileOperations FileOperationClientCapabilities `json:"fileOperations,omitempty"` - /** - * Capabilities specific to the inline values requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"` - /** - * Capabilities specific to the inlay hints requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"` -} - -/** - * Parameters of the workspace diagnostic request. - * - * @since 3.17.0 - proposed state - */ -type WorkspaceDiagnosticParams struct { - /** - * An optional token that a server can use to report work done progress. - */ - WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"` - /** - * An optional token that a server can use to report partial results (e.g. streaming) to - * the client. - */ - PartialResultToken ProgressToken `json:"partialResultToken,omitempty"` - /** - * The additional identifier provided during registration. - */ - Identifier string `json:"identifier,omitempty"` - /** - * The currently known diagnostic reports with their - * previous result ids. - */ - PreviousResultIds []PreviousResultID `json:"previousResultIds"` -} - -/** - * A workspace diagnostic report. - * - * @since 3.17.0 - proposed state - */ -type WorkspaceDiagnosticReport = struct { - Items []WorkspaceDocumentDiagnosticReport `json:"items"` -} - -/** - * A workspace diagnostic document report. - * - * @since 3.17.0 - proposed state - */ -type WorkspaceDocumentDiagnosticReport = interface{} /*WorkspaceFullDocumentDiagnosticReport | WorkspaceUnchangedDocumentDiagnosticReport*/ - -/** - * A workspace edit represents changes to many resources managed in the workspace. The edit - * should either provide `changes` or `documentChanges`. If documentChanges are present - * they are preferred over `changes` if the client can handle versioned document edits. - * - * Since version 3.13.0 a workspace edit can contain resource operations as well. If resource - * operations are present clients need to execute the operations in the order in which they - * are provided. So a workspace edit for example can consist of the following two changes: - * (1) a create file a.txt and (2) a text document edit which insert text into file a.txt. - * - * An invalid sequence (e.g. (1) delete file a.txt and (2) insert text into file a.txt) will - * cause failure of the operation. How the client recovers from the failure is described by - * the client capability: `workspace.workspaceEdit.failureHandling` - */ -type WorkspaceEdit struct { - /** - * Holds changes to existing resources. - */ - Changes map[DocumentURI][]TextEdit/*[uri: DocumentUri]: TextEdit[]*/ `json:"changes,omitempty"` - /** - * Depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes - * are either an array of `TextDocumentEdit`s to express changes to n different text documents - * where each text document edit addresses a specific version of a text document. Or it can contain - * above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations. - * - * Whether a client supports versioned document edits is expressed via - * `workspace.workspaceEdit.documentChanges` client capability. - * - * If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then - * only plain `TextEdit`s using the `changes` property are supported. - */ - DocumentChanges []TextDocumentEdit/*TextDocumentEdit | CreateFile | RenameFile | DeleteFile*/ `json:"documentChanges,omitempty"` - /** - * A map of change annotations that can be referenced in `AnnotatedTextEdit`s or create, rename and - * delete file / folder operations. - * - * Whether clients honor this property depends on the client capability `workspace.changeAnnotationSupport`. - * - * @since 3.16.0 - */ - ChangeAnnotations map[string]ChangeAnnotationIdentifier/*[id: ChangeAnnotationIdentifier]: ChangeAnnotation;*/ `json:"changeAnnotations,omitempty"` -} - -type WorkspaceEditClientCapabilities struct { - /** - * The client supports versioned document changes in `WorkspaceEdit`s - */ - DocumentChanges bool `json:"documentChanges,omitempty"` - /** - * The resource operations the client supports. Clients should at least - * support 'create', 'rename' and 'delete' files and folders. - * - * @since 3.13.0 - */ - ResourceOperations []ResourceOperationKind `json:"resourceOperations,omitempty"` - /** - * The failure handling strategy of a client if applying the workspace edit - * fails. - * - * @since 3.13.0 - */ - FailureHandling FailureHandlingKind `json:"failureHandling,omitempty"` - /** - * Whether the client normalizes line endings to the client specific - * setting. - * If set to `true` the client will normalize line ending characters - * in a workspace edit containing to the client specific new line - * character. - * - * @since 3.16.0 - */ - NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"` - /** - * Whether the client in general supports change annotations on text edits, - * create file, rename file and delete file changes. - * - * @since 3.16.0 - */ - ChangeAnnotationSupport struct { - /** - * Whether the client groups edits with equal labels into tree nodes, - * for instance all edits labelled with "Changes in Strings" would - * be a tree node. - */ - GroupsOnLabel bool `json:"groupsOnLabel,omitempty"` - } `json:"changeAnnotationSupport,omitempty"` -} - -type WorkspaceFolder struct { - /** - * The associated URI for this workspace folder. - */ - URI string `json:"uri"` - /** - * The name of the workspace folder. Used to refer to this - * workspace folder in the user interface. - */ - Name string `json:"name"` -} - -/** - * The workspace folder change event. - */ -type WorkspaceFoldersChangeEvent struct { - /** - * The array of added workspace folders - */ - Added []WorkspaceFolder `json:"added"` - /** - * The array of the removed workspace folders - */ - Removed []WorkspaceFolder `json:"removed"` -} - -type WorkspaceFoldersClientCapabilities struct { - /** - * The workspace client capabilities - */ - Workspace Workspace7Gn `json:"workspace,omitempty"` -} - -type WorkspaceFoldersInitializeParams struct { - /** - * The actual configured workspace folders. - */ - WorkspaceFolders []WorkspaceFolder /*WorkspaceFolder[] | null*/ `json:"workspaceFolders"` -} - -type WorkspaceFoldersServerCapabilities struct { - /** - * The workspace server capabilities - */ - Workspace Workspace9Gn `json:"workspace,omitempty"` -} - -/** - * A full document diagnostic report for a workspace diagnostic result. - * - * @since 3.17.0 - proposed state - */ -type WorkspaceFullDocumentDiagnosticReport struct { - /** - * The URI for which diagnostic information is reported. - */ - URI DocumentURI `json:"uri"` - /** - * The version number for which the diagnostics are reported. - * If the document is not marked as open `null` can be provided. - */ - Version int32/*integer | null*/ `json:"version"` -} - -/** - * A special workspace symbol that supports locations without a range - * - * @since 3.17.0 - proposed state - */ -type WorkspaceSymbol struct { - /** - * The location of the symbol. - * - * See SymbolInformation#location for more details. - */ - Location Location/*Location | { uri: DocumentUri }*/ `json:"location"` - /** - * A data entry field that is preserved on a workspace symbol between a - * workspace symbol request and a workspace symbol resolve request. - */ - Data LSPAny `json:"data,omitempty"` -} - -/** - * Client capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest). - */ -type WorkspaceSymbolClientCapabilities struct { - /** - * Symbol request supports dynamic registration. - */ - DynamicRegistration bool `json:"dynamicRegistration,omitempty"` - /** - * Specific capabilities for the `SymbolKind` in the `workspace/symbol` request. - */ - SymbolKind struct { - /** - * The symbol kind values the client supports. When this - * property exists the client also guarantees that it will - * handle values outside its set gracefully and falls back - * to a default value when unknown. - * - * If this property is not present the client only supports - * the symbol kinds from `File` to `Array` as defined in - * the initial version of the protocol. - */ - ValueSet []SymbolKind `json:"valueSet,omitempty"` - } `json:"symbolKind,omitempty"` - /** - * The client supports tags on `SymbolInformation`. - * Clients supporting tags have to handle unknown tags gracefully. - * - * @since 3.16.0 - */ - TagSupport struct { - /** - * The tags supported by the client. - */ - ValueSet []SymbolTag `json:"valueSet"` - } `json:"tagSupport,omitempty"` - /** - * The client support partial workspace symbols. The client will send the - * request `workspaceSymbol/resolve` to the server to resolve additional - * properties. - * - * @since 3.17.0 - proposedState - */ - ResolveSupport struct { - /** - * The properties that a client can resolve lazily. Usually - * `location.range` - */ - Properties []string `json:"properties"` - } `json:"resolveSupport,omitempty"` -} - -/** - * Server capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest). - */ -type WorkspaceSymbolOptions struct { - /** - * The server provides support to resolve additional - * information for a workspace symbol. - * - * @since 3.17.0 - proposed state - */ - ResolveProvider bool `json:"resolveProvider,omitempty"` - WorkDoneProgressOptions -} - -/** - * The parameters of a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest). - */ -type WorkspaceSymbolParams struct { - /** - * A query string to filter symbols by. Clients may send an empty - * string here to request all symbols. - */ - Query string `json:"query"` - WorkDoneProgressParams - PartialResultParams -} - -/** - * An unchanged document diagnostic report for a workspace diagnostic result. - * - * @since 3.17.0 - proposed state - */ -type WorkspaceUnchangedDocumentDiagnosticReport struct { - /** - * The URI for which diagnostic information is reported. - */ - URI DocumentURI `json:"uri"` - /** - * The version number for which the diagnostics are reported. - * If the document is not marked as open `null` can be provided. - */ - Version int32/*integer | null*/ `json:"version"` -} - -const ( - /** - * Empty kind. - */ - - Empty CodeActionKind = "" - /** - * Base kind for quickfix actions: 'quickfix' - */ - - QuickFix CodeActionKind = "quickfix" - /** - * Base kind for refactoring actions: 'refactor' - */ - - Refactor CodeActionKind = "refactor" - /** - * Base kind for refactoring extraction actions: 'refactor.extract' - * - * Example extract actions: - * - * - Extract method - * - Extract function - * - Extract variable - * - Extract interface from class - * - ... - */ - - RefactorExtract CodeActionKind = "refactor.extract" - /** - * Base kind for refactoring inline actions: 'refactor.inline' - * - * Example inline actions: - * - * - Inline function - * - Inline variable - * - Inline constant - * - ... - */ - - RefactorInline CodeActionKind = "refactor.inline" - /** - * Base kind for refactoring rewrite actions: 'refactor.rewrite' - * - * Example rewrite actions: - * - * - Convert JavaScript function to class - * - Add or remove parameter - * - Encapsulate field - * - Make method static - * - Move method to base class - * - ... - */ - - RefactorRewrite CodeActionKind = "refactor.rewrite" - /** - * Base kind for source actions: `source` - * - * Source code actions apply to the entire file. - */ - - Source CodeActionKind = "source" - /** - * Base kind for an organize imports source action: `source.organizeImports` - */ - - SourceOrganizeImports CodeActionKind = "source.organizeImports" - /** - * Base kind for auto-fix source actions: `source.fixAll`. - * - * Fix all actions automatically fix errors that have a clear fix that do not require user input. - * They should not suppress errors or perform unsafe fixes such as generating new types or classes. - * - * @since 3.15.0 - */ - - SourceFixAll CodeActionKind = "source.fixAll" - /** - * Code actions were explicitly requested by the user or by an extension. - */ - - CodeActionInvoked CodeActionTriggerKind = 1 - /** - * Code actions were requested automatically. - * - * This typically happens when current selection in a file changes, but can - * also be triggered when file content changes. - */ - - CodeActionAutomatic CodeActionTriggerKind = 2 - TextCompletion CompletionItemKind = 1 - MethodCompletion CompletionItemKind = 2 - FunctionCompletion CompletionItemKind = 3 - ConstructorCompletion CompletionItemKind = 4 - FieldCompletion CompletionItemKind = 5 - VariableCompletion CompletionItemKind = 6 - ClassCompletion CompletionItemKind = 7 - InterfaceCompletion CompletionItemKind = 8 - ModuleCompletion CompletionItemKind = 9 - PropertyCompletion CompletionItemKind = 10 - UnitCompletion CompletionItemKind = 11 - ValueCompletion CompletionItemKind = 12 - EnumCompletion CompletionItemKind = 13 - KeywordCompletion CompletionItemKind = 14 - SnippetCompletion CompletionItemKind = 15 - ColorCompletion CompletionItemKind = 16 - FileCompletion CompletionItemKind = 17 - ReferenceCompletion CompletionItemKind = 18 - FolderCompletion CompletionItemKind = 19 - EnumMemberCompletion CompletionItemKind = 20 - ConstantCompletion CompletionItemKind = 21 - StructCompletion CompletionItemKind = 22 - EventCompletion CompletionItemKind = 23 - OperatorCompletion CompletionItemKind = 24 - TypeParameterCompletion CompletionItemKind = 25 - /** - * Render a completion as obsolete, usually using a strike-out. - */ - - ComplDeprecated CompletionItemTag = 1 - /** - * Completion was triggered by typing an identifier (24x7 code - * complete), manual invocation (e.g Ctrl+Space) or via API. - */ - - Invoked CompletionTriggerKind = 1 - /** - * Completion was triggered by a trigger character specified by - * the `triggerCharacters` properties of the `CompletionRegistrationOptions`. - */ - - TriggerCharacter CompletionTriggerKind = 2 - /** - * Completion was re-triggered as current completion list is incomplete - */ - - TriggerForIncompleteCompletions CompletionTriggerKind = 3 - /** - * Reports an error. - */ - - SeverityError DiagnosticSeverity = 1 - /** - * Reports a warning. - */ - - SeverityWarning DiagnosticSeverity = 2 - /** - * Reports an information. - */ - - SeverityInformation DiagnosticSeverity = 3 - /** - * Reports a hint. - */ - - SeverityHint DiagnosticSeverity = 4 - /** - * Unused or unnecessary code. - * - * Clients are allowed to render diagnostics with this tag faded out instead of having - * an error squiggle. - */ - - Unnecessary DiagnosticTag = 1 - /** - * Deprecated or obsolete code. - * - * Clients are allowed to rendered diagnostics with this tag strike through. - */ - - Deprecated DiagnosticTag = 2 - /** - * A textual occurrence. - */ - - Text DocumentHighlightKind = 1 - /** - * Read-access of a symbol, like reading a variable. - */ - - Read DocumentHighlightKind = 2 - /** - * Write-access of a symbol, like writing to a variable. - */ - - Write DocumentHighlightKind = 3 - /** - * Applying the workspace change is simply aborted if one of the changes provided - * fails. All operations executed before the failing operation stay executed. - */ - - Abort FailureHandlingKind = "abort" - /** - * All operations are executed transactional. That means they either all - * succeed or no changes at all are applied to the workspace. - */ - - Transactional FailureHandlingKind = "transactional" - /** - * If the workspace edit contains only textual file changes they are executed transactional. - * If resource changes (create, rename or delete file) are part of the change the failure - * handling strategy is abort. - */ - - TextOnlyTransactional FailureHandlingKind = "textOnlyTransactional" - /** - * The client tries to undo the operations already executed. But there is no - * guarantee that this is succeeding. - */ - - Undo FailureHandlingKind = "undo" - /** - * The file got created. - */ - - Created FileChangeType = 1 - /** - * The file got changed. - */ - - Changed FileChangeType = 2 - /** - * The file got deleted. - */ - - Deleted FileChangeType = 3 - /** - * The pattern matches a file only. - */ - - FileOp FileOperationPatternKind = "file" - /** - * The pattern matches a folder only. - */ - - FolderOp FileOperationPatternKind = "folder" - /** - * Folding range for a comment - */ - Comment FoldingRangeKind = "comment" - /** - * Folding range for a imports or includes - */ - Imports FoldingRangeKind = "imports" - /** - * Folding range for a region (e.g. `#region`) - */ - Region FoldingRangeKind = "region" - /** - * If the protocol version provided by the client can't be handled by the server. - * @deprecated This initialize error got replaced by client capabilities. There is - * no version handshake in version 3.0x - */ - - UnknownProtocolVersion InitializeError = 1 - /** - * An inlay hint that for a type annotation. - */ - - Type InlayHintKind = 1 - /** - * An inlay hint that is for a parameter. - */ - - Parameter InlayHintKind = 2 - /** - * The primary text to be inserted is treated as a plain string. - */ - - PlainTextTextFormat InsertTextFormat = 1 - /** - * The primary text to be inserted is treated as a snippet. - * - * A snippet can define tab stops and placeholders with `$1`, `$2` - * and `${3:foo}`. `$0` defines the final tab stop, it defaults to - * the end of the snippet. Placeholders with equal identifiers are linked, - * that is typing in one will update others too. - * - * See also: https://microsoft.github.io/language-server-protocol/specifications/specification-current/#snippet_syntax - */ - - SnippetTextFormat InsertTextFormat = 2 - /** - * The insertion or replace strings is taken as it is. If the - * value is multi line the lines below the cursor will be - * inserted using the indentation defined in the string value. - * The client will not apply any kind of adjustments to the - * string. - */ - - AsIs InsertTextMode = 1 - /** - * The editor adjusts leading whitespace of new lines so that - * they match the indentation up to the cursor of the line for - * which the item is accepted. - * - * Consider a line like this: <2tabs><3tabs>foo. Accepting a - * multi line completion item is indented using 2 tabs and all - * following lines inserted will be indented using 2 tabs as well. - */ - - AdjustIndentation InsertTextMode = 2 - /** - * Plain text is supported as a content format - */ - - PlainText MarkupKind = "plaintext" - /** - * Markdown is supported as a content format - */ - - Markdown MarkupKind = "markdown" - /** - * An error message. - */ - - Error MessageType = 1 - /** - * A warning message. - */ - - Warning MessageType = 2 - /** - * An information message. - */ - - Info MessageType = 3 - /** - * A log message. - */ - - Log MessageType = 4 - /** - * The moniker represent a symbol that is imported into a project - */ - Import MonikerKind = "import" - /** - * The moniker represents a symbol that is exported from a project - */ - Export MonikerKind = "export" - /** - * The moniker represents a symbol that is local to a project (e.g. a local - * variable of a function, a class not visible outside the project, ...) - */ - Local MonikerKind = "local" - /** - * A markup-cell is formatted source that is used for display. - */ - - Markup NotebookCellKind = 1 - /** - * A code-cell is source code. - */ - - Code NotebookCellKind = 2 - /** - * Supports creating new files and folders. - */ - - Create ResourceOperationKind = "create" - /** - * Supports renaming existing files and folders. - */ - - Rename ResourceOperationKind = "rename" - /** - * Supports deleting existing files and folders. - */ - - Delete ResourceOperationKind = "delete" - /** - * Signature help was invoked manually by the user or by a command. - */ - - SigInvoked SignatureHelpTriggerKind = 1 - /** - * Signature help was triggered by a trigger character. - */ - - SigTriggerCharacter SignatureHelpTriggerKind = 2 - /** - * Signature help was triggered by the cursor moving or by the document content changing. - */ - - SigContentChange SignatureHelpTriggerKind = 3 - File SymbolKind = 1 - Module SymbolKind = 2 - Namespace SymbolKind = 3 - Package SymbolKind = 4 - Class SymbolKind = 5 - Method SymbolKind = 6 - Property SymbolKind = 7 - Field SymbolKind = 8 - Constructor SymbolKind = 9 - Enum SymbolKind = 10 - Interface SymbolKind = 11 - Function SymbolKind = 12 - Variable SymbolKind = 13 - Constant SymbolKind = 14 - String SymbolKind = 15 - Number SymbolKind = 16 - Boolean SymbolKind = 17 - Array SymbolKind = 18 - Object SymbolKind = 19 - Key SymbolKind = 20 - Null SymbolKind = 21 - EnumMember SymbolKind = 22 - Struct SymbolKind = 23 - Event SymbolKind = 24 - Operator SymbolKind = 25 - TypeParameter SymbolKind = 26 - /** - * Render a symbol as obsolete, usually using a strike-out. - */ - - DeprecatedSymbol SymbolTag = 1 - /** - * Manually triggered, e.g. by the user pressing save, by starting debugging, - * or by an API call. - */ - - Manual TextDocumentSaveReason = 1 - /** - * Automatic after a delay. - */ - - AfterDelay TextDocumentSaveReason = 2 - /** - * When the editor lost focus. - */ - - FocusOut TextDocumentSaveReason = 3 - /** - * Documents should not be synced at all. - */ - - None TextDocumentSyncKind = 0 - /** - * Documents are synced by always sending the full content - * of the document. - */ - - Full TextDocumentSyncKind = 1 - /** - * Documents are synced by sending the full content on open. - * After that only incremental updates to the document are - * send. - */ - - Incremental TextDocumentSyncKind = 2 - /** - * The moniker is only unique inside a document - */ - Document UniquenessLevel = "document" - /** - * The moniker is unique inside a project for which a dump got created - */ - Project UniquenessLevel = "project" - /** - * The moniker is unique inside the group to which a project belongs - */ - Group UniquenessLevel = "group" - /** - * The moniker is unique inside the moniker scheme. - */ - Scheme UniquenessLevel = "scheme" - /** - * The moniker is globally unique - */ - Global UniquenessLevel = "global" - /** - * Interested in create events. - */ - - WatchCreate WatchKind = 1 - /** - * Interested in change events - */ - - WatchChange WatchKind = 2 - /** - * Interested in delete events - */ - - WatchDelete WatchKind = 4 -) - -// Types created to name formal parameters and embedded structs -type ParamConfiguration struct { - ConfigurationParams - PartialResultParams -} -type ParamInitialize struct { - InitializeParams - WorkDoneProgressParams -} -type PrepareRename2Gn struct { - Range Range `json:"range"` - Placeholder string `json:"placeholder"` -} -type Workspace3Gn struct { - /** - * The client supports applying batch edits - * to the workspace by supporting the request - * 'workspace/applyEdit' - */ - ApplyEdit bool `json:"applyEdit,omitempty"` - - /** - * Capabilities specific to `WorkspaceEdit`s - */ - WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeConfiguration` notification. - */ - DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeWatchedFiles` notification. - */ - DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"` - - /** - * Capabilities specific to the `workspace/symbol` request. - */ - Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` - - /** - * Capabilities specific to the `workspace/executeCommand` request. - */ - ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` - - /** - * Capabilities specific to the semantic token requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` - - /** - * Capabilities specific to the code lens requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` - - /** - * The client has support for file notifications/requests for user operations on files. - * - * Since 3.16.0 - */ - FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"` - - /** - * Capabilities specific to the inline values requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"` - - /** - * Capabilities specific to the inlay hints requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"` - - /** - * The client has support for workspace folders - * - * @since 3.6.0 - */ - WorkspaceFolders bool `json:"workspaceFolders,omitempty"` - - /** - * The client supports `workspace/configuration` requests. - * - * @since 3.6.0 - */ - Configuration bool `json:"configuration,omitempty"` -} -type Workspace4Gn struct { - /** - * The client supports applying batch edits - * to the workspace by supporting the request - * 'workspace/applyEdit' - */ - ApplyEdit bool `json:"applyEdit,omitempty"` - - /** - * Capabilities specific to `WorkspaceEdit`s - */ - WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeConfiguration` notification. - */ - DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeWatchedFiles` notification. - */ - DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"` - - /** - * Capabilities specific to the `workspace/symbol` request. - */ - Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` - - /** - * Capabilities specific to the `workspace/executeCommand` request. - */ - ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` - - /** - * Capabilities specific to the semantic token requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` - - /** - * Capabilities specific to the code lens requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` - - /** - * The client has support for file notifications/requests for user operations on files. - * - * Since 3.16.0 - */ - FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"` - - /** - * Capabilities specific to the inline values requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"` - - /** - * Capabilities specific to the inlay hints requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"` - - /** - * The client has support for workspace folders - * - * @since 3.6.0 - */ - WorkspaceFolders bool `json:"workspaceFolders,omitempty"` - - /** - * The client supports `workspace/configuration` requests. - * - * @since 3.6.0 - */ - Configuration bool `json:"configuration,omitempty"` -} -type WorkspaceFolders5Gn struct { - /** - * The Server has support for workspace folders - */ - Supported bool `json:"supported,omitempty"` - - /** - * Whether the server wants to receive workspace folder - * change notifications. - * - * If a strings is provided the string is treated as a ID - * under which the notification is registered on the client - * side. The ID can be used to unregister for these events - * using the `client/unregisterCapability` request. - */ - ChangeNotifications string/*string | boolean*/ `json:"changeNotifications,omitempty"` -} -type Workspace6Gn struct { - /** - * The server is interested in notifications/requests for operations on files. - * - * @since 3.16.0 - */ - FileOperations *FileOperationOptions `json:"fileOperations,omitempty"` - - WorkspaceFolders WorkspaceFolders5Gn `json:"workspaceFolders,omitempty"` -} -type Workspace7Gn struct { - /** - * The client supports applying batch edits - * to the workspace by supporting the request - * 'workspace/applyEdit' - */ - ApplyEdit bool `json:"applyEdit,omitempty"` - - /** - * Capabilities specific to `WorkspaceEdit`s - */ - WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeConfiguration` notification. - */ - DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"` - - /** - * Capabilities specific to the `workspace/didChangeWatchedFiles` notification. - */ - DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"` - - /** - * Capabilities specific to the `workspace/symbol` request. - */ - Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` - - /** - * Capabilities specific to the `workspace/executeCommand` request. - */ - ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` - - /** - * Capabilities specific to the semantic token requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` - - /** - * Capabilities specific to the code lens requests scoped to the - * workspace. - * - * @since 3.16.0. - */ - CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` - - /** - * The client has support for file notifications/requests for user operations on files. - * - * Since 3.16.0 - */ - FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"` - - /** - * Capabilities specific to the inline values requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"` - - /** - * Capabilities specific to the inlay hints requests scoped to the - * workspace. - * - * @since 3.17.0. - */ - InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"` - - /** - * The client has support for workspace folders - * - * @since 3.6.0 - */ - WorkspaceFolders bool `json:"workspaceFolders,omitempty"` - - /** - * The client supports `workspace/configuration` requests. - * - * @since 3.6.0 - */ - Configuration bool `json:"configuration,omitempty"` -} -type WorkspaceFolders8Gn struct { - /** - * The Server has support for workspace folders - */ - Supported bool `json:"supported,omitempty"` - - /** - * Whether the server wants to receive workspace folder - * change notifications. - * - * If a strings is provided the string is treated as a ID - * under which the notification is registered on the client - * side. The ID can be used to unregister for these events - * using the `client/unregisterCapability` request. - */ - ChangeNotifications string/*string | boolean*/ `json:"changeNotifications,omitempty"` -} -type Workspace9Gn struct { - /** - * The server is interested in notifications/requests for operations on files. - * - * @since 3.16.0 - */ - FileOperations *FileOperationOptions `json:"fileOperations,omitempty"` - - WorkspaceFolders WorkspaceFolders8Gn `json:"workspaceFolders,omitempty"` -} diff --git a/internal/lsp/protocol/tsserver.go b/internal/lsp/protocol/tsserver.go deleted file mode 100644 index a26e50cf4e5..00000000000 --- a/internal/lsp/protocol/tsserver.go +++ /dev/null @@ -1,1293 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated (see typescript/README.md) DO NOT EDIT. - -package protocol - -// Package protocol contains data types and code for LSP json rpcs -// generated automatically from vscode-languageserver-node -// commit: 696f9285bf849b73745682fdb1c1feac73eb8772 -// last fetched Fri Apr 01 2022 10:53:41 GMT-0400 (Eastern Daylight Time) - -import ( - "context" - "encoding/json" - "fmt" - - "golang.org/x/tools/internal/jsonrpc2" -) - -type Server interface { - DidChangeWorkspaceFolders(context.Context, *DidChangeWorkspaceFoldersParams) error - WorkDoneProgressCancel(context.Context, *WorkDoneProgressCancelParams) error - DidCreateFiles(context.Context, *CreateFilesParams) error - DidRenameFiles(context.Context, *RenameFilesParams) error - DidDeleteFiles(context.Context, *DeleteFilesParams) error - Initialized(context.Context, *InitializedParams) error - Exit(context.Context) error - DidChangeConfiguration(context.Context, *DidChangeConfigurationParams) error - DidOpen(context.Context, *DidOpenTextDocumentParams) error - DidChange(context.Context, *DidChangeTextDocumentParams) error - DidClose(context.Context, *DidCloseTextDocumentParams) error - DidSave(context.Context, *DidSaveTextDocumentParams) error - WillSave(context.Context, *WillSaveTextDocumentParams) error - DidChangeWatchedFiles(context.Context, *DidChangeWatchedFilesParams) error - DidOpenNotebookDocument(context.Context, *DidOpenNotebookDocumentParams) error - DidChangeNotebookDocument(context.Context, *DidChangeNotebookDocumentParams) error - DidSaveNotebookDocument(context.Context, *DidSaveNotebookDocumentParams) error - DidCloseNotebookDocument(context.Context, *DidCloseNotebookDocumentParams) error - SetTrace(context.Context, *SetTraceParams) error - LogTrace(context.Context, *LogTraceParams) error - Implementation(context.Context, *ImplementationParams) (Definition /*Definition | DefinitionLink[] | null*/, error) - TypeDefinition(context.Context, *TypeDefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) - DocumentColor(context.Context, *DocumentColorParams) ([]ColorInformation, error) - ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error) - FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange /*FoldingRange[] | null*/, error) - Declaration(context.Context, *DeclarationParams) (Declaration /*Declaration | DeclarationLink[] | null*/, error) - SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange /*SelectionRange[] | null*/, error) - PrepareCallHierarchy(context.Context, *CallHierarchyPrepareParams) ([]CallHierarchyItem /*CallHierarchyItem[] | null*/, error) - IncomingCalls(context.Context, *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/, error) - OutgoingCalls(context.Context, *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/, error) - SemanticTokensFull(context.Context, *SemanticTokensParams) (*SemanticTokens /*SemanticTokens | null*/, error) - SemanticTokensFullDelta(context.Context, *SemanticTokensDeltaParams) (interface{} /* SemanticTokens | SemanticTokensDelta | float64*/, error) - SemanticTokensRange(context.Context, *SemanticTokensRangeParams) (*SemanticTokens /*SemanticTokens | null*/, error) - SemanticTokensRefresh(context.Context) error - LinkedEditingRange(context.Context, *LinkedEditingRangeParams) (*LinkedEditingRanges /*LinkedEditingRanges | null*/, error) - WillCreateFiles(context.Context, *CreateFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) - WillRenameFiles(context.Context, *RenameFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) - WillDeleteFiles(context.Context, *DeleteFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) - Moniker(context.Context, *MonikerParams) ([]Moniker /*Moniker[] | null*/, error) - PrepareTypeHierarchy(context.Context, *TypeHierarchyPrepareParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) - Supertypes(context.Context, *TypeHierarchySupertypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) - Subtypes(context.Context, *TypeHierarchySubtypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) - InlineValue(context.Context, *InlineValueParams) ([]InlineValue /*InlineValue[] | null*/, error) - InlineValueRefresh(context.Context) error - InlayHint(context.Context, *InlayHintParams) ([]InlayHint /*InlayHint[] | null*/, error) - Resolve(context.Context, *InlayHint) (*InlayHint, error) - InlayHintRefresh(context.Context) error - Initialize(context.Context, *ParamInitialize) (*InitializeResult, error) - Shutdown(context.Context) error - WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit /*TextEdit[] | null*/, error) - Completion(context.Context, *CompletionParams) (*CompletionList /*CompletionItem[] | CompletionList | null*/, error) - ResolveCompletionItem(context.Context, *CompletionItem) (*CompletionItem, error) - Hover(context.Context, *HoverParams) (*Hover /*Hover | null*/, error) - SignatureHelp(context.Context, *SignatureHelpParams) (*SignatureHelp /*SignatureHelp | null*/, error) - Definition(context.Context, *DefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) - References(context.Context, *ReferenceParams) ([]Location /*Location[] | null*/, error) - DocumentHighlight(context.Context, *DocumentHighlightParams) ([]DocumentHighlight /*DocumentHighlight[] | null*/, error) - DocumentSymbol(context.Context, *DocumentSymbolParams) ([]interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/, error) - CodeAction(context.Context, *CodeActionParams) ([]CodeAction /*(Command | CodeAction)[] | null*/, error) - ResolveCodeAction(context.Context, *CodeAction) (*CodeAction, error) - Symbol(context.Context, *WorkspaceSymbolParams) ([]SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/, error) - ResolveWorkspaceSymbol(context.Context, *WorkspaceSymbol) (*WorkspaceSymbol, error) - CodeLens(context.Context, *CodeLensParams) ([]CodeLens /*CodeLens[] | null*/, error) - ResolveCodeLens(context.Context, *CodeLens) (*CodeLens, error) - CodeLensRefresh(context.Context) error - DocumentLink(context.Context, *DocumentLinkParams) ([]DocumentLink /*DocumentLink[] | null*/, error) - ResolveDocumentLink(context.Context, *DocumentLink) (*DocumentLink, error) - Formatting(context.Context, *DocumentFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) - RangeFormatting(context.Context, *DocumentRangeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) - OnTypeFormatting(context.Context, *DocumentOnTypeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) - Rename(context.Context, *RenameParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) - PrepareRename(context.Context, *PrepareRenameParams) (*PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/, error) - ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{} /* LSPAny | void | float64*/, error) - Diagnostic(context.Context, *string) (*string, error) - DiagnosticWorkspace(context.Context, *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) - DiagnosticRefresh(context.Context) error - NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) -} - -func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { - switch r.Method() { - case "workspace/didChangeWorkspaceFolders": // notif - var params DidChangeWorkspaceFoldersParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidChangeWorkspaceFolders(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "window/workDoneProgress/cancel": // notif - var params WorkDoneProgressCancelParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.WorkDoneProgressCancel(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "workspace/didCreateFiles": // notif - var params CreateFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidCreateFiles(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "workspace/didRenameFiles": // notif - var params RenameFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidRenameFiles(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "workspace/didDeleteFiles": // notif - var params DeleteFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidDeleteFiles(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "initialized": // notif - var params InitializedParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.Initialized(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "exit": // notif - err := server.Exit(ctx) - return true, reply(ctx, nil, err) - case "workspace/didChangeConfiguration": // notif - var params DidChangeConfigurationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidChangeConfiguration(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/didOpen": // notif - var params DidOpenTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidOpen(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/didChange": // notif - var params DidChangeTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidChange(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/didClose": // notif - var params DidCloseTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidClose(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/didSave": // notif - var params DidSaveTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidSave(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/willSave": // notif - var params WillSaveTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.WillSave(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "workspace/didChangeWatchedFiles": // notif - var params DidChangeWatchedFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidChangeWatchedFiles(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "notebookDocument/didOpen": // notif - var params DidOpenNotebookDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidOpenNotebookDocument(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "notebookDocument/didChange": // notif - var params DidChangeNotebookDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidChangeNotebookDocument(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "notebookDocument/didSave": // notif - var params DidSaveNotebookDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidSaveNotebookDocument(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "notebookDocument/didClose": // notif - var params DidCloseNotebookDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.DidCloseNotebookDocument(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "$/setTrace": // notif - var params SetTraceParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.SetTrace(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "$/logTrace": // notif - var params LogTraceParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err := server.LogTrace(ctx, ¶ms) - return true, reply(ctx, nil, err) - case "textDocument/implementation": // req - var params ImplementationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Implementation(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/typeDefinition": // req - var params TypeDefinitionParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.TypeDefinition(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/documentColor": // req - var params DocumentColorParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.DocumentColor(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/colorPresentation": // req - var params ColorPresentationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ColorPresentation(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/foldingRange": // req - var params FoldingRangeParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.FoldingRange(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/declaration": // req - var params DeclarationParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Declaration(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/selectionRange": // req - var params SelectionRangeParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.SelectionRange(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/prepareCallHierarchy": // req - var params CallHierarchyPrepareParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.PrepareCallHierarchy(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "callHierarchy/incomingCalls": // req - var params CallHierarchyIncomingCallsParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.IncomingCalls(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "callHierarchy/outgoingCalls": // req - var params CallHierarchyOutgoingCallsParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.OutgoingCalls(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/semanticTokens/full": // req - var params SemanticTokensParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.SemanticTokensFull(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/semanticTokens/full/delta": // req - var params SemanticTokensDeltaParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.SemanticTokensFullDelta(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/semanticTokens/range": // req - var params SemanticTokensRangeParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.SemanticTokensRange(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/semanticTokens/refresh": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.SemanticTokensRefresh(ctx) - return true, reply(ctx, nil, err) - case "textDocument/linkedEditingRange": // req - var params LinkedEditingRangeParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.LinkedEditingRange(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/willCreateFiles": // req - var params CreateFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.WillCreateFiles(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/willRenameFiles": // req - var params RenameFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.WillRenameFiles(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/willDeleteFiles": // req - var params DeleteFilesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.WillDeleteFiles(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/moniker": // req - var params MonikerParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Moniker(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/prepareTypeHierarchy": // req - var params TypeHierarchyPrepareParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.PrepareTypeHierarchy(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "typeHierarchy/supertypes": // req - var params TypeHierarchySupertypesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Supertypes(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "typeHierarchy/subtypes": // req - var params TypeHierarchySubtypesParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Subtypes(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/inlineValue": // req - var params InlineValueParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.InlineValue(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/inlineValue/refresh": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.InlineValueRefresh(ctx) - return true, reply(ctx, nil, err) - case "textDocument/inlayHint": // req - var params InlayHintParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.InlayHint(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "inlayHint/resolve": // req - var params InlayHint - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Resolve(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/inlayHint/refresh": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.InlayHintRefresh(ctx) - return true, reply(ctx, nil, err) - case "initialize": // req - var params ParamInitialize - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - if _, ok := err.(*json.UnmarshalTypeError); !ok { - return true, sendParseError(ctx, reply, err) - } - } - resp, err := server.Initialize(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "shutdown": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.Shutdown(ctx) - return true, reply(ctx, nil, err) - case "textDocument/willSaveWaitUntil": // req - var params WillSaveTextDocumentParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.WillSaveWaitUntil(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/completion": // req - var params CompletionParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Completion(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "completionItem/resolve": // req - var params CompletionItem - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ResolveCompletionItem(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/hover": // req - var params HoverParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Hover(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/signatureHelp": // req - var params SignatureHelpParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.SignatureHelp(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/definition": // req - var params DefinitionParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Definition(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/references": // req - var params ReferenceParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.References(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/documentHighlight": // req - var params DocumentHighlightParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.DocumentHighlight(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/documentSymbol": // req - var params DocumentSymbolParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.DocumentSymbol(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/codeAction": // req - var params CodeActionParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.CodeAction(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "codeAction/resolve": // req - var params CodeAction - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ResolveCodeAction(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/symbol": // req - var params WorkspaceSymbolParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Symbol(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspaceSymbol/resolve": // req - var params WorkspaceSymbol - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ResolveWorkspaceSymbol(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/codeLens": // req - var params CodeLensParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.CodeLens(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "codeLens/resolve": // req - var params CodeLens - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ResolveCodeLens(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/codeLens/refresh": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.CodeLensRefresh(ctx) - return true, reply(ctx, nil, err) - case "textDocument/documentLink": // req - var params DocumentLinkParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.DocumentLink(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "documentLink/resolve": // req - var params DocumentLink - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ResolveDocumentLink(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/formatting": // req - var params DocumentFormattingParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Formatting(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/rangeFormatting": // req - var params DocumentRangeFormattingParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.RangeFormatting(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/onTypeFormatting": // req - var params DocumentOnTypeFormattingParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.OnTypeFormatting(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/rename": // req - var params RenameParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Rename(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/prepareRename": // req - var params PrepareRenameParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.PrepareRename(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/executeCommand": // req - var params ExecuteCommandParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.ExecuteCommand(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "textDocument/diagnostic": // req - var params string - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.Diagnostic(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/diagnostic": // req - var params WorkspaceDiagnosticParams - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - resp, err := server.DiagnosticWorkspace(ctx, ¶ms) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil) - case "workspace/diagnostic/refresh": // req - if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) - } - err := server.DiagnosticRefresh(ctx) - return true, reply(ctx, nil, err) - - default: - return false, nil - } -} - -func (s *serverDispatcher) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) error { - return s.sender.Notify(ctx, "workspace/didChangeWorkspaceFolders", params) -} - -func (s *serverDispatcher) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) error { - return s.sender.Notify(ctx, "window/workDoneProgress/cancel", params) -} - -func (s *serverDispatcher) DidCreateFiles(ctx context.Context, params *CreateFilesParams) error { - return s.sender.Notify(ctx, "workspace/didCreateFiles", params) -} - -func (s *serverDispatcher) DidRenameFiles(ctx context.Context, params *RenameFilesParams) error { - return s.sender.Notify(ctx, "workspace/didRenameFiles", params) -} - -func (s *serverDispatcher) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) error { - return s.sender.Notify(ctx, "workspace/didDeleteFiles", params) -} - -func (s *serverDispatcher) Initialized(ctx context.Context, params *InitializedParams) error { - return s.sender.Notify(ctx, "initialized", params) -} - -func (s *serverDispatcher) Exit(ctx context.Context) error { - return s.sender.Notify(ctx, "exit", nil) -} - -func (s *serverDispatcher) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) error { - return s.sender.Notify(ctx, "workspace/didChangeConfiguration", params) -} - -func (s *serverDispatcher) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) error { - return s.sender.Notify(ctx, "textDocument/didOpen", params) -} - -func (s *serverDispatcher) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) error { - return s.sender.Notify(ctx, "textDocument/didChange", params) -} - -func (s *serverDispatcher) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) error { - return s.sender.Notify(ctx, "textDocument/didClose", params) -} - -func (s *serverDispatcher) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) error { - return s.sender.Notify(ctx, "textDocument/didSave", params) -} - -func (s *serverDispatcher) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) error { - return s.sender.Notify(ctx, "textDocument/willSave", params) -} - -func (s *serverDispatcher) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) error { - return s.sender.Notify(ctx, "workspace/didChangeWatchedFiles", params) -} - -func (s *serverDispatcher) DidOpenNotebookDocument(ctx context.Context, params *DidOpenNotebookDocumentParams) error { - return s.sender.Notify(ctx, "notebookDocument/didOpen", params) -} - -func (s *serverDispatcher) DidChangeNotebookDocument(ctx context.Context, params *DidChangeNotebookDocumentParams) error { - return s.sender.Notify(ctx, "notebookDocument/didChange", params) -} - -func (s *serverDispatcher) DidSaveNotebookDocument(ctx context.Context, params *DidSaveNotebookDocumentParams) error { - return s.sender.Notify(ctx, "notebookDocument/didSave", params) -} - -func (s *serverDispatcher) DidCloseNotebookDocument(ctx context.Context, params *DidCloseNotebookDocumentParams) error { - return s.sender.Notify(ctx, "notebookDocument/didClose", params) -} - -func (s *serverDispatcher) SetTrace(ctx context.Context, params *SetTraceParams) error { - return s.sender.Notify(ctx, "$/setTrace", params) -} - -func (s *serverDispatcher) LogTrace(ctx context.Context, params *LogTraceParams) error { - return s.sender.Notify(ctx, "$/logTrace", params) -} -func (s *serverDispatcher) Implementation(ctx context.Context, params *ImplementationParams) (Definition /*Definition | DefinitionLink[] | null*/, error) { - var result Definition /*Definition | DefinitionLink[] | null*/ - if err := s.sender.Call(ctx, "textDocument/implementation", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) { - var result Definition /*Definition | DefinitionLink[] | null*/ - if err := s.sender.Call(ctx, "textDocument/typeDefinition", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) DocumentColor(ctx context.Context, params *DocumentColorParams) ([]ColorInformation, error) { - var result []ColorInformation - if err := s.sender.Call(ctx, "textDocument/documentColor", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ColorPresentation(ctx context.Context, params *ColorPresentationParams) ([]ColorPresentation, error) { - var result []ColorPresentation - if err := s.sender.Call(ctx, "textDocument/colorPresentation", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) FoldingRange(ctx context.Context, params *FoldingRangeParams) ([]FoldingRange /*FoldingRange[] | null*/, error) { - var result []FoldingRange /*FoldingRange[] | null*/ - if err := s.sender.Call(ctx, "textDocument/foldingRange", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Declaration(ctx context.Context, params *DeclarationParams) (Declaration /*Declaration | DeclarationLink[] | null*/, error) { - var result Declaration /*Declaration | DeclarationLink[] | null*/ - if err := s.sender.Call(ctx, "textDocument/declaration", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange /*SelectionRange[] | null*/, error) { - var result []SelectionRange /*SelectionRange[] | null*/ - if err := s.sender.Call(ctx, "textDocument/selectionRange", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) ([]CallHierarchyItem /*CallHierarchyItem[] | null*/, error) { - var result []CallHierarchyItem /*CallHierarchyItem[] | null*/ - if err := s.sender.Call(ctx, "textDocument/prepareCallHierarchy", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/, error) { - var result []CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/ - if err := s.sender.Call(ctx, "callHierarchy/incomingCalls", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/, error) { - var result []CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/ - if err := s.sender.Call(ctx, "callHierarchy/outgoingCalls", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (*SemanticTokens /*SemanticTokens | null*/, error) { - var result *SemanticTokens /*SemanticTokens | null*/ - if err := s.sender.Call(ctx, "textDocument/semanticTokens/full", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (interface{} /* SemanticTokens | SemanticTokensDelta | float64*/, error) { - var result interface{} /* SemanticTokens | SemanticTokensDelta | float64*/ - if err := s.sender.Call(ctx, "textDocument/semanticTokens/full/delta", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (*SemanticTokens /*SemanticTokens | null*/, error) { - var result *SemanticTokens /*SemanticTokens | null*/ - if err := s.sender.Call(ctx, "textDocument/semanticTokens/range", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SemanticTokensRefresh(ctx context.Context) error { - return s.sender.Call(ctx, "workspace/semanticTokens/refresh", nil, nil) -} - -func (s *serverDispatcher) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (*LinkedEditingRanges /*LinkedEditingRanges | null*/, error) { - var result *LinkedEditingRanges /*LinkedEditingRanges | null*/ - if err := s.sender.Call(ctx, "textDocument/linkedEditingRange", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) { - var result *WorkspaceEdit /*WorkspaceEdit | null*/ - if err := s.sender.Call(ctx, "workspace/willCreateFiles", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) { - var result *WorkspaceEdit /*WorkspaceEdit | null*/ - if err := s.sender.Call(ctx, "workspace/willRenameFiles", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) { - var result *WorkspaceEdit /*WorkspaceEdit | null*/ - if err := s.sender.Call(ctx, "workspace/willDeleteFiles", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Moniker(ctx context.Context, params *MonikerParams) ([]Moniker /*Moniker[] | null*/, error) { - var result []Moniker /*Moniker[] | null*/ - if err := s.sender.Call(ctx, "textDocument/moniker", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) PrepareTypeHierarchy(ctx context.Context, params *TypeHierarchyPrepareParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) { - var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/ - if err := s.sender.Call(ctx, "textDocument/prepareTypeHierarchy", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Supertypes(ctx context.Context, params *TypeHierarchySupertypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) { - var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/ - if err := s.sender.Call(ctx, "typeHierarchy/supertypes", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Subtypes(ctx context.Context, params *TypeHierarchySubtypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) { - var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/ - if err := s.sender.Call(ctx, "typeHierarchy/subtypes", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) InlineValue(ctx context.Context, params *InlineValueParams) ([]InlineValue /*InlineValue[] | null*/, error) { - var result []InlineValue /*InlineValue[] | null*/ - if err := s.sender.Call(ctx, "textDocument/inlineValue", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) InlineValueRefresh(ctx context.Context) error { - return s.sender.Call(ctx, "workspace/inlineValue/refresh", nil, nil) -} - -func (s *serverDispatcher) InlayHint(ctx context.Context, params *InlayHintParams) ([]InlayHint /*InlayHint[] | null*/, error) { - var result []InlayHint /*InlayHint[] | null*/ - if err := s.sender.Call(ctx, "textDocument/inlayHint", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Resolve(ctx context.Context, params *InlayHint) (*InlayHint, error) { - var result *InlayHint - if err := s.sender.Call(ctx, "inlayHint/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) InlayHintRefresh(ctx context.Context) error { - return s.sender.Call(ctx, "workspace/inlayHint/refresh", nil, nil) -} - -func (s *serverDispatcher) Initialize(ctx context.Context, params *ParamInitialize) (*InitializeResult, error) { - var result *InitializeResult - if err := s.sender.Call(ctx, "initialize", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Shutdown(ctx context.Context) error { - return s.sender.Call(ctx, "shutdown", nil, nil) -} - -func (s *serverDispatcher) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) ([]TextEdit /*TextEdit[] | null*/, error) { - var result []TextEdit /*TextEdit[] | null*/ - if err := s.sender.Call(ctx, "textDocument/willSaveWaitUntil", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Completion(ctx context.Context, params *CompletionParams) (*CompletionList /*CompletionItem[] | CompletionList | null*/, error) { - var result *CompletionList /*CompletionItem[] | CompletionList | null*/ - if err := s.sender.Call(ctx, "textDocument/completion", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ResolveCompletionItem(ctx context.Context, params *CompletionItem) (*CompletionItem, error) { - var result *CompletionItem - if err := s.sender.Call(ctx, "completionItem/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Hover(ctx context.Context, params *HoverParams) (*Hover /*Hover | null*/, error) { - var result *Hover /*Hover | null*/ - if err := s.sender.Call(ctx, "textDocument/hover", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (*SignatureHelp /*SignatureHelp | null*/, error) { - var result *SignatureHelp /*SignatureHelp | null*/ - if err := s.sender.Call(ctx, "textDocument/signatureHelp", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Definition(ctx context.Context, params *DefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) { - var result Definition /*Definition | DefinitionLink[] | null*/ - if err := s.sender.Call(ctx, "textDocument/definition", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) References(ctx context.Context, params *ReferenceParams) ([]Location /*Location[] | null*/, error) { - var result []Location /*Location[] | null*/ - if err := s.sender.Call(ctx, "textDocument/references", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) ([]DocumentHighlight /*DocumentHighlight[] | null*/, error) { - var result []DocumentHighlight /*DocumentHighlight[] | null*/ - if err := s.sender.Call(ctx, "textDocument/documentHighlight", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) ([]interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/, error) { - var result []interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/ - if err := s.sender.Call(ctx, "textDocument/documentSymbol", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) CodeAction(ctx context.Context, params *CodeActionParams) ([]CodeAction /*(Command | CodeAction)[] | null*/, error) { - var result []CodeAction /*(Command | CodeAction)[] | null*/ - if err := s.sender.Call(ctx, "textDocument/codeAction", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ResolveCodeAction(ctx context.Context, params *CodeAction) (*CodeAction, error) { - var result *CodeAction - if err := s.sender.Call(ctx, "codeAction/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Symbol(ctx context.Context, params *WorkspaceSymbolParams) ([]SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/, error) { - var result []SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/ - if err := s.sender.Call(ctx, "workspace/symbol", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ResolveWorkspaceSymbol(ctx context.Context, params *WorkspaceSymbol) (*WorkspaceSymbol, error) { - var result *WorkspaceSymbol - if err := s.sender.Call(ctx, "workspaceSymbol/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) CodeLens(ctx context.Context, params *CodeLensParams) ([]CodeLens /*CodeLens[] | null*/, error) { - var result []CodeLens /*CodeLens[] | null*/ - if err := s.sender.Call(ctx, "textDocument/codeLens", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ResolveCodeLens(ctx context.Context, params *CodeLens) (*CodeLens, error) { - var result *CodeLens - if err := s.sender.Call(ctx, "codeLens/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) CodeLensRefresh(ctx context.Context) error { - return s.sender.Call(ctx, "workspace/codeLens/refresh", nil, nil) -} - -func (s *serverDispatcher) DocumentLink(ctx context.Context, params *DocumentLinkParams) ([]DocumentLink /*DocumentLink[] | null*/, error) { - var result []DocumentLink /*DocumentLink[] | null*/ - if err := s.sender.Call(ctx, "textDocument/documentLink", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ResolveDocumentLink(ctx context.Context, params *DocumentLink) (*DocumentLink, error) { - var result *DocumentLink - if err := s.sender.Call(ctx, "documentLink/resolve", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Formatting(ctx context.Context, params *DocumentFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) { - var result []TextEdit /*TextEdit[] | null*/ - if err := s.sender.Call(ctx, "textDocument/formatting", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) { - var result []TextEdit /*TextEdit[] | null*/ - if err := s.sender.Call(ctx, "textDocument/rangeFormatting", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) { - var result []TextEdit /*TextEdit[] | null*/ - if err := s.sender.Call(ctx, "textDocument/onTypeFormatting", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Rename(ctx context.Context, params *RenameParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) { - var result *WorkspaceEdit /*WorkspaceEdit | null*/ - if err := s.sender.Call(ctx, "textDocument/rename", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) PrepareRename(ctx context.Context, params *PrepareRenameParams) (*PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/, error) { - var result *PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/ - if err := s.sender.Call(ctx, "textDocument/prepareRename", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (interface{} /* LSPAny | void | float64*/, error) { - var result interface{} /* LSPAny | void | float64*/ - if err := s.sender.Call(ctx, "workspace/executeCommand", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) Diagnostic(ctx context.Context, params *string) (*string, error) { - var result *string - if err := s.sender.Call(ctx, "textDocument/diagnostic", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) DiagnosticWorkspace(ctx context.Context, params *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) { - var result *WorkspaceDiagnosticReport - if err := s.sender.Call(ctx, "workspace/diagnostic", params, &result); err != nil { - return nil, err - } - return result, nil -} - -func (s *serverDispatcher) DiagnosticRefresh(ctx context.Context) error { - return s.sender.Call(ctx, "workspace/diagnostic/refresh", nil, nil) -} - -func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) { - var result interface{} - if err := s.sender.Call(ctx, method, params, &result); err != nil { - return nil, err - } - return result, nil -} diff --git a/internal/lsp/protocol/typescript/README.md b/internal/lsp/protocol/typescript/README.md deleted file mode 100644 index 74bcd1883d1..00000000000 --- a/internal/lsp/protocol/typescript/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Generate Go types and signatures for the LSP protocol - -## Setup - -Make sure `node` and `tsc` are installed and in your PATH. There are detailed instructions below. -(`tsc -v` should be at least `4.2.4`.) -Get the typescript code for the jsonrpc protocol with - -`git clone git@github.com:microsoft vscode-languageserver-node.git` or -`git clone https://github.com/microsoft/vscode-languageserver-node.git` - -`util.ts` expects it to be in your HOME directory - -If you want to reproduce the existing files you need to be on a branch with the same git hash that `util.ts` expects, for instance, `git checkout 7b90c29` - -## Usage - -Code is generated and normalized by - -`tsc && node code.js && gofmt -w ts*.go` - -(`code.ts` imports `util.ts`.) This generates 3 files in the current directory, `tsprotocol.go` -containing type definitions, and `tsserver.go`, `tsclient.go` containing API stubs. - -## Notes - -1. `code.ts` and `util.ts` use the Typescript compiler's API, which is [introduced](https://github.com/Microsoft/TypeScript/wiki/Architectural-Overview) in their wiki. -2. Because the Typescript and Go type systems are incompatible, `code.ts` and `util.ts` are filled with heuristics and special cases. Therefore they are tied to a specific commit of `vscode-languageserver-node`. The hash code of the commit is included in the header of -the generated files and stored in the variable `gitHash` in `go.ts`. It is checked (see `git()` in `util.ts`) on every execution. -3. Generating the `ts*.go` files is only semi-automated. Please file an issue if the released version is too far behind. -4. For the impatient, first change `gitHash` by hand (`git()` shows how to find the hash). - 1. Then try to run `code.ts`. This will likely fail because the heuristics don't cover some new case. For instance, some simple type like `string` might have changed to a union type `string | [number,number]`. Another example is that some generated formal parameter may have anonymous structure type, which is essentially unusable. - 2. Next step is to move the generated code to `internal/lsp/protocol` and try to build `gopls` and its tests. This will likely fail because types have changed. Generally the fixes are fairly easy. Then run all the tests. - 3. Since there are not adequate integration tests, the next step is to run `gopls`. - -## Detailed instructions for installing node and typescript - -(The instructions are somewhat different for Linux and MacOS. They install some things locally, so `$PATH` needs to be changed.) - -1. For Linux, it is possible to build node from scratch, but if there's a package manager, that's simpler. - 1. To use the Ubuntu package manager - 1. `sudo apt update` (if you can't `sudo` then these instructions are not helpful) - 2. `sudo apt install nodejs` (this may install `/usr/bin/nodejs` rather than `/usr/bin/node`. For me, `/usr/bin/nodejs` pointed to an actual executable `/etc/alternatives/nodejs`, which should be copied to `/usr/bin/node`) - 3. `sudo apt intall npm` - 1. To build from scratch - 1. Go to the [node site](https://nodejs.org), and download the one recommended for most users, and then you're on your own. (It's got binaries in it. Untar the file somewhere and put its `bin` directory in your path, perhaps?) -2. The Mac is easier. Download the macOS installer from [nodejs](https://nodejs.org), click on it, and let it install. -3. (There's a good chance that soon you will be asked to upgrade your new npm. `sudo npm install -g npm` is the command.) -4. For either system, node and nvm should now be available. Running `node -v` and `npm -v` should produce version numbers. -5. `npm install typescript` - 1. This may give warning messages that indicate you've failed to set up a project. Ignore them. - 2. Your home directory will now have new directories `.npm` and `node_modules` (and a `package_lock.json` file) - 3. The typescript executable `tsc` will be in `node_modules/.bin`, so put that directory in your path. - 4. `tsc -v` should print "Version 4.2.4" (or later). If not you may (as I did) have an obsolete tsc earlier in your path. -6. `npm install @types/node` (Without this there will be many incomprehensible typescript error messages.) diff --git a/internal/lsp/protocol/typescript/code.ts b/internal/lsp/protocol/typescript/code.ts deleted file mode 100644 index 1eefa55903a..00000000000 --- a/internal/lsp/protocol/typescript/code.ts +++ /dev/null @@ -1,1450 +0,0 @@ -/* eslint-disable no-useless-return */ -// read files from vscode-languageserver-node, and generate Go rpc stubs -// and data definitions. (and maybe someday unmarshaling code) - -// The output is 3 files, tsprotocol.go contains the type definitions -// while tsclient.go and tsserver.go contain the LSP API and stub. An LSP server -// uses both APIs. To read the code, start in this file's main() function. - -// The code is rich in heuristics and special cases, some of which are to avoid -// extensive changes to gopls, and some of which are due to the mismatch between -// typescript and Go types. In particular, there is no Go equivalent to union -// types, so each case ought to be considered separately. The Go equivalent of A -// & B could frequently be struct{A;B;}, or it could be the equivalent type -// listing all the members of A and B. Typically the code uses the former, but -// especially if A and B have elements with the same name, it does a version of -// the latter. ClientCapabilities has to be expanded, and ServerCapabilities is -// expanded to make the generated code easier to read. - -// for us typescript ignorati, having an import makes this file a module -import * as fs from 'fs'; -import * as ts from 'typescript'; -import * as u from './util'; -import { constName, getComments, goName, loc, strKind } from './util'; - -var program: ts.Program; - -function parse() { - // this won't complain if some fnames don't exist - program = ts.createProgram( - u.fnames, - { target: ts.ScriptTarget.ES2018, module: ts.ModuleKind.CommonJS }); - program.getTypeChecker(); // finish type checking and assignment -} - -// ----- collecting information for RPCs -let req = new Map(); // requests -let not = new Map(); // notifications -let ptypes = new Map(); // req, resp types -let receives = new Map(); // who receives it -let rpcTypes = new Set(); // types seen in the rpcs - -function findRPCs(node: ts.Node) { - if (!ts.isModuleDeclaration(node)) { - return; - } - if (!ts.isIdentifier(node.name)) { - throw new Error( - `expected Identifier, got ${strKind(node.name)} at ${loc(node)}`); - } - let reqnot = req; - let v = node.name.getText(); - if (v.endsWith('Notification')) reqnot = not; - else if (!v.endsWith('Request')) return; - - if (!ts.isModuleBlock(node.body)) { - throw new Error( - `expected ModuleBlock got ${strKind(node.body)} at ${loc(node)}`); - } - let x: ts.ModuleBlock = node.body; - // The story is to expect const method = 'textDocument/implementation' - // const type = new ProtocolRequestType<...>(method) - // but the method may be an explicit string - let rpc: string = ''; - let newNode: ts.NewExpression; - for (let i = 0; i < x.statements.length; i++) { - const uu = x.statements[i]; - if (!ts.isVariableStatement(uu)) continue; - const dl: ts.VariableDeclarationList = uu.declarationList; - if (dl.declarations.length != 1) - throw new Error(`expected a single decl at ${loc(dl)}`); - const decl: ts.VariableDeclaration = dl.declarations[0]; - const name = decl.name.getText(); - // we want the initializers - if (name == 'method') { // mostly StringLiteral but NoSubstitutionTemplateLiteral in protocol.semanticTokens.ts - if (!ts.isStringLiteral(decl.initializer)) { - if (!ts.isNoSubstitutionTemplateLiteral(decl.initializer)) { - console.log(`81: ${decl.initializer.getText()}`); - throw new Error(`expect StringLiteral at ${loc(decl)} got ${strKind(decl.initializer)}`); - } - } - rpc = decl.initializer.getText(); - } - else if (name == 'type') { // NewExpression - if (!ts.isNewExpression(decl.initializer)) - throw new Error(`89 expected new at ${loc(decl)}`); - const nn: ts.NewExpression = decl.initializer; - newNode = nn; - const mtd = nn.arguments[0]; - if (ts.isStringLiteral(mtd)) rpc = mtd.getText(); - switch (nn.typeArguments.length) { - case 1: // exit - ptypes.set(rpc, [nn.typeArguments[0], null]); - break; - case 2: // notifications - ptypes.set(rpc, [nn.typeArguments[0], null]); - break; - case 4: // request with no parameters - ptypes.set(rpc, [null, nn.typeArguments[0]]); - break; - case 5: // request req, resp, partial(?) - ptypes.set(rpc, [nn.typeArguments[0], nn.typeArguments[1]]); - break; - default: - throw new Error(`${nn.typeArguments?.length} at ${loc(nn)}`); - } - } - } - if (rpc == '') throw new Error(`112 no name found at ${loc(x)}`); - // remember the implied types - const [a, b] = ptypes.get(rpc); - const add = function (n: ts.Node) { - rpcTypes.add(goName(n.getText())); - }; - underlying(a, add); - underlying(b, add); - rpc = rpc.substring(1, rpc.length - 1); // 'exit' - reqnot.set(rpc, newNode); -} - -function setReceives() { - // mark them all as server, then adjust the client ones. - // it would be nice to have some independent check on this - // (this logic fails if the server ever sends $/canceRequest - // or $/progress) - req.forEach((_, k) => { receives.set(k, 'server'); }); - not.forEach((_, k) => { receives.set(k, 'server'); }); - receives.set('window/showMessage', 'client'); - receives.set('window/showMessageRequest', 'client'); - receives.set('window/logMessage', 'client'); - receives.set('telemetry/event', 'client'); - receives.set('client/registerCapability', 'client'); - receives.set('client/unregisterCapability', 'client'); - receives.set('workspace/workspaceFolders', 'client'); - receives.set('workspace/configuration', 'client'); - receives.set('workspace/applyEdit', 'client'); - receives.set('textDocument/publishDiagnostics', 'client'); - receives.set('window/workDoneProgress/create', 'client'); - receives.set('window/showDocument', 'client'); - receives.set('$/progress', 'client'); - // a small check - receives.forEach((_, k) => { - if (!req.get(k) && !not.get(k)) throw new Error(`145 missing ${k}}`); - if (req.get(k) && not.get(k)) throw new Error(`146 dup ${k}`); - }); -} - -type DataKind = 'module' | 'interface' | 'alias' | 'enum' | 'class'; - -interface Data { - kind: DataKind; - me: ts.Node; // root node for this type - name: string; // Go name - origname: string; // their name - generics: ts.NodeArray; - as: ts.NodeArray; // inheritance - // Interface - properties: ts.NodeArray - alias: ts.TypeNode; // type alias - // module - statements: ts.NodeArray; - enums: ts.NodeArray; - // class - members: ts.NodeArray; -} -function newData(n: ts.Node, nm: string, k: DataKind, origname: string): Data { - return { - kind: k, - me: n, name: goName(nm), origname: origname, - generics: ts.factory.createNodeArray(), - as: ts.factory.createNodeArray(), - properties: ts.factory.createNodeArray(), alias: undefined, - statements: ts.factory.createNodeArray(), - enums: ts.factory.createNodeArray(), - members: ts.factory.createNodeArray(), - }; -} - -// for debugging, produce a skeleton description -function strData(d: Data): string { - if (!d) { return 'nil'; } - const f = function (na: ts.NodeArray): number { - return na.length; - }; - const nm = d.name == d.origname ? `${d.name}` : `${d.name}/${d.origname}`; - return `g:${f(d.generics)} a:${f(d.as)} p:${f(d.properties)} s:${f(d.statements)} e:${f(d.enums)} m:${f(d.members)} a:${d.alias !== undefined} D(${nm}) k:${d.kind}`; -} - -let data = new Map(); // parsed data types -let seenTypes = new Map(); // type names we've seen -let extraTypes = new Map(); // to avoid struct params - -function setData(nm: string, d: Data) { - const v = data.get(nm); - if (!v) { - data.set(nm, d); - return; - } - // if there are multiple definitions of the same name, decide what to do. - // For now the choices are only aliases and modules - // alias is preferred unless the constant values are needed - if (nm === 'PrepareSupportDefaultBehavior') { - // want the alias, as we're going to change the type and can't afford a constant - if (d.kind === 'alias') data.set(nm, d); - else if (v.kind == 'alias') data.set(nm, v); - else throw new Error(`208 ${d.kind} ${v.kind}`); - return; - } - if (nm === 'CodeActionKind') { - // want the module, need the constants - if (d.kind === 'module') data.set(nm, d); - else if (v.kind === 'module') data.set(nm, v); - else throw new Error(`215 ${d.kind} ${v.kind}`); - } - if (v.kind === 'alias' && d.kind !== 'alias') return; - if (d.kind === 'alias' && v.kind !== 'alias') { - data.set(nm, d); - return; - } - if (v.kind === 'alias' && d.kind === 'alias') return; - // protocol/src/common/protocol.foldingRange.ts 44: 1 (39: 2) and - // types/src/main.ts 397: 1 (392: 2) - // for FoldingRangeKind - if (d.me.getText() === v.me.getText()) return; - // error messages for an unexpected case - console.log(`228 ${strData(v)} ${loc(v.me)} for`); - console.log(`229 ${v.me.getText().replace(/\n/g, '\\n')}`); - console.log(`230 ${strData(d)} ${loc(d.me)}`); - console.log(`231 ${d.me.getText().replace(/\n/g, '\\n')}`); - throw new Error(`232 setData found ${v.kind} for ${d.kind}`); -} - -// look at top level data definitions -function genTypes(node: ts.Node) { - // Ignore top-level items that can't produce output - if (ts.isExpressionStatement(node) || ts.isFunctionDeclaration(node) || - ts.isImportDeclaration(node) || ts.isVariableStatement(node) || - ts.isExportDeclaration(node) || ts.isEmptyStatement(node) || - ts.isExportAssignment(node) || ts.isImportEqualsDeclaration(node) || - ts.isBlock(node) || node.kind == ts.SyntaxKind.EndOfFileToken) { - return; - } - if (ts.isInterfaceDeclaration(node)) { - const v: ts.InterfaceDeclaration = node; - // need to check the members, many of which are disruptive - let mems: ts.PropertySignature[] = []; - const f = function (t: ts.TypeElement) { - if (ts.isPropertySignature(t)) { - mems.push(t); - } else if (ts.isMethodSignature(t) || ts.isCallSignatureDeclaration(t)) { - return; - } else if (ts.isIndexSignatureDeclaration(t)) { - // probably safe to ignore these - // [key: string]: boolean | number | string | undefined; - // and InitializeResult: [custom: string]: any;] - } else - throw new Error(`259 unexpected ${strKind(t)}`); - }; - v.members.forEach(f); - if (mems.length == 0 && !v.heritageClauses && - v.name.getText() != 'InitializedParams') { - return; // Don't seem to need any of these [Logger, PipTransport, ...] - } - // Found one we want - let x = newData(v, goName(v.name.getText()), 'interface', v.name.getText()); - x.properties = ts.factory.createNodeArray(mems); - if (v.typeParameters) x.generics = v.typeParameters; - if (v.heritageClauses) x.as = v.heritageClauses; - if (x.generics.length > 1) { // Unneeded - // Item interface Item... - return; - } - if (data.has(x.name)) { // modifying one we've seen - x = dataChoose(x, data.get(x.name)); - } - setData(x.name, x); - } else if (ts.isTypeAliasDeclaration(node)) { - const v: ts.TypeAliasDeclaration = node; - let x = newData(v, v.name.getText(), 'alias', v.name.getText()); - x.alias = v.type; - // if type is a union of constants, we (mostly) don't want it - // (at the top level) - // Unfortunately this is false for TraceValues - if (ts.isUnionTypeNode(v.type) && - v.type.types.every((n: ts.TypeNode) => ts.isLiteralTypeNode(n))) { - if (x.name != 'TraceValues') return; - } - if (v.typeParameters) { - x.generics = v.typeParameters; - } - if (data.has(x.name)) x = dataChoose(x, data.get(x.name)); - if (x.generics.length > 1) { - return; - } - setData(x.name, x); - } else if (ts.isModuleDeclaration(node)) { - const v: ts.ModuleDeclaration = node; - if (!ts.isModuleBlock(v.body)) { - throw new Error(`${loc(v)} not ModuleBlock, but ${strKind(v.body)}`); - } - const b: ts.ModuleBlock = v.body; - var s: ts.Statement[] = []; - // we don't want most of these - const fx = function (x: ts.Statement) { - if (ts.isFunctionDeclaration(x)) { - return; - } - if (ts.isTypeAliasDeclaration(x) || ts.isModuleDeclaration(x)) { - return; - } - if (!ts.isVariableStatement(x)) - throw new Error( - `315 expected VariableStatment ${loc(x)} ${strKind(x)} ${x.getText()}`); - if (hasNewExpression(x)) { - return; - } - s.push(x); - }; - b.statements.forEach(fx); - if (s.length == 0) { - return; - } - let m = newData(node, v.name.getText(), 'module', v.name.getText()); - m.statements = ts.factory.createNodeArray(s); - if (data.has(m.name)) m = dataChoose(m, data.get(m.name)); - setData(m.name, m); - } else if (ts.isEnumDeclaration(node)) { - const nm = node.name.getText(); - let v = newData(node, nm, 'enum', node.name.getText()); - v.enums = node.members; - if (data.has(nm)) { - v = dataChoose(v, data.get(nm)); - } - setData(nm, v); - } else if (ts.isClassDeclaration(node)) { - const v: ts.ClassDeclaration = node; - var d: ts.PropertyDeclaration[] = []; - const wanted = function (c: ts.ClassElement): string { - if (ts.isConstructorDeclaration(c)) { - return ''; - } - if (ts.isMethodDeclaration(c)) { - return ''; - } - if (ts.isGetAccessor(c)) { - return ''; - } - if (ts.isSetAccessor(c)) { - return ''; - } - if (ts.isPropertyDeclaration(c)) { - d.push(c); - return strKind(c); - } - throw new Error(`Class decl ${strKind(c)} `); - }; - v.members.forEach((c) => wanted(c)); - if (d.length == 0) { - return; - } // don't need it - let c = newData(v, v.name.getText(), 'class', v.name.getText()); - c.members = ts.factory.createNodeArray(d); - if (v.typeParameters) { - c.generics = v.typeParameters; - } - if (c.generics.length > 1) { - return; - } - if (v.heritageClauses) { - c.as = v.heritageClauses; - } - if (data.has(c.name)) - throw new Error(`Class dup ${loc(c.me)} and ${loc(data.get(c.name).me)}`); - setData(c.name, c); - } else { - throw new Error(`378 unexpected ${strKind(node)} ${loc(node)} `); - } -} - -// Typescript can accumulate, but this chooses one or the other -function dataChoose(a: Data, b: Data): Data { - // maybe they are textually identical? (e.g., FoldingRangeKind) - const [at, bt] = [a.me.getText(), b.me.getText()]; - if (at == bt) { - return a; - } - switch (a.name) { - case 'InitializeError': - case 'CompletionItemTag': - case 'SymbolTag': - case 'CodeActionKind': - case 'Integer': - case 'Uinteger': - case 'Decimal': - // want the Module, if anything - return a.statements.length > 0 ? a : b; - case 'CancellationToken': - case 'CancellationStrategy': - // want the Interface - return a.properties.length > 0 ? a : b; - case 'TextDocumentContentChangeEvent': // almost the same - case 'TokenFormat': - case 'PrepareSupportDefaultBehavior': - return a; - } - console.log( - `409 ${strKind(a.me)} ${strKind(b.me)} ${a.name} ${loc(a.me)} ${loc(b.me)}`); - throw new Error(`410 Fix dataChoose for ${a.name}`); -} - -// is a node an ancestor of a NewExpression -function hasNewExpression(n: ts.Node): boolean { - let ans = false; - n.forEachChild((n: ts.Node) => { - if (ts.isNewExpression(n)) ans = true; - }); - return ans; -} - -function checkOnce() { - // Data for all the rpc types? - rpcTypes.forEach(s => { - if (!data.has(s)) throw new Error(`checkOnce, ${s}?`); - }); -} - -// helper function to find underlying types -// eslint-disable-next-line no-unused-vars -function underlying(n: ts.Node | undefined, f: (n: ts.Node) => void) { - if (!n) return; - const ff = function (n: ts.Node) { - underlying(n, f); - }; - if (ts.isIdentifier(n)) { - f(n); - } else if ( - n.kind == ts.SyntaxKind.StringKeyword || - n.kind == ts.SyntaxKind.NumberKeyword || - n.kind == ts.SyntaxKind.AnyKeyword || - n.kind == ts.SyntaxKind.UnknownKeyword || - n.kind == ts.SyntaxKind.NullKeyword || - n.kind == ts.SyntaxKind.BooleanKeyword || - n.kind == ts.SyntaxKind.ObjectKeyword || - n.kind == ts.SyntaxKind.VoidKeyword) { - // nothing to do - } else if (ts.isTypeReferenceNode(n)) { - f(n.typeName); - } else if (ts.isArrayTypeNode(n)) { - underlying(n.elementType, f); - } else if (ts.isHeritageClause(n)) { - n.types.forEach(ff); - } else if (ts.isExpressionWithTypeArguments(n)) { - underlying(n.expression, f); - } else if (ts.isPropertySignature(n)) { - underlying(n.type, f); - } else if (ts.isTypeLiteralNode(n)) { - n.members.forEach(ff); - } else if (ts.isUnionTypeNode(n) || ts.isIntersectionTypeNode(n)) { - n.types.forEach(ff); - } else if (ts.isIndexSignatureDeclaration(n)) { - underlying(n.type, f); - } else if (ts.isParenthesizedTypeNode(n)) { - underlying(n.type, f); - } else if ( - ts.isLiteralTypeNode(n) || ts.isVariableStatement(n) || - ts.isTupleTypeNode(n)) { - // we only see these in moreTypes, but they are handled elsewhere - } else if (ts.isEnumMember(n)) { - if (ts.isStringLiteral(n.initializer)) return; - throw new Error(`472 EnumMember ${strKind(n.initializer)} ${n.name.getText()}`); - } else { - throw new Error(`474 saw ${strKind(n)} in underlying. ${n.getText()} at ${loc(n)}`); - } -} - -// find all the types implied by seenTypes. -// Simplest way to the transitive closure is to stabilize the size of seenTypes -// but it is slow -function moreTypes() { - const extra = function (s: string) { - if (!data.has(s)) throw new Error(`moreTypes needs ${s}`); - seenTypes.set(s, data.get(s)); - }; - rpcTypes.forEach(extra); // all the types needed by the rpcs - // needed in enums.go (or elsewhere) - extra('InitializeError'); - extra('WatchKind'); - extra('FoldingRangeKind'); - // not sure why these weren't picked up - extra('DidChangeWatchedFilesRegistrationOptions'); - extra('WorkDoneProgressBegin'); - extra('WorkDoneProgressReport'); - extra('WorkDoneProgressEnd'); - let old = 0; - do { - old = seenTypes.size; - - const m = new Map(); - const add = function (n: ts.Node) { - const nm = goName(n.getText()); - if (seenTypes.has(nm) || m.has(nm)) return; - if (data.get(nm)) { - m.set(nm, data.get(nm)); - } - }; - // expect all the heritage clauses have single Identifiers - const h = function (n: ts.Node) { - underlying(n, add); - }; - const f = function (x: ts.NodeArray) { - x.forEach(h); - }; - seenTypes.forEach((d: Data) => d && f(d.as)); - // find the types in the properties - seenTypes.forEach((d: Data) => d && f(d.properties)); - // and in the alias and in the statements and in the enums - seenTypes.forEach((d: Data) => d && underlying(d.alias, add)); - seenTypes.forEach((d: Data) => d && f(d.statements)); - seenTypes.forEach((d: Data) => d && f(d.enums)); - m.forEach((d, k) => seenTypes.set(k, d)); - } - while (seenTypes.size != old) - ; -} - -function cleanData() { // middle pass - // seenTypes contains all the top-level types. - seenTypes.forEach((d) => { - if (d.kind == 'alias') mergeAlias(d); - }); -} - -function sameType(a: ts.TypeNode, b: ts.TypeNode): boolean { - if (a.kind !== b.kind) return false; - if (a.kind === ts.SyntaxKind.BooleanKeyword) return true; - if (a.kind === ts.SyntaxKind.StringKeyword) return true; - if (ts.isTypeReferenceNode(a) && ts.isTypeReferenceNode(b) && - a.typeName.getText() === b.typeName.getText()) return true; - if (ts.isArrayTypeNode(a) && ts.isArrayTypeNode(b)) return sameType(a.elementType, b.elementType); - if (ts.isTypeLiteralNode(a) && ts.isTypeLiteralNode(b)) { - if (a.members.length !== b.members.length) return false; - if (a.members.length === 1) return a.members[0].name.getText() === b.members[0].name.getText(); - if (loc(a) === loc(b)) return true; - } - throw new Error(`544 sameType? ${strKind(a)} ${strKind(b)} ${a.getText()}`); -} -type CreateMutable = { - -readonly [Property in keyof Type]: Type[Property]; -}; -type propMap = Map; -function propMapSet(pm: propMap, name: string, v: ts.PropertySignature) { - if (!pm.get(name)) { - try { getComments(v); } catch (e) { console.log(`552 ${name} ${e}`); } - pm.set(name, v); - return; - } - const a = pm.get(name).type; - const b = v.type; - if (sameType(a, b)) { - return; - } - if (ts.isTypeReferenceNode(a) && ts.isTypeLiteralNode(b)) { - const x = mergeTypeRefLit(a, b); - const fake: CreateMutable = v; - fake['type'] = x; - check(fake as ts.PropertySignature, '565'); - pm.set(name, fake as ts.PropertySignature); - return; - } - if (ts.isTypeLiteralNode(a) && ts.isTypeLiteralNode(b)) { - const x = mergeTypeLitLit(a, b); - const fake: CreateMutable = v; - fake['type'] = x; - check(fake as ts.PropertySignature, '578'); - pm.set(name, fake as ts.PropertySignature); - return; - } - console.log(`577 ${pm.get(name).getText()}\n${v.getText()}`); - throw new Error(`578 should merge ${strKind(a)} and ${strKind(b)} for ${name}`); -} -function addToProperties(pm: propMap, tn: ts.TypeNode | undefined, prefix = '') { - if (!tn) return; - if (ts.isTypeReferenceNode(tn)) { - const d = seenTypes.get(goName(tn.typeName.getText())); - if (tn.typeName.getText() === 'T') return; - if (!d) throw new Error(`584 ${tn.typeName.getText()} not found`); - if (d.properties.length === 0 && d.alias === undefined) return; - if (d.alias !== undefined) { - if (ts.isIntersectionTypeNode(d.alias)) { - d.alias.types.forEach((tn) => addToProperties(pm, tn, prefix)); // prefix? - return; - } - } - d.properties.forEach((ps) => { - const name = `${prefix}.${ps.name.getText()}`; - propMapSet(pm, name, ps); - addToProperties(pm, ps.type, name); - }); - } else if (strKind(tn) === 'TypeLiteral') { - if (!ts.isTypeLiteralNode(tn)) new Error(`599 ${strKind(tn)}`); - tn.forEachChild((child: ts.Node) => { - if (ts.isPropertySignature(child)) { - const name = `${prefix}.${child.name.getText()}`; - propMapSet(pm, name, child); - addToProperties(pm, child.type, name); - } else if (!ts.isIndexSignatureDeclaration(child)) { - // ignoring IndexSignatures, seen as relatedDocument in - // RelatedFullDocumentDiagnosticReport - throw new Error(`608 ${strKind(child)} ${loc(child)}`); - } - }); - } -} -function deepProperties(d: Data): propMap | undefined { - let properties: propMap = new Map(); - if (!d.alias || !ts.isIntersectionTypeNode(d.alias)) return undefined; - d.alias.types.forEach((ts) => addToProperties(properties, ts)); - return properties; -} - -function mergeAlias(d: Data) { - const props = deepProperties(d); - if (!props) return; // nothing merged - // now each element of props should have length 1 - // change d to merged, toss its alias field, fill in its properties - const v: ts.PropertySignature[] = []; - props.forEach((ps, nm) => { - const xlen = nm.split('.').length; - if (xlen !== 2) return; // not top-level - v.push(ps); - }); - d.kind = 'interface'; - d.alias = undefined; - d.properties = ts.factory.createNodeArray(v); -} - -function mergeTypeLitLit(a: ts.TypeLiteralNode, b: ts.TypeLiteralNode): ts.TypeLiteralNode { - const v = new Map(); // avoid duplicates - a.members.forEach((te) => v.set(te.name.getText(), te)); - b.members.forEach((te) => v.set(te.name.getText(), te)); - const x: ts.TypeElement[] = []; - v.forEach((te) => x.push(te)); - const fake: CreateMutable = a; - fake['members'] = ts.factory.createNodeArray(x); - check(fake as ts.TypeLiteralNode, '643'); - return fake as ts.TypeLiteralNode; -} - -function mergeTypeRefLit(a: ts.TypeReferenceNode, b: ts.TypeLiteralNode): ts.TypeLiteralNode { - const d = seenTypes.get(goName(a.typeName.getText())); - if (!d) throw new Error(`644 name ${a.typeName.getText()} not found`); - const typ = d.me; - if (!ts.isInterfaceDeclaration(typ)) throw new Error(`646 got ${strKind(typ)} not InterfaceDecl`); - const v = new Map(); // avoid duplicates - typ.members.forEach((te) => v.set(te.name.getText(), te)); - b.members.forEach((te) => v.set(te.name.getText(), te)); - const x: ts.TypeElement[] = []; - v.forEach((te) => x.push(te)); - - const w = ts.factory.createNodeArray(x); - const fk: CreateMutable = b; - fk['members'] = w; - (fk['members'] as { pos: number })['pos'] = b.members.pos; - (fk['members'] as { end: number })['end'] = b.members.end; - check(fk as ts.TypeLiteralNode, '662'); - return fk as ts.TypeLiteralNode; -} - -// check that constructed nodes still have associated text -function check(n: ts.Node, loc: string) { - try { getComments(n); } catch (e) { console.log(`check at ${loc} ${e}`); } - try { n.getText(); } catch (e) { console.log(`text check at ${loc}`); } -} - -let typesOut = new Array(); -let constsOut = new Array(); - -// generate Go types -function toGo(d: Data, nm: string) { - if (!d) return; // this is probably a generic T - if (d.name.startsWith('Inner') || d.name === 'WindowClientCapabilities') return; // removed by alias processing - if (d.name === 'Integer' || d.name === 'Uinteger') return; // unneeded - switch (d.kind) { - case 'alias': - goTypeAlias(d, nm); break; - case 'module': goModule(d, nm); break; - case 'enum': goEnum(d, nm); break; - case 'interface': goInterface(d, nm); break; - default: - throw new Error( - `672: more cases in toGo ${nm} ${d.kind}`); - } -} - -// these fields need a * and are not covered by the code -// that calls isStructType. -var starred: [string, string][] = [ - ['TextDocumentContentChangeEvent', 'range'], ['CodeAction', 'command'], - ['CodeAction', 'disabled'], - ['DidSaveTextDocumentParams', 'text'], ['CompletionItem', 'command'], - ['Diagnostic', 'codeDescription'] -]; - -// generate Go code for an interface -function goInterface(d: Data, nm: string) { - let ans = `type ${goName(nm)} struct {\n`; - - // generate the code for each member - const g = function (n: ts.PropertySignature) { - if (!ts.isPropertySignature(n)) - throw new Error(`expected PropertySignature got ${strKind(n)} `); - ans = ans.concat(getComments(n)); - const json = u.JSON(n); - let gt = goType(n.type, n.name.getText()); - if (gt == d.name) gt = '*' + gt; // avoid recursive types (SelectionRange) - // there are several cases where a * is needed - // (putting * in front of too many things breaks uses of CodeActionKind) - starred.forEach(([a, b]) => { - if (d.name == a && n.name.getText() == b) { - gt = '*' + gt; - } - }); - ans = ans.concat(`${goName(n.name.getText())} ${gt}`, json, '\n'); - }; - d.properties.forEach(g); - // heritage clauses become embedded types - // check they are all Identifiers - const f = function (n: ts.ExpressionWithTypeArguments) { - if (!ts.isIdentifier(n.expression)) - throw new Error(`Interface ${nm} heritage ${strKind(n.expression)} `); - if (n.expression.getText() === 'Omit') return; // Type modification type - ans = ans.concat(goName(n.expression.getText()), '\n'); - }; - d.as.forEach((n: ts.HeritageClause) => n.types.forEach(f)); - ans = ans.concat('}\n'); - typesOut.push(getComments(d.me)); - typesOut.push(ans); -} - -// generate Go code for a module (const declarations) -// Generates type definitions, and named constants -function goModule(d: Data, nm: string) { - if (d.generics.length > 0 || d.as.length > 0) { - throw new Error(`743 goModule: unexpected for ${nm} - `); - } - // all the statements should be export const : value - // or value = value - // They are VariableStatements with x.declarationList having a single - // VariableDeclaration - let isNumeric = false; - const f = function (n: ts.Statement, i: number) { - if (!ts.isVariableStatement(n)) { - throw new Error(`753 ${nm} ${i} expected VariableStatement, - got ${strKind(n)}`); - } - const c = getComments(n); - const v = n.declarationList.declarations[0]; // only one - - if (!v.initializer) - throw new Error(`760 no initializer ${nm} ${i} ${v.name.getText()}`); - isNumeric = strKind(v.initializer) == 'NumericLiteral'; - if (c != '') constsOut.push(c); // no point if there are no comments - // There are duplicates. - const cname = constName(goName(v.name.getText()), nm); - let val = v.initializer.getText(); - val = val.split('\'').join('"'); // useless work for numbers - constsOut.push(`${cname} ${nm} = ${val}`); - }; - d.statements.forEach(f); - typesOut.push(getComments(d.me)); - // Or should they be type aliases? - typesOut.push(`type ${nm} ${isNumeric ? 'float64' : 'string'}`); -} - -// generate Go code for an enum. Both types and named constants -function goEnum(d: Data, nm: string) { - let isNumeric = false; - const f = function (v: ts.EnumMember, j: number) { // same as goModule - if (!v.initializer) - throw new Error(`goEnum no initializer ${nm} ${j} ${v.name.getText()}`); - isNumeric = strKind(v.initializer) == 'NumericLiteral'; - const c = getComments(v); - const cname = constName(goName(v.name.getText()), nm); - let val = v.initializer.getText(); - val = val.split('\'').join('"'); // replace quotes. useless work for numbers - constsOut.push(`${c}${cname} ${nm} = ${val}`); - }; - d.enums.forEach(f); - typesOut.push(getComments(d.me)); - // Or should they be type aliases? - typesOut.push(`type ${nm} ${isNumeric ? 'float64' : 'string'}`); -} - -// generate code for a type alias -function goTypeAlias(d: Data, nm: string) { - if (d.as.length != 0 || d.generics.length != 0) { - if (nm != 'ServerCapabilities') - throw new Error(`${nm} has extra fields(${d.as.length},${d.generics.length}) ${d.me.getText()}`); - } - typesOut.push(getComments(d.me)); - // d.alias doesn't seem to have comments - let aliasStr = goName(nm) == 'DocumentURI' ? ' ' : ' = '; - if (nm == 'PrepareSupportDefaultBehavior') { - // code-insiders is sending a bool, not a number. PJW: check this after Feb/2021 - // (and gopls never looks at it anyway) - typesOut.push(`type ${goName(nm)}${aliasStr}interface{}\n`); - return; - } - typesOut.push(`type ${goName(nm)}${aliasStr}${goType(d.alias, nm)}\n`); -} - -// return a go type and maybe an assocated javascript tag -function goType(n: ts.TypeNode | undefined, nm: string): string { - if (!n) throw new Error(`goType undefined for ${nm}`); - if (n.getText() == 'T') return 'interface{}'; // should check it's generic - if (ts.isTypeReferenceNode(n)) { - // DocumentDiagnosticReportKind.unChanged (or .new) value is "new" or "unChanged" - if (n.getText().startsWith('DocumentDiagnostic')) return 'string'; - switch (n.getText()) { - case 'integer': return 'int32'; - case 'uinteger': return 'uint32'; - default: return goName(n.typeName.getText()); // avoid - } - } else if (ts.isUnionTypeNode(n)) { - return goUnionType(n, nm); - } else if (ts.isIntersectionTypeNode(n)) { - return goIntersectionType(n, nm); - } else if (strKind(n) == 'StringKeyword') { - return 'string'; - } else if (strKind(n) == 'NumberKeyword') { - return 'float64'; - } else if (strKind(n) == 'BooleanKeyword') { - return 'bool'; - } else if (strKind(n) == 'AnyKeyword' || strKind(n) == 'UnknownKeyword') { - return 'interface{}'; - } else if (strKind(n) == 'NullKeyword') { - return 'nil'; - } else if (strKind(n) == 'VoidKeyword' || strKind(n) == 'NeverKeyword') { - return 'void'; - } else if (strKind(n) == 'ObjectKeyword') { - return 'interface{}'; - } else if (ts.isArrayTypeNode(n)) { - if (nm === 'arguments') { - // Command and ExecuteCommandParams - return '[]json.RawMessage'; - } - return `[]${goType(n.elementType, nm)}`; - } else if (ts.isParenthesizedTypeNode(n)) { - return goType(n.type, nm); - } else if (ts.isLiteralTypeNode(n)) { - return strKind(n.literal) == 'StringLiteral' ? 'string' : 'float64'; - } else if (ts.isTypeLiteralNode(n)) { - // these are anonymous structs - const v = goTypeLiteral(n, nm); - return v; - } else if (ts.isTupleTypeNode(n)) { - if (n.getText() == '[number, number]') return '[]float64'; - throw new Error(`goType unexpected Tuple ${n.getText()}`); - } - throw new Error(`${strKind(n)} goType unexpected ${n.getText()} for ${nm}`); -} - -// The choice is uniform interface{}, or some heuristically assigned choice, -// or some better sytematic idea I haven't thought of. Using interface{} -// is, in practice, impossibly complex in the existing code. -function goUnionType(n: ts.UnionTypeNode, nm: string): string { - let help = `/*${n.getText()}*/`; // show the original as a comment - // There are some bad cases with newlines: - // range?: boolean | {\n }; - // full?: boolean | {\n /**\n * The server supports deltas for full documents.\n */\n delta?: boolean;\n } - // These are handled specially: - if (nm == 'range') help = help.replace(/\n/, ''); - if (nm == 'full' && help.indexOf('\n') != -1) { - help = '/*boolean | */'; - } - // handle all the special cases - switch (n.types.length) { - case 2: { - const a = strKind(n.types[0]); - const b = strKind(n.types[1]); - if (a == 'NumberKeyword' && b == 'StringKeyword') { // ID - return `interface{} ${help}`; - } - // for null, b is not useful (LiternalType) - if (n.types[1].getText() === 'null') { - if (nm == 'textDocument/codeAction') { - // (Command | CodeAction)[] | null - return `[]CodeAction ${help}`; - } - let v = goType(n.types[0], 'a'); - return `${v} ${help}`; - } - if (a == 'BooleanKeyword') { // usually want bool - if (nm == 'codeActionProvider') return `interface{} ${help}`; - if (nm == 'renameProvider') return `interface{} ${help}`; - if (nm == 'full') return `interface{} ${help}`; // there's a struct - if (nm == 'save') return `${goType(n.types[1], '680')} ${help}`; - return `${goType(n.types[0], 'b')} ${help}`; - } - if (b == 'ArrayType') return `${goType(n.types[1], 'c')} ${help}`; - if (help.includes('InsertReplaceEdit') && n.types[0].getText() == 'TextEdit') { - return `*TextEdit ${help}`; - } - if (a == 'TypeReference') { - if (nm == 'edits') return `${goType(n.types[0], '901')} ${help}`; - if (a == b) return `interface{} ${help}`; - if (nm == 'code') return `interface{} ${help}`; - if (nm == 'editRange') return `${goType(n.types[0], '904')} ${help}`; - if (nm === 'location') return `${goType(n.types[0], '905')} ${help}`; - } - if (a == 'StringKeyword') return `string ${help}`; - if (a == 'TypeLiteral' && nm == 'TextDocumentContentChangeEvent') { - return `${goType(n.types[0], nm)}`; - } - if (a == 'TypeLiteral' && b === 'TypeLiteral') { - // DocumentDiagnosticReport - // the first one includes the second one - return `${goType(n.types[0], '9d')}`; - } - throw new Error(`911 ${nm}: a:${a}/${goType(n.types[0], '9a')} b:${b}/${goType(n.types[1], '9b')} ${loc(n)}`); - } - case 3: { - const aa = strKind(n.types[0]); - const bb = strKind(n.types[1]); - const cc = strKind(n.types[2]); - if (nm === 'workspace/symbol') return `${goType(n.types[0], '930')} ${help}`; - if (nm == 'DocumentFilter' || nm == 'NotebookDocumentFilter' || nm == 'TextDocumentFilter') { - // not really a union. the first is enough, up to a missing - // omitempty but avoid repetitious comments - return `${goType(n.types[0], 'g')}`; - } - if (nm == 'textDocument/documentSymbol') { - return `[]interface{} ${help}`; - } - if (aa == 'TypeReference' && bb == 'ArrayType' && (cc == 'NullKeyword' || cc === 'LiteralType')) { - return `${goType(n.types[0], 'd')} ${help}`; - } - if (aa == 'TypeReference' && bb == aa && cc == 'ArrayType') { - // should check that this is Hover.Contents - return `${goType(n.types[0], 'e')} ${help}`; - } - if (aa == 'ArrayType' && bb == 'TypeReference' && (cc == 'NullKeyword' || cc === 'LiteralType')) { - // check this is nm == 'textDocument/completion' - return `${goType(n.types[1], 'f')} ${help}`; - } - if (aa == 'LiteralType' && bb == aa && cc == aa) return `string ${help}`; - // keep this for diagnosing unexpected interface{} results - // console.log(`931, interface{} for ${aa}/${goType(n.types[0], 'g')},${bb}/${goType(n.types[1], 'h')},${cc}/${goType(n.types[2], 'i')} ${nm}`); - break; - } - case 4: - if (nm == 'documentChanges') return `TextDocumentEdit ${help} `; - if (nm == 'textDocument/prepareRename') { - // these names have to be made unique - const genName = `${goName("prepareRename")}${extraTypes.size}Gn`; - extraTypes.set(genName, [`Range Range \`json:"range"\` - Placeholder string \`json:"placeholder"\``]); - return `${genName} ${help} `; - } - break; - case 8: // LSPany - break; - default: - throw new Error(`957 goUnionType len=${n.types.length} nm=${nm} ${n.getText()}`); - } - - // Result will be interface{} with a comment - let isLiteral = true; - let literal = 'string'; - let res = 'interface{} /* '; - n.types.forEach((v: ts.TypeNode, i: number) => { - // might get an interface inside: - // (Command | CodeAction)[] | null - let m = goType(v, nm); - if (m.indexOf('interface') != -1) { - // avoid nested comments - m = m.split(' ')[0]; - } - m = m.split('\n').join('; '); // sloppy: struct{; - res = res.concat(`${i == 0 ? '' : ' | '}`, m); - if (!ts.isLiteralTypeNode(v)) isLiteral = false; - else literal = strKind(v.literal) == 'StringLiteral' ? 'string' : 'number'; - }); - if (!isLiteral) { - return res + '*/'; - } - // I don't think we get here - // trace?: 'off' | 'messages' | 'verbose' should get string - return `${literal} /* ${n.getText()} */`; -} - -// some of the intersection types A&B are ok as struct{A;B;} and some -// could be expanded, and ClientCapabilites has to be expanded, -// at least for workspace. It's possible to check algorithmically, -// but much simpler just to check explicitly. -function goIntersectionType(n: ts.IntersectionTypeNode, nm: string): string { - if (nm == 'ClientCapabilities') return expandIntersection(n); - //if (nm == 'ServerCapabilities') return expandIntersection(n); // save for later consideration - let inner = ''; - n.types.forEach( - (t: ts.TypeNode) => { inner = inner.concat(goType(t, nm), '\n'); }); - return `struct{ \n${inner}} `; -} - -// for each of the intersected types, extract its components (each will -// have a Data with properties) extract the properties, and keep track -// of them by name. The names that occur once can be output. The names -// that occur more than once need to be combined. -function expandIntersection(n: ts.IntersectionTypeNode): string { - const bad = function (n: ts.Node, s: string) { - return new Error(`expandIntersection ${strKind(n)} ${s}`); - }; - let props = new Map(); - for (const tp of n.types) { - if (!ts.isTypeReferenceNode(tp)) throw bad(tp, 'A'); - const d = data.get(goName(tp.typeName.getText())); - for (const p of d.properties) { - if (!ts.isPropertySignature(p)) throw bad(p, 'B'); - let v = props.get(p.name.getText()) || []; - v.push(p); - props.set(p.name.getText(), v); - } - } - let ans = 'struct {\n'; - for (const [k, v] of Array.from(props)) { - if (v.length == 1) { - const a = v[0]; - ans = ans.concat(getComments(a)); - ans = ans.concat(`${goName(k)} ${goType(a.type, k)} ${u.JSON(a)}\n`); - continue; - } - ans = ans.concat(`${goName(k)} struct {\n`); - for (let i = 0; i < v.length; i++) { - const a = v[i]; - if (ts.isTypeReferenceNode(a.type)) { - ans = ans.concat(getComments(a)); - ans = ans.concat(goName(a.type.typeName.getText()), '\n'); - } else if (ts.isTypeLiteralNode(a.type)) { - if (a.type.members.length != 1) throw bad(a.type, 'C'); - const b = a.type.members[0]; - if (!ts.isPropertySignature(b)) throw bad(b, 'D'); - ans = ans.concat(getComments(b)); - ans = ans.concat( - goName(b.name.getText()), ' ', goType(b.type, 'a'), u.JSON(b), '\n'); - } else { - throw bad(a.type, `E ${a.getText()} in ${goName(k)} at ${loc(a)}`); - } - } - ans = ans.concat('}\n'); - } - ans = ans.concat('}\n'); - return ans; -} - -// Does it make sense to use a pointer? -function isStructType(te: ts.TypeNode): boolean { - switch (strKind(te)) { - case 'UnionType': // really need to know which type will be chosen - case 'BooleanKeyword': - case 'StringKeyword': - case 'ArrayType': - return false; - case 'TypeLiteral': return false; // true makes for difficult compound constants - // but think more carefully to understands why starred is needed. - case 'TypeReference': { - if (!ts.isTypeReferenceNode(te)) throw new Error(`1047 impossible ${strKind(te)}`); - const d = seenTypes.get(goName(te.typeName.getText())); - if (d === undefined || d.properties.length == 0) return false; - if (d.properties.length > 1) return true; - // alias or interface with a single property (The alias is Uinteger, which we ignore later) - if (d.alias) return false; - const x = d.properties[0].type; - return isStructType(x); - } - default: throw new Error(`1055 indirectable> ${strKind(te)}`); - } -} - -function goTypeLiteral(n: ts.TypeLiteralNode, nm: string): string { - let ans: string[] = []; // in case we generate a new extra type - let res = 'struct{\n'; // the actual answer usually - const g = function (nx: ts.TypeElement) { - // add the json, as in goInterface(). Strange inside union types. - if (ts.isPropertySignature(nx)) { - let json = u.JSON(nx); - let typ = goType(nx.type, nx.name.getText()); - // }/*\n*/`json:v` is not legal, the comment is a newline - if (typ.includes('\n') && typ.indexOf('*/') === typ.length - 2) { - typ = typ.replace(/\n\t*/g, ' '); - } - const v = getComments(nx) || ''; - starred.forEach(([a, b]) => { - if (a != nm || b != typ.toLowerCase()) return; - typ = '*' + typ; - json = json.substring(0, json.length - 2) + ',omitempty"`'; - }); - if (typ[0] !== '*' && isStructType(nx.type)) typ = '*' + typ; - res = res.concat(`${v} ${goName(nx.name.getText())} ${typ}`, json, '\n'); - ans.push(`${v}${goName(nx.name.getText())} ${typ} ${json}\n`); - } else if (ts.isIndexSignatureDeclaration(nx)) { - const comment = nx.getText().replace(/[/]/g, ''); - if (nx.getText() == '[uri: string]: TextEdit[];') { - res = 'map[string][]TextEdit'; - } else if (nx.getText().startsWith('[id: ChangeAnnotationIdentifier]')) { - res = 'map[string]ChangeAnnotationIdentifier'; - } else if (nx.getText().startsWith('[uri: string')) { - res = 'map[string]interface{}'; - } else if (nx.getText().startsWith('[uri: DocumentUri')) { - res = 'map[DocumentURI][]TextEdit'; - } else if (nx.getText().startsWith('[key: string')) { - res = 'map[string]interface{}'; - } else { - throw new Error(`1100 handle ${nx.getText()} ${loc(nx)}`); - } - res += ` /*${comment}*/`; - ans.push(res); - return; - } else - throw new Error(`TypeLiteral had ${strKind(nx)}`); - }; - n.members.forEach(g); - // for some the generated type is wanted, for others it's not needed - if (!nm.startsWith('workspace')) { - if (res.startsWith('struct')) return res + '}'; // map[] is special - return res; - } - // these names have to be made unique - const genName = `${goName(nm)}${extraTypes.size}Gn`; - extraTypes.set(genName, ans); - return genName; -} - -// print all the types and constants and extra types -function outputTypes() { - // generate go types alphabeticaly - let v = Array.from(seenTypes.keys()); - v.sort(); - v.forEach((x) => toGo(seenTypes.get(x), x)); - u.prgo(u.computeHeader(true)); - u.prgo('import "encoding/json"\n\n'); - typesOut.forEach((s) => { - u.prgo(s); - // it's more convenient not to have to think about trailing newlines - // when generating types, but doc comments can't have an extra \n - if (s.indexOf('/**') < 0) u.prgo('\n'); - }); - u.prgo('\nconst (\n'); - constsOut.forEach((s) => { - u.prgo(s); - u.prgo('\n'); - }); - u.prgo(')\n'); - u.prgo('// Types created to name formal parameters and embedded structs\n'); - extraTypes.forEach((v, k) => { - u.prgo(` type ${k} struct {\n`); - v.forEach((s) => { - u.prgo(s); - u.prgo('\n'); - }); - u.prgo('}\n'); - }); -} - -// client and server ------------------ - -interface side { - methods: string[]; - cases: string[]; - calls: string[]; - name: string; // client or server - goName: string; // Client or Server - outputFile?: string; - fd?: number -} -let client: side = { - methods: [], - cases: [], - calls: [], - name: 'client', - goName: 'Client', -}; -let server: side = { - methods: [], - cases: [], - calls: [], - name: 'server', - goName: 'Server', -}; - -// commonly used output -const notNil = `if len(r.Params()) > 0 { - return true, reply(ctx, nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)) -}`; - -// Go code for notifications. Side is client or server, m is the request -// method -function goNot(side: side, m: string) { - if (m == '$/cancelRequest') return; // handled specially in protocol.go - const n = not.get(m); - const a = goType(n.typeArguments[0], m); - const nm = methodName(m); - side.methods.push(sig(nm, a, '')); - const caseHdr = ` case "${m}": // notif`; - let case1 = notNil; - if (a != '' && a != 'void') { - case1 = `var params ${a} - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - } - err:= ${side.name}.${nm}(ctx, ¶ms) - return true, reply(ctx, nil, err)`; - } else { - case1 = `err := ${side.name}.${nm}(ctx) - return true, reply(ctx, nil, err)`; - } - side.cases.push(`${caseHdr}\n${case1}`); - - const arg3 = a == '' || a == 'void' ? 'nil' : 'params'; - side.calls.push(` - func (s *${side.name}Dispatcher) ${sig(nm, a, '', true)} { - return s.sender.Notify(ctx, "${m}", ${arg3}) - }`); -} - -// Go code for requests. -function goReq(side: side, m: string) { - const n = req.get(m); - const nm = methodName(m); - let a = goType(n.typeArguments[0], m); - let b = goType(n.typeArguments[1], m); - if (n.getText().includes('Type0')) { - b = a; - a = ''; // workspace/workspaceFolders and shutdown - } - u.prb(`${side.name} req ${a != ''}, ${b != ''} ${nm} ${m} ${loc(n)} `); - side.methods.push(sig(nm, a, b)); - - const caseHdr = `case "${m}": // req`; - let case1 = notNil; - if (a != '') { - if (extraTypes.has('Param' + nm)) a = 'Param' + nm; - case1 = `var params ${a} - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - return true, sendParseError(ctx, reply, err) - }`; - if (a === 'ParamInitialize') { - case1 = `var params ${a} - if err := json.Unmarshal(r.Params(), ¶ms); err != nil { - if _, ok := err.(*json.UnmarshalTypeError); !ok { - return true, sendParseError(ctx, reply, err) - } - }`; - } - } - const arg2 = a == '' ? '' : ', ¶ms'; - // if case2 is not explicitly typed string, typescript makes it a union of strings - let case2: string = `if err := ${side.name}.${nm}(ctx${arg2}); err != nil { - event.Error(ctx, "", err) - }`; - if (b != '' && b != 'void') { - case2 = `resp, err := ${side.name}.${nm}(ctx${arg2}) - if err != nil { - return true, reply(ctx, nil, err) - } - return true, reply(ctx, resp, nil)`; - } else { // response is nil - case2 = `err := ${side.name}.${nm}(ctx${arg2}) - return true, reply(ctx, nil, err)`; - } - - side.cases.push(`${caseHdr}\n${case1}\n${case2}`); - - const callHdr = `func (s *${side.name}Dispatcher) ${sig(nm, a, b, true)} {`; - let callBody = `return s.sender.Call(ctx, "${m}", nil, nil)\n}`; - if (b != '' && b != 'void') { - const p2 = a == '' ? 'nil' : 'params'; - const returnType = indirect(b) ? `*${b}` : b; - callBody = `var result ${returnType} - if err := s.sender.Call(ctx, "${m}", ${p2}, &result); err != nil { - return nil, err - } - return result, nil - }`; - } else if (a != '') { - callBody = `return s.sender.Call(ctx, "${m}", params, nil) // Call, not Notify - }`; - } - side.calls.push(`${callHdr}\n${callBody}\n`); -} - -// make sure method names are unique -let seenNames = new Set(); -function methodName(m: string): string { - let i = m.indexOf('/'); - let s = m.substring(i + 1); - let x = s[0].toUpperCase() + s.substring(1); - for (let j = x.indexOf('/'); j >= 0; j = x.indexOf('/')) { - let suffix = x.substring(j + 1); - suffix = suffix[0].toUpperCase() + suffix.substring(1); - let prefix = x.substring(0, j); - x = prefix + suffix; - } - if (seenNames.has(x)) { - // various Resolve and Diagnostic - x += m[0].toUpperCase() + m.substring(1, i); - } - seenNames.add(x); - return x; -} - -// used in sig and in goReq -function indirect(s: string): boolean { - if (s == '' || s == 'void') return false; - const skip = (x: string) => s.startsWith(x); - if (skip('[]') || skip('interface') || skip('Declaration') || - skip('Definition') || skip('DocumentSelector')) - return false; - return true; -} - -// Go signatures for methods. -function sig(nm: string, a: string, b: string, names?: boolean): string { - if (a.indexOf('struct') != -1) { - const v = a.split('\n'); - extraTypes.set(`Param${nm}`, v.slice(1, v.length - 1)); - a = 'Param' + nm; - } - if (a == 'void') - a = ''; - else if (a != '') { - if (names) - a = ', params *' + a; - else - a = ', *' + a; - } - let ret = 'error'; - if (b != '' && b != 'void') { - // avoid * when it is senseless - if (indirect(b)) b = '*' + b; - ret = `(${b}, error)`; - } - let start = `${nm}(`; - if (names) { - start = start + 'ctx '; - } - return `${start}context.Context${a}) ${ret}`; -} - -// write the request/notification code -function output(side: side) { - // make sure the output file exists - if (!side.outputFile) { - side.outputFile = `ts${side.name}.go`; - side.fd = fs.openSync(side.outputFile, 'w'); - } - const f = function (s: string) { - fs.writeSync(side.fd!, s); - fs.writeSync(side.fd!, '\n'); - }; - f(u.computeHeader(false)); - f(` - import ( - "context" - "encoding/json" - - "golang.org/x/tools/internal/jsonrpc2" - ) - `); - const a = side.name[0].toUpperCase() + side.name.substring(1); - f(`type ${a} interface {`); - side.methods.forEach((v) => { f(v); }); - f('}\n'); - f(`func ${side.name}Dispatch(ctx context.Context, ${side.name} ${a}, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) { - switch r.Method() {`); - side.cases.forEach((v) => { f(v); }); - f(` - default: - return false, nil - } - }`); - side.calls.forEach((v) => { f(v); }); -} - -// Handling of non-standard requests, so we can add gopls-specific calls. -function nonstandardRequests() { - server.methods.push( - 'NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error)'); - server.calls.push( - `func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) { - var result interface{} - if err := s.sender.Call(ctx, method, params, &result); err != nil { - return nil, err - } - return result, nil - } - `); -} - -// ----- remember it's a scripting language -function main() { - if (u.gitHash != u.git()) { - throw new Error( - `git hash mismatch, wanted\n${u.gitHash} but source is at\n${u.git()}`); - } - u.createOutputFiles(); - parse(); - u.printAST(program); - // find the Requests and Nofificatations - for (const sourceFile of program.getSourceFiles()) { - if (!sourceFile.isDeclarationFile) { - ts.forEachChild(sourceFile, findRPCs); - } - } - // separate RPCs into client and server - setReceives(); - // visit every sourceFile collecting top-level type definitions - for (const sourceFile of program.getSourceFiles()) { - if (!sourceFile.isDeclarationFile) { - ts.forEachChild(sourceFile, genTypes); - } - } - // check that each thing occurs exactly once, and put pointers into - // seenTypes - checkOnce(); - // for each of Client and Server there are 3 parts to the output: - // 1. type X interface {methods} - // 2. func (h *serverHandler) Deliver(...) { switch r.method } - // 3. func (x *xDispatcher) Method(ctx, parm) - not.forEach( // notifications - (v, k) => { - receives.get(k) == 'client' ? goNot(client, k) : goNot(server, k); - }); - req.forEach( // requests - (v, k) => { - receives.get(k) == 'client' ? goReq(client, k) : goReq(server, k); - }); - nonstandardRequests(); - // find all the types implied by seenTypes and rpcs to try to avoid - // generating types that aren't used - moreTypes(); - // do merging - cleanData(); - // and print the Go code - outputTypes(); - console.log(`seen ${seenTypes.size + extraTypes.size}`); - output(client); - output(server); -} - -main(); diff --git a/internal/lsp/protocol/typescript/tsconfig.json b/internal/lsp/protocol/typescript/tsconfig.json deleted file mode 100644 index 14cfe0c7ee9..00000000000 --- a/internal/lsp/protocol/typescript/tsconfig.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "compilerOptions": { - "isolatedModules": true, - "moduleResolution": "node", - "lib":["ES2020"], - "sourceMap": true, // sourceMap or inlineSourceMap? and see inlineSources - "target": "ES5", - - "noFallthroughCasesInSwitch": false, // there is one legitimate on - "noImplicitReturns": true, - "noPropertyAccessFromIndexSignature": true, - "noUncheckedIndexedAccess": true, - "noUnusedLocals": true, - "noUnusedParameters": false, - "noEmitOnError": true, - - // "extendedDiagnostics": true, // for occasional amusement - - // "strict": true, // too many undefineds in types, etc - "alwaysStrict": true, - "noImplicitAny": true, - "noImplicitThis": true, - "strictBindCallApply": true, - "strictFunctionTypes": true, - "strictNullChecks": false, // doesn't like arrray access, among other things. - //"strictPropertyInitialization": true, // needs strictNullChecks - }, - "files": ["./code.ts", "./util.ts"] -} diff --git a/internal/lsp/protocol/typescript/util.ts b/internal/lsp/protocol/typescript/util.ts deleted file mode 100644 index 9475b26a157..00000000000 --- a/internal/lsp/protocol/typescript/util.ts +++ /dev/null @@ -1,254 +0,0 @@ - -// for us typescript ignorati, having an import makes this file a module -import * as fs from 'fs'; -import * as process from 'process'; -import * as ts from 'typescript'; - -// This file contains various utilities having to do with producing strings -// and managing output - -// ------ create files -let dir = process.env['HOME']; -const srcDir = '/vscode-languageserver-node'; -export const fnames = [ - `${dir}${srcDir}/protocol/src/common/protocol.ts`, - `${dir}/${srcDir}/protocol/src/browser/main.ts`, `${dir}${srcDir}/types/src/main.ts`, - `${dir}${srcDir}/jsonrpc/src/node/main.ts` -]; -export const gitHash = '696f9285bf849b73745682fdb1c1feac73eb8772'; -let outFname = 'tsprotocol.go'; -let fda: number, fdb: number, fde: number; // file descriptors - -export function createOutputFiles() { - fda = fs.openSync('/tmp/ts-a', 'w'); // dump of AST - fdb = fs.openSync('/tmp/ts-b', 'w'); // unused, for debugging - fde = fs.openSync(outFname, 'w'); // generated Go -} -export function pra(s: string) { - return (fs.writeSync(fda, s)); -} -export function prb(s: string) { - return (fs.writeSync(fdb, s)); -} -export function prgo(s: string) { - return (fs.writeSync(fde, s)); -} - -// Get the hash value of the git commit -export function git(): string { - let a = fs.readFileSync(`${dir}${srcDir}/.git/HEAD`).toString(); - // ref: refs/heads/foo, or a hash like - // cc12d1a1c7df935012cdef5d085cdba04a7c8ebe - if (a.charAt(a.length - 1) == '\n') { - a = a.substring(0, a.length - 1); - } - if (a.length == 40) { - return a; // a hash - } - if (a.substring(0, 5) == 'ref: ') { - const fname = `${dir}${srcDir}/.git/` + a.substring(5); - let b = fs.readFileSync(fname).toString(); - if (b.length == 41) { - return b.substring(0, 40); - } - } - throw new Error('failed to find the git commit hash'); -} - -// Produce a header for Go output files -export function computeHeader(pkgDoc: boolean): string { - let lastMod = 0; - let lastDate = new Date(); - for (const f of fnames) { - const st = fs.statSync(f); - if (st.mtimeMs > lastMod) { - lastMod = st.mtimeMs; - lastDate = st.mtime; - } - } - const cp = `// Copyright 2019 The Go Authors. All rights reserved. - // Use of this source code is governed by a BSD-style - // license that can be found in the LICENSE file. - - `; - const a = - '// Package protocol contains data types and code for LSP json rpcs\n' + - '// generated automatically from vscode-languageserver-node\n' + - `// commit: ${gitHash}\n` + - `// last fetched ${lastDate}\n`; - const b = 'package protocol\n'; - const c = '\n// Code generated (see typescript/README.md) DO NOT EDIT.\n\n'; - if (pkgDoc) { - return cp + c + a + b; - } - else { - return cp + c+ b + a; - } -} - -// Turn a typescript name into an exportable Go name, and appease lint -export function goName(s: string): string { - let ans = s; - if (s.charAt(0) == '_') { - // in the end, none of these are emitted. - ans = 'Inner' + s.substring(1); - } - else { ans = s.substring(0, 1).toUpperCase() + s.substring(1); } - ans = ans.replace(/Uri$/, 'URI'); - ans = ans.replace(/Id$/, 'ID'); - return ans; -} - -// Generate JSON tag for a struct field -export function JSON(n: ts.PropertySignature): string { - const json = `\`json:"${n.name.getText()}${n.questionToken !== undefined ? ',omitempty' : ''}"\``; - return json; -} - -// Generate modifying prefixes and suffixes to ensure -// consts are unique. (Go consts are package-level, but Typescript's are -// not.) Use suffixes to minimize changes to gopls. -export function constName(nm: string, type: string): string { - let pref = new Map([ - ['DiagnosticSeverity', 'Severity'], ['WatchKind', 'Watch'], - ['SignatureHelpTriggerKind', 'Sig'], ['CompletionItemTag', 'Compl'], - ['Integer', 'INT_'], ['Uinteger', 'UINT_'], ['CodeActionTriggerKind', 'CodeAction'] - ]); // typeName->prefix - let suff = new Map([ - ['CompletionItemKind', 'Completion'], ['InsertTextFormat', 'TextFormat'], - ['SymbolTag', 'Symbol'], ['FileOperationPatternKind', 'Op'], - ]); - let ans = nm; - if (pref.get(type)) ans = pref.get(type) + ans; - if (suff.has(type)) ans = ans + suff.get(type); - return ans; -} - -// Find the comments associated with an AST node -export function getComments(node: ts.Node): string { - const sf = node.getSourceFile(); - const start = node.getStart(sf, false); - const starta = node.getStart(sf, true); - const x = sf.text.substring(starta, start); - return x; -} - - -// --------- printing the AST, for debugging - -export function printAST(program: ts.Program) { - // dump the ast, for debugging - const f = function (n: ts.Node) { - describe(n, pra); - }; - for (const sourceFile of program.getSourceFiles()) { - if (!sourceFile.isDeclarationFile) { - // walk the tree to do stuff - ts.forEachChild(sourceFile, f); - } - } - pra('\n'); - for (const key of Object.keys(seenThings).sort()) { - pra(`${key}: ${seenThings.get(key)} \n`); - } -} - -// Used in printing the AST -let seenThings = new Map(); -function seenAdd(x: string) { - const u = seenThings.get(x); - seenThings.set(x, u === undefined ? 1 : u + 1); -} - -// eslint-disable-next-line no-unused-vars -function describe(node: ts.Node, pr: (_: string) => any) { - if (node === undefined) { - return; - } - let indent = ''; - - function f(n: ts.Node) { - seenAdd(kinds(n)); - if (ts.isIdentifier(n)) { - pr(`${indent} ${loc(n)} ${strKind(n)} ${n.text} \n`); - } - else if (ts.isPropertySignature(n) || ts.isEnumMember(n)) { - pra(`${indent} ${loc(n)} ${strKind(n)} \n`); - } - else if (ts.isTypeLiteralNode(n)) { - let m = n.members; - pr(`${indent} ${loc(n)} ${strKind(n)} ${m.length} \n`); - } - else if (ts.isStringLiteral(n)) { - pr(`${indent} ${loc(n)} ${strKind(n)} ${n.text} \n`); - } - else { pr(`${indent} ${loc(n)} ${strKind(n)} \n`); } - indent += ' .'; - ts.forEachChild(n, f); - indent = indent.slice(0, indent.length - 2); - } - f(node); -} - - -// For debugging, say where an AST node is in a file -export function loc(node: ts.Node | undefined): string { - if (!node) throw new Error('loc called with undefined (cannot happen!)'); - const sf = node.getSourceFile(); - const start = node.getStart(); - const x = sf.getLineAndCharacterOfPosition(start); - const full = node.getFullStart(); - const y = sf.getLineAndCharacterOfPosition(full); - let fn = sf.fileName; - const n = fn.search(/-node./); - fn = fn.substring(n + 6); - return `${fn} ${x.line + 1}: ${x.character + 1} (${y.line + 1}: ${y.character + 1})`; -} - -// --- various string stuff - -// return a string of the kinds of the immediate descendants -// as part of printing the AST tree -function kinds(n: ts.Node): string { - let res = 'Seen ' + strKind(n); - function f(n: ts.Node): void { res += ' ' + strKind(n); } - ts.forEachChild(n, f); - return res; -} - -// What kind of AST node is it? This would just be typescript's -// SyntaxKind[n.kind] except that the default names for some nodes -// are misleading -export function strKind(n: ts.Node | undefined): string { - if (n == null || n == undefined) { - return 'null'; - } - return kindToStr(n.kind); -} - -function kindToStr(k: ts.SyntaxKind): string { - const x = ts.SyntaxKind[k]; - // some of these have two names - switch (x) { - default: - return x; - case 'FirstAssignment': - return 'EqualsToken'; - case 'FirstBinaryOperator': - return 'LessThanToken'; - case 'FirstCompoundAssignment': - return 'PlusEqualsToken'; - case 'FirstContextualKeyword': - return 'AbstractKeyword'; - case 'FirstLiteralToken': - return 'NumericLiteral'; - case 'FirstNode': - return 'QualifiedName'; - case 'FirstTemplateToken': - return 'NoSubstitutionTemplateLiteral'; - case 'LastTemplateToken': - return 'TemplateTail'; - case 'FirstTypeNode': - return 'TypePredicate'; - } -} diff --git a/internal/lsp/references.go b/internal/lsp/references.go deleted file mode 100644 index f96e5532cb5..00000000000 --- a/internal/lsp/references.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" -) - -func (s *Server) references(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return nil, err - } - if snapshot.View().FileKind(fh) == source.Tmpl { - return template.References(ctx, snapshot, fh, params) - } - references, err := source.References(ctx, snapshot, fh, params.Position, params.Context.IncludeDeclaration) - if err != nil { - return nil, err - } - var locations []protocol.Location - for _, ref := range references { - refRange, err := ref.Range() - if err != nil { - return nil, err - } - locations = append(locations, protocol.Location{ - URI: protocol.URIFromSpanURI(ref.URI()), - Range: refRange, - }) - } - return locations, nil -} diff --git a/internal/lsp/regtest/doc.go b/internal/lsp/regtest/doc.go deleted file mode 100644 index e97276965b1..00000000000 --- a/internal/lsp/regtest/doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package regtest provides a framework for writing gopls regression tests. -// -// User reported regressions are often expressed in terms of editor -// interactions. For example: "When I open my editor in this directory, -// navigate to this file, and change this line, I get a diagnostic that doesn't -// make sense". In these cases reproducing, diagnosing, and writing a test to -// protect against this regression can be difficult. -// -// The regtest package provides an API for developers to express these types of -// user interactions in ordinary Go tests, validate them, and run them in a -// variety of execution modes (see gopls/doc/daemon.md for more information on -// execution modes). This is achieved roughly as follows: -// - the Runner type starts and connects to a gopls instance for each -// configured execution mode. -// - the Env type provides a collection of resources to use in writing tests -// (for example a temporary working directory and fake text editor) -// - user interactions with these resources are scripted using test wrappers -// around the API provided by the golang.org/x/tools/internal/lsp/fake -// package. -// -// Regressions are expressed in terms of Expectations, which at a high level -// are conditions that we expect to be met (or not to be met) at some point -// after performing the interactions in the test. This is necessary because the -// LSP is by construction asynchronous: both client and server can send -// each other notifications without formal acknowledgement that they have been -// fully processed. -// -// Simple Expectations may be combined to match specific conditions reported by -// the user. In the example above, a regtest validating that the user-reported -// bug had been fixed would "expect" that the editor never displays the -// confusing diagnostic. -package regtest diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go deleted file mode 100644 index f095c38f285..00000000000 --- a/internal/lsp/regtest/env.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "context" - "fmt" - "strings" - "sync" - "testing" - - "golang.org/x/tools/internal/jsonrpc2/servertest" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" -) - -// Env holds an initialized fake Editor, Workspace, and Server, which may be -// used for writing tests. It also provides adapter methods that call t.Fatal -// on any error, so that tests for the happy path may be written without -// checking errors. -type Env struct { - T testing.TB - Ctx context.Context - - // Most tests should not need to access the scratch area, editor, server, or - // connection, but they are available if needed. - Sandbox *fake.Sandbox - Editor *fake.Editor - Server servertest.Connector - - // mu guards the fields below, for the purpose of checking conditions on - // every change to diagnostics. - mu sync.Mutex - // For simplicity, each waiter gets a unique ID. - nextWaiterID int - state State - waiters map[int]*condition -} - -// State encapsulates the server state TODO: explain more -type State struct { - // diagnostics are a map of relative path->diagnostics params - diagnostics map[string]*protocol.PublishDiagnosticsParams - logs []*protocol.LogMessageParams - showMessage []*protocol.ShowMessageParams - showMessageRequest []*protocol.ShowMessageRequestParams - - registrations []*protocol.RegistrationParams - unregistrations []*protocol.UnregistrationParams - - // outstandingWork is a map of token->work summary. All tokens are assumed to - // be string, though the spec allows for numeric tokens as well. When work - // completes, it is deleted from this map. - outstandingWork map[protocol.ProgressToken]*workProgress - startedWork map[string]uint64 - completedWork map[string]uint64 -} - -type workProgress struct { - title, msg string - percent float64 -} - -func (s State) String() string { - var b strings.Builder - b.WriteString("#### log messages (see RPC logs for full text):\n") - for _, msg := range s.logs { - summary := fmt.Sprintf("%v: %q", msg.Type, msg.Message) - if len(summary) > 60 { - summary = summary[:57] + "..." - } - // Some logs are quite long, and since they should be reproduced in the RPC - // logs on any failure we include here just a short summary. - fmt.Fprint(&b, "\t"+summary+"\n") - } - b.WriteString("\n") - b.WriteString("#### diagnostics:\n") - for name, params := range s.diagnostics { - fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version)) - for _, d := range params.Diagnostics { - fmt.Fprintf(&b, "\t\t(%d, %d): %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Message) - } - } - b.WriteString("\n") - b.WriteString("#### outstanding work:\n") - for token, state := range s.outstandingWork { - name := state.title - if name == "" { - name = fmt.Sprintf("!NO NAME(token: %s)", token) - } - fmt.Fprintf(&b, "\t%s: %.2f\n", name, state.percent) - } - b.WriteString("#### completed work:\n") - for name, count := range s.completedWork { - fmt.Fprintf(&b, "\t%s: %d\n", name, count) - } - return b.String() -} - -// A condition is satisfied when all expectations are simultaneously -// met. At that point, the 'met' channel is closed. On any failure, err is set -// and the failed channel is closed. -type condition struct { - expectations []Expectation - verdict chan Verdict -} - -// NewEnv creates a new test environment using the given scratch environment -// and gopls server. -func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) *Env { - tb.Helper() - conn := ts.Connect(ctx) - env := &Env{ - T: tb, - Ctx: ctx, - Sandbox: sandbox, - Server: ts, - state: State{ - diagnostics: make(map[string]*protocol.PublishDiagnosticsParams), - outstandingWork: make(map[protocol.ProgressToken]*workProgress), - startedWork: make(map[string]uint64), - completedWork: make(map[string]uint64), - }, - waiters: make(map[int]*condition), - } - var hooks fake.ClientHooks - if withHooks { - hooks = fake.ClientHooks{ - OnDiagnostics: env.onDiagnostics, - OnLogMessage: env.onLogMessage, - OnWorkDoneProgressCreate: env.onWorkDoneProgressCreate, - OnProgress: env.onProgress, - OnShowMessage: env.onShowMessage, - OnShowMessageRequest: env.onShowMessageRequest, - OnRegistration: env.onRegistration, - OnUnregistration: env.onUnregistration, - } - } - editor, err := fake.NewEditor(sandbox, editorConfig).Connect(ctx, conn, hooks) - if err != nil { - tb.Fatal(err) - } - env.Editor = editor - return env -} - -func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - pth := e.Sandbox.Workdir.URIToPath(d.URI) - e.state.diagnostics[pth] = d - e.checkConditionsLocked() - return nil -} - -func (e *Env) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.showMessage = append(e.state.showMessage, m) - e.checkConditionsLocked() - return nil -} - -func (e *Env) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.showMessageRequest = append(e.state.showMessageRequest, m) - e.checkConditionsLocked() - return nil -} - -func (e *Env) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.logs = append(e.state.logs, m) - e.checkConditionsLocked() - return nil -} - -func (e *Env) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.outstandingWork[m.Token] = &workProgress{} - return nil -} - -func (e *Env) onProgress(_ context.Context, m *protocol.ProgressParams) error { - e.mu.Lock() - defer e.mu.Unlock() - work, ok := e.state.outstandingWork[m.Token] - if !ok { - panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m)) - } - v := m.Value.(map[string]interface{}) - switch kind := v["kind"]; kind { - case "begin": - work.title = v["title"].(string) - e.state.startedWork[work.title] = e.state.startedWork[work.title] + 1 - if msg, ok := v["message"]; ok { - work.msg = msg.(string) - } - case "report": - if pct, ok := v["percentage"]; ok { - work.percent = pct.(float64) - } - if msg, ok := v["message"]; ok { - work.msg = msg.(string) - } - case "end": - title := e.state.outstandingWork[m.Token].title - e.state.completedWork[title] = e.state.completedWork[title] + 1 - delete(e.state.outstandingWork, m.Token) - } - e.checkConditionsLocked() - return nil -} - -func (e *Env) onRegistration(_ context.Context, m *protocol.RegistrationParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.registrations = append(e.state.registrations, m) - e.checkConditionsLocked() - return nil -} - -func (e *Env) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.state.unregistrations = append(e.state.unregistrations, m) - e.checkConditionsLocked() - return nil -} - -func (e *Env) checkConditionsLocked() { - for id, condition := range e.waiters { - if v, _ := checkExpectations(e.state, condition.expectations); v != Unmet { - delete(e.waiters, id) - condition.verdict <- v - } - } -} - -// checkExpectations reports whether s meets all expectations. -func checkExpectations(s State, expectations []Expectation) (Verdict, string) { - finalVerdict := Met - var summary strings.Builder - for _, e := range expectations { - v := e.Check(s) - if v > finalVerdict { - finalVerdict = v - } - summary.WriteString(fmt.Sprintf("%v: %s\n", v, e.Description())) - } - return finalVerdict, summary.String() -} - -// DiagnosticsFor returns the current diagnostics for the file. It is useful -// after waiting on AnyDiagnosticAtCurrentVersion, when the desired diagnostic -// is not simply described by DiagnosticAt. -func (e *Env) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams { - e.mu.Lock() - defer e.mu.Unlock() - return e.state.diagnostics[name] -} - -// Await waits for all expectations to simultaneously be met. It should only be -// called from the main test goroutine. -func (e *Env) Await(expectations ...Expectation) { - e.T.Helper() - e.mu.Lock() - // Before adding the waiter, we check if the condition is currently met or - // failed to avoid a race where the condition was realized before Await was - // called. - switch verdict, summary := checkExpectations(e.state, expectations); verdict { - case Met: - e.mu.Unlock() - return - case Unmeetable: - failure := fmt.Sprintf("unmeetable expectations:\n%s\nstate:\n%v", summary, e.state) - e.mu.Unlock() - e.T.Fatal(failure) - } - cond := &condition{ - expectations: expectations, - verdict: make(chan Verdict), - } - e.waiters[e.nextWaiterID] = cond - e.nextWaiterID++ - e.mu.Unlock() - - var err error - select { - case <-e.Ctx.Done(): - err = e.Ctx.Err() - case v := <-cond.verdict: - if v != Met { - err = fmt.Errorf("condition has final verdict %v", v) - } - } - e.mu.Lock() - defer e.mu.Unlock() - _, summary := checkExpectations(e.state, expectations) - - // Debugging an unmet expectation can be tricky, so we put some effort into - // nicely formatting the failure. - if err != nil { - e.T.Fatalf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, e.state) - } -} diff --git a/internal/lsp/regtest/env_test.go b/internal/lsp/regtest/env_test.go deleted file mode 100644 index fe5864ca77c..00000000000 --- a/internal/lsp/regtest/env_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "context" - "encoding/json" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" -) - -func TestProgressUpdating(t *testing.T) { - e := &Env{ - state: State{ - outstandingWork: make(map[protocol.ProgressToken]*workProgress), - startedWork: make(map[string]uint64), - completedWork: make(map[string]uint64), - }, - } - ctx := context.Background() - if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{ - Token: "foo", - }); err != nil { - t.Fatal(err) - } - if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{ - Token: "bar", - }); err != nil { - t.Fatal(err) - } - updates := []struct { - token string - value interface{} - }{ - {"foo", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "foo work"}}, - {"bar", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "bar work"}}, - {"foo", protocol.WorkDoneProgressEnd{Kind: "end"}}, - {"bar", protocol.WorkDoneProgressReport{Kind: "report", Percentage: 42}}, - } - for _, update := range updates { - params := &protocol.ProgressParams{ - Token: update.token, - Value: update.value, - } - data, err := json.Marshal(params) - if err != nil { - t.Fatal(err) - } - var unmarshaled protocol.ProgressParams - if err := json.Unmarshal(data, &unmarshaled); err != nil { - t.Fatal(err) - } - if err := e.onProgress(ctx, &unmarshaled); err != nil { - t.Fatal(err) - } - } - if _, ok := e.state.outstandingWork["foo"]; ok { - t.Error("got work entry for \"foo\", want none") - } - got := *e.state.outstandingWork["bar"] - want := workProgress{title: "bar work", percent: 42} - if got != want { - t.Errorf("work progress for \"bar\": %v, want %v", got, want) - } -} diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go deleted file mode 100644 index ab808f9e8cf..00000000000 --- a/internal/lsp/regtest/expectation.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "fmt" - "regexp" - "strings" - - "golang.org/x/tools/internal/lsp" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/testenv" -) - -// An Expectation asserts that the state of the editor at a point in time -// matches an expected condition. This is used for signaling in tests when -// certain conditions in the editor are met. -type Expectation interface { - // Check determines whether the state of the editor satisfies the - // expectation, returning the results that met the condition. - Check(State) Verdict - // Description is a human-readable description of the expectation. - Description() string -} - -var ( - // InitialWorkspaceLoad is an expectation that the workspace initial load has - // completed. It is verified via workdone reporting. - InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1, false) -) - -// A Verdict is the result of checking an expectation against the current -// editor state. -type Verdict int - -// Order matters for the following constants: verdicts are sorted in order of -// decisiveness. -const ( - // Met indicates that an expectation is satisfied by the current state. - Met Verdict = iota - // Unmet indicates that an expectation is not currently met, but could be met - // in the future. - Unmet - // Unmeetable indicates that an expectation cannot be satisfied in the - // future. - Unmeetable -) - -func (v Verdict) String() string { - switch v { - case Met: - return "Met" - case Unmet: - return "Unmet" - case Unmeetable: - return "Unmeetable" - } - return fmt.Sprintf("unrecognized verdict %d", v) -} - -// SimpleExpectation holds an arbitrary check func, and implements the Expectation interface. -type SimpleExpectation struct { - check func(State) Verdict - description string -} - -// Check invokes e.check. -func (e SimpleExpectation) Check(s State) Verdict { - return e.check(s) -} - -// Description returns e.description. -func (e SimpleExpectation) Description() string { - return e.description -} - -// OnceMet returns an Expectation that, once the precondition is met, asserts -// that mustMeet is met. -func OnceMet(precondition Expectation, mustMeets ...Expectation) *SimpleExpectation { - check := func(s State) Verdict { - switch pre := precondition.Check(s); pre { - case Unmeetable: - return Unmeetable - case Met: - for _, mustMeet := range mustMeets { - verdict := mustMeet.Check(s) - if verdict != Met { - return Unmeetable - } - } - return Met - default: - return Unmet - } - } - description := describeExpectations(mustMeets...) - return &SimpleExpectation{ - check: check, - description: fmt.Sprintf("once %q is met, must have:\n%s", precondition.Description(), description), - } -} - -func describeExpectations(expectations ...Expectation) string { - var descriptions []string - for _, e := range expectations { - descriptions = append(descriptions, e.Description()) - } - return strings.Join(descriptions, "\n") -} - -// AnyOf returns an expectation that is satisfied when any of the given -// expectations is met. -func AnyOf(anyOf ...Expectation) *SimpleExpectation { - check := func(s State) Verdict { - for _, e := range anyOf { - verdict := e.Check(s) - if verdict == Met { - return Met - } - } - return Unmet - } - description := describeExpectations(anyOf...) - return &SimpleExpectation{ - check: check, - description: fmt.Sprintf("Any of:\n%s", description), - } -} - -// ReadDiagnostics is an 'expectation' that is used to read diagnostics -// atomically. It is intended to be used with 'OnceMet'. -func ReadDiagnostics(fileName string, into *protocol.PublishDiagnosticsParams) *SimpleExpectation { - check := func(s State) Verdict { - diags, ok := s.diagnostics[fileName] - if !ok { - return Unmeetable - } - *into = *diags - return Met - } - return &SimpleExpectation{ - check: check, - description: fmt.Sprintf("read diagnostics for %q", fileName), - } -} - -// NoOutstandingWork asserts that there is no work initiated using the LSP -// $/progress API that has not completed. -func NoOutstandingWork() SimpleExpectation { - check := func(s State) Verdict { - if len(s.outstandingWork) == 0 { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: "no outstanding work", - } -} - -// NoShowMessage asserts that the editor has not received a ShowMessage. -func NoShowMessage() SimpleExpectation { - check := func(s State) Verdict { - if len(s.showMessage) == 0 { - return Met - } - return Unmeetable - } - return SimpleExpectation{ - check: check, - description: "no ShowMessage received", - } -} - -// ShownMessage asserts that the editor has received a ShowMessageRequest -// containing the given substring. -func ShownMessage(containing string) SimpleExpectation { - check := func(s State) Verdict { - for _, m := range s.showMessage { - if strings.Contains(m.Message, containing) { - return Met - } - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: "received ShowMessage", - } -} - -// ShowMessageRequest asserts that the editor has received a ShowMessageRequest -// with an action item that has the given title. -func ShowMessageRequest(title string) SimpleExpectation { - check := func(s State) Verdict { - if len(s.showMessageRequest) == 0 { - return Unmet - } - // Only check the most recent one. - m := s.showMessageRequest[len(s.showMessageRequest)-1] - if len(m.Actions) == 0 || len(m.Actions) > 1 { - return Unmet - } - if m.Actions[0].Title == title { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: "received ShowMessageRequest", - } -} - -// DoneWithOpen expects all didOpen notifications currently sent by the editor -// to be completely processed. -func (e *Env) DoneWithOpen() Expectation { - opens := e.Editor.Stats().DidOpen - return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens, true) -} - -// StartedChange expects there to have been i work items started for -// processing didChange notifications. -func StartedChange(i uint64) Expectation { - return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), i) -} - -// DoneWithChange expects all didChange notifications currently sent by the -// editor to be completely processed. -func (e *Env) DoneWithChange() Expectation { - changes := e.Editor.Stats().DidChange - return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes, true) -} - -// DoneWithSave expects all didSave notifications currently sent by the editor -// to be completely processed. -func (e *Env) DoneWithSave() Expectation { - saves := e.Editor.Stats().DidSave - return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves, true) -} - -// DoneWithChangeWatchedFiles expects all didChangeWatchedFiles notifications -// currently sent by the editor to be completely processed. -func (e *Env) DoneWithChangeWatchedFiles() Expectation { - changes := e.Editor.Stats().DidChangeWatchedFiles - return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes, true) -} - -// DoneWithClose expects all didClose notifications currently sent by the -// editor to be completely processed. -func (e *Env) DoneWithClose() Expectation { - changes := e.Editor.Stats().DidClose - return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes, true) -} - -// StartedWork expect a work item to have been started >= atLeast times. -// -// See CompletedWork. -func StartedWork(title string, atLeast uint64) SimpleExpectation { - check := func(s State) Verdict { - if s.startedWork[title] >= atLeast { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: fmt.Sprintf("started work %q at least %d time(s)", title, atLeast), - } -} - -// CompletedWork expects a work item to have been completed >= atLeast times. -// -// Since the Progress API doesn't include any hidden metadata, we must use the -// progress notification title to identify the work we expect to be completed. -func CompletedWork(title string, count uint64, atLeast bool) SimpleExpectation { - check := func(s State) Verdict { - if s.completedWork[title] == count || atLeast && s.completedWork[title] > count { - return Met - } - return Unmet - } - desc := fmt.Sprintf("completed work %q %v times", title, count) - if atLeast { - desc = fmt.Sprintf("completed work %q at least %d time(s)", title, count) - } - return SimpleExpectation{ - check: check, - description: desc, - } -} - -// OutstandingWork expects a work item to be outstanding. The given title must -// be an exact match, whereas the given msg must only be contained in the work -// item's message. -func OutstandingWork(title, msg string) SimpleExpectation { - check := func(s State) Verdict { - for _, work := range s.outstandingWork { - if work.title == title && strings.Contains(work.msg, msg) { - return Met - } - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: fmt.Sprintf("outstanding work: %s", title), - } -} - -// LogExpectation is an expectation on the log messages received by the editor -// from gopls. -type LogExpectation struct { - check func([]*protocol.LogMessageParams) Verdict - description string -} - -// Check implements the Expectation interface. -func (e LogExpectation) Check(s State) Verdict { - return e.check(s.logs) -} - -// Description implements the Expectation interface. -func (e LogExpectation) Description() string { - return e.description -} - -// NoErrorLogs asserts that the client has not received any log messages of -// error severity. -func NoErrorLogs() LogExpectation { - return NoLogMatching(protocol.Error, "") -} - -// LogMatching asserts that the client has received a log message -// of type typ matching the regexp re. -func LogMatching(typ protocol.MessageType, re string, count int, atLeast bool) LogExpectation { - rec, err := regexp.Compile(re) - if err != nil { - panic(err) - } - check := func(msgs []*protocol.LogMessageParams) Verdict { - var found int - for _, msg := range msgs { - if msg.Type == typ && rec.Match([]byte(msg.Message)) { - found++ - } - } - // Check for an exact or "at least" match. - if found == count || (found >= count && atLeast) { - return Met - } - return Unmet - } - desc := fmt.Sprintf("log message matching %q expected %v times", re, count) - if atLeast { - desc = fmt.Sprintf("log message matching %q expected at least %v times", re, count) - } - return LogExpectation{ - check: check, - description: desc, - } -} - -// NoLogMatching asserts that the client has not received a log message -// of type typ matching the regexp re. If re is an empty string, any log -// message is considered a match. -func NoLogMatching(typ protocol.MessageType, re string) LogExpectation { - var r *regexp.Regexp - if re != "" { - var err error - r, err = regexp.Compile(re) - if err != nil { - panic(err) - } - } - check := func(msgs []*protocol.LogMessageParams) Verdict { - for _, msg := range msgs { - if msg.Type != typ { - continue - } - if r == nil || r.Match([]byte(msg.Message)) { - return Unmeetable - } - } - return Met - } - return LogExpectation{ - check: check, - description: fmt.Sprintf("no log message matching %q", re), - } -} - -// RegistrationExpectation is an expectation on the capability registrations -// received by the editor from gopls. -type RegistrationExpectation struct { - check func([]*protocol.RegistrationParams) Verdict - description string -} - -// Check implements the Expectation interface. -func (e RegistrationExpectation) Check(s State) Verdict { - return e.check(s.registrations) -} - -// Description implements the Expectation interface. -func (e RegistrationExpectation) Description() string { - return e.description -} - -// RegistrationMatching asserts that the client has received a capability -// registration matching the given regexp. -func RegistrationMatching(re string) RegistrationExpectation { - rec, err := regexp.Compile(re) - if err != nil { - panic(err) - } - check := func(params []*protocol.RegistrationParams) Verdict { - for _, p := range params { - for _, r := range p.Registrations { - if rec.Match([]byte(r.Method)) { - return Met - } - } - } - return Unmet - } - return RegistrationExpectation{ - check: check, - description: fmt.Sprintf("registration matching %q", re), - } -} - -// UnregistrationExpectation is an expectation on the capability -// unregistrations received by the editor from gopls. -type UnregistrationExpectation struct { - check func([]*protocol.UnregistrationParams) Verdict - description string -} - -// Check implements the Expectation interface. -func (e UnregistrationExpectation) Check(s State) Verdict { - return e.check(s.unregistrations) -} - -// Description implements the Expectation interface. -func (e UnregistrationExpectation) Description() string { - return e.description -} - -// UnregistrationMatching asserts that the client has received an -// unregistration whose ID matches the given regexp. -func UnregistrationMatching(re string) UnregistrationExpectation { - rec, err := regexp.Compile(re) - if err != nil { - panic(err) - } - check := func(params []*protocol.UnregistrationParams) Verdict { - for _, p := range params { - for _, r := range p.Unregisterations { - if rec.Match([]byte(r.Method)) { - return Met - } - } - } - return Unmet - } - return UnregistrationExpectation{ - check: check, - description: fmt.Sprintf("unregistration matching %q", re), - } -} - -// A DiagnosticExpectation is a condition that must be met by the current set -// of diagnostics for a file. -type DiagnosticExpectation struct { - // optionally, the position of the diagnostic and the regex used to calculate it. - pos *fake.Pos - re string - - // optionally, the message that the diagnostic should contain. - message string - - // whether the expectation is that the diagnostic is present, or absent. - present bool - - // path is the scratch workdir-relative path to the file being asserted on. - path string - - // optionally, the diagnostic source - source string -} - -// Check implements the Expectation interface. -func (e DiagnosticExpectation) Check(s State) Verdict { - diags, ok := s.diagnostics[e.path] - if !ok { - if !e.present { - return Met - } - return Unmet - } - - found := false - for _, d := range diags.Diagnostics { - if e.pos != nil { - if d.Range.Start.Line != uint32(e.pos.Line) || d.Range.Start.Character != uint32(e.pos.Column) { - continue - } - } - if e.message != "" { - if !strings.Contains(d.Message, e.message) { - continue - } - } - if e.source != "" && e.source != d.Source { - continue - } - found = true - break - } - - if found == e.present { - return Met - } - return Unmet -} - -// Description implements the Expectation interface. -func (e DiagnosticExpectation) Description() string { - desc := e.path + ":" - if !e.present { - desc += " no" - } - desc += " diagnostic" - if e.pos != nil { - desc += fmt.Sprintf(" at {line:%d, column:%d}", e.pos.Line, e.pos.Column) - if e.re != "" { - desc += fmt.Sprintf(" (location of %q)", e.re) - } - } - if e.message != "" { - desc += fmt.Sprintf(" with message %q", e.message) - } - if e.source != "" { - desc += fmt.Sprintf(" from source %q", e.source) - } - return desc -} - -// NoOutstandingDiagnostics asserts that the workspace has no outstanding -// diagnostic messages. -func NoOutstandingDiagnostics() Expectation { - check := func(s State) Verdict { - for _, diags := range s.diagnostics { - if len(diags.Diagnostics) > 0 { - return Unmet - } - } - return Met - } - return SimpleExpectation{ - check: check, - description: "no outstanding diagnostics", - } -} - -// EmptyDiagnostics asserts that empty diagnostics are sent for the -// workspace-relative path name. -func EmptyDiagnostics(name string) Expectation { - check := func(s State) Verdict { - if diags := s.diagnostics[name]; diags != nil && len(diags.Diagnostics) == 0 { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: fmt.Sprintf("empty diagnostics for %q", name), - } -} - -// EmptyOrNoDiagnostics asserts that either no diagnostics are sent for the -// workspace-relative path name, or empty diagnostics are sent. -// TODO(rFindley): this subtlety shouldn't be necessary. Gopls should always -// send at least one diagnostic set for open files. -func EmptyOrNoDiagnostics(name string) Expectation { - check := func(s State) Verdict { - if diags := s.diagnostics[name]; diags == nil || len(diags.Diagnostics) == 0 { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: fmt.Sprintf("empty or no diagnostics for %q", name), - } -} - -// NoDiagnostics asserts that no diagnostics are sent for the -// workspace-relative path name. It should be used primarily in conjunction -// with a OnceMet, as it has to check that all outstanding diagnostics have -// already been delivered. -func NoDiagnostics(name string) Expectation { - check := func(s State) Verdict { - if _, ok := s.diagnostics[name]; !ok { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: "no diagnostics", - } -} - -// AnyDiagnosticAtCurrentVersion asserts that there is a diagnostic report for -// the current edited version of the buffer corresponding to the given -// workdir-relative pathname. -func (e *Env) AnyDiagnosticAtCurrentVersion(name string) Expectation { - version := e.Editor.BufferVersion(name) - check := func(s State) Verdict { - diags, ok := s.diagnostics[name] - if ok && diags.Version == int32(version) { - return Met - } - return Unmet - } - return SimpleExpectation{ - check: check, - description: fmt.Sprintf("any diagnostics at version %d", version), - } -} - -// DiagnosticAtRegexp expects that there is a diagnostic entry at the start -// position matching the regexp search string re in the buffer specified by -// name. Note that this currently ignores the end position. -func (e *Env) DiagnosticAtRegexp(name, re string) DiagnosticExpectation { - e.T.Helper() - pos := e.RegexpSearch(name, re) - return DiagnosticExpectation{path: name, pos: &pos, re: re, present: true} -} - -// DiagnosticAtRegexpWithMessage is like DiagnosticAtRegexp, but it also -// checks for the content of the diagnostic message, -func (e *Env) DiagnosticAtRegexpWithMessage(name, re, msg string) DiagnosticExpectation { - e.T.Helper() - pos := e.RegexpSearch(name, re) - return DiagnosticExpectation{path: name, pos: &pos, re: re, present: true, message: msg} -} - -// DiagnosticAtRegexpFromSource expects a diagnostic at the first position -// matching re, from the given source. -func (e *Env) DiagnosticAtRegexpFromSource(name, re, source string) DiagnosticExpectation { - e.T.Helper() - pos := e.RegexpSearch(name, re) - return DiagnosticExpectation{path: name, pos: &pos, re: re, present: true, source: source} -} - -// DiagnosticAt asserts that there is a diagnostic entry at the position -// specified by line and col, for the workdir-relative path name. -func DiagnosticAt(name string, line, col int) DiagnosticExpectation { - return DiagnosticExpectation{path: name, pos: &fake.Pos{Line: line, Column: col}, present: true} -} - -// NoDiagnosticAtRegexp expects that there is no diagnostic entry at the start -// position matching the regexp search string re in the buffer specified by -// name. Note that this currently ignores the end position. -// This should only be used in combination with OnceMet for a given condition, -// otherwise it may always succeed. -func (e *Env) NoDiagnosticAtRegexp(name, re string) DiagnosticExpectation { - e.T.Helper() - pos := e.RegexpSearch(name, re) - return DiagnosticExpectation{path: name, pos: &pos, re: re, present: false} -} - -// NoDiagnosticAt asserts that there is no diagnostic entry at the position -// specified by line and col, for the workdir-relative path name. -// This should only be used in combination with OnceMet for a given condition, -// otherwise it may always succeed. -func NoDiagnosticAt(name string, line, col int) DiagnosticExpectation { - return DiagnosticExpectation{path: name, pos: &fake.Pos{Line: line, Column: col}, present: false} -} - -// NoDiagnosticWithMessage asserts that there is no diagnostic entry with the -// given message. -// -// This should only be used in combination with OnceMet for a given condition, -// otherwise it may always succeed. -func NoDiagnosticWithMessage(name, msg string) DiagnosticExpectation { - return DiagnosticExpectation{path: name, message: msg, present: false} -} - -// GoSumDiagnostic asserts that a "go.sum is out of sync" diagnostic for the -// given module (as formatted in a go.mod file, e.g. "example.com v1.0.0") is -// present. -func (e *Env) GoSumDiagnostic(name, module string) Expectation { - e.T.Helper() - // In 1.16, go.sum diagnostics should appear on the relevant module. Earlier - // errors have no information and appear on the module declaration. - if testenv.Go1Point() >= 16 { - return e.DiagnosticAtRegexpWithMessage(name, module, "go.sum is out of sync") - } else { - return e.DiagnosticAtRegexpWithMessage(name, `module`, "go.sum is out of sync") - } -} diff --git a/internal/lsp/regtest/regtest.go b/internal/lsp/regtest/regtest.go deleted file mode 100644 index 9ebc673f8c0..00000000000 --- a/internal/lsp/regtest/regtest.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "os" - "runtime" - "testing" - "time" - - "golang.org/x/tools/internal/lsp/cmd" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/testenv" - "golang.org/x/tools/internal/tool" -) - -var ( - runSubprocessTests = flag.Bool("enable_gopls_subprocess_tests", false, "run regtests against a gopls subprocess") - goplsBinaryPath = flag.String("gopls_test_binary", "", "path to the gopls binary for use as a remote, for use with the -enable_gopls_subprocess_tests flag") - regtestTimeout = flag.Duration("regtest_timeout", defaultRegtestTimeout(), "if nonzero, default timeout for each regtest; defaults to GOPLS_REGTEST_TIMEOUT") - skipCleanup = flag.Bool("regtest_skip_cleanup", false, "whether to skip cleaning up temp directories") - printGoroutinesOnFailure = flag.Bool("regtest_print_goroutines", false, "whether to print goroutines info on failure") - printLogs = flag.Bool("regtest_print_logs", false, "whether to print LSP logs") -) - -func defaultRegtestTimeout() time.Duration { - s := os.Getenv("GOPLS_REGTEST_TIMEOUT") - if s == "" { - return 0 - } - d, err := time.ParseDuration(s) - if err != nil { - fmt.Fprintf(os.Stderr, "invalid GOPLS_REGTEST_TIMEOUT %q: %v\n", s, err) - os.Exit(2) - } - return d -} - -var runner *Runner - -type regtestRunner interface { - Run(t *testing.T, files string, f TestFunc) -} - -func Run(t *testing.T, files string, f TestFunc) { - runner.Run(t, files, f) -} - -func WithOptions(opts ...RunOption) configuredRunner { - return configuredRunner{opts: opts} -} - -type configuredRunner struct { - opts []RunOption -} - -func (r configuredRunner) Run(t *testing.T, files string, f TestFunc) { - runner.Run(t, files, f, r.opts...) -} - -type RunMultiple []struct { - Name string - Runner regtestRunner -} - -func (r RunMultiple) Run(t *testing.T, files string, f TestFunc) { - for _, runner := range r { - t.Run(runner.Name, func(t *testing.T) { - runner.Runner.Run(t, files, f) - }) - } -} - -// The regtests run significantly slower on these operating systems, due to (we -// believe) kernel locking behavior. Only run in singleton mode on these -// operating system when using -short. -var slowGOOS = map[string]bool{ - "darwin": true, - "openbsd": true, - "plan9": true, -} - -func DefaultModes() Mode { - normal := Singleton | Experimental - if slowGOOS[runtime.GOOS] && testing.Short() { - normal = Singleton - } - if *runSubprocessTests { - return normal | SeparateProcess - } - return normal -} - -// Main sets up and tears down the shared regtest state. -func Main(m *testing.M, hook func(*source.Options)) { - testenv.ExitIfSmallMachine() - - // Disable GOPACKAGESDRIVER, as it can cause spurious test failures. - os.Setenv("GOPACKAGESDRIVER", "off") - - flag.Parse() - if os.Getenv("_GOPLS_TEST_BINARY_RUN_AS_GOPLS") == "true" { - tool.Main(context.Background(), cmd.New("gopls", "", nil, nil), os.Args[1:]) - os.Exit(0) - } - - runner = &Runner{ - DefaultModes: DefaultModes(), - Timeout: *regtestTimeout, - PrintGoroutinesOnFailure: *printGoroutinesOnFailure, - SkipCleanup: *skipCleanup, - OptionsHook: hook, - } - if *runSubprocessTests { - goplsPath := *goplsBinaryPath - if goplsPath == "" { - var err error - goplsPath, err = os.Executable() - if err != nil { - panic(fmt.Sprintf("finding test binary path: %v", err)) - } - } - runner.GoplsPath = goplsPath - } - dir, err := ioutil.TempDir("", "gopls-regtest-") - if err != nil { - panic(fmt.Errorf("creating regtest temp directory: %v", err)) - } - runner.TempDir = dir - - code := m.Run() - if err := runner.Close(); err != nil { - fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err) - // Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on - // Windows due to file locking, but this is OK for our CI. - // - // Fail on go1.13+, except for windows and android which have shutdown problems. - if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" { - os.Exit(1) - } - } - os.Exit(code) -} diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go deleted file mode 100644 index 3cfeb772a19..00000000000 --- a/internal/lsp/regtest/runner.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - "runtime/pprof" - "strings" - "sync" - "testing" - "time" - - exec "golang.org/x/sys/execabs" - - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/jsonrpc2/servertest" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/debug" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/lsprpc" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/testenv" - "golang.org/x/tools/internal/xcontext" -) - -// Mode is a bitmask that defines for which execution modes a test should run. -type Mode int - -const ( - // Singleton mode uses a separate in-process gopls instance for each test, - // and communicates over pipes to mimic the gopls sidecar execution mode, - // which communicates over stdin/stderr. - Singleton Mode = 1 << iota - // Forwarded forwards connections to a shared in-process gopls instance. - Forwarded - // SeparateProcess forwards connection to a shared separate gopls process. - SeparateProcess - // Experimental enables all of the experimental configurations that are - // being developed. - Experimental -) - -// A Runner runs tests in gopls execution environments, as specified by its -// modes. For modes that share state (for example, a shared cache or common -// remote), any tests that execute on the same Runner will share the same -// state. -type Runner struct { - DefaultModes Mode - Timeout time.Duration - GoplsPath string - PrintGoroutinesOnFailure bool - TempDir string - SkipCleanup bool - OptionsHook func(*source.Options) - - mu sync.Mutex - ts *servertest.TCPServer - socketDir string - // closers is a queue of clean-up functions to run at the end of the entire - // test suite. - closers []io.Closer -} - -type runConfig struct { - editor fake.EditorConfig - sandbox fake.SandboxConfig - modes Mode - noDefaultTimeout bool - debugAddr string - skipLogs bool - skipHooks bool - optionsHook func(*source.Options) -} - -func (r *Runner) defaultConfig() *runConfig { - return &runConfig{ - modes: r.DefaultModes, - optionsHook: r.OptionsHook, - } -} - -// A RunOption augments the behavior of the test runner. -type RunOption interface { - set(*runConfig) -} - -type optionSetter func(*runConfig) - -func (f optionSetter) set(opts *runConfig) { - f(opts) -} - -// NoDefaultTimeout removes the timeout set by the -regtest_timeout flag, for -// individual tests that are expected to run longer than is reasonable for -// ordinary regression tests. -func NoDefaultTimeout() RunOption { - return optionSetter(func(opts *runConfig) { - opts.noDefaultTimeout = true - }) -} - -// ProxyFiles configures a file proxy using the given txtar-encoded string. -func ProxyFiles(txt string) RunOption { - return optionSetter(func(opts *runConfig) { - opts.sandbox.ProxyFiles = fake.UnpackTxt(txt) - }) -} - -// Modes configures the execution modes that the test should run in. -func Modes(modes Mode) RunOption { - return optionSetter(func(opts *runConfig) { - opts.modes = modes - }) -} - -// Options configures the various server and user options. -func Options(hook func(*source.Options)) RunOption { - return optionSetter(func(opts *runConfig) { - old := opts.optionsHook - opts.optionsHook = func(o *source.Options) { - if old != nil { - old(o) - } - hook(o) - } - }) -} - -func SendPID() RunOption { - return optionSetter(func(opts *runConfig) { - opts.editor.SendPID = true - }) -} - -// EditorConfig is a RunOption option that configured the regtest editor. -type EditorConfig fake.EditorConfig - -func (c EditorConfig) set(opts *runConfig) { - opts.editor = fake.EditorConfig(c) -} - -// WorkspaceFolders configures the workdir-relative workspace folders to send -// to the LSP server. By default the editor sends a single workspace folder -// corresponding to the workdir root. To explicitly configure no workspace -// folders, use WorkspaceFolders with no arguments. -func WorkspaceFolders(relFolders ...string) RunOption { - if len(relFolders) == 0 { - // Use an empty non-nil slice to signal explicitly no folders. - relFolders = []string{} - } - return optionSetter(func(opts *runConfig) { - opts.editor.WorkspaceFolders = relFolders - }) -} - -// InGOPATH configures the workspace working directory to be GOPATH, rather -// than a separate working directory for use with modules. -func InGOPATH() RunOption { - return optionSetter(func(opts *runConfig) { - opts.sandbox.InGoPath = true - }) -} - -// DebugAddress configures a debug server bound to addr. This option is -// currently only supported when executing in Singleton mode. It is intended to -// be used for long-running stress tests. -func DebugAddress(addr string) RunOption { - return optionSetter(func(opts *runConfig) { - opts.debugAddr = addr - }) -} - -// SkipLogs skips the buffering of logs during test execution. It is intended -// for long-running stress tests. -func SkipLogs() RunOption { - return optionSetter(func(opts *runConfig) { - opts.skipLogs = true - }) -} - -// InExistingDir runs the test in a pre-existing directory. If set, no initial -// files may be passed to the runner. It is intended for long-running stress -// tests. -func InExistingDir(dir string) RunOption { - return optionSetter(func(opts *runConfig) { - opts.sandbox.Workdir = dir - }) -} - -// SkipHooks allows for disabling the test runner's client hooks that are used -// for instrumenting expectations (tracking diagnostics, logs, work done, -// etc.). It is intended for performance-sensitive stress tests or benchmarks. -func SkipHooks(skip bool) RunOption { - return optionSetter(func(opts *runConfig) { - opts.skipHooks = skip - }) -} - -// GOPROXY configures the test environment to have an explicit proxy value. -// This is intended for stress tests -- to ensure their isolation, regtests -// should instead use WithProxyFiles. -func GOPROXY(goproxy string) RunOption { - return optionSetter(func(opts *runConfig) { - opts.sandbox.GOPROXY = goproxy - }) -} - -// LimitWorkspaceScope sets the LimitWorkspaceScope configuration. -func LimitWorkspaceScope() RunOption { - return optionSetter(func(opts *runConfig) { - opts.editor.LimitWorkspaceScope = true - }) -} - -type TestFunc func(t *testing.T, env *Env) - -// Run executes the test function in the default configured gopls execution -// modes. For each a test run, a new workspace is created containing the -// un-txtared files specified by filedata. -func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) { - t.Helper() - checkBuilder(t) - - tests := []struct { - name string - mode Mode - getServer func(context.Context, *testing.T, func(*source.Options)) jsonrpc2.StreamServer - }{ - {"singleton", Singleton, singletonServer}, - {"forwarded", Forwarded, r.forwardedServer}, - {"separate_process", SeparateProcess, r.separateProcessServer}, - {"experimental", Experimental, experimentalServer}, - } - - for _, tc := range tests { - tc := tc - config := r.defaultConfig() - for _, opt := range opts { - opt.set(config) - } - if config.modes&tc.mode == 0 { - continue - } - if config.debugAddr != "" && tc.mode != Singleton { - // Debugging is useful for running stress tests, but since the daemon has - // likely already been started, it would be too late to debug. - t.Fatalf("debugging regtest servers only works in Singleton mode, "+ - "got debug addr %q and mode %v", config.debugAddr, tc.mode) - } - - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - if r.Timeout != 0 && !config.noDefaultTimeout { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.Timeout) - defer cancel() - } else if d, ok := testenv.Deadline(t); ok { - timeout := time.Until(d) * 19 / 20 // Leave an arbitrary 5% for cleanup. - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - ctx = debug.WithInstance(ctx, "", "off") - if config.debugAddr != "" { - di := debug.GetInstance(ctx) - di.Serve(ctx, config.debugAddr) - di.MonitorMemory(ctx) - } - - rootDir := filepath.Join(r.TempDir, filepath.FromSlash(t.Name())) - if err := os.MkdirAll(rootDir, 0755); err != nil { - t.Fatal(err) - } - files := fake.UnpackTxt(files) - if config.editor.WindowsLineEndings { - for name, data := range files { - files[name] = bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n")) - } - } - config.sandbox.Files = files - config.sandbox.RootDir = rootDir - sandbox, err := fake.NewSandbox(&config.sandbox) - if err != nil { - t.Fatal(err) - } - // Deferring the closure of ws until the end of the entire test suite - // has, in testing, given the LSP server time to properly shutdown and - // release any file locks held in workspace, which is a problem on - // Windows. This may still be flaky however, and in the future we need a - // better solution to ensure that all Go processes started by gopls have - // exited before we clean up. - r.AddCloser(sandbox) - ss := tc.getServer(ctx, t, config.optionsHook) - framer := jsonrpc2.NewRawStream - ls := &loggingFramer{} - if !config.skipLogs { - framer = ls.framer(jsonrpc2.NewRawStream) - } - ts := servertest.NewPipeServer(ctx, ss, framer) - env := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks) - defer func() { - if t.Failed() && r.PrintGoroutinesOnFailure { - pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) - } - if t.Failed() || *printLogs { - ls.printBuffers(t.Name(), os.Stderr) - } - // For tests that failed due to a timeout, don't fail to shutdown - // because ctx is done. - closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second) - defer cancel() - if err := env.Editor.Close(closeCtx); err != nil { - t.Errorf("closing editor: %v", err) - } - }() - // Always await the initial workspace load. - env.Await(InitialWorkspaceLoad) - test(t, env) - }) - } -} - -// longBuilders maps builders that are skipped when -short is set to a -// (possibly empty) justification. -var longBuilders = map[string]string{ - "openbsd-amd64-64": "golang.org/issues/42789", - "openbsd-386-64": "golang.org/issues/42789", - "openbsd-386-68": "golang.org/issues/42789", - "openbsd-amd64-68": "golang.org/issues/42789", - "darwin-amd64-10_12": "", - "freebsd-amd64-race": "", - "illumos-amd64": "", - "netbsd-arm-bsiegert": "", - "solaris-amd64-oraclerel": "", - "windows-arm-zx2c4": "", -} - -func checkBuilder(t *testing.T) { - t.Helper() - builder := os.Getenv("GO_BUILDER_NAME") - if reason, ok := longBuilders[builder]; ok && testing.Short() { - if reason != "" { - t.Skipf("Skipping %s with -short due to %s", builder, reason) - } else { - t.Skipf("Skipping %s with -short", builder) - } - } -} - -type loggingFramer struct { - mu sync.Mutex - buf *safeBuffer -} - -// safeBuffer is a threadsafe buffer for logs. -type safeBuffer struct { - mu sync.Mutex - buf bytes.Buffer -} - -func (b *safeBuffer) Write(p []byte) (int, error) { - b.mu.Lock() - defer b.mu.Unlock() - return b.buf.Write(p) -} - -func (s *loggingFramer) framer(f jsonrpc2.Framer) jsonrpc2.Framer { - return func(nc net.Conn) jsonrpc2.Stream { - s.mu.Lock() - framed := false - if s.buf == nil { - s.buf = &safeBuffer{buf: bytes.Buffer{}} - framed = true - } - s.mu.Unlock() - stream := f(nc) - if framed { - return protocol.LoggingStream(stream, s.buf) - } - return stream - } -} - -func (s *loggingFramer) printBuffers(testname string, w io.Writer) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.buf == nil { - return - } - fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs for %q\n", testname) - s.buf.mu.Lock() - io.Copy(w, &s.buf.buf) - s.buf.mu.Unlock() - fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname) -} - -func singletonServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - return lsprpc.NewStreamServer(cache.New(optsHook), false) -} - -func experimentalServer(_ context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - options := func(o *source.Options) { - optsHook(o) - o.EnableAllExperiments() - // ExperimentalWorkspaceModule is not (as of writing) enabled by - // source.Options.EnableAllExperiments, but we want to test it. - o.ExperimentalWorkspaceModule = true - } - return lsprpc.NewStreamServer(cache.New(options), false) -} - -func (r *Runner) forwardedServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - ts := r.getTestServer(optsHook) - return newForwarder("tcp", ts.Addr) -} - -// getTestServer gets the shared test server instance to connect to, or creates -// one if it doesn't exist. -func (r *Runner) getTestServer(optsHook func(*source.Options)) *servertest.TCPServer { - r.mu.Lock() - defer r.mu.Unlock() - if r.ts == nil { - ctx := context.Background() - ctx = debug.WithInstance(ctx, "", "off") - ss := lsprpc.NewStreamServer(cache.New(optsHook), false) - r.ts = servertest.NewTCPServer(ctx, ss, nil) - } - return r.ts -} - -func (r *Runner) separateProcessServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - // TODO(rfindley): can we use the autostart behavior here, instead of - // pre-starting the remote? - socket := r.getRemoteSocket(t) - return newForwarder("unix", socket) -} - -func newForwarder(network, address string) *lsprpc.Forwarder { - server, err := lsprpc.NewForwarder(network+";"+address, nil) - if err != nil { - // This should never happen, as we are passing an explicit address. - panic(fmt.Sprintf("internal error: unable to create forwarder: %v", err)) - } - return server -} - -// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running -// tests. It's a trick to allow tests to find a binary to use to start a gopls -// subprocess. -const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS" - -func (r *Runner) getRemoteSocket(t *testing.T) string { - t.Helper() - r.mu.Lock() - defer r.mu.Unlock() - const daemonFile = "gopls-test-daemon" - if r.socketDir != "" { - return filepath.Join(r.socketDir, daemonFile) - } - - if r.GoplsPath == "" { - t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured") - } - var err error - r.socketDir, err = ioutil.TempDir(r.TempDir, "gopls-regtest-socket") - if err != nil { - t.Fatalf("creating tempdir: %v", err) - } - socket := filepath.Join(r.socketDir, daemonFile) - args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"} - cmd := exec.Command(r.GoplsPath, args...) - cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true") - var stderr bytes.Buffer - cmd.Stderr = &stderr - go func() { - if err := cmd.Run(); err != nil { - panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String())) - } - }() - return socket -} - -// AddCloser schedules a closer to be closed at the end of the test run. This -// is useful for Windows in particular, as -func (r *Runner) AddCloser(closer io.Closer) { - r.mu.Lock() - defer r.mu.Unlock() - r.closers = append(r.closers, closer) -} - -// Close cleans up resource that have been allocated to this workspace. -func (r *Runner) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - - var errmsgs []string - if r.ts != nil { - if err := r.ts.Close(); err != nil { - errmsgs = append(errmsgs, err.Error()) - } - } - if r.socketDir != "" { - if err := os.RemoveAll(r.socketDir); err != nil { - errmsgs = append(errmsgs, err.Error()) - } - } - if !r.SkipCleanup { - for _, closer := range r.closers { - if err := closer.Close(); err != nil { - errmsgs = append(errmsgs, err.Error()) - } - } - if err := os.RemoveAll(r.TempDir); err != nil { - errmsgs = append(errmsgs, err.Error()) - } - } - if len(errmsgs) > 0 { - return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t")) - } - return nil -} diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go deleted file mode 100644 index 9031e71f1f1..00000000000 --- a/internal/lsp/regtest/wrappers.go +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package regtest - -import ( - "encoding/json" - "path" - "testing" - - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/fake" - "golang.org/x/tools/internal/lsp/protocol" -) - -func (e *Env) ChangeFilesOnDisk(events []fake.FileEvent) { - e.T.Helper() - if err := e.Sandbox.Workdir.ChangeFilesOnDisk(e.Ctx, events); err != nil { - e.T.Fatal(err) - } -} - -// RemoveWorkspaceFile deletes a file on disk but does nothing in the -// editor. It calls t.Fatal on any error. -func (e *Env) RemoveWorkspaceFile(name string) { - e.T.Helper() - if err := e.Sandbox.Workdir.RemoveFile(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// ReadWorkspaceFile reads a file from the workspace, calling t.Fatal on any -// error. -func (e *Env) ReadWorkspaceFile(name string) string { - e.T.Helper() - content, err := e.Sandbox.Workdir.ReadFile(name) - if err != nil { - e.T.Fatal(err) - } - return content -} - -// WriteWorkspaceFile writes a file to disk but does nothing in the editor. -// It calls t.Fatal on any error. -func (e *Env) WriteWorkspaceFile(name, content string) { - e.T.Helper() - if err := e.Sandbox.Workdir.WriteFile(e.Ctx, name, content); err != nil { - e.T.Fatal(err) - } -} - -// WriteWorkspaceFiles deletes a file on disk but does nothing in the -// editor. It calls t.Fatal on any error. -func (e *Env) WriteWorkspaceFiles(files map[string]string) { - e.T.Helper() - if err := e.Sandbox.Workdir.WriteFiles(e.Ctx, files); err != nil { - e.T.Fatal(err) - } -} - -// OpenFile opens a file in the editor, calling t.Fatal on any error. -func (e *Env) OpenFile(name string) { - e.T.Helper() - if err := e.Editor.OpenFile(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// CreateBuffer creates a buffer in the editor, calling t.Fatal on any error. -func (e *Env) CreateBuffer(name string, content string) { - e.T.Helper() - if err := e.Editor.CreateBuffer(e.Ctx, name, content); err != nil { - e.T.Fatal(err) - } -} - -// CloseBuffer closes an editor buffer without saving, calling t.Fatal on any -// error. -func (e *Env) CloseBuffer(name string) { - e.T.Helper() - if err := e.Editor.CloseBuffer(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// EditBuffer applies edits to an editor buffer, calling t.Fatal on any error. -func (e *Env) EditBuffer(name string, edits ...fake.Edit) { - e.T.Helper() - if err := e.Editor.EditBuffer(e.Ctx, name, edits); err != nil { - e.T.Fatal(err) - } -} - -func (e *Env) SetBufferContent(name string, content string) { - e.T.Helper() - if err := e.Editor.SetBufferContent(e.Ctx, name, content); err != nil { - e.T.Fatal(err) - } -} - -// RegexpRange returns the range of the first match for re in the buffer -// specified by name, calling t.Fatal on any error. It first searches for the -// position in open buffers, then in workspace files. -func (e *Env) RegexpRange(name, re string) (fake.Pos, fake.Pos) { - e.T.Helper() - start, end, err := e.Editor.RegexpRange(name, re) - if err == fake.ErrUnknownBuffer { - start, end, err = e.Sandbox.Workdir.RegexpRange(name, re) - } - if err != nil { - e.T.Fatalf("RegexpRange: %v, %v", name, err) - } - return start, end -} - -// RegexpSearch returns the starting position of the first match for re in the -// buffer specified by name, calling t.Fatal on any error. It first searches -// for the position in open buffers, then in workspace files. -func (e *Env) RegexpSearch(name, re string) fake.Pos { - e.T.Helper() - pos, err := e.Editor.RegexpSearch(name, re) - if err == fake.ErrUnknownBuffer { - pos, err = e.Sandbox.Workdir.RegexpSearch(name, re) - } - if err != nil { - e.T.Fatalf("RegexpSearch: %v, %v", name, err) - } - return pos -} - -// RegexpReplace replaces the first group in the first match of regexpStr with -// the replace text, calling t.Fatal on any error. -func (e *Env) RegexpReplace(name, regexpStr, replace string) { - e.T.Helper() - if err := e.Editor.RegexpReplace(e.Ctx, name, regexpStr, replace); err != nil { - e.T.Fatalf("RegexpReplace: %v", err) - } -} - -// SaveBuffer saves an editor buffer, calling t.Fatal on any error. -func (e *Env) SaveBuffer(name string) { - e.T.Helper() - if err := e.Editor.SaveBuffer(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -func (e *Env) SaveBufferWithoutActions(name string) { - e.T.Helper() - if err := e.Editor.SaveBufferWithoutActions(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// GoToDefinition goes to definition in the editor, calling t.Fatal on any -// error. It returns the path and position of the resulting jump. -func (e *Env) GoToDefinition(name string, pos fake.Pos) (string, fake.Pos) { - e.T.Helper() - n, p, err := e.Editor.GoToDefinition(e.Ctx, name, pos) - if err != nil { - e.T.Fatal(err) - } - return n, p -} - -// Symbol returns symbols matching query -func (e *Env) Symbol(query string) []fake.SymbolInformation { - e.T.Helper() - r, err := e.Editor.Symbol(e.Ctx, query) - if err != nil { - e.T.Fatal(err) - } - return r -} - -// FormatBuffer formats the editor buffer, calling t.Fatal on any error. -func (e *Env) FormatBuffer(name string) { - e.T.Helper() - if err := e.Editor.FormatBuffer(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// OrganizeImports processes the source.organizeImports codeAction, calling -// t.Fatal on any error. -func (e *Env) OrganizeImports(name string) { - e.T.Helper() - if err := e.Editor.OrganizeImports(e.Ctx, name); err != nil { - e.T.Fatal(err) - } -} - -// ApplyQuickFixes processes the quickfix codeAction, calling t.Fatal on any error. -func (e *Env) ApplyQuickFixes(path string, diagnostics []protocol.Diagnostic) { - e.T.Helper() - if err := e.Editor.ApplyQuickFixes(e.Ctx, path, nil, diagnostics); err != nil { - e.T.Fatal(err) - } -} - -// ApplyCodeAction applies the given code action. -func (e *Env) ApplyCodeAction(action protocol.CodeAction) { - e.T.Helper() - if err := e.Editor.ApplyCodeAction(e.Ctx, action); err != nil { - e.T.Fatal(err) - } -} - -// GetQuickFixes returns the available quick fix code actions. -func (e *Env) GetQuickFixes(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction { - e.T.Helper() - actions, err := e.Editor.GetQuickFixes(e.Ctx, path, nil, diagnostics) - if err != nil { - e.T.Fatal(err) - } - return actions -} - -// Hover in the editor, calling t.Fatal on any error. -func (e *Env) Hover(name string, pos fake.Pos) (*protocol.MarkupContent, fake.Pos) { - e.T.Helper() - c, p, err := e.Editor.Hover(e.Ctx, name, pos) - if err != nil { - e.T.Fatal(err) - } - return c, p -} - -func (e *Env) DocumentLink(name string) []protocol.DocumentLink { - e.T.Helper() - links, err := e.Editor.DocumentLink(e.Ctx, name) - if err != nil { - e.T.Fatal(err) - } - return links -} - -func (e *Env) DocumentHighlight(name string, pos fake.Pos) []protocol.DocumentHighlight { - e.T.Helper() - highlights, err := e.Editor.DocumentHighlight(e.Ctx, name, pos) - if err != nil { - e.T.Fatal(err) - } - return highlights -} - -// RunGenerate runs go:generate on the given dir, calling t.Fatal on any error. -// It waits for the generate command to complete and checks for file changes -// before returning. -func (e *Env) RunGenerate(dir string) { - e.T.Helper() - if err := e.Editor.RunGenerate(e.Ctx, dir); err != nil { - e.T.Fatal(err) - } - e.Await(NoOutstandingWork()) - // Ideally the fake.Workspace would handle all synthetic file watching, but - // we help it out here as we need to wait for the generate command to - // complete before checking the filesystem. - e.CheckForFileChanges() -} - -// RunGoCommand runs the given command in the sandbox's default working -// directory. -func (e *Env) RunGoCommand(verb string, args ...string) { - e.T.Helper() - if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, true); err != nil { - e.T.Fatal(err) - } -} - -// RunGoCommandInDir is like RunGoCommand, but executes in the given -// relative directory of the sandbox. -func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) { - e.T.Helper() - if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, true); err != nil { - e.T.Fatal(err) - } -} - -// DumpGoSum prints the correct go.sum contents for dir in txtar format, -// for use in creating regtests. -func (e *Env) DumpGoSum(dir string) { - e.T.Helper() - - if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil { - e.T.Fatal(err) - } - sumFile := path.Join(dir, "/go.sum") - e.T.Log("\n\n-- " + sumFile + " --\n" + e.ReadWorkspaceFile(sumFile)) - e.T.Fatal("see contents above") -} - -// CheckForFileChanges triggers a manual poll of the workspace for any file -// changes since creation, or since last polling. It is a workaround for the -// lack of true file watching support in the fake workspace. -func (e *Env) CheckForFileChanges() { - e.T.Helper() - if err := e.Sandbox.Workdir.CheckForFileChanges(e.Ctx); err != nil { - e.T.Fatal(err) - } -} - -// CodeLens calls textDocument/codeLens for the given path, calling t.Fatal on -// any error. -func (e *Env) CodeLens(path string) []protocol.CodeLens { - e.T.Helper() - lens, err := e.Editor.CodeLens(e.Ctx, path) - if err != nil { - e.T.Fatal(err) - } - return lens -} - -// ExecuteCodeLensCommand executes the command for the code lens matching the -// given command name. -func (e *Env) ExecuteCodeLensCommand(path string, cmd command.Command) { - e.T.Helper() - lenses := e.CodeLens(path) - var lens protocol.CodeLens - var found bool - for _, l := range lenses { - if l.Command.Command == cmd.ID() { - lens = l - found = true - } - } - if !found { - e.T.Fatalf("found no command with the ID %s", cmd.ID()) - } - e.ExecuteCommand(&protocol.ExecuteCommandParams{ - Command: lens.Command.Command, - Arguments: lens.Command.Arguments, - }, nil) -} - -func (e *Env) ExecuteCommand(params *protocol.ExecuteCommandParams, result interface{}) { - e.T.Helper() - response, err := e.Editor.ExecuteCommand(e.Ctx, params) - if err != nil { - e.T.Fatal(err) - } - if result == nil { - return - } - // Hack: The result of an executeCommand request will be unmarshaled into - // maps. Re-marshal and unmarshal into the type we expect. - // - // This could be improved by generating a jsonrpc2 command client from the - // command.Interface, but that should only be done if we're consolidating - // this part of the tsprotocol generation. - data, err := json.Marshal(response) - if err != nil { - e.T.Fatal(err) - } - if err := json.Unmarshal(data, result); err != nil { - e.T.Fatal(err) - } -} - -// WorkspaceSymbol calls workspace/symbol -func (e *Env) WorkspaceSymbol(sym string) []protocol.SymbolInformation { - e.T.Helper() - ans, err := e.Editor.Symbols(e.Ctx, sym) - if err != nil { - e.T.Fatal(err) - } - return ans -} - -// References calls textDocument/references for the given path at the given -// position. -func (e *Env) References(path string, pos fake.Pos) []protocol.Location { - e.T.Helper() - locations, err := e.Editor.References(e.Ctx, path, pos) - if err != nil { - e.T.Fatal(err) - } - return locations -} - -func (e *Env) Rename(path string, pos fake.Pos, newName string) { - e.T.Helper() - if err := e.Editor.Rename(e.Ctx, path, pos, newName); err != nil { - e.T.Fatal(err) - } -} - -// Completion executes a completion request on the server. -func (e *Env) Completion(path string, pos fake.Pos) *protocol.CompletionList { - e.T.Helper() - completions, err := e.Editor.Completion(e.Ctx, path, pos) - if err != nil { - e.T.Fatal(err) - } - return completions -} - -// AcceptCompletion accepts a completion for the given item at the given -// position. -func (e *Env) AcceptCompletion(path string, pos fake.Pos, item protocol.CompletionItem) { - e.T.Helper() - if err := e.Editor.AcceptCompletion(e.Ctx, path, pos, item); err != nil { - e.T.Fatal(err) - } -} - -// CodeAction calls testDocument/codeAction for the given path, and calls -// t.Fatal if there are errors. -func (e *Env) CodeAction(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction { - e.T.Helper() - actions, err := e.Editor.CodeAction(e.Ctx, path, nil, diagnostics) - if err != nil { - e.T.Fatal(err) - } - return actions -} - -func (e *Env) ChangeConfiguration(t *testing.T, config *fake.EditorConfig) { - e.Editor.Config = *config - if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &protocol.DidChangeConfigurationParams{ - // gopls currently ignores the Settings field - }); err != nil { - t.Fatal(err) - } -} - -// ChangeEnv modifies the editor environment and reconfigures the LSP client. -// TODO: extend this to "ChangeConfiguration", once we refactor the way editor -// configuration is defined. -func (e *Env) ChangeEnv(overlay map[string]string) { - e.T.Helper() - // TODO: to be correct, this should probably be synchronized, but right now - // configuration is only ever modified synchronously in a regtest, so this - // correctness can wait for the previously mentioned refactoring. - if e.Editor.Config.Env == nil { - e.Editor.Config.Env = make(map[string]string) - } - for k, v := range overlay { - e.Editor.Config.Env[k] = v - } - var params protocol.DidChangeConfigurationParams - if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, ¶ms); err != nil { - e.T.Fatal(err) - } -} diff --git a/internal/lsp/rename.go b/internal/lsp/rename.go deleted file mode 100644 index 739ae906b37..00000000000 --- a/internal/lsp/rename.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - edits, err := source.Rename(ctx, snapshot, fh, params.Position, params.NewName) - if err != nil { - return nil, err - } - - var docChanges []protocol.TextDocumentEdit - for uri, e := range edits { - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - return nil, err - } - docChanges = append(docChanges, documentChanges(fh, e)...) - } - return &protocol.WorkspaceEdit{ - DocumentChanges: docChanges, - }, nil -} - -func (s *Server) prepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - // Do not return errors here, as it adds clutter. - // Returning a nil result means there is not a valid rename. - item, usererr, err := source.PrepareRename(ctx, snapshot, fh, params.Position) - if err != nil { - // Return usererr here rather than err, to avoid cluttering the UI with - // internal error details. - return nil, usererr - } - return &protocol.PrepareRename2Gn{ - Range: item.Range, - Placeholder: item.Text, - }, nil -} diff --git a/internal/lsp/reset_golden.sh b/internal/lsp/reset_golden.sh deleted file mode 100755 index 2689407ca15..00000000000 --- a/internal/lsp/reset_golden.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -find ./internal/lsp/ -name *.golden -delete -go test ./internal/lsp/source -golden -go test ./internal/lsp/ -golden -go test ./internal/lsp/cmd -golden diff --git a/internal/lsp/safetoken/safetoken.go b/internal/lsp/safetoken/safetoken.go deleted file mode 100644 index 6898df0bd74..00000000000 --- a/internal/lsp/safetoken/safetoken.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package safetoken provides wrappers around methods in go/token, that return -// errors rather than panicking. -package safetoken - -import ( - "fmt" - "go/token" -) - -// Offset returns tok.Offset(pos), but first checks that the pos is in range -// for the given file. -func Offset(tf *token.File, pos token.Pos) (int, error) { - if !InRange(tf, pos) { - return -1, fmt.Errorf("pos %v is not in range for file [%v:%v)", pos, tf.Base(), tf.Base()+tf.Size()) - } - return tf.Offset(pos), nil -} - -// Pos returns tok.Pos(offset), but first checks that the offset is valid for -// the given file. -func Pos(tf *token.File, offset int) (token.Pos, error) { - if offset < 0 || offset > tf.Size() { - return token.NoPos, fmt.Errorf("offset %v is not in range for file of size %v", offset, tf.Size()) - } - return tf.Pos(offset), nil -} - -// InRange reports whether the given position is in the given token.File. -func InRange(tf *token.File, pos token.Pos) bool { - size := tf.Pos(tf.Size()) - return int(pos) >= tf.Base() && pos <= size -} diff --git a/internal/lsp/safetoken/safetoken_test.go b/internal/lsp/safetoken/safetoken_test.go deleted file mode 100644 index 43d73a74d78..00000000000 --- a/internal/lsp/safetoken/safetoken_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package safetoken_test - -import ( - "go/token" - "go/types" - "testing" - - "golang.org/x/tools/go/packages" -) - -// This test reports any unexpected uses of (*go/token.File).Offset within -// the gopls codebase to ensure that we don't check in more code that is prone -// to panicking. All calls to (*go/token.File).Offset should be replaced with -// calls to safetoken.Offset. -func TestTokenOffset(t *testing.T) { - fset := token.NewFileSet() - pkgs, err := packages.Load(&packages.Config{ - Fset: fset, - Mode: packages.NeedName | packages.NeedModule | packages.NeedCompiledGoFiles | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps, - }, "go/token", "golang.org/x/tools/internal/lsp/...", "golang.org/x/tools/gopls/...") - if err != nil { - t.Fatal(err) - } - var tokenPkg, safePkg *packages.Package - for _, pkg := range pkgs { - switch pkg.PkgPath { - case "go/token": - tokenPkg = pkg - case "golang.org/x/tools/internal/lsp/safetoken": - safePkg = pkg - } - } - - if tokenPkg == nil { - t.Fatal("missing package go/token") - } - if safePkg == nil { - t.Fatal("missing package golang.org/x/tools/internal/lsp/safetoken") - } - - fileObj := tokenPkg.Types.Scope().Lookup("File") - tokenOffset, _, _ := types.LookupFieldOrMethod(fileObj.Type(), true, fileObj.Pkg(), "Offset") - - safeOffset := safePkg.Types.Scope().Lookup("Offset").(*types.Func) - - for _, pkg := range pkgs { - if pkg.PkgPath == "go/token" { // Allow usage from within go/token itself. - continue - } - for ident, obj := range pkg.TypesInfo.Uses { - if obj != tokenOffset { - continue - } - if safeOffset.Pos() <= ident.Pos() && ident.Pos() <= safeOffset.Scope().End() { - continue // accepted usage - } - t.Errorf(`%s: Unexpected use of (*go/token.File).Offset. Please use golang.org/x/tools/internal/lsp/safetoken.Offset instead.`, fset.Position(ident.Pos())) - } - } -} diff --git a/internal/lsp/signature_help.go b/internal/lsp/signature_help.go deleted file mode 100644 index 24dee1b9a8d..00000000000 --- a/internal/lsp/signature_help.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) signatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) { - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) - defer release() - if !ok { - return nil, err - } - info, activeParameter, err := source.SignatureHelp(ctx, snapshot, fh, params.Position) - if err != nil { - event.Error(ctx, "no signature help", err, tag.Position.Of(params.Position)) - return nil, nil - } - return &protocol.SignatureHelp{ - Signatures: []protocol.SignatureInformation{*info}, - ActiveParameter: uint32(activeParameter), - }, nil -} diff --git a/internal/lsp/source/add_import.go b/internal/lsp/source/add_import.go deleted file mode 100644 index 816acc2c25b..00000000000 --- a/internal/lsp/source/add_import.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/protocol" -) - -// AddImport adds a single import statement to the given file -func AddImport(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, importPath string) ([]protocol.TextEdit, error) { - _, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, err - } - return ComputeOneImportFixEdits(snapshot, pgf, &imports.ImportFix{ - StmtInfo: imports.ImportInfo{ - ImportPath: importPath, - }, - FixType: imports.AddImport, - }) -} diff --git a/internal/lsp/source/call_hierarchy.go b/internal/lsp/source/call_hierarchy.go deleted file mode 100644 index c2c8a1866d0..00000000000 --- a/internal/lsp/source/call_hierarchy.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "errors" - "fmt" - "go/ast" - "go/token" - "go/types" - "path/filepath" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// PrepareCallHierarchy returns an array of CallHierarchyItem for a file and the position within the file. -func PrepareCallHierarchy(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyItem, error) { - ctx, done := event.Start(ctx, "source.PrepareCallHierarchy") - defer done() - - identifier, err := Identifier(ctx, snapshot, fh, pos) - if err != nil { - if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) { - return nil, nil - } - return nil, err - } - - // The identifier can be nil if it is an import spec. - if identifier == nil || identifier.Declaration.obj == nil { - return nil, nil - } - - if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok { - return nil, nil - } - - if len(identifier.Declaration.MappedRange) == 0 { - return nil, nil - } - declMappedRange := identifier.Declaration.MappedRange[0] - rng, err := declMappedRange.Range() - if err != nil { - return nil, err - } - - callHierarchyItem := protocol.CallHierarchyItem{ - Name: identifier.Name, - Kind: protocol.Function, - Tags: []protocol.SymbolTag{}, - Detail: fmt.Sprintf("%s • %s", identifier.Declaration.obj.Pkg().Path(), filepath.Base(declMappedRange.URI().Filename())), - URI: protocol.DocumentURI(declMappedRange.URI()), - Range: rng, - SelectionRange: rng, - } - return []protocol.CallHierarchyItem{callHierarchyItem}, nil -} - -// IncomingCalls returns an array of CallHierarchyIncomingCall for a file and the position within the file. -func IncomingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyIncomingCall, error) { - ctx, done := event.Start(ctx, "source.IncomingCalls") - defer done() - - refs, err := References(ctx, snapshot, fh, pos, false) - if err != nil { - if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) { - return nil, nil - } - return nil, err - } - - return toProtocolIncomingCalls(ctx, snapshot, refs) -} - -// toProtocolIncomingCalls returns an array of protocol.CallHierarchyIncomingCall for ReferenceInfo's. -// References inside same enclosure are assigned to the same enclosing function. -func toProtocolIncomingCalls(ctx context.Context, snapshot Snapshot, refs []*ReferenceInfo) ([]protocol.CallHierarchyIncomingCall, error) { - // an enclosing node could have multiple calls to a reference, we only show the enclosure - // once in the result but highlight all calls using FromRanges (ranges at which the calls occur) - var incomingCalls = map[protocol.Location]*protocol.CallHierarchyIncomingCall{} - for _, ref := range refs { - refRange, err := ref.Range() - if err != nil { - return nil, err - } - - callItem, err := enclosingNodeCallItem(snapshot, ref.pkg, ref.URI(), ref.ident.NamePos) - if err != nil { - event.Error(ctx, "error getting enclosing node", err, tag.Method.Of(ref.Name)) - continue - } - loc := protocol.Location{ - URI: callItem.URI, - Range: callItem.Range, - } - - if incomingCall, ok := incomingCalls[loc]; ok { - incomingCall.FromRanges = append(incomingCall.FromRanges, refRange) - continue - } - incomingCalls[loc] = &protocol.CallHierarchyIncomingCall{ - From: callItem, - FromRanges: []protocol.Range{refRange}, - } - } - - incomingCallItems := make([]protocol.CallHierarchyIncomingCall, 0, len(incomingCalls)) - for _, callItem := range incomingCalls { - incomingCallItems = append(incomingCallItems, *callItem) - } - return incomingCallItems, nil -} - -// enclosingNodeCallItem creates a CallHierarchyItem representing the function call at pos -func enclosingNodeCallItem(snapshot Snapshot, pkg Package, uri span.URI, pos token.Pos) (protocol.CallHierarchyItem, error) { - pgf, err := pkg.File(uri) - if err != nil { - return protocol.CallHierarchyItem{}, err - } - - var funcDecl *ast.FuncDecl - var funcLit *ast.FuncLit // innermost function literal - var litCount int - // Find the enclosing function, if any, and the number of func literals in between. - path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) -outer: - for _, node := range path { - switch n := node.(type) { - case *ast.FuncDecl: - funcDecl = n - break outer - case *ast.FuncLit: - litCount++ - if litCount > 1 { - continue - } - funcLit = n - } - } - - nameIdent := path[len(path)-1].(*ast.File).Name - kind := protocol.Package - if funcDecl != nil { - nameIdent = funcDecl.Name - kind = protocol.Function - } - - nameStart, nameEnd := nameIdent.NamePos, nameIdent.NamePos+token.Pos(len(nameIdent.Name)) - if funcLit != nil { - nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos() - kind = protocol.Function - } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, nameStart, nameEnd).Range() - if err != nil { - return protocol.CallHierarchyItem{}, err - } - - name := nameIdent.Name - for i := 0; i < litCount; i++ { - name += ".func()" - } - - return protocol.CallHierarchyItem{ - Name: name, - Kind: kind, - Tags: []protocol.SymbolTag{}, - Detail: fmt.Sprintf("%s • %s", pkg.PkgPath(), filepath.Base(uri.Filename())), - URI: protocol.DocumentURI(uri), - Range: rng, - SelectionRange: rng, - }, nil -} - -// OutgoingCalls returns an array of CallHierarchyOutgoingCall for a file and the position within the file. -func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyOutgoingCall, error) { - ctx, done := event.Start(ctx, "source.OutgoingCalls") - defer done() - - identifier, err := Identifier(ctx, snapshot, fh, pos) - if err != nil { - if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) { - return nil, nil - } - return nil, err - } - - if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok { - return nil, nil - } - if identifier.Declaration.node == nil { - return nil, nil - } - if len(identifier.Declaration.MappedRange) == 0 { - return nil, nil - } - declMappedRange := identifier.Declaration.MappedRange[0] - callExprs, err := collectCallExpressions(snapshot.FileSet(), declMappedRange.m, identifier.Declaration.node) - if err != nil { - return nil, err - } - - return toProtocolOutgoingCalls(ctx, snapshot, fh, callExprs) -} - -// collectCallExpressions collects call expression ranges inside a function. -func collectCallExpressions(fset *token.FileSet, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) { - type callPos struct { - start, end token.Pos - } - callPositions := []callPos{} - - ast.Inspect(node, func(n ast.Node) bool { - if call, ok := n.(*ast.CallExpr); ok { - var start, end token.Pos - switch n := call.Fun.(type) { - case *ast.SelectorExpr: - start, end = n.Sel.NamePos, call.Lparen - case *ast.Ident: - start, end = n.NamePos, call.Lparen - case *ast.FuncLit: - // while we don't add the function literal as an 'outgoing' call - // we still want to traverse into it - return true - default: - // ignore any other kind of call expressions - // for ex: direct function literal calls since that's not an 'outgoing' call - return false - } - callPositions = append(callPositions, callPos{start: start, end: end}) - } - return true - }) - - callRanges := []protocol.Range{} - for _, call := range callPositions { - callRange, err := NewMappedRange(fset, mapper, call.start, call.end).Range() - if err != nil { - return nil, err - } - callRanges = append(callRanges, callRange) - } - return callRanges, nil -} - -// toProtocolOutgoingCalls returns an array of protocol.CallHierarchyOutgoingCall for ast call expressions. -// Calls to the same function are assigned to the same declaration. -func toProtocolOutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, callRanges []protocol.Range) ([]protocol.CallHierarchyOutgoingCall, error) { - // Multiple calls could be made to the same function, defined by "same declaration - // AST node & same identifier name" to provide a unique identifier key even when - // the func is declared in a struct or interface. - type key struct { - decl ast.Node - name string - } - outgoingCalls := map[key]*protocol.CallHierarchyOutgoingCall{} - for _, callRange := range callRanges { - identifier, err := Identifier(ctx, snapshot, fh, callRange.Start) - if err != nil { - if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) { - continue - } - return nil, err - } - - // ignore calls to builtin functions - if identifier.Declaration.obj.Pkg() == nil { - continue - } - - if outgoingCall, ok := outgoingCalls[key{identifier.Declaration.node, identifier.Name}]; ok { - outgoingCall.FromRanges = append(outgoingCall.FromRanges, callRange) - continue - } - - if len(identifier.Declaration.MappedRange) == 0 { - continue - } - declMappedRange := identifier.Declaration.MappedRange[0] - rng, err := declMappedRange.Range() - if err != nil { - return nil, err - } - - outgoingCalls[key{identifier.Declaration.node, identifier.Name}] = &protocol.CallHierarchyOutgoingCall{ - To: protocol.CallHierarchyItem{ - Name: identifier.Name, - Kind: protocol.Function, - Tags: []protocol.SymbolTag{}, - Detail: fmt.Sprintf("%s • %s", identifier.Declaration.obj.Pkg().Path(), filepath.Base(declMappedRange.URI().Filename())), - URI: protocol.DocumentURI(declMappedRange.URI()), - Range: rng, - SelectionRange: rng, - }, - FromRanges: []protocol.Range{callRange}, - } - } - - outgoingCallItems := make([]protocol.CallHierarchyOutgoingCall, 0, len(outgoingCalls)) - for _, callItem := range outgoingCalls { - outgoingCallItems = append(outgoingCallItems, *callItem) - } - return outgoingCallItems, nil -} diff --git a/internal/lsp/source/code_lens.go b/internal/lsp/source/code_lens.go deleted file mode 100644 index 0ab857ac600..00000000000 --- a/internal/lsp/source/code_lens.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "go/ast" - "go/token" - "go/types" - "path/filepath" - "regexp" - "strings" - - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -type LensFunc func(context.Context, Snapshot, FileHandle) ([]protocol.CodeLens, error) - -// LensFuncs returns the supported lensFuncs for Go files. -func LensFuncs() map[command.Command]LensFunc { - return map[command.Command]LensFunc{ - command.Generate: goGenerateCodeLens, - command.Test: runTestCodeLens, - command.RegenerateCgo: regenerateCgoLens, - command.GCDetails: toggleDetailsCodeLens, - } -} - -var ( - testRe = regexp.MustCompile("^Test[^a-z]") - benchmarkRe = regexp.MustCompile("^Benchmark[^a-z]") -) - -func runTestCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { - codeLens := make([]protocol.CodeLens, 0) - - fns, err := TestsAndBenchmarks(ctx, snapshot, fh) - if err != nil { - return nil, err - } - puri := protocol.URIFromSpanURI(fh.URI()) - for _, fn := range fns.Tests { - cmd, err := command.NewTestCommand("run test", puri, []string{fn.Name}, nil) - if err != nil { - return nil, err - } - rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start} - codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) - } - - for _, fn := range fns.Benchmarks { - cmd, err := command.NewTestCommand("run benchmark", puri, nil, []string{fn.Name}) - if err != nil { - return nil, err - } - rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start} - codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) - } - - if len(fns.Benchmarks) > 0 { - _, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage) - if err != nil { - return nil, err - } - // add a code lens to the top of the file which runs all benchmarks in the file - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() - if err != nil { - return nil, err - } - var benches []string - for _, fn := range fns.Benchmarks { - benches = append(benches, fn.Name) - } - cmd, err := command.NewTestCommand("run file benchmarks", puri, nil, benches) - if err != nil { - return nil, err - } - codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd}) - } - return codeLens, nil -} - -type testFn struct { - Name string - Rng protocol.Range -} - -type testFns struct { - Tests []testFn - Benchmarks []testFn -} - -func TestsAndBenchmarks(ctx context.Context, snapshot Snapshot, fh FileHandle) (testFns, error) { - var out testFns - - if !strings.HasSuffix(fh.URI().Filename(), "_test.go") { - return out, nil - } - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage) - if err != nil { - return out, err - } - - for _, d := range pgf.File.Decls { - fn, ok := d.(*ast.FuncDecl) - if !ok { - continue - } - - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, d.Pos(), fn.End()).Range() - if err != nil { - return out, err - } - - if matchTestFunc(fn, pkg, testRe, "T") { - out.Tests = append(out.Tests, testFn{fn.Name.Name, rng}) - } - - if matchTestFunc(fn, pkg, benchmarkRe, "B") { - out.Benchmarks = append(out.Benchmarks, testFn{fn.Name.Name, rng}) - } - } - - return out, nil -} - -func matchTestFunc(fn *ast.FuncDecl, pkg Package, nameRe *regexp.Regexp, paramID string) bool { - // Make sure that the function name matches a test function. - if !nameRe.MatchString(fn.Name.Name) { - return false - } - info := pkg.GetTypesInfo() - if info == nil { - return false - } - obj := info.ObjectOf(fn.Name) - if obj == nil { - return false - } - sig, ok := obj.Type().(*types.Signature) - if !ok { - return false - } - // Test functions should have only one parameter. - if sig.Params().Len() != 1 { - return false - } - - // Check the type of the only parameter - paramTyp, ok := sig.Params().At(0).Type().(*types.Pointer) - if !ok { - return false - } - named, ok := paramTyp.Elem().(*types.Named) - if !ok { - return false - } - namedObj := named.Obj() - if namedObj.Pkg().Path() != "testing" { - return false - } - return namedObj.Id() == paramID -} - -func goGenerateCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { - pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) - if err != nil { - return nil, err - } - const ggDirective = "//go:generate" - for _, c := range pgf.File.Comments { - for _, l := range c.List { - if !strings.HasPrefix(l.Text, ggDirective) { - continue - } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range() - if err != nil { - return nil, err - } - dir := protocol.URIFromSpanURI(span.URIFromPath(filepath.Dir(fh.URI().Filename()))) - nonRecursiveCmd, err := command.NewGenerateCommand("run go generate", command.GenerateArgs{Dir: dir, Recursive: false}) - if err != nil { - return nil, err - } - recursiveCmd, err := command.NewGenerateCommand("run go generate ./...", command.GenerateArgs{Dir: dir, Recursive: true}) - if err != nil { - return nil, err - } - return []protocol.CodeLens{ - {Range: rng, Command: recursiveCmd}, - {Range: rng, Command: nonRecursiveCmd}, - }, nil - - } - } - return nil, nil -} - -func regenerateCgoLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { - pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) - if err != nil { - return nil, err - } - var c *ast.ImportSpec - for _, imp := range pgf.File.Imports { - if imp.Path.Value == `"C"` { - c = imp - } - } - if c == nil { - return nil, nil - } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, c.Pos(), c.EndPos).Range() - if err != nil { - return nil, err - } - puri := protocol.URIFromSpanURI(fh.URI()) - cmd, err := command.NewRegenerateCgoCommand("regenerate cgo definitions", command.URIArg{URI: puri}) - if err != nil { - return nil, err - } - return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil -} - -func toggleDetailsCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) { - _, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage) - if err != nil { - return nil, err - } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() - if err != nil { - return nil, err - } - puri := protocol.URIFromSpanURI(fh.URI()) - cmd, err := command.NewGCDetailsCommand("Toggle gc annotation details", puri) - if err != nil { - return nil, err - } - return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil -} diff --git a/internal/lsp/source/comment_test.go b/internal/lsp/source/comment_test.go deleted file mode 100644 index 9efde16ef3c..00000000000 --- a/internal/lsp/source/comment_test.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "reflect" - "strings" - "testing" -) - -// This file is a copy of go/doc/comment_test.go with the exception for -// the test cases for TestEmphasize and TestCommentEscape - -var headingTests = []struct { - line string - ok bool -}{ - {"Section", true}, - {"A typical usage", true}, - {"Ī”Ī›Īž is Greek", true}, - {"Foo 42", true}, - {"", false}, - {"section", false}, - {"A typical usage:", false}, - {"This code:", false}, - {"Ī“ is Greek", false}, - {"Foo §", false}, - {"Fermat's Last Sentence", true}, - {"Fermat's", true}, - {"'sX", false}, - {"Ted 'Too' Bar", false}, - {"Use n+m", false}, - {"Scanning:", false}, - {"N:M", false}, -} - -func TestIsHeading(t *testing.T) { - for _, tt := range headingTests { - if h := heading(tt.line); (len(h) > 0) != tt.ok { - t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok) - } - } -} - -var blocksTests = []struct { - in string - out []block - text string -}{ - { - in: `Para 1. -Para 1 line 2. - -Para 2. - -Section - -Para 3. - - pre - pre1 - -Para 4. - - pre - pre1 - - pre2 - -Para 5. - - - pre - - - pre1 - pre2 - -Para 6. - pre - pre2 -`, - out: []block{ - {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}}, - {opPara, []string{"Para 2.\n"}}, - {opHead, []string{"Section"}}, - {opPara, []string{"Para 3.\n"}}, - {opPre, []string{"pre\n", "pre1\n"}}, - {opPara, []string{"Para 4.\n"}}, - {opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}}, - {opPara, []string{"Para 5.\n"}}, - {opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}}, - {opPara, []string{"Para 6.\n"}}, - {opPre, []string{"pre\n", "pre2\n"}}, - }, - text: `. Para 1. Para 1 line 2. - -. Para 2. - - -. Section - -. Para 3. - -$ pre -$ pre1 - -. Para 4. - -$ pre -$ pre1 - -$ pre2 - -. Para 5. - -$ pre - - -$ pre1 -$ pre2 - -. Para 6. - -$ pre -$ pre2 -`, - }, - { - in: "Para.\n\tshould not be ``escaped''", - out: []block{ - {opPara, []string{"Para.\n"}}, - {opPre, []string{"should not be ``escaped''"}}, - }, - text: ". Para.\n\n$ should not be ``escaped''", - }, - { - in: "// A very long line of 46 char for line wrapping.", - out: []block{ - {opPara, []string{"// A very long line of 46 char for line wrapping."}}, - }, - text: `. // A very long line of 46 char for line -. // wrapping. -`, - }, - { - in: `/* A very long line of 46 char for line wrapping. -A very long line of 46 char for line wrapping. */`, - out: []block{ - {opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}}, - }, - text: `. /* A very long line of 46 char for line -. wrapping. A very long line of 46 char -. for line wrapping. */ -`, - }, -} - -func TestBlocks(t *testing.T) { - for i, tt := range blocksTests { - b := blocks(tt.in) - if !reflect.DeepEqual(b, tt.out) { - t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out) - } - } -} - -// This has been modified from go/doc to use markdown links instead of html ones -// and use markdown escaping instead oh html -var emphasizeTests = []struct { - in, out string -}{ - {"", ""}, - {"http://[::1]:8080/foo.txt", `[http\:\/\/\[\:\:1\]\:8080\/foo\.txt](http://[::1]:8080/foo.txt)`}, - {"before (https://www.google.com) after", `before \([https\:\/\/www\.google\.com](https://www.google.com)\) after`}, - {"before https://www.google.com:30/x/y/z:b::c. After", `before [https\:\/\/www\.google\.com\:30\/x\/y\/z\:b\:\:c](https://www.google.com:30/x/y/z:b::c)\. After`}, - {"http://www.google.com/path/:;!-/?query=%34b#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34b\#093124](http://www.google.com/path/:;!-/?query=%34b#093124)`}, - {"http://www.google.com/path/:;!-/?query=%34bar#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34bar\#093124](http://www.google.com/path/:;!-/?query=%34bar#093124)`}, - {"http://www.google.com/index.html! After", `[http\:\/\/www\.google\.com\/index\.html](http://www.google.com/index.html)\! After`}, - {"http://www.google.com/", `[http\:\/\/www\.google\.com\/](http://www.google.com/)`}, - {"https://www.google.com/", `[https\:\/\/www\.google\.com\/](https://www.google.com/)`}, - {"http://www.google.com/path.", `[http\:\/\/www\.google\.com\/path](http://www.google.com/path)\.`}, - {"http://en.wikipedia.org/wiki/Camellia_(cipher)", `[http\:\/\/en\.wikipedia\.org\/wiki\/Camellia\_\(cipher\)](http://en.wikipedia.org/wiki/Camellia_\(cipher\))`}, - {"(http://www.google.com/)", `\([http\:\/\/www\.google\.com\/](http://www.google.com/)\)`}, - {"http://gmail.com)", `[http\:\/\/gmail\.com](http://gmail.com)\)`}, - {"((http://gmail.com))", `\(\([http\:\/\/gmail\.com](http://gmail.com)\)\)`}, - {"http://gmail.com ((http://gmail.com)) ()", `[http\:\/\/gmail\.com](http://gmail.com) \(\([http\:\/\/gmail\.com](http://gmail.com)\)\) \(\)`}, - {"Foo bar http://example.com/ quux!", `Foo bar [http\:\/\/example\.com\/](http://example.com/) quux\!`}, - {"Hello http://example.com/%2f/ /world.", `Hello [http\:\/\/example\.com\/\%2f\/](http://example.com/%2f/) \/world\.`}, - {"Lorem http: ipsum //host/path", `Lorem http\: ipsum \/\/host\/path`}, - {"javascript://is/not/linked", `javascript\:\/\/is\/not\/linked`}, - {"http://foo", `[http\:\/\/foo](http://foo)`}, - {"art by [[https://www.example.com/person/][Person Name]]", `art by \[\[[https\:\/\/www\.example\.com\/person\/](https://www.example.com/person/)\]\[Person Name\]\]`}, - {"please visit (http://golang.org/)", `please visit \([http\:\/\/golang\.org\/](http://golang.org/)\)`}, - {"please visit http://golang.org/hello())", `please visit [http\:\/\/golang\.org\/hello\(\)](http://golang.org/hello\(\))\)`}, - {"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `[http\:\/\/git\.qemu\.org\/\?p\=qemu\.git\;a\=blob\;f\=qapi\-schema\.json\;hb\=HEAD](http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD)`}, - {"https://foo.bar/bal/x(])", `[https\:\/\/foo\.bar\/bal\/x\(](https://foo.bar/bal/x\()\]\)`}, - {"foo [ http://bar(])", `foo \[ [http\:\/\/bar\(](http://bar\()\]\)`}, -} - -func TestEmphasize(t *testing.T) { - for i, tt := range emphasizeTests { - var buf bytes.Buffer - emphasize(&buf, tt.in, true) - out := buf.String() - if out != tt.out { - t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out) - } - } -} - -func TestCommentEscape(t *testing.T) { - //ldquo -> ulquo and rdquo -> urquo - commentTests := []struct { - in, out string - }{ - {"typically invoked as ``go tool asm'',", "typically invoked as " + ulquo + "go tool asm" + urquo + ","}, - {"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ulquo + "go help test" + urquo + " and " + ulquo + "go help testflag" + urquo}} - for i, tt := range commentTests { - var buf strings.Builder - commentEscape(&buf, tt.in, true) - out := buf.String() - if out != tt.out { - t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out) - } - } -} - -func TestCommentToMarkdown(t *testing.T) { - tests := []struct { - in, out string - }{ - { - in: "F declaration.\n", - out: "F declaration\\.\n", - }, - { - in: ` -F declaration. Lorem ipsum dolor sit amet. -Etiam mattis eros at orci mollis molestie. -`, - out: ` -F declaration\. Lorem ipsum dolor sit amet\. -Etiam mattis eros at orci mollis molestie\. -`, - }, - { - in: ` -F declaration. - -Lorem ipsum dolor sit amet. -Sed id dui turpis. - - - - -Aenean tempus velit non auctor eleifend. -Aenean efficitur a sem id ultricies. - - -Phasellus efficitur mauris et viverra bibendum. -`, - out: ` -F declaration\. - -Lorem ipsum dolor sit amet\. -Sed id dui turpis\. - -Aenean tempus velit non auctor eleifend\. -Aenean efficitur a sem id ultricies\. - -Phasellus efficitur mauris et viverra bibendum\. -`, - }, - { - in: ` -F declaration. - -Aenean tempus velit non auctor eleifend. - -Section - -Lorem ipsum dolor sit amet, consectetur adipiscing elit. - - func foo() {} - - - func bar() {} - -Fusce lorem lacus. - - func foo() {} - - func bar() {} - -Maecenas in lobortis lectus. - - func foo() {} - - func bar() {} - -Phasellus efficitur mauris et viverra bibendum. -`, - out: ` -F declaration\. - -Aenean tempus velit non auctor eleifend\. - -### Section - -Lorem ipsum dolor sit amet, consectetur adipiscing elit\. - - func foo() {} - - - func bar() {} - -Fusce lorem lacus\. - - func foo() {} - - func bar() {} - -Maecenas in lobortis lectus\. - - func foo() {} - - func bar() {} - -Phasellus efficitur mauris et viverra bibendum\. -`, - }, - { - in: ` -F declaration. - - func foo() { - fmt.Println("foo") - } - func bar() { - fmt.Println("bar") - } -`, - out: ` -F declaration\. - - func foo() { - fmt.Println("foo") - } - func bar() { - fmt.Println("bar") - } -`, - }, - } - for i, tt := range tests { - // Comments start with new lines for better readability. So, we should trim them. - tt.in = strings.TrimPrefix(tt.in, "\n") - tt.out = strings.TrimPrefix(tt.out, "\n") - - if out := CommentToMarkdown(tt.in); out != tt.out { - t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out) - } - } -} diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go deleted file mode 100644 index bb1c68d2238..00000000000 --- a/internal/lsp/source/completion/completion.go +++ /dev/null @@ -1,3006 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package completion provides core functionality for code completion in Go -// editors and tools. -package completion - -import ( - "context" - "fmt" - "go/ast" - "go/constant" - "go/scanner" - "go/token" - "go/types" - "math" - "sort" - "strconv" - "strings" - "sync" - "time" - "unicode" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/fuzzy" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typeparams" -) - -type CompletionItem struct { - // Label is the primary text the user sees for this completion item. - Label string - - // Detail is supplemental information to present to the user. - // This often contains the type or return type of the completion item. - Detail string - - // InsertText is the text to insert if this item is selected. - // Any of the prefix that has already been typed is not trimmed. - // The insert text does not contain snippets. - InsertText string - - Kind protocol.CompletionItemKind - Tags []protocol.CompletionItemTag - Deprecated bool // Deprecated, prefer Tags if available - - // An optional array of additional TextEdits that are applied when - // selecting this completion. - // - // Additional text edits should be used to change text unrelated to the current cursor position - // (for example adding an import statement at the top of the file if the completion item will - // insert an unqualified type). - AdditionalTextEdits []protocol.TextEdit - - // Depth is how many levels were searched to find this completion. - // For example when completing "foo<>", "fooBar" is depth 0, and - // "fooBar.Baz" is depth 1. - Depth int - - // Score is the internal relevance score. - // A higher score indicates that this completion item is more relevant. - Score float64 - - // snippet is the LSP snippet for the completion item. The LSP - // specification contains details about LSP snippets. For example, a - // snippet for a function with the following signature: - // - // func foo(a, b, c int) - // - // would be: - // - // foo(${1:a int}, ${2: b int}, ${3: c int}) - // - // If Placeholders is false in the CompletionOptions, the above - // snippet would instead be: - // - // foo(${1:}) - snippet *snippet.Builder - - // Documentation is the documentation for the completion item. - Documentation string - - // obj is the object from which this candidate was derived, if any. - // obj is for internal use only. - obj types.Object -} - -// completionOptions holds completion specific configuration. -type completionOptions struct { - unimported bool - documentation bool - fullDocumentation bool - placeholders bool - literal bool - snippets bool - postfix bool - matcher source.Matcher - budget time.Duration -} - -// Snippet is a convenience returns the snippet if available, otherwise -// the InsertText. -// used for an item, depending on if the callee wants placeholders or not. -func (i *CompletionItem) Snippet() string { - if i.snippet != nil { - return i.snippet.String() - } - return i.InsertText -} - -// Scoring constants are used for weighting the relevance of different candidates. -const ( - // stdScore is the base score for all completion items. - stdScore float64 = 1.0 - - // highScore indicates a very relevant completion item. - highScore float64 = 10.0 - - // lowScore indicates an irrelevant or not useful completion item. - lowScore float64 = 0.01 -) - -// matcher matches a candidate's label against the user input. The -// returned score reflects the quality of the match. A score of zero -// indicates no match, and a score of one means a perfect match. -type matcher interface { - Score(candidateLabel string) (score float32) -} - -// prefixMatcher implements case sensitive prefix matching. -type prefixMatcher string - -func (pm prefixMatcher) Score(candidateLabel string) float32 { - if strings.HasPrefix(candidateLabel, string(pm)) { - return 1 - } - return -1 -} - -// insensitivePrefixMatcher implements case insensitive prefix matching. -type insensitivePrefixMatcher string - -func (ipm insensitivePrefixMatcher) Score(candidateLabel string) float32 { - if strings.HasPrefix(strings.ToLower(candidateLabel), string(ipm)) { - return 1 - } - return -1 -} - -// completer contains the necessary information for a single completion request. -type completer struct { - snapshot source.Snapshot - pkg source.Package - qf types.Qualifier - opts *completionOptions - - // completionContext contains information about the trigger for this - // completion request. - completionContext completionContext - - // fh is a handle to the file associated with this completion request. - fh source.FileHandle - - // filename is the name of the file associated with this completion request. - filename string - - // file is the AST of the file associated with this completion request. - file *ast.File - - // pos is the position at which the request was triggered. - pos token.Pos - - // path is the path of AST nodes enclosing the position. - path []ast.Node - - // seen is the map that ensures we do not return duplicate results. - seen map[types.Object]bool - - // items is the list of completion items returned. - items []CompletionItem - - // completionCallbacks is a list of callbacks to collect completions that - // require expensive operations. This includes operations where we search - // through the entire module cache. - completionCallbacks []func(opts *imports.Options) error - - // surrounding describes the identifier surrounding the position. - surrounding *Selection - - // inference contains information we've inferred about ideal - // candidates such as the candidate's type. - inference candidateInference - - // enclosingFunc contains information about the function enclosing - // the position. - enclosingFunc *funcInfo - - // enclosingCompositeLiteral contains information about the composite literal - // enclosing the position. - enclosingCompositeLiteral *compLitInfo - - // deepState contains the current state of our deep completion search. - deepState deepCompletionState - - // matcher matches the candidates against the surrounding prefix. - matcher matcher - - // methodSetCache caches the types.NewMethodSet call, which is relatively - // expensive and can be called many times for the same type while searching - // for deep completions. - methodSetCache map[methodSetKey]*types.MethodSet - - // mapper converts the positions in the file from which the completion originated. - mapper *protocol.ColumnMapper - - // startTime is when we started processing this completion request. It does - // not include any time the request spent in the queue. - startTime time.Time - - // scopes contains all scopes defined by nodes in our path, - // including nil values for nodes that don't defined a scope. It - // also includes our package scope and the universal scope at the - // end. - scopes []*types.Scope -} - -// funcInfo holds info about a function object. -type funcInfo struct { - // sig is the function declaration enclosing the position. - sig *types.Signature - - // body is the function's body. - body *ast.BlockStmt -} - -type compLitInfo struct { - // cl is the *ast.CompositeLit enclosing the position. - cl *ast.CompositeLit - - // clType is the type of cl. - clType types.Type - - // kv is the *ast.KeyValueExpr enclosing the position, if any. - kv *ast.KeyValueExpr - - // inKey is true if we are certain the position is in the key side - // of a key-value pair. - inKey bool - - // maybeInFieldName is true if inKey is false and it is possible - // we are completing a struct field name. For example, - // "SomeStruct{<>}" will be inKey=false, but maybeInFieldName=true - // because we _could_ be completing a field name. - maybeInFieldName bool -} - -type importInfo struct { - importPath string - name string - pkg source.Package -} - -type methodSetKey struct { - typ types.Type - addressable bool -} - -type completionContext struct { - // triggerCharacter is the character used to trigger completion at current - // position, if any. - triggerCharacter string - - // triggerKind is information about how a completion was triggered. - triggerKind protocol.CompletionTriggerKind - - // commentCompletion is true if we are completing a comment. - commentCompletion bool - - // packageCompletion is true if we are completing a package name. - packageCompletion bool -} - -// A Selection represents the cursor position and surrounding identifier. -type Selection struct { - content string - cursor token.Pos - rng span.Range -} - -func (p Selection) Content() string { - return p.content -} - -func (p Selection) Start() token.Pos { - return p.rng.Start -} - -func (p Selection) End() token.Pos { - return p.rng.End -} - -func (p Selection) Prefix() string { - return p.content[:p.cursor-p.rng.Start] -} - -func (p Selection) Suffix() string { - return p.content[p.cursor-p.rng.Start:] -} - -func (c *completer) setSurrounding(ident *ast.Ident) { - if c.surrounding != nil { - return - } - if !(ident.Pos() <= c.pos && c.pos <= ident.End()) { - return - } - - c.surrounding = &Selection{ - content: ident.Name, - cursor: c.pos, - // Overwrite the prefix only. - rng: span.NewRange(c.snapshot.FileSet(), ident.Pos(), ident.End()), - } - - c.setMatcherFromPrefix(c.surrounding.Prefix()) -} - -func (c *completer) setMatcherFromPrefix(prefix string) { - switch c.opts.matcher { - case source.Fuzzy: - c.matcher = fuzzy.NewMatcher(prefix) - case source.CaseSensitive: - c.matcher = prefixMatcher(prefix) - default: - c.matcher = insensitivePrefixMatcher(strings.ToLower(prefix)) - } -} - -func (c *completer) getSurrounding() *Selection { - if c.surrounding == nil { - c.surrounding = &Selection{ - content: "", - cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), c.pos, c.pos), - } - } - return c.surrounding -} - -// candidate represents a completion candidate. -type candidate struct { - // obj is the types.Object to complete to. - obj types.Object - - // score is used to rank candidates. - score float64 - - // name is the deep object name path, e.g. "foo.bar" - name string - - // detail is additional information about this item. If not specified, - // defaults to type string for the object. - detail string - - // path holds the path from the search root (excluding the candidate - // itself) for a deep candidate. - path []types.Object - - // pathInvokeMask is a bit mask tracking whether each entry in path - // should be formatted with "()" (i.e. whether it is a function - // invocation). - pathInvokeMask uint16 - - // mods contains modifications that should be applied to the - // candidate when inserted. For example, "foo" may be inserted as - // "*foo" or "foo()". - mods []typeModKind - - // addressable is true if a pointer can be taken to the candidate. - addressable bool - - // convertTo is a type that this candidate should be cast to. For - // example, if convertTo is float64, "foo" should be formatted as - // "float64(foo)". - convertTo types.Type - - // imp is the import that needs to be added to this package in order - // for this candidate to be valid. nil if no import needed. - imp *importInfo -} - -func (c candidate) hasMod(mod typeModKind) bool { - for _, m := range c.mods { - if m == mod { - return true - } - } - return false -} - -// ErrIsDefinition is an error that informs the user they got no -// completions because they tried to complete the name of a new object -// being defined. -type ErrIsDefinition struct { - objStr string -} - -func (e ErrIsDefinition) Error() string { - msg := "this is a definition" - if e.objStr != "" { - msg += " of " + e.objStr - } - return msg -} - -// Completion returns a list of possible candidates for completion, given a -// a file and a position. -// -// The selection is computed based on the preceding identifier and can be used by -// the client to score the quality of the completion. For instance, some clients -// may tolerate imperfect matches as valid completion results, since users may make typos. -func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, protoPos protocol.Position, protoContext protocol.CompletionContext) ([]CompletionItem, *Selection, error) { - ctx, done := event.Start(ctx, "completion.Completion") - defer done() - - startTime := time.Now() - - pkg, pgf, err := source.GetParsedFile(ctx, snapshot, fh, source.NarrowestPackage) - if err != nil || pgf.File.Package == token.NoPos { - // If we can't parse this file or find position for the package - // keyword, it may be missing a package declaration. Try offering - // suggestions for the package declaration. - // Note that this would be the case even if the keyword 'package' is - // present but no package name exists. - items, surrounding, innerErr := packageClauseCompletions(ctx, snapshot, fh, protoPos) - if innerErr != nil { - // return the error for GetParsedFile since it's more relevant in this situation. - return nil, nil, fmt.Errorf("getting file for Completion: %w (package completions: %v)", err, innerErr) - } - return items, surrounding, nil - } - pos, err := pgf.Mapper.Pos(protoPos) - if err != nil { - return nil, nil, err - } - // Completion is based on what precedes the cursor. - // Find the path to the position before pos. - path, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1) - if path == nil { - return nil, nil, fmt.Errorf("cannot find node enclosing position") - } - - // Check if completion at this position is valid. If not, return early. - switch n := path[0].(type) { - case *ast.BasicLit: - // Skip completion inside literals except for ImportSpec - if len(path) > 1 { - if _, ok := path[1].(*ast.ImportSpec); ok { - break - } - } - return nil, nil, nil - case *ast.CallExpr: - if n.Ellipsis.IsValid() && pos > n.Ellipsis && pos <= n.Ellipsis+token.Pos(len("...")) { - // Don't offer completions inside or directly after "...". For - // example, don't offer completions at "<>" in "foo(bar...<>"). - return nil, nil, nil - } - case *ast.Ident: - // reject defining identifiers - if obj, ok := pkg.GetTypesInfo().Defs[n]; ok { - if v, ok := obj.(*types.Var); ok && v.IsField() && v.Embedded() { - // An anonymous field is also a reference to a type. - } else if pgf.File.Name == n { - // Don't skip completions if Ident is for package name. - break - } else { - objStr := "" - if obj != nil { - qual := types.RelativeTo(pkg.GetTypes()) - objStr = types.ObjectString(obj, qual) - } - ans, sel := definition(path, obj, snapshot.FileSet(), fh) - if ans != nil { - sort.Slice(ans, func(i, j int) bool { - return ans[i].Score > ans[j].Score - }) - return ans, sel, nil - } - return nil, nil, ErrIsDefinition{objStr: objStr} - } - } - } - - // Collect all surrounding scopes, innermost first. - scopes := source.CollectScopes(pkg.GetTypesInfo(), path, pos) - scopes = append(scopes, pkg.GetTypes().Scope(), types.Universe) - - opts := snapshot.View().Options() - c := &completer{ - pkg: pkg, - snapshot: snapshot, - qf: source.Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()), - completionContext: completionContext{ - triggerCharacter: protoContext.TriggerCharacter, - triggerKind: protoContext.TriggerKind, - }, - fh: fh, - filename: fh.URI().Filename(), - file: pgf.File, - path: path, - pos: pos, - seen: make(map[types.Object]bool), - enclosingFunc: enclosingFunction(path, pkg.GetTypesInfo()), - enclosingCompositeLiteral: enclosingCompositeLiteral(path, pos, pkg.GetTypesInfo()), - deepState: deepCompletionState{ - enabled: opts.DeepCompletion, - }, - opts: &completionOptions{ - matcher: opts.Matcher, - unimported: opts.CompleteUnimported, - documentation: opts.CompletionDocumentation && opts.HoverKind != source.NoDocumentation, - fullDocumentation: opts.HoverKind == source.FullDocumentation, - placeholders: opts.UsePlaceholders, - literal: opts.LiteralCompletions && opts.InsertTextFormat == protocol.SnippetTextFormat, - budget: opts.CompletionBudget, - snippets: opts.InsertTextFormat == protocol.SnippetTextFormat, - postfix: opts.ExperimentalPostfixCompletions, - }, - // default to a matcher that always matches - matcher: prefixMatcher(""), - methodSetCache: make(map[methodSetKey]*types.MethodSet), - mapper: pgf.Mapper, - startTime: startTime, - scopes: scopes, - } - - var cancel context.CancelFunc - if c.opts.budget == 0 { - ctx, cancel = context.WithCancel(ctx) - } else { - // timeoutDuration is the completion budget remaining. If less than - // 10ms, set to 10ms - timeoutDuration := time.Until(c.startTime.Add(c.opts.budget)) - if timeoutDuration < 10*time.Millisecond { - timeoutDuration = 10 * time.Millisecond - } - ctx, cancel = context.WithTimeout(ctx, timeoutDuration) - } - defer cancel() - - if surrounding := c.containingIdent(pgf.Src); surrounding != nil { - c.setSurrounding(surrounding) - } - - c.inference = expectedCandidate(ctx, c) - - err = c.collectCompletions(ctx) - if err != nil { - return nil, nil, err - } - - // Deep search collected candidates and their members for more candidates. - c.deepSearch(ctx) - - for _, callback := range c.completionCallbacks { - if err := c.snapshot.RunProcessEnvFunc(ctx, callback); err != nil { - return nil, nil, err - } - } - - // Search candidates populated by expensive operations like - // unimportedMembers etc. for more completion items. - c.deepSearch(ctx) - - // Statement candidates offer an entire statement in certain contexts, as - // opposed to a single object. Add statement candidates last because they - // depend on other candidates having already been collected. - c.addStatementCandidates() - - c.sortItems() - return c.items, c.getSurrounding(), nil -} - -// collectCompletions adds possible completion candidates to either the deep -// search queue or completion items directly for different completion contexts. -func (c *completer) collectCompletions(ctx context.Context) error { - // Inside import blocks, return completions for unimported packages. - for _, importSpec := range c.file.Imports { - if !(importSpec.Path.Pos() <= c.pos && c.pos <= importSpec.Path.End()) { - continue - } - return c.populateImportCompletions(ctx, importSpec) - } - - // Inside comments, offer completions for the name of the relevant symbol. - for _, comment := range c.file.Comments { - if comment.Pos() < c.pos && c.pos <= comment.End() { - c.populateCommentCompletions(ctx, comment) - return nil - } - } - - // Struct literals are handled entirely separately. - if c.wantStructFieldCompletions() { - // If we are definitely completing a struct field name, deep completions - // don't make sense. - if c.enclosingCompositeLiteral.inKey { - c.deepState.enabled = false - } - return c.structLiteralFieldName(ctx) - } - - if lt := c.wantLabelCompletion(); lt != labelNone { - c.labels(lt) - return nil - } - - if c.emptySwitchStmt() { - // Empty switch statements only admit "default" and "case" keywords. - c.addKeywordItems(map[string]bool{}, highScore, CASE, DEFAULT) - return nil - } - - switch n := c.path[0].(type) { - case *ast.Ident: - if c.file.Name == n { - return c.packageNameCompletions(ctx, c.fh.URI(), n) - } else if sel, ok := c.path[1].(*ast.SelectorExpr); ok && sel.Sel == n { - // Is this the Sel part of a selector? - return c.selector(ctx, sel) - } - return c.lexical(ctx) - // The function name hasn't been typed yet, but the parens are there: - // recv.‸(arg) - case *ast.TypeAssertExpr: - // Create a fake selector expression. - return c.selector(ctx, &ast.SelectorExpr{X: n.X}) - case *ast.SelectorExpr: - return c.selector(ctx, n) - // At the file scope, only keywords are allowed. - case *ast.BadDecl, *ast.File: - c.addKeywordCompletions() - default: - // fallback to lexical completions - return c.lexical(ctx) - } - - return nil -} - -// containingIdent returns the *ast.Ident containing pos, if any. It -// synthesizes an *ast.Ident to allow completion in the face of -// certain syntax errors. -func (c *completer) containingIdent(src []byte) *ast.Ident { - // In the normal case, our leaf AST node is the identifier being completed. - if ident, ok := c.path[0].(*ast.Ident); ok { - return ident - } - - pos, tkn, lit := c.scanToken(src) - if !pos.IsValid() { - return nil - } - - fakeIdent := &ast.Ident{Name: lit, NamePos: pos} - - if _, isBadDecl := c.path[0].(*ast.BadDecl); isBadDecl { - // You don't get *ast.Idents at the file level, so look for bad - // decls and use the manually extracted token. - return fakeIdent - } else if c.emptySwitchStmt() { - // Only keywords are allowed in empty switch statements. - // *ast.Idents are not parsed, so we must use the manually - // extracted token. - return fakeIdent - } else if tkn.IsKeyword() { - // Otherwise, manually extract the prefix if our containing token - // is a keyword. This improves completion after an "accidental - // keyword", e.g. completing to "variance" in "someFunc(var<>)". - return fakeIdent - } - - return nil -} - -// scanToken scans pgh's contents for the token containing pos. -func (c *completer) scanToken(contents []byte) (token.Pos, token.Token, string) { - tok := c.snapshot.FileSet().File(c.pos) - - var s scanner.Scanner - s.Init(tok, contents, nil, 0) - for { - tknPos, tkn, lit := s.Scan() - if tkn == token.EOF || tknPos >= c.pos { - return token.NoPos, token.ILLEGAL, "" - } - - if len(lit) > 0 && tknPos <= c.pos && c.pos <= tknPos+token.Pos(len(lit)) { - return tknPos, tkn, lit - } - } -} - -func (c *completer) sortItems() { - sort.SliceStable(c.items, func(i, j int) bool { - // Sort by score first. - if c.items[i].Score != c.items[j].Score { - return c.items[i].Score > c.items[j].Score - } - - // Then sort by label so order stays consistent. This also has the - // effect of preferring shorter candidates. - return c.items[i].Label < c.items[j].Label - }) -} - -// emptySwitchStmt reports whether pos is in an empty switch or select -// statement. -func (c *completer) emptySwitchStmt() bool { - block, ok := c.path[0].(*ast.BlockStmt) - if !ok || len(block.List) > 0 || len(c.path) == 1 { - return false - } - - switch c.path[1].(type) { - case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - return true - default: - return false - } -} - -// populateImportCompletions yields completions for an import path around the cursor. -// -// Completions are suggested at the directory depth of the given import path so -// that we don't overwhelm the user with a large list of possibilities. As an -// example, a completion for the prefix "golang" results in "golang.org/". -// Completions for "golang.org/" yield its subdirectories -// (i.e. "golang.org/x/"). The user is meant to accept completion suggestions -// until they reach a complete import path. -func (c *completer) populateImportCompletions(ctx context.Context, searchImport *ast.ImportSpec) error { - if !strings.HasPrefix(searchImport.Path.Value, `"`) { - return nil - } - - // deepSearch is not valuable for import completions. - c.deepState.enabled = false - - importPath := searchImport.Path.Value - - // Extract the text between the quotes (if any) in an import spec. - // prefix is the part of import path before the cursor. - prefixEnd := c.pos - searchImport.Path.Pos() - prefix := strings.Trim(importPath[:prefixEnd], `"`) - - // The number of directories in the import path gives us the depth at - // which to search. - depth := len(strings.Split(prefix, "/")) - 1 - - content := importPath - start, end := searchImport.Path.Pos(), searchImport.Path.End() - namePrefix, nameSuffix := `"`, `"` - // If a starting quote is present, adjust surrounding to either after the - // cursor or after the first slash (/), except if cursor is at the starting - // quote. Otherwise we provide a completion including the starting quote. - if strings.HasPrefix(importPath, `"`) && c.pos > searchImport.Path.Pos() { - content = content[1:] - start++ - if depth > 0 { - // Adjust textEdit start to replacement range. For ex: if current - // path was "golang.or/x/to<>ols/internal/", where <> is the cursor - // position, start of the replacement range would be after - // "golang.org/x/". - path := strings.SplitAfter(prefix, "/") - numChars := len(strings.Join(path[:len(path)-1], "")) - content = content[numChars:] - start += token.Pos(numChars) - } - namePrefix = "" - } - - // We won't provide an ending quote if one is already present, except if - // cursor is after the ending quote but still in import spec. This is - // because cursor has to be in our textEdit range. - if strings.HasSuffix(importPath, `"`) && c.pos < searchImport.Path.End() { - end-- - content = content[:len(content)-1] - nameSuffix = "" - } - - c.surrounding = &Selection{ - content: content, - cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), start, end), - } - - seenImports := make(map[string]struct{}) - for _, importSpec := range c.file.Imports { - if importSpec.Path.Value == importPath { - continue - } - seenImportPath, err := strconv.Unquote(importSpec.Path.Value) - if err != nil { - return err - } - seenImports[seenImportPath] = struct{}{} - } - - var mu sync.Mutex // guard c.items locally, since searchImports is called in parallel - seen := make(map[string]struct{}) - searchImports := func(pkg imports.ImportFix) { - path := pkg.StmtInfo.ImportPath - if _, ok := seenImports[path]; ok { - return - } - - // Any package path containing fewer directories than the search - // prefix is not a match. - pkgDirList := strings.Split(path, "/") - if len(pkgDirList) < depth+1 { - return - } - pkgToConsider := strings.Join(pkgDirList[:depth+1], "/") - - name := pkgDirList[depth] - // if we're adding an opening quote to completion too, set name to full - // package path since we'll need to overwrite that range. - if namePrefix == `"` { - name = pkgToConsider - } - - score := pkg.Relevance - if len(pkgDirList)-1 == depth { - score *= highScore - } else { - // For incomplete package paths, add a terminal slash to indicate that the - // user should keep triggering completions. - name += "/" - pkgToConsider += "/" - } - - if _, ok := seen[pkgToConsider]; ok { - return - } - seen[pkgToConsider] = struct{}{} - - mu.Lock() - defer mu.Unlock() - - name = namePrefix + name + nameSuffix - obj := types.NewPkgName(0, nil, name, types.NewPackage(pkgToConsider, name)) - c.deepState.enqueue(candidate{ - obj: obj, - detail: fmt.Sprintf("%q", pkgToConsider), - score: score, - }) - } - - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { - return imports.GetImportPaths(ctx, searchImports, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) - }) - return nil -} - -// populateCommentCompletions yields completions for comments preceding or in declarations. -func (c *completer) populateCommentCompletions(ctx context.Context, comment *ast.CommentGroup) { - // If the completion was triggered by a period, ignore it. These types of - // completions will not be useful in comments. - if c.completionContext.triggerCharacter == "." { - return - } - - // Using the comment position find the line after - file := c.snapshot.FileSet().File(comment.End()) - if file == nil { - return - } - - // Deep completion doesn't work properly in comments since we don't - // have a type object to complete further. - c.deepState.enabled = false - c.completionContext.commentCompletion = true - - // Documentation isn't useful in comments, since it might end up being the - // comment itself. - c.opts.documentation = false - - commentLine := file.Line(comment.End()) - - // comment is valid, set surrounding as word boundaries around cursor - c.setSurroundingForComment(comment) - - // Using the next line pos, grab and parse the exported symbol on that line - for _, n := range c.file.Decls { - declLine := file.Line(n.Pos()) - // if the comment is not in, directly above or on the same line as a declaration - if declLine != commentLine && declLine != commentLine+1 && - !(n.Pos() <= comment.Pos() && comment.End() <= n.End()) { - continue - } - switch node := n.(type) { - // handle const, vars, and types - case *ast.GenDecl: - for _, spec := range node.Specs { - switch spec := spec.(type) { - case *ast.ValueSpec: - for _, name := range spec.Names { - if name.String() == "_" { - continue - } - obj := c.pkg.GetTypesInfo().ObjectOf(name) - c.deepState.enqueue(candidate{obj: obj, score: stdScore}) - } - case *ast.TypeSpec: - // add TypeSpec fields to completion - switch typeNode := spec.Type.(type) { - case *ast.StructType: - c.addFieldItems(ctx, typeNode.Fields) - case *ast.FuncType: - c.addFieldItems(ctx, typeNode.Params) - c.addFieldItems(ctx, typeNode.Results) - case *ast.InterfaceType: - c.addFieldItems(ctx, typeNode.Methods) - } - - if spec.Name.String() == "_" { - continue - } - - obj := c.pkg.GetTypesInfo().ObjectOf(spec.Name) - // Type name should get a higher score than fields but not highScore by default - // since field near a comment cursor gets a highScore - score := stdScore * 1.1 - // If type declaration is on the line after comment, give it a highScore. - if declLine == commentLine+1 { - score = highScore - } - - c.deepState.enqueue(candidate{obj: obj, score: score}) - } - } - // handle functions - case *ast.FuncDecl: - c.addFieldItems(ctx, node.Recv) - c.addFieldItems(ctx, node.Type.Params) - c.addFieldItems(ctx, node.Type.Results) - - // collect receiver struct fields - if node.Recv != nil { - for _, fields := range node.Recv.List { - for _, name := range fields.Names { - obj := c.pkg.GetTypesInfo().ObjectOf(name) - if obj == nil { - continue - } - - recvType := obj.Type().Underlying() - if ptr, ok := recvType.(*types.Pointer); ok { - recvType = ptr.Elem() - } - recvStruct, ok := recvType.Underlying().(*types.Struct) - if !ok { - continue - } - for i := 0; i < recvStruct.NumFields(); i++ { - field := recvStruct.Field(i) - c.deepState.enqueue(candidate{obj: field, score: lowScore}) - } - } - } - } - - if node.Name.String() == "_" { - continue - } - - obj := c.pkg.GetTypesInfo().ObjectOf(node.Name) - if obj == nil || obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() { - continue - } - - c.deepState.enqueue(candidate{obj: obj, score: highScore}) - } - } -} - -// sets word boundaries surrounding a cursor for a comment -func (c *completer) setSurroundingForComment(comments *ast.CommentGroup) { - var cursorComment *ast.Comment - for _, comment := range comments.List { - if c.pos >= comment.Pos() && c.pos <= comment.End() { - cursorComment = comment - break - } - } - // if cursor isn't in the comment - if cursorComment == nil { - return - } - - // index of cursor in comment text - cursorOffset := int(c.pos - cursorComment.Pos()) - start, end := cursorOffset, cursorOffset - for start > 0 && isValidIdentifierChar(cursorComment.Text[start-1]) { - start-- - } - for end < len(cursorComment.Text) && isValidIdentifierChar(cursorComment.Text[end]) { - end++ - } - - c.surrounding = &Selection{ - content: cursorComment.Text[start:end], - cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)), - } - c.setMatcherFromPrefix(c.surrounding.Prefix()) -} - -// isValidIdentifierChar returns true if a byte is a valid go identifier -// character, i.e. unicode letter or digit or underscore. -func isValidIdentifierChar(char byte) bool { - charRune := rune(char) - return unicode.In(charRune, unicode.Letter, unicode.Digit) || char == '_' -} - -// adds struct fields, interface methods, function declaration fields to completion -func (c *completer) addFieldItems(ctx context.Context, fields *ast.FieldList) { - if fields == nil { - return - } - - cursor := c.surrounding.cursor - for _, field := range fields.List { - for _, name := range field.Names { - if name.String() == "_" { - continue - } - obj := c.pkg.GetTypesInfo().ObjectOf(name) - if obj == nil { - continue - } - - // if we're in a field comment/doc, score that field as more relevant - score := stdScore - if field.Comment != nil && field.Comment.Pos() <= cursor && cursor <= field.Comment.End() { - score = highScore - } else if field.Doc != nil && field.Doc.Pos() <= cursor && cursor <= field.Doc.End() { - score = highScore - } - - c.deepState.enqueue(candidate{obj: obj, score: score}) - } - } -} - -func (c *completer) wantStructFieldCompletions() bool { - clInfo := c.enclosingCompositeLiteral - if clInfo == nil { - return false - } - - return clInfo.isStruct() && (clInfo.inKey || clInfo.maybeInFieldName) -} - -func (c *completer) wantTypeName() bool { - return !c.completionContext.commentCompletion && c.inference.typeName.wantTypeName -} - -// See https://golang.org/issue/36001. Unimported completions are expensive. -const ( - maxUnimportedPackageNames = 5 - unimportedMemberTarget = 100 -) - -// selector finds completions for the specified selector expression. -func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { - c.inference.objChain = objChain(c.pkg.GetTypesInfo(), sel.X) - - // Is sel a qualified identifier? - if id, ok := sel.X.(*ast.Ident); ok { - if pkgName, ok := c.pkg.GetTypesInfo().Uses[id].(*types.PkgName); ok { - var pkg source.Package - for _, imp := range c.pkg.Imports() { - if imp.PkgPath() == pkgName.Imported().Path() { - pkg = imp - } - } - // If the package is not imported, try searching for unimported - // completions. - if pkg == nil && c.opts.unimported { - if err := c.unimportedMembers(ctx, id); err != nil { - return err - } - } - c.packageMembers(pkgName.Imported(), stdScore, nil, func(cand candidate) { - c.deepState.enqueue(cand) - }) - return nil - } - } - - // Invariant: sel is a true selector. - tv, ok := c.pkg.GetTypesInfo().Types[sel.X] - if ok { - c.methodsAndFields(tv.Type, tv.Addressable(), nil, func(cand candidate) { - c.deepState.enqueue(cand) - }) - - c.addPostfixSnippetCandidates(ctx, sel) - - return nil - } - - // Try unimported packages. - if id, ok := sel.X.(*ast.Ident); ok && c.opts.unimported { - if err := c.unimportedMembers(ctx, id); err != nil { - return err - } - } - return nil -} - -func (c *completer) unimportedMembers(ctx context.Context, id *ast.Ident) error { - // Try loaded packages first. They're relevant, fast, and fully typed. - known, err := c.snapshot.CachedImportPaths(ctx) - if err != nil { - return err - } - - var paths []string - for path, pkg := range known { - if pkg.GetTypes().Name() != id.Name { - continue - } - paths = append(paths, path) - } - - var relevances map[string]float64 - if len(paths) != 0 { - if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { - var err error - relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) - return err - }); err != nil { - return err - } - } - sort.Slice(paths, func(i, j int) bool { - return relevances[paths[i]] > relevances[paths[j]] - }) - - for _, path := range paths { - pkg := known[path] - if pkg.GetTypes().Name() != id.Name { - continue - } - imp := &importInfo{ - importPath: path, - pkg: pkg, - } - if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() { - imp.name = pkg.GetTypes().Name() - } - c.packageMembers(pkg.GetTypes(), unimportedScore(relevances[path]), imp, func(cand candidate) { - c.deepState.enqueue(cand) - }) - if len(c.items) >= unimportedMemberTarget { - return nil - } - } - - ctx, cancel := context.WithCancel(ctx) - - var mu sync.Mutex - add := func(pkgExport imports.PackageExport) { - mu.Lock() - defer mu.Unlock() - if _, ok := known[pkgExport.Fix.StmtInfo.ImportPath]; ok { - return // We got this one above. - } - - // Continue with untyped proposals. - pkg := types.NewPackage(pkgExport.Fix.StmtInfo.ImportPath, pkgExport.Fix.IdentName) - for _, export := range pkgExport.Exports { - score := unimportedScore(pkgExport.Fix.Relevance) - c.deepState.enqueue(candidate{ - obj: types.NewVar(0, pkg, export, nil), - score: score, - imp: &importInfo{ - importPath: pkgExport.Fix.StmtInfo.ImportPath, - name: pkgExport.Fix.StmtInfo.Name, - }, - }) - } - if len(c.items) >= unimportedMemberTarget { - cancel() - } - } - - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { - defer cancel() - return imports.GetPackageExports(ctx, add, id.Name, c.filename, c.pkg.GetTypes().Name(), opts.Env) - }) - return nil -} - -// unimportedScore returns a score for an unimported package that is generally -// lower than other candidates. -func unimportedScore(relevance float64) float64 { - return (stdScore + .1*relevance) / 2 -} - -func (c *completer) packageMembers(pkg *types.Package, score float64, imp *importInfo, cb func(candidate)) { - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - cb(candidate{ - obj: obj, - score: score, - imp: imp, - addressable: isVar(obj), - }) - } -} - -func (c *completer) methodsAndFields(typ types.Type, addressable bool, imp *importInfo, cb func(candidate)) { - mset := c.methodSetCache[methodSetKey{typ, addressable}] - if mset == nil { - if addressable && !types.IsInterface(typ) && !isPointer(typ) { - // Add methods of *T, which includes methods with receiver T. - mset = types.NewMethodSet(types.NewPointer(typ)) - } else { - // Add methods of T. - mset = types.NewMethodSet(typ) - } - c.methodSetCache[methodSetKey{typ, addressable}] = mset - } - - if typ.String() == "*testing.F" && addressable { - // is that a sufficient test? (or is more care needed?) - if c.fuzz(typ, mset, imp, cb, c.snapshot.FileSet()) { - return - } - } - - for i := 0; i < mset.Len(); i++ { - cb(candidate{ - obj: mset.At(i).Obj(), - score: stdScore, - imp: imp, - addressable: addressable || isPointer(typ), - }) - } - - // Add fields of T. - eachField(typ, func(v *types.Var) { - cb(candidate{ - obj: v, - score: stdScore - 0.01, - imp: imp, - addressable: addressable || isPointer(typ), - }) - }) -} - -// lexical finds completions in the lexical environment. -func (c *completer) lexical(ctx context.Context) error { - var ( - builtinIota = types.Universe.Lookup("iota") - builtinNil = types.Universe.Lookup("nil") - // comparable is an interface that exists on the dev.typeparams Go branch. - // Filter it out from completion results to stabilize tests. - // TODO(rFindley) update (or remove) our handling for comparable once the - // type parameter API has stabilized. - builtinAny = types.Universe.Lookup("any") - builtinComparable = types.Universe.Lookup("comparable") - ) - - // Track seen variables to avoid showing completions for shadowed variables. - // This works since we look at scopes from innermost to outermost. - seen := make(map[string]struct{}) - - // Process scopes innermost first. - for i, scope := range c.scopes { - if scope == nil { - continue - } - - Names: - for _, name := range scope.Names() { - declScope, obj := scope.LookupParent(name, c.pos) - if declScope != scope { - continue // Name was declared in some enclosing scope, or not at all. - } - if obj == builtinComparable || obj == builtinAny { - continue - } - - // If obj's type is invalid, find the AST node that defines the lexical block - // containing the declaration of obj. Don't resolve types for packages. - if !isPkgName(obj) && !typeIsValid(obj.Type()) { - // Match the scope to its ast.Node. If the scope is the package scope, - // use the *ast.File as the starting node. - var node ast.Node - if i < len(c.path) { - node = c.path[i] - } else if i == len(c.path) { // use the *ast.File for package scope - node = c.path[i-1] - } - if node != nil { - if resolved := resolveInvalid(c.snapshot.FileSet(), obj, node, c.pkg.GetTypesInfo()); resolved != nil { - obj = resolved - } - } - } - - // Don't use LHS of decl in RHS. - for _, ident := range enclosingDeclLHS(c.path) { - if obj.Pos() == ident.Pos() { - continue Names - } - } - - // Don't suggest "iota" outside of const decls. - if obj == builtinIota && !c.inConstDecl() { - continue - } - - // Rank outer scopes lower than inner. - score := stdScore * math.Pow(.99, float64(i)) - - // Dowrank "nil" a bit so it is ranked below more interesting candidates. - if obj == builtinNil { - score /= 2 - } - - // If we haven't already added a candidate for an object with this name. - if _, ok := seen[obj.Name()]; !ok { - seen[obj.Name()] = struct{}{} - c.deepState.enqueue(candidate{ - obj: obj, - score: score, - addressable: isVar(obj), - }) - } - } - } - - if c.inference.objType != nil { - if named, _ := source.Deref(c.inference.objType).(*types.Named); named != nil { - // If we expected a named type, check the type's package for - // completion items. This is useful when the current file hasn't - // imported the type's package yet. - - if named.Obj() != nil && named.Obj().Pkg() != nil { - pkg := named.Obj().Pkg() - - // Make sure the package name isn't already in use by another - // object, and that this file doesn't import the package yet. - if _, ok := seen[pkg.Name()]; !ok && pkg != c.pkg.GetTypes() && !alreadyImports(c.file, pkg.Path()) { - seen[pkg.Name()] = struct{}{} - obj := types.NewPkgName(0, nil, pkg.Name(), pkg) - imp := &importInfo{ - importPath: pkg.Path(), - } - if imports.ImportPathToAssumedName(pkg.Path()) != pkg.Name() { - imp.name = pkg.Name() - } - c.deepState.enqueue(candidate{ - obj: obj, - score: stdScore, - imp: imp, - }) - } - } - } - } - - if c.opts.unimported { - if err := c.unimportedPackages(ctx, seen); err != nil { - return err - } - } - - if c.inference.typeName.isTypeParam { - // If we are completing a type param, offer each structural type. - // This ensures we suggest "[]int" and "[]float64" for a constraint - // with type union "[]int | []float64". - if t, _ := c.inference.objType.(*types.Interface); t != nil { - terms, _ := typeparams.InterfaceTermSet(t) - for _, term := range terms { - c.injectType(ctx, term.Type()) - } - } - } else { - c.injectType(ctx, c.inference.objType) - } - - // Add keyword completion items appropriate in the current context. - c.addKeywordCompletions() - - return nil -} - -// injectType manufacters candidates based on the given type. This is -// intended for types not discoverable via lexical search, such as -// composite and/or generic types. For example, if the type is "[]int", -// this method makes sure you get candidates "[]int{}" and "[]int" -// (the latter applies when completing a type name). -func (c *completer) injectType(ctx context.Context, t types.Type) { - if t == nil { - return - } - - t = source.Deref(t) - - // If we have an expected type and it is _not_ a named type, handle - // it specially. Non-named types like "[]int" will never be - // considered via a lexical search, so we need to directly inject - // them. Also allow generic types since lexical search does not - // infer instantiated versions of them. - if named, _ := t.(*types.Named); named == nil || typeparams.ForNamed(named).Len() > 0 { - // If our expected type is "[]int", this will add a literal - // candidate of "[]int{}". - c.literal(ctx, t, nil) - - if _, isBasic := t.(*types.Basic); !isBasic { - // If we expect a non-basic type name (e.g. "[]int"), hack up - // a named type whose name is literally "[]int". This allows - // us to reuse our object based completion machinery. - fakeNamedType := candidate{ - obj: types.NewTypeName(token.NoPos, nil, types.TypeString(t, c.qf), t), - score: stdScore, - } - // Make sure the type name matches before considering - // candidate. This cuts down on useless candidates. - if c.matchingTypeName(&fakeNamedType) { - c.deepState.enqueue(fakeNamedType) - } - } - } -} - -func (c *completer) unimportedPackages(ctx context.Context, seen map[string]struct{}) error { - var prefix string - if c.surrounding != nil { - prefix = c.surrounding.Prefix() - } - - // Don't suggest unimported packages if we have absolutely nothing - // to go on. - if prefix == "" { - return nil - } - - count := 0 - - known, err := c.snapshot.CachedImportPaths(ctx) - if err != nil { - return err - } - var paths []string - for path, pkg := range known { - if !strings.HasPrefix(pkg.GetTypes().Name(), prefix) { - continue - } - paths = append(paths, path) - } - - var relevances map[string]float64 - if len(paths) != 0 { - if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { - var err error - relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) - return err - }); err != nil { - return err - } - } - - sort.Slice(paths, func(i, j int) bool { - if relevances[paths[i]] != relevances[paths[j]] { - return relevances[paths[i]] > relevances[paths[j]] - } - - // Fall back to lexical sort to keep truncated set of candidates - // in a consistent order. - return paths[i] < paths[j] - }) - - for _, path := range paths { - pkg := known[path] - if _, ok := seen[pkg.GetTypes().Name()]; ok { - continue - } - imp := &importInfo{ - importPath: path, - pkg: pkg, - } - if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() { - imp.name = pkg.GetTypes().Name() - } - if count >= maxUnimportedPackageNames { - return nil - } - c.deepState.enqueue(candidate{ - // Pass an empty *types.Package to disable deep completions. - obj: types.NewPkgName(0, nil, pkg.GetTypes().Name(), types.NewPackage(path, pkg.Name())), - score: unimportedScore(relevances[path]), - imp: imp, - }) - count++ - } - - ctx, cancel := context.WithCancel(ctx) - - var mu sync.Mutex - add := func(pkg imports.ImportFix) { - mu.Lock() - defer mu.Unlock() - if _, ok := seen[pkg.IdentName]; ok { - return - } - if _, ok := relevances[pkg.StmtInfo.ImportPath]; ok { - return - } - - if count >= maxUnimportedPackageNames { - cancel() - return - } - - // Do not add the unimported packages to seen, since we can have - // multiple packages of the same name as completion suggestions, since - // only one will be chosen. - obj := types.NewPkgName(0, nil, pkg.IdentName, types.NewPackage(pkg.StmtInfo.ImportPath, pkg.IdentName)) - c.deepState.enqueue(candidate{ - obj: obj, - score: unimportedScore(pkg.Relevance), - imp: &importInfo{ - importPath: pkg.StmtInfo.ImportPath, - name: pkg.StmtInfo.Name, - }, - }) - count++ - } - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { - defer cancel() - return imports.GetAllCandidates(ctx, add, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) - }) - return nil -} - -// alreadyImports reports whether f has an import with the specified path. -func alreadyImports(f *ast.File, path string) bool { - for _, s := range f.Imports { - if source.ImportPath(s) == path { - return true - } - } - return false -} - -func (c *completer) inConstDecl() bool { - for _, n := range c.path { - if decl, ok := n.(*ast.GenDecl); ok && decl.Tok == token.CONST { - return true - } - } - return false -} - -// structLiteralFieldName finds completions for struct field names inside a struct literal. -func (c *completer) structLiteralFieldName(ctx context.Context) error { - clInfo := c.enclosingCompositeLiteral - - // Mark fields of the composite literal that have already been set, - // except for the current field. - addedFields := make(map[*types.Var]bool) - for _, el := range clInfo.cl.Elts { - if kvExpr, ok := el.(*ast.KeyValueExpr); ok { - if clInfo.kv == kvExpr { - continue - } - - if key, ok := kvExpr.Key.(*ast.Ident); ok { - if used, ok := c.pkg.GetTypesInfo().Uses[key]; ok { - if usedVar, ok := used.(*types.Var); ok { - addedFields[usedVar] = true - } - } - } - } - } - - deltaScore := 0.0001 - switch t := clInfo.clType.(type) { - case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - field := t.Field(i) - if !addedFields[field] { - c.deepState.enqueue(candidate{ - obj: field, - score: highScore - float64(i)*deltaScore, - }) - } - } - - // Add lexical completions if we aren't certain we are in the key part of a - // key-value pair. - if clInfo.maybeInFieldName { - return c.lexical(ctx) - } - default: - return c.lexical(ctx) - } - - return nil -} - -func (cl *compLitInfo) isStruct() bool { - _, ok := cl.clType.(*types.Struct) - return ok -} - -// enclosingCompositeLiteral returns information about the composite literal enclosing the -// position. -func enclosingCompositeLiteral(path []ast.Node, pos token.Pos, info *types.Info) *compLitInfo { - for _, n := range path { - switch n := n.(type) { - case *ast.CompositeLit: - // The enclosing node will be a composite literal if the user has just - // opened the curly brace (e.g. &x{<>) or the completion request is triggered - // from an already completed composite literal expression (e.g. &x{foo: 1, <>}) - // - // The position is not part of the composite literal unless it falls within the - // curly braces (e.g. "foo.Foo<>Struct{}"). - if !(n.Lbrace < pos && pos <= n.Rbrace) { - // Keep searching since we may yet be inside a composite literal. - // For example "Foo{B: Ba<>{}}". - break - } - - tv, ok := info.Types[n] - if !ok { - return nil - } - - clInfo := compLitInfo{ - cl: n, - clType: source.Deref(tv.Type).Underlying(), - } - - var ( - expr ast.Expr - hasKeys bool - ) - for _, el := range n.Elts { - // Remember the expression that the position falls in, if any. - if el.Pos() <= pos && pos <= el.End() { - expr = el - } - - if kv, ok := el.(*ast.KeyValueExpr); ok { - hasKeys = true - // If expr == el then we know the position falls in this expression, - // so also record kv as the enclosing *ast.KeyValueExpr. - if expr == el { - clInfo.kv = kv - break - } - } - } - - if clInfo.kv != nil { - // If in a *ast.KeyValueExpr, we know we are in the key if the position - // is to the left of the colon (e.g. "Foo{F<>: V}". - clInfo.inKey = pos <= clInfo.kv.Colon - } else if hasKeys { - // If we aren't in a *ast.KeyValueExpr but the composite literal has - // other *ast.KeyValueExprs, we must be on the key side of a new - // *ast.KeyValueExpr (e.g. "Foo{F: V, <>}"). - clInfo.inKey = true - } else { - switch clInfo.clType.(type) { - case *types.Struct: - if len(n.Elts) == 0 { - // If the struct literal is empty, next could be a struct field - // name or an expression (e.g. "Foo{<>}" could become "Foo{F:}" - // or "Foo{someVar}"). - clInfo.maybeInFieldName = true - } else if len(n.Elts) == 1 { - // If there is one expression and the position is in that expression - // and the expression is an identifier, we may be writing a field - // name or an expression (e.g. "Foo{F<>}"). - _, clInfo.maybeInFieldName = expr.(*ast.Ident) - } - case *types.Map: - // If we aren't in a *ast.KeyValueExpr we must be adding a new key - // to the map. - clInfo.inKey = true - } - } - - return &clInfo - default: - if breaksExpectedTypeInference(n, pos) { - return nil - } - } - } - - return nil -} - -// enclosingFunction returns the signature and body of the function -// enclosing the given position. -func enclosingFunction(path []ast.Node, info *types.Info) *funcInfo { - for _, node := range path { - switch t := node.(type) { - case *ast.FuncDecl: - if obj, ok := info.Defs[t.Name]; ok { - return &funcInfo{ - sig: obj.Type().(*types.Signature), - body: t.Body, - } - } - case *ast.FuncLit: - if typ, ok := info.Types[t]; ok { - if sig, _ := typ.Type.(*types.Signature); sig == nil { - // golang/go#49397: it should not be possible, but we somehow arrived - // here with a non-signature type, most likely due to AST mangling - // such that node.Type is not a FuncType. - return nil - } - return &funcInfo{ - sig: typ.Type.(*types.Signature), - body: t.Body, - } - } - } - } - return nil -} - -func (c *completer) expectedCompositeLiteralType() types.Type { - clInfo := c.enclosingCompositeLiteral - switch t := clInfo.clType.(type) { - case *types.Slice: - if clInfo.inKey { - return types.Typ[types.UntypedInt] - } - return t.Elem() - case *types.Array: - if clInfo.inKey { - return types.Typ[types.UntypedInt] - } - return t.Elem() - case *types.Map: - if clInfo.inKey { - return t.Key() - } - return t.Elem() - case *types.Struct: - // If we are completing a key (i.e. field name), there is no expected type. - if clInfo.inKey { - return nil - } - - // If we are in a key-value pair, but not in the key, then we must be on the - // value side. The expected type of the value will be determined from the key. - if clInfo.kv != nil { - if key, ok := clInfo.kv.Key.(*ast.Ident); ok { - for i := 0; i < t.NumFields(); i++ { - if field := t.Field(i); field.Name() == key.Name { - return field.Type() - } - } - } - } else { - // If we aren't in a key-value pair and aren't in the key, we must be using - // implicit field names. - - // The order of the literal fields must match the order in the struct definition. - // Find the element that the position belongs to and suggest that field's type. - if i := exprAtPos(c.pos, clInfo.cl.Elts); i < t.NumFields() { - return t.Field(i).Type() - } - } - } - return nil -} - -// typeMod represents an operator that changes the expected type. -type typeMod struct { - mod typeModKind - arrayLen int64 -} - -type typeModKind int - -const ( - dereference typeModKind = iota // pointer indirection: "*" - reference // adds level of pointer: "&" for values, "*" for type names - chanRead // channel read operator: "<-" - sliceType // make a slice type: "[]" in "[]int" - arrayType // make an array type: "[2]" in "[2]int" - invoke // make a function call: "()" in "foo()" - takeSlice // take slice of array: "[:]" in "foo[:]" - takeDotDotDot // turn slice into variadic args: "..." in "foo..." - index // index into slice/array: "[0]" in "foo[0]" -) - -type objKind int - -const ( - kindAny objKind = 0 - kindArray objKind = 1 << iota - kindSlice - kindChan - kindMap - kindStruct - kindString - kindInt - kindBool - kindBytes - kindPtr - kindFloat - kindComplex - kindError - kindStringer - kindFunc -) - -// penalizedObj represents an object that should be disfavored as a -// completion candidate. -type penalizedObj struct { - // objChain is the full "chain", e.g. "foo.bar().baz" becomes - // []types.Object{foo, bar, baz}. - objChain []types.Object - // penalty is score penalty in the range (0, 1). - penalty float64 -} - -// candidateInference holds information we have inferred about a type that can be -// used at the current position. -type candidateInference struct { - // objType is the desired type of an object used at the query position. - objType types.Type - - // objKind is a mask of expected kinds of types such as "map", "slice", etc. - objKind objKind - - // variadic is true if we are completing the initial variadic - // parameter. For example: - // append([]T{}, <>) // objType=T variadic=true - // append([]T{}, T{}, <>) // objType=T variadic=false - variadic bool - - // modifiers are prefixes such as "*", "&" or "<-" that influence how - // a candidate type relates to the expected type. - modifiers []typeMod - - // convertibleTo is a type our candidate type must be convertible to. - convertibleTo types.Type - - // typeName holds information about the expected type name at - // position, if any. - typeName typeNameInference - - // assignees are the types that would receive a function call's - // results at the position. For example: - // - // foo := 123 - // foo, bar := <> - // - // at "<>", the assignees are [int, ]. - assignees []types.Type - - // variadicAssignees is true if we could be completing an inner - // function call that fills out an outer function call's variadic - // params. For example: - // - // func foo(int, ...string) {} - // - // foo(<>) // variadicAssignees=true - // foo(bar<>) // variadicAssignees=true - // foo(bar, baz<>) // variadicAssignees=false - variadicAssignees bool - - // penalized holds expressions that should be disfavored as - // candidates. For example, it tracks expressions already used in a - // switch statement's other cases. Each expression is tracked using - // its entire object "chain" allowing differentiation between - // "a.foo" and "b.foo" when "a" and "b" are the same type. - penalized []penalizedObj - - // objChain contains the chain of objects representing the - // surrounding *ast.SelectorExpr. For example, if we are completing - // "foo.bar.ba<>", objChain will contain []types.Object{foo, bar}. - objChain []types.Object -} - -// typeNameInference holds information about the expected type name at -// position. -type typeNameInference struct { - // wantTypeName is true if we expect the name of a type. - wantTypeName bool - - // modifiers are prefixes such as "*", "&" or "<-" that influence how - // a candidate type relates to the expected type. - modifiers []typeMod - - // assertableFrom is a type that must be assertable to our candidate type. - assertableFrom types.Type - - // wantComparable is true if we want a comparable type. - wantComparable bool - - // seenTypeSwitchCases tracks types that have already been used by - // the containing type switch. - seenTypeSwitchCases []types.Type - - // compLitType is true if we are completing a composite literal type - // name, e.g "foo<>{}". - compLitType bool - - // isTypeParam is true if we are completing a type instantiation parameter - isTypeParam bool -} - -// expectedCandidate returns information about the expected candidate -// for an expression at the query position. -func expectedCandidate(ctx context.Context, c *completer) (inf candidateInference) { - inf.typeName = expectTypeName(c) - - if c.enclosingCompositeLiteral != nil { - inf.objType = c.expectedCompositeLiteralType() - } - -Nodes: - for i, node := range c.path { - switch node := node.(type) { - case *ast.BinaryExpr: - // Determine if query position comes from left or right of op. - e := node.X - if c.pos < node.OpPos { - e = node.Y - } - if tv, ok := c.pkg.GetTypesInfo().Types[e]; ok { - switch node.Op { - case token.LAND, token.LOR: - // Don't infer "bool" type for "&&" or "||". Often you want - // to compose a boolean expression from non-boolean - // candidates. - default: - inf.objType = tv.Type - } - break Nodes - } - case *ast.AssignStmt: - // Only rank completions if you are on the right side of the token. - if c.pos > node.TokPos { - i := exprAtPos(c.pos, node.Rhs) - if i >= len(node.Lhs) { - i = len(node.Lhs) - 1 - } - if tv, ok := c.pkg.GetTypesInfo().Types[node.Lhs[i]]; ok { - inf.objType = tv.Type - } - - // If we have a single expression on the RHS, record the LHS - // assignees so we can favor multi-return function calls with - // matching result values. - if len(node.Rhs) <= 1 { - for _, lhs := range node.Lhs { - inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(lhs)) - } - } else { - // Otherwise, record our single assignee, even if its type is - // not available. We use this info to downrank functions - // with the wrong number of result values. - inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(node.Lhs[i])) - } - } - return inf - case *ast.ValueSpec: - if node.Type != nil && c.pos > node.Type.End() { - inf.objType = c.pkg.GetTypesInfo().TypeOf(node.Type) - } - return inf - case *ast.CallExpr: - // Only consider CallExpr args if position falls between parens. - if node.Lparen < c.pos && c.pos <= node.Rparen { - // For type conversions like "int64(foo)" we can only infer our - // desired type is convertible to int64. - if typ := typeConversion(node, c.pkg.GetTypesInfo()); typ != nil { - inf.convertibleTo = typ - break Nodes - } - - sig, _ := c.pkg.GetTypesInfo().Types[node.Fun].Type.(*types.Signature) - - if sig != nil && typeparams.ForSignature(sig).Len() > 0 { - // If we are completing a generic func call, re-check the call expression. - // This allows type param inference to work in cases like: - // - // func foo[T any](T) {} - // foo[int](<>) // <- get "int" completions instead of "T" - // - // TODO: remove this after https://go.dev/issue/52503 - info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)} - types.CheckExpr(c.snapshot.FileSet(), c.pkg.GetTypes(), node.Fun.Pos(), node.Fun, info) - sig, _ = info.Types[node.Fun].Type.(*types.Signature) - } - - if sig != nil { - inf = c.expectedCallParamType(inf, node, sig) - } - - if funIdent, ok := node.Fun.(*ast.Ident); ok { - obj := c.pkg.GetTypesInfo().ObjectOf(funIdent) - - if obj != nil && obj.Parent() == types.Universe { - // Defer call to builtinArgType so we can provide it the - // inferred type from its parent node. - defer func() { - inf = c.builtinArgType(obj, node, inf) - inf.objKind = c.builtinArgKind(ctx, obj, node) - }() - - // The expected type of builtin arguments like append() is - // the expected type of the builtin call itself. For - // example: - // - // var foo []int = append(<>) - // - // To find the expected type at <> we "skip" the append() - // node and get the expected type one level up, which is - // []int. - continue Nodes - } - } - - return inf - } - case *ast.ReturnStmt: - if c.enclosingFunc != nil { - sig := c.enclosingFunc.sig - // Find signature result that corresponds to our return statement. - if resultIdx := exprAtPos(c.pos, node.Results); resultIdx < len(node.Results) { - if resultIdx < sig.Results().Len() { - inf.objType = sig.Results().At(resultIdx).Type() - } - } - } - return inf - case *ast.CaseClause: - if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, node).(*ast.SwitchStmt); ok { - if tv, ok := c.pkg.GetTypesInfo().Types[swtch.Tag]; ok { - inf.objType = tv.Type - - // Record which objects have already been used in the case - // statements so we don't suggest them again. - for _, cc := range swtch.Body.List { - for _, caseExpr := range cc.(*ast.CaseClause).List { - // Don't record the expression we are currently completing. - if caseExpr.Pos() < c.pos && c.pos <= caseExpr.End() { - continue - } - - if objs := objChain(c.pkg.GetTypesInfo(), caseExpr); len(objs) > 0 { - inf.penalized = append(inf.penalized, penalizedObj{objChain: objs, penalty: 0.1}) - } - } - } - } - } - return inf - case *ast.SliceExpr: - // Make sure position falls within the brackets (e.g. "foo[a:<>]"). - if node.Lbrack < c.pos && c.pos <= node.Rbrack { - inf.objType = types.Typ[types.UntypedInt] - } - return inf - case *ast.IndexExpr: - // Make sure position falls within the brackets (e.g. "foo[<>]"). - if node.Lbrack < c.pos && c.pos <= node.Rbrack { - if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok { - switch t := tv.Type.Underlying().(type) { - case *types.Map: - inf.objType = t.Key() - case *types.Slice, *types.Array: - inf.objType = types.Typ[types.UntypedInt] - } - - if ct := expectedConstraint(tv.Type, 0); ct != nil { - inf.objType = ct - inf.typeName.wantTypeName = true - inf.typeName.isTypeParam = true - } - } - } - return inf - case *typeparams.IndexListExpr: - if node.Lbrack < c.pos && c.pos <= node.Rbrack { - if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok { - if ct := expectedConstraint(tv.Type, exprAtPos(c.pos, node.Indices)); ct != nil { - inf.objType = ct - inf.typeName.wantTypeName = true - inf.typeName.isTypeParam = true - } - } - } - return inf - case *ast.SendStmt: - // Make sure we are on right side of arrow (e.g. "foo <- <>"). - if c.pos > node.Arrow+1 { - if tv, ok := c.pkg.GetTypesInfo().Types[node.Chan]; ok { - if ch, ok := tv.Type.Underlying().(*types.Chan); ok { - inf.objType = ch.Elem() - } - } - } - return inf - case *ast.RangeStmt: - if source.NodeContains(node.X, c.pos) { - inf.objKind |= kindSlice | kindArray | kindMap | kindString - if node.Value == nil { - inf.objKind |= kindChan - } - } - return inf - case *ast.StarExpr: - inf.modifiers = append(inf.modifiers, typeMod{mod: dereference}) - case *ast.UnaryExpr: - switch node.Op { - case token.AND: - inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) - case token.ARROW: - inf.modifiers = append(inf.modifiers, typeMod{mod: chanRead}) - } - case *ast.DeferStmt, *ast.GoStmt: - inf.objKind |= kindFunc - return inf - default: - if breaksExpectedTypeInference(node, c.pos) { - return inf - } - } - } - - return inf -} - -func (c *completer) expectedCallParamType(inf candidateInference, node *ast.CallExpr, sig *types.Signature) candidateInference { - numParams := sig.Params().Len() - if numParams == 0 { - return inf - } - - exprIdx := exprAtPos(c.pos, node.Args) - - // If we have one or zero arg expressions, we may be - // completing to a function call that returns multiple - // values, in turn getting passed in to the surrounding - // call. Record the assignees so we can favor function - // calls that return matching values. - if len(node.Args) <= 1 && exprIdx == 0 { - for i := 0; i < sig.Params().Len(); i++ { - inf.assignees = append(inf.assignees, sig.Params().At(i).Type()) - } - - // Record that we may be completing into variadic parameters. - inf.variadicAssignees = sig.Variadic() - } - - // Make sure not to run past the end of expected parameters. - if exprIdx >= numParams { - inf.objType = sig.Params().At(numParams - 1).Type() - } else { - inf.objType = sig.Params().At(exprIdx).Type() - } - - if sig.Variadic() && exprIdx >= (numParams-1) { - // If we are completing a variadic param, deslice the variadic type. - inf.objType = deslice(inf.objType) - // Record whether we are completing the initial variadic param. - inf.variadic = exprIdx == numParams-1 && len(node.Args) <= numParams - - // Check if we can infer object kind from printf verb. - inf.objKind |= printfArgKind(c.pkg.GetTypesInfo(), node, exprIdx) - } - - // If our expected type is an uninstantiated generic type param, - // swap to the constraint which will do a decent job filtering - // candidates. - if tp, _ := inf.objType.(*typeparams.TypeParam); tp != nil { - inf.objType = tp.Constraint() - } - - return inf -} - -func expectedConstraint(t types.Type, idx int) types.Type { - var tp *typeparams.TypeParamList - if named, _ := t.(*types.Named); named != nil { - tp = typeparams.ForNamed(named) - } else if sig, _ := t.Underlying().(*types.Signature); sig != nil { - tp = typeparams.ForSignature(sig) - } - if tp == nil || idx >= tp.Len() { - return nil - } - return tp.At(idx).Constraint() -} - -// objChain decomposes e into a chain of objects if possible. For -// example, "foo.bar().baz" will yield []types.Object{foo, bar, baz}. -// If any part can't be turned into an object, return nil. -func objChain(info *types.Info, e ast.Expr) []types.Object { - var objs []types.Object - - for e != nil { - switch n := e.(type) { - case *ast.Ident: - obj := info.ObjectOf(n) - if obj == nil { - return nil - } - objs = append(objs, obj) - e = nil - case *ast.SelectorExpr: - obj := info.ObjectOf(n.Sel) - if obj == nil { - return nil - } - objs = append(objs, obj) - e = n.X - case *ast.CallExpr: - if len(n.Args) > 0 { - return nil - } - e = n.Fun - default: - return nil - } - } - - // Reverse order so the layout matches the syntactic order. - for i := 0; i < len(objs)/2; i++ { - objs[i], objs[len(objs)-1-i] = objs[len(objs)-1-i], objs[i] - } - - return objs -} - -// applyTypeModifiers applies the list of type modifiers to a type. -// It returns nil if the modifiers could not be applied. -func (ci candidateInference) applyTypeModifiers(typ types.Type, addressable bool) types.Type { - for _, mod := range ci.modifiers { - switch mod.mod { - case dereference: - // For every "*" indirection operator, remove a pointer layer - // from candidate type. - if ptr, ok := typ.Underlying().(*types.Pointer); ok { - typ = ptr.Elem() - } else { - return nil - } - case reference: - // For every "&" address operator, add another pointer layer to - // candidate type, if the candidate is addressable. - if addressable { - typ = types.NewPointer(typ) - } else { - return nil - } - case chanRead: - // For every "<-" operator, remove a layer of channelness. - if ch, ok := typ.(*types.Chan); ok { - typ = ch.Elem() - } else { - return nil - } - } - } - - return typ -} - -// applyTypeNameModifiers applies the list of type modifiers to a type name. -func (ci candidateInference) applyTypeNameModifiers(typ types.Type) types.Type { - for _, mod := range ci.typeName.modifiers { - switch mod.mod { - case reference: - typ = types.NewPointer(typ) - case arrayType: - typ = types.NewArray(typ, mod.arrayLen) - case sliceType: - typ = types.NewSlice(typ) - } - } - return typ -} - -// matchesVariadic returns true if we are completing a variadic -// parameter and candType is a compatible slice type. -func (ci candidateInference) matchesVariadic(candType types.Type) bool { - return ci.variadic && ci.objType != nil && types.AssignableTo(candType, types.NewSlice(ci.objType)) -} - -// findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or -// *ast.TypeSwitchStmt. path should start from the case clause's first ancestor. -func findSwitchStmt(path []ast.Node, pos token.Pos, c *ast.CaseClause) ast.Stmt { - // Make sure position falls within a "case <>:" clause. - if exprAtPos(pos, c.List) >= len(c.List) { - return nil - } - // A case clause is always nested within a block statement in a switch statement. - if len(path) < 2 { - return nil - } - if _, ok := path[0].(*ast.BlockStmt); !ok { - return nil - } - switch s := path[1].(type) { - case *ast.SwitchStmt: - return s - case *ast.TypeSwitchStmt: - return s - default: - return nil - } -} - -// breaksExpectedTypeInference reports if an expression node's type is unrelated -// to its child expression node types. For example, "Foo{Bar: x.Baz(<>)}" should -// expect a function argument, not a composite literal value. -func breaksExpectedTypeInference(n ast.Node, pos token.Pos) bool { - switch n := n.(type) { - case *ast.CompositeLit: - // Doesn't break inference if pos is in type name. - // For example: "Foo<>{Bar: 123}" - return !source.NodeContains(n.Type, pos) - case *ast.CallExpr: - // Doesn't break inference if pos is in func name. - // For example: "Foo<>(123)" - return !source.NodeContains(n.Fun, pos) - case *ast.FuncLit, *ast.IndexExpr, *ast.SliceExpr: - return true - default: - return false - } -} - -// expectTypeName returns information about the expected type name at position. -func expectTypeName(c *completer) typeNameInference { - var inf typeNameInference - -Nodes: - for i, p := range c.path { - switch n := p.(type) { - case *ast.FieldList: - // Expect a type name if pos is in a FieldList. This applies to - // FuncType params/results, FuncDecl receiver, StructType, and - // InterfaceType. We don't need to worry about the field name - // because completion bails out early if pos is in an *ast.Ident - // that defines an object. - inf.wantTypeName = true - break Nodes - case *ast.CaseClause: - // Expect type names in type switch case clauses. - if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, n).(*ast.TypeSwitchStmt); ok { - // The case clause types must be assertable from the type switch parameter. - ast.Inspect(swtch.Assign, func(n ast.Node) bool { - if ta, ok := n.(*ast.TypeAssertExpr); ok { - inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(ta.X) - return false - } - return true - }) - inf.wantTypeName = true - - // Track the types that have already been used in this - // switch's case statements so we don't recommend them. - for _, e := range swtch.Body.List { - for _, typeExpr := range e.(*ast.CaseClause).List { - // Skip if type expression contains pos. We don't want to - // count it as already used if the user is completing it. - if typeExpr.Pos() < c.pos && c.pos <= typeExpr.End() { - continue - } - - if t := c.pkg.GetTypesInfo().TypeOf(typeExpr); t != nil { - inf.seenTypeSwitchCases = append(inf.seenTypeSwitchCases, t) - } - } - } - - break Nodes - } - return typeNameInference{} - case *ast.TypeAssertExpr: - // Expect type names in type assert expressions. - if n.Lparen < c.pos && c.pos <= n.Rparen { - // The type in parens must be assertable from the expression type. - inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(n.X) - inf.wantTypeName = true - break Nodes - } - return typeNameInference{} - case *ast.StarExpr: - inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) - case *ast.CompositeLit: - // We want a type name if position is in the "Type" part of a - // composite literal (e.g. "Foo<>{}"). - if n.Type != nil && n.Type.Pos() <= c.pos && c.pos <= n.Type.End() { - inf.wantTypeName = true - inf.compLitType = true - - if i < len(c.path)-1 { - // Track preceding "&" operator. Technically it applies to - // the composite literal and not the type name, but if - // affects our type completion nonetheless. - if u, ok := c.path[i+1].(*ast.UnaryExpr); ok && u.Op == token.AND { - inf.modifiers = append(inf.modifiers, typeMod{mod: reference}) - } - } - } - break Nodes - case *ast.ArrayType: - // If we are inside the "Elt" part of an array type, we want a type name. - if n.Elt.Pos() <= c.pos && c.pos <= n.Elt.End() { - inf.wantTypeName = true - if n.Len == nil { - // No "Len" expression means a slice type. - inf.modifiers = append(inf.modifiers, typeMod{mod: sliceType}) - } else { - // Try to get the array type using the constant value of "Len". - tv, ok := c.pkg.GetTypesInfo().Types[n.Len] - if ok && tv.Value != nil && tv.Value.Kind() == constant.Int { - if arrayLen, ok := constant.Int64Val(tv.Value); ok { - inf.modifiers = append(inf.modifiers, typeMod{mod: arrayType, arrayLen: arrayLen}) - } - } - } - - // ArrayTypes can be nested, so keep going if our parent is an - // ArrayType. - if i < len(c.path)-1 { - if _, ok := c.path[i+1].(*ast.ArrayType); ok { - continue Nodes - } - } - - break Nodes - } - case *ast.MapType: - inf.wantTypeName = true - if n.Key != nil { - inf.wantComparable = source.NodeContains(n.Key, c.pos) - } else { - // If the key is empty, assume we are completing the key if - // pos is directly after the "map[". - inf.wantComparable = c.pos == n.Pos()+token.Pos(len("map[")) - } - break Nodes - case *ast.ValueSpec: - inf.wantTypeName = source.NodeContains(n.Type, c.pos) - break Nodes - case *ast.TypeSpec: - inf.wantTypeName = source.NodeContains(n.Type, c.pos) - default: - if breaksExpectedTypeInference(p, c.pos) { - return typeNameInference{} - } - } - } - - return inf -} - -func (c *completer) fakeObj(T types.Type) *types.Var { - return types.NewVar(token.NoPos, c.pkg.GetTypes(), "", T) -} - -// derivableTypes iterates types you can derive from t. For example, -// from "foo" we might derive "&foo", and "foo()". -func derivableTypes(t types.Type, addressable bool, f func(t types.Type, addressable bool, mod typeModKind) bool) bool { - switch t := t.Underlying().(type) { - case *types.Signature: - // If t is a func type with a single result, offer the result type. - if t.Results().Len() == 1 && f(t.Results().At(0).Type(), false, invoke) { - return true - } - case *types.Array: - if f(t.Elem(), true, index) { - return true - } - // Try converting array to slice. - if f(types.NewSlice(t.Elem()), false, takeSlice) { - return true - } - case *types.Pointer: - if f(t.Elem(), false, dereference) { - return true - } - case *types.Slice: - if f(t.Elem(), true, index) { - return true - } - case *types.Map: - if f(t.Elem(), false, index) { - return true - } - case *types.Chan: - if f(t.Elem(), false, chanRead) { - return true - } - } - - // Check if c is addressable and a pointer to c matches our type inference. - if addressable && f(types.NewPointer(t), false, reference) { - return true - } - - return false -} - -// anyCandType reports whether f returns true for any candidate type -// derivable from c. It searches up to three levels of type -// modification. For example, given "foo" we could discover "***foo" -// or "*foo()". -func (c *candidate) anyCandType(f func(t types.Type, addressable bool) bool) bool { - if c.obj == nil || c.obj.Type() == nil { - return false - } - - const maxDepth = 3 - - var searchTypes func(t types.Type, addressable bool, mods []typeModKind) bool - searchTypes = func(t types.Type, addressable bool, mods []typeModKind) bool { - if f(t, addressable) { - if len(mods) > 0 { - newMods := make([]typeModKind, len(mods)+len(c.mods)) - copy(newMods, mods) - copy(newMods[len(mods):], c.mods) - c.mods = newMods - } - return true - } - - if len(mods) == maxDepth { - return false - } - - return derivableTypes(t, addressable, func(t types.Type, addressable bool, mod typeModKind) bool { - return searchTypes(t, addressable, append(mods, mod)) - }) - } - - return searchTypes(c.obj.Type(), c.addressable, make([]typeModKind, 0, maxDepth)) -} - -// matchingCandidate reports whether cand matches our type inferences. -// It mutates cand's score in certain cases. -func (c *completer) matchingCandidate(cand *candidate) bool { - if c.completionContext.commentCompletion { - return false - } - - // Bail out early if we are completing a field name in a composite literal. - if v, ok := cand.obj.(*types.Var); ok && v.IsField() && c.wantStructFieldCompletions() { - return true - } - - if isTypeName(cand.obj) { - return c.matchingTypeName(cand) - } else if c.wantTypeName() { - // If we want a type, a non-type object never matches. - return false - } - - if c.inference.candTypeMatches(cand) { - return true - } - - candType := cand.obj.Type() - if candType == nil { - return false - } - - if sig, ok := candType.Underlying().(*types.Signature); ok { - if c.inference.assigneesMatch(cand, sig) { - // Invoke the candidate if its results are multi-assignable. - cand.mods = append(cand.mods, invoke) - return true - } - } - - // Default to invoking *types.Func candidates. This is so function - // completions in an empty statement (or other cases with no expected type) - // are invoked by default. - if isFunc(cand.obj) { - cand.mods = append(cand.mods, invoke) - } - - return false -} - -// candTypeMatches reports whether cand makes a good completion -// candidate given the candidate inference. cand's score may be -// mutated to downrank the candidate in certain situations. -func (ci *candidateInference) candTypeMatches(cand *candidate) bool { - var ( - expTypes = make([]types.Type, 0, 2) - variadicType types.Type - ) - if ci.objType != nil { - expTypes = append(expTypes, ci.objType) - - if ci.variadic { - variadicType = types.NewSlice(ci.objType) - expTypes = append(expTypes, variadicType) - } - } - - return cand.anyCandType(func(candType types.Type, addressable bool) bool { - // Take into account any type modifiers on the expected type. - candType = ci.applyTypeModifiers(candType, addressable) - if candType == nil { - return false - } - - if ci.convertibleTo != nil && types.ConvertibleTo(candType, ci.convertibleTo) { - return true - } - - for _, expType := range expTypes { - if isEmptyInterface(expType) { - continue - } - - matches := ci.typeMatches(expType, candType) - if !matches { - // If candType doesn't otherwise match, consider if we can - // convert candType directly to expType. - if considerTypeConversion(candType, expType, cand.path) { - cand.convertTo = expType - // Give a major score penalty so we always prefer directly - // assignable candidates, all else equal. - cand.score *= 0.5 - return true - } - - continue - } - - if expType == variadicType { - cand.mods = append(cand.mods, takeDotDotDot) - } - - // Lower candidate score for untyped conversions. This avoids - // ranking untyped constants above candidates with an exact type - // match. Don't lower score of builtin constants, e.g. "true". - if isUntyped(candType) && !types.Identical(candType, expType) && cand.obj.Parent() != types.Universe { - // Bigger penalty for deep completions into other packages to - // avoid random constants from other packages popping up all - // the time. - if len(cand.path) > 0 && isPkgName(cand.path[0]) { - cand.score *= 0.5 - } else { - cand.score *= 0.75 - } - } - - return true - } - - // If we don't have a specific expected type, fall back to coarser - // object kind checks. - if ci.objType == nil || isEmptyInterface(ci.objType) { - // If we were able to apply type modifiers to our candidate type, - // count that as a match. For example: - // - // var foo chan int - // <-fo<> - // - // We were able to apply the "<-" type modifier to "foo", so "foo" - // matches. - if len(ci.modifiers) > 0 { - return true - } - - // If we didn't have an exact type match, check if our object kind - // matches. - if ci.kindMatches(candType) { - if ci.objKind == kindFunc { - cand.mods = append(cand.mods, invoke) - } - return true - } - } - - return false - }) -} - -// considerTypeConversion returns true if we should offer a completion -// automatically converting "from" to "to". -func considerTypeConversion(from, to types.Type, path []types.Object) bool { - // Don't offer to convert deep completions from other packages. - // Otherwise there are many random package level consts/vars that - // pop up as candidates all the time. - if len(path) > 0 && isPkgName(path[0]) { - return false - } - - if _, ok := from.(*typeparams.TypeParam); ok { - return false - } - - if !types.ConvertibleTo(from, to) { - return false - } - - // Don't offer to convert ints to strings since that probably - // doesn't do what the user wants. - if isBasicKind(from, types.IsInteger) && isBasicKind(to, types.IsString) { - return false - } - - return true -} - -// typeMatches reports whether an object of candType makes a good -// completion candidate given the expected type expType. -func (ci *candidateInference) typeMatches(expType, candType types.Type) bool { - // Handle untyped values specially since AssignableTo gives false negatives - // for them (see https://golang.org/issue/32146). - if candBasic, ok := candType.Underlying().(*types.Basic); ok { - if expBasic, ok := expType.Underlying().(*types.Basic); ok { - // Note that the candidate and/or the expected can be untyped. - // In "fo<> == 100" the expected type is untyped, and the - // candidate could also be an untyped constant. - - // Sort by is_untyped and then by is_int to simplify below logic. - a, b := candBasic.Info(), expBasic.Info() - if a&types.IsUntyped == 0 || (b&types.IsInteger > 0 && b&types.IsUntyped > 0) { - a, b = b, a - } - - // If at least one is untyped... - if a&types.IsUntyped > 0 { - switch { - // Untyped integers are compatible with floats. - case a&types.IsInteger > 0 && b&types.IsFloat > 0: - return true - - // Check if their constant kind (bool|int|float|complex|string) matches. - // This doesn't take into account the constant value, so there will be some - // false positives due to integer sign and overflow. - case a&types.IsConstType == b&types.IsConstType: - return true - } - } - } - } - - // AssignableTo covers the case where the types are equal, but also handles - // cases like assigning a concrete type to an interface type. - return types.AssignableTo(candType, expType) -} - -// kindMatches reports whether candType's kind matches our expected -// kind (e.g. slice, map, etc.). -func (ci *candidateInference) kindMatches(candType types.Type) bool { - return ci.objKind > 0 && ci.objKind&candKind(candType) > 0 -} - -// assigneesMatch reports whether an invocation of sig matches the -// number and type of any assignees. -func (ci *candidateInference) assigneesMatch(cand *candidate, sig *types.Signature) bool { - if len(ci.assignees) == 0 { - return false - } - - // Uniresult functions are always usable and are handled by the - // normal, non-assignees type matching logic. - if sig.Results().Len() == 1 { - return false - } - - // Don't prefer completing into func(...interface{}) calls since all - // functions would match. - if ci.variadicAssignees && len(ci.assignees) == 1 && isEmptyInterface(deslice(ci.assignees[0])) { - return false - } - - var numberOfResultsCouldMatch bool - if ci.variadicAssignees { - numberOfResultsCouldMatch = sig.Results().Len() >= len(ci.assignees)-1 - } else { - numberOfResultsCouldMatch = sig.Results().Len() == len(ci.assignees) - } - - // If our signature doesn't return the right number of values, it's - // not a match, so downrank it. For example: - // - // var foo func() (int, int) - // a, b, c := <> // downrank "foo()" since it only returns two values - if !numberOfResultsCouldMatch { - cand.score /= 2 - return false - } - - // If at least one assignee has a valid type, and all valid - // assignees match the corresponding sig result value, the signature - // is a match. - allMatch := false - for i := 0; i < sig.Results().Len(); i++ { - var assignee types.Type - - // If we are completing into variadic parameters, deslice the - // expected variadic type. - if ci.variadicAssignees && i >= len(ci.assignees)-1 { - assignee = ci.assignees[len(ci.assignees)-1] - if elem := deslice(assignee); elem != nil { - assignee = elem - } - } else { - assignee = ci.assignees[i] - } - - if assignee == nil { - continue - } - - allMatch = ci.typeMatches(assignee, sig.Results().At(i).Type()) - if !allMatch { - break - } - } - return allMatch -} - -func (c *completer) matchingTypeName(cand *candidate) bool { - if !c.wantTypeName() { - return false - } - - typeMatches := func(candType types.Type) bool { - // Take into account any type name modifier prefixes. - candType = c.inference.applyTypeNameModifiers(candType) - - if from := c.inference.typeName.assertableFrom; from != nil { - // Don't suggest the starting type in type assertions. For example, - // if "foo" is an io.Writer, don't suggest "foo.(io.Writer)". - if types.Identical(from, candType) { - return false - } - - if intf, ok := from.Underlying().(*types.Interface); ok { - if !types.AssertableTo(intf, candType) { - return false - } - } - } - - if c.inference.typeName.wantComparable && !types.Comparable(candType) { - return false - } - - // Skip this type if it has already been used in another type - // switch case. - for _, seen := range c.inference.typeName.seenTypeSwitchCases { - if types.Identical(candType, seen) { - return false - } - } - - // We can expect a type name and have an expected type in cases like: - // - // var foo []int - // foo = []i<> - // - // Where our expected type is "[]int", and we expect a type name. - if c.inference.objType != nil { - return types.AssignableTo(candType, c.inference.objType) - } - - // Default to saying any type name is a match. - return true - } - - t := cand.obj.Type() - - if typeMatches(t) { - return true - } - - if !source.IsInterface(t) && typeMatches(types.NewPointer(t)) { - if c.inference.typeName.compLitType { - // If we are completing a composite literal type as in - // "foo<>{}", to make a pointer we must prepend "&". - cand.mods = append(cand.mods, reference) - } else { - // If we are completing a normal type name such as "foo<>", to - // make a pointer we must prepend "*". - cand.mods = append(cand.mods, dereference) - } - return true - } - - return false -} - -var ( - // "interface { Error() string }" (i.e. error) - errorIntf = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) - - // "interface { String() string }" (i.e. fmt.Stringer) - stringerIntf = types.NewInterfaceType([]*types.Func{ - types.NewFunc(token.NoPos, nil, "String", types.NewSignature( - nil, - nil, - types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])), - false, - )), - }, nil).Complete() - - byteType = types.Universe.Lookup("byte").Type() -) - -// candKind returns the objKind of candType, if any. -func candKind(candType types.Type) objKind { - var kind objKind - - switch t := candType.Underlying().(type) { - case *types.Array: - kind |= kindArray - if t.Elem() == byteType { - kind |= kindBytes - } - case *types.Slice: - kind |= kindSlice - if t.Elem() == byteType { - kind |= kindBytes - } - case *types.Chan: - kind |= kindChan - case *types.Map: - kind |= kindMap - case *types.Pointer: - kind |= kindPtr - - // Some builtins handle array pointers as arrays, so just report a pointer - // to an array as an array. - if _, isArray := t.Elem().Underlying().(*types.Array); isArray { - kind |= kindArray - } - case *types.Basic: - switch info := t.Info(); { - case info&types.IsString > 0: - kind |= kindString - case info&types.IsInteger > 0: - kind |= kindInt - case info&types.IsFloat > 0: - kind |= kindFloat - case info&types.IsComplex > 0: - kind |= kindComplex - case info&types.IsBoolean > 0: - kind |= kindBool - } - case *types.Signature: - return kindFunc - } - - if types.Implements(candType, errorIntf) { - kind |= kindError - } - - if types.Implements(candType, stringerIntf) { - kind |= kindStringer - } - - return kind -} - -// innermostScope returns the innermost scope for c.pos. -func (c *completer) innermostScope() *types.Scope { - for _, s := range c.scopes { - if s != nil { - return s - } - } - return nil -} diff --git a/internal/lsp/source/completion/definition.go b/internal/lsp/source/completion/definition.go deleted file mode 100644 index 44d5a33b2f4..00000000000 --- a/internal/lsp/source/completion/definition.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "go/ast" - "go/token" - "go/types" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -// some definitions can be completed -// So far, TestFoo(t *testing.T), TestMain(m *testing.M) -// BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F) - -// path[0] is known to be *ast.Ident -func definition(path []ast.Node, obj types.Object, fset *token.FileSet, fh source.FileHandle) ([]CompletionItem, *Selection) { - if _, ok := obj.(*types.Func); !ok { - return nil, nil // not a function at all - } - if !strings.HasSuffix(fh.URI().Filename(), "_test.go") { - return nil, nil - } - - name := path[0].(*ast.Ident).Name - if len(name) == 0 { - // can't happen - return nil, nil - } - pos := path[0].Pos() - sel := &Selection{ - content: "", - cursor: pos, - rng: span.NewRange(fset, pos, pos), - } - var ans []CompletionItem - - // Always suggest TestMain, if possible - if strings.HasPrefix("TestMain", name) { - ans = []CompletionItem{defItem("TestMain(m *testing.M)", obj)} - } - - // If a snippet is possible, suggest it - if strings.HasPrefix("Test", name) { - ans = append(ans, defSnippet("Test", "Xxx", "(t *testing.T)", obj)) - return ans, sel - } else if strings.HasPrefix("Benchmark", name) { - ans = append(ans, defSnippet("Benchmark", "Xxx", "(b *testing.B)", obj)) - return ans, sel - } else if strings.HasPrefix("Fuzz", name) { - ans = append(ans, defSnippet("Fuzz", "Xxx", "(f *testing.F)", obj)) - return ans, sel - } - - // Fill in the argument for what the user has already typed - if got := defMatches(name, "Test", path, "(t *testing.T)"); got != "" { - ans = append(ans, defItem(got, obj)) - } else if got := defMatches(name, "Benchmark", path, "(b *testing.B)"); got != "" { - ans = append(ans, defItem(got, obj)) - } else if got := defMatches(name, "Fuzz", path, "(f *testing.F)"); got != "" { - ans = append(ans, defItem(got, obj)) - } - return ans, sel -} - -func defMatches(name, pat string, path []ast.Node, arg string) string { - idx := strings.Index(name, pat) - if idx < 0 { - return "" - } - c, _ := utf8.DecodeRuneInString(name[len(pat):]) - if unicode.IsLower(c) { - return "" - } - fd, ok := path[1].(*ast.FuncDecl) - if !ok { - // we don't know what's going on - return "" - } - fp := fd.Type.Params - if fp != nil && len(fp.List) > 0 { - // signature already there, minimal suggestion - return name - } - // suggesting signature too - return name + arg -} - -func defSnippet(prefix, placeholder, suffix string, obj types.Object) CompletionItem { - var sn snippet.Builder - sn.WriteText(prefix) - if placeholder != "" { - sn.WritePlaceholder(func(b *snippet.Builder) { b.WriteText(placeholder) }) - } - sn.WriteText(suffix + " {\n") - sn.WriteFinalTabstop() - sn.WriteText("\n}") - return CompletionItem{ - Label: prefix + placeholder + suffix, - Detail: "tab, type the rest of the name, then tab", - Kind: protocol.FunctionCompletion, - Depth: 0, - Score: 10, - snippet: &sn, - Documentation: prefix + " test function", - obj: obj, - } -} -func defItem(val string, obj types.Object) CompletionItem { - return CompletionItem{ - Label: val, - InsertText: val, - Kind: protocol.FunctionCompletion, - Depth: 0, - Score: 9, // prefer the snippets when available - Documentation: "complete the parameter", - obj: obj, - } -} diff --git a/internal/lsp/source/completion/format.go b/internal/lsp/source/completion/format.go deleted file mode 100644 index 72498cc6874..00000000000 --- a/internal/lsp/source/completion/format.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "context" - "errors" - "fmt" - "go/ast" - "go/doc" - "go/types" - "strings" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typeparams" -) - -var ( - errNoMatch = errors.New("not a surrounding match") - errLowScore = errors.New("not a high scoring candidate") -) - -// item formats a candidate to a CompletionItem. -func (c *completer) item(ctx context.Context, cand candidate) (CompletionItem, error) { - obj := cand.obj - - // if the object isn't a valid match against the surrounding, return early. - matchScore := c.matcher.Score(cand.name) - if matchScore <= 0 { - return CompletionItem{}, errNoMatch - } - cand.score *= float64(matchScore) - - // Ignore deep candidates that wont be in the MaxDeepCompletions anyway. - if len(cand.path) != 0 && !c.deepState.isHighScore(cand.score) { - return CompletionItem{}, errLowScore - } - - // Handle builtin types separately. - if obj.Parent() == types.Universe { - return c.formatBuiltin(ctx, cand) - } - - var ( - label = cand.name - detail = types.TypeString(obj.Type(), c.qf) - insert = label - kind = protocol.TextCompletion - snip snippet.Builder - protocolEdits []protocol.TextEdit - ) - if obj.Type() == nil { - detail = "" - } - if isTypeName(obj) && c.wantTypeParams() { - x := cand.obj.(*types.TypeName) - if named, ok := x.Type().(*types.Named); ok { - tp := typeparams.ForNamed(named) - label += source.FormatTypeParams(tp) - insert = label // maintain invariant above (label == insert) - } - } - - snip.WriteText(insert) - - switch obj := obj.(type) { - case *types.TypeName: - detail, kind = source.FormatType(obj.Type(), c.qf) - case *types.Const: - kind = protocol.ConstantCompletion - case *types.Var: - if _, ok := obj.Type().(*types.Struct); ok { - detail = "struct{...}" // for anonymous structs - } else if obj.IsField() { - detail = source.FormatVarType(ctx, c.snapshot, c.pkg, obj, c.qf) - } - if obj.IsField() { - kind = protocol.FieldCompletion - c.structFieldSnippet(cand, detail, &snip) - } else { - kind = protocol.VariableCompletion - } - if obj.Type() == nil { - break - } - case *types.Func: - sig, ok := obj.Type().Underlying().(*types.Signature) - if !ok { - break - } - kind = protocol.FunctionCompletion - if sig != nil && sig.Recv() != nil { - kind = protocol.MethodCompletion - } - case *types.PkgName: - kind = protocol.ModuleCompletion - detail = fmt.Sprintf("%q", obj.Imported().Path()) - case *types.Label: - kind = protocol.ConstantCompletion - detail = "label" - } - - var prefix string - for _, mod := range cand.mods { - switch mod { - case reference: - prefix = "&" + prefix - case dereference: - prefix = "*" + prefix - case chanRead: - prefix = "<-" + prefix - } - } - - var ( - suffix string - funcType = obj.Type() - ) -Suffixes: - for _, mod := range cand.mods { - switch mod { - case invoke: - if sig, ok := funcType.Underlying().(*types.Signature); ok { - s := source.NewSignature(ctx, c.snapshot, c.pkg, sig, nil, c.qf) - c.functionCallSnippet("", s.TypeParams(), s.Params(), &snip) - if sig.Results().Len() == 1 { - funcType = sig.Results().At(0).Type() - } - detail = "func" + s.Format() - } - - if !c.opts.snippets { - // Without snippets the candidate will not include "()". Don't - // add further suffixes since they will be invalid. For - // example, with snippets "foo()..." would become "foo..." - // without snippets if we added the dotDotDot. - break Suffixes - } - case takeSlice: - suffix += "[:]" - case takeDotDotDot: - suffix += "..." - case index: - snip.WriteText("[") - snip.WritePlaceholder(nil) - snip.WriteText("]") - } - } - - // If this candidate needs an additional import statement, - // add the additional text edits needed. - if cand.imp != nil { - addlEdits, err := c.importEdits(cand.imp) - - if err != nil { - return CompletionItem{}, err - } - - protocolEdits = append(protocolEdits, addlEdits...) - if kind != protocol.ModuleCompletion { - if detail != "" { - detail += " " - } - detail += fmt.Sprintf("(from %q)", cand.imp.importPath) - } - } - - if cand.convertTo != nil { - typeName := types.TypeString(cand.convertTo, c.qf) - - switch cand.convertTo.(type) { - // We need extra parens when casting to these types. For example, - // we need "(*int)(foo)", not "*int(foo)". - case *types.Pointer, *types.Signature: - typeName = "(" + typeName + ")" - } - - prefix = typeName + "(" + prefix - suffix = ")" - } - - if prefix != "" { - // If we are in a selector, add an edit to place prefix before selector. - if sel := enclosingSelector(c.path, c.pos); sel != nil { - edits, err := c.editText(sel.Pos(), sel.Pos(), prefix) - if err != nil { - return CompletionItem{}, err - } - protocolEdits = append(protocolEdits, edits...) - } else { - // If there is no selector, just stick the prefix at the start. - insert = prefix + insert - snip.PrependText(prefix) - } - } - - if suffix != "" { - insert += suffix - snip.WriteText(suffix) - } - - detail = strings.TrimPrefix(detail, "untyped ") - // override computed detail with provided detail, if something is provided. - if cand.detail != "" { - detail = cand.detail - } - item := CompletionItem{ - Label: label, - InsertText: insert, - AdditionalTextEdits: protocolEdits, - Detail: detail, - Kind: kind, - Score: cand.score, - Depth: len(cand.path), - snippet: &snip, - obj: obj, - } - // If the user doesn't want documentation for completion items. - if !c.opts.documentation { - return item, nil - } - pos := c.snapshot.FileSet().Position(obj.Pos()) - - // We ignore errors here, because some types, like "unsafe" or "error", - // may not have valid positions that we can use to get documentation. - if !pos.IsValid() { - return item, nil - } - uri := span.URIFromPath(pos.Filename) - - // Find the source file of the candidate. - pkg, err := source.FindPackageFromPos(ctx, c.snapshot, obj.Pos()) - if err != nil { - return item, nil - } - - decl, err := c.snapshot.PosToDecl(ctx, pkg, obj.Pos()) - if err != nil { - return CompletionItem{}, err - } - hover, err := source.FindHoverContext(ctx, c.snapshot, pkg, obj, decl, nil) - if err != nil { - event.Error(ctx, "failed to find Hover", err, tag.URI.Of(uri)) - return item, nil - } - if c.opts.fullDocumentation { - item.Documentation = hover.Comment.Text() - } else { - item.Documentation = doc.Synopsis(hover.Comment.Text()) - } - // The desired pattern is `^// Deprecated`, but the prefix has been removed - if strings.HasPrefix(hover.Comment.Text(), "Deprecated") { - if c.snapshot.View().Options().CompletionTags { - item.Tags = []protocol.CompletionItemTag{protocol.ComplDeprecated} - } else if c.snapshot.View().Options().CompletionDeprecated { - item.Deprecated = true - } - } - - return item, nil -} - -// importEdits produces the text edits necessary to add the given import to the current file. -func (c *completer) importEdits(imp *importInfo) ([]protocol.TextEdit, error) { - if imp == nil { - return nil, nil - } - - pgf, err := c.pkg.File(span.URIFromPath(c.filename)) - if err != nil { - return nil, err - } - - return source.ComputeOneImportFixEdits(c.snapshot, pgf, &imports.ImportFix{ - StmtInfo: imports.ImportInfo{ - ImportPath: imp.importPath, - Name: imp.name, - }, - // IdentName is unused on this path and is difficult to get. - FixType: imports.AddImport, - }) -} - -func (c *completer) formatBuiltin(ctx context.Context, cand candidate) (CompletionItem, error) { - obj := cand.obj - item := CompletionItem{ - Label: obj.Name(), - InsertText: obj.Name(), - Score: cand.score, - } - switch obj.(type) { - case *types.Const: - item.Kind = protocol.ConstantCompletion - case *types.Builtin: - item.Kind = protocol.FunctionCompletion - sig, err := source.NewBuiltinSignature(ctx, c.snapshot, obj.Name()) - if err != nil { - return CompletionItem{}, err - } - item.Detail = "func" + sig.Format() - item.snippet = &snippet.Builder{} - c.functionCallSnippet(obj.Name(), sig.TypeParams(), sig.Params(), item.snippet) - case *types.TypeName: - if types.IsInterface(obj.Type()) { - item.Kind = protocol.InterfaceCompletion - } else { - item.Kind = protocol.ClassCompletion - } - case *types.Nil: - item.Kind = protocol.VariableCompletion - } - return item, nil -} - -// decide if the type params (if any) should be part of the completion -// which only possible for types.Named and types.Signature -// (so far, only in receivers, e.g.; func (s *GENERIC[K, V])..., which is a types.Named) -func (c *completer) wantTypeParams() bool { - // Need to be lexically in a receiver, and a child of an IndexListExpr - // (but IndexListExpr only exists with go1.18) - start := c.path[0].Pos() - for i, nd := range c.path { - if fd, ok := nd.(*ast.FuncDecl); ok { - if i > 0 && fd.Recv != nil && start < fd.Recv.End() { - return true - } else { - return false - } - } - } - return false -} diff --git a/internal/lsp/source/completion/keywords.go b/internal/lsp/source/completion/keywords.go deleted file mode 100644 index bbf59b0221f..00000000000 --- a/internal/lsp/source/completion/keywords.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "go/ast" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -const ( - BREAK = "break" - CASE = "case" - CHAN = "chan" - CONST = "const" - CONTINUE = "continue" - DEFAULT = "default" - DEFER = "defer" - ELSE = "else" - FALLTHROUGH = "fallthrough" - FOR = "for" - FUNC = "func" - GO = "go" - GOTO = "goto" - IF = "if" - IMPORT = "import" - INTERFACE = "interface" - MAP = "map" - PACKAGE = "package" - RANGE = "range" - RETURN = "return" - SELECT = "select" - STRUCT = "struct" - SWITCH = "switch" - TYPE = "type" - VAR = "var" -) - -// addKeywordCompletions offers keyword candidates appropriate at the position. -func (c *completer) addKeywordCompletions() { - seen := make(map[string]bool) - - if c.wantTypeName() && c.inference.objType == nil { - // If we want a type name but don't have an expected obj type, - // include "interface", "struct", "func", "chan", and "map". - - // "interface" and "struct" are more common declaring named types. - // Give them a higher score if we are in a type declaration. - structIntf, funcChanMap := stdScore, highScore - if len(c.path) > 1 { - if _, namedDecl := c.path[1].(*ast.TypeSpec); namedDecl { - structIntf, funcChanMap = highScore, stdScore - } - } - - c.addKeywordItems(seen, structIntf, STRUCT, INTERFACE) - c.addKeywordItems(seen, funcChanMap, FUNC, CHAN, MAP) - } - - // If we are at the file scope, only offer decl keywords. We don't - // get *ast.Idents at the file scope because non-keyword identifiers - // turn into *ast.BadDecl, not *ast.Ident. - if len(c.path) == 1 || isASTFile(c.path[1]) { - c.addKeywordItems(seen, stdScore, TYPE, CONST, VAR, FUNC, IMPORT) - return - } else if _, ok := c.path[0].(*ast.Ident); !ok { - // Otherwise only offer keywords if the client is completing an identifier. - return - } - - if len(c.path) > 2 { - // Offer "range" if we are in ast.ForStmt.Init. This is what the - // AST looks like before "range" is typed, e.g. "for i := r<>". - if loop, ok := c.path[2].(*ast.ForStmt); ok && source.NodeContains(loop.Init, c.pos) { - c.addKeywordItems(seen, stdScore, RANGE) - } - } - - // Only suggest keywords if we are beginning a statement. - switch n := c.path[1].(type) { - case *ast.BlockStmt, *ast.ExprStmt: - // OK - our ident must be at beginning of statement. - case *ast.CommClause: - // Make sure we aren't in the Comm statement. - if !n.Colon.IsValid() || c.pos <= n.Colon { - return - } - case *ast.CaseClause: - // Make sure we aren't in the case List. - if !n.Colon.IsValid() || c.pos <= n.Colon { - return - } - default: - return - } - - // Filter out keywords depending on scope - // Skip the first one because we want to look at the enclosing scopes - path := c.path[1:] - for i, n := range path { - switch node := n.(type) { - case *ast.CaseClause: - // only recommend "fallthrough" and "break" within the bodies of a case clause - if c.pos > node.Colon { - c.addKeywordItems(seen, stdScore, BREAK) - // "fallthrough" is only valid in switch statements. - // A case clause is always nested within a block statement in a switch statement, - // that block statement is nested within either a TypeSwitchStmt or a SwitchStmt. - if i+2 >= len(path) { - continue - } - if _, ok := path[i+2].(*ast.SwitchStmt); ok { - c.addKeywordItems(seen, stdScore, FALLTHROUGH) - } - } - case *ast.CommClause: - if c.pos > node.Colon { - c.addKeywordItems(seen, stdScore, BREAK) - } - case *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.SwitchStmt: - c.addKeywordItems(seen, stdScore, CASE, DEFAULT) - case *ast.ForStmt, *ast.RangeStmt: - c.addKeywordItems(seen, stdScore, BREAK, CONTINUE) - // This is a bit weak, functions allow for many keywords - case *ast.FuncDecl: - if node.Body != nil && c.pos > node.Body.Lbrace { - c.addKeywordItems(seen, stdScore, DEFER, RETURN, FOR, GO, SWITCH, SELECT, IF, ELSE, VAR, CONST, GOTO, TYPE) - } - } - } -} - -// addKeywordItems dedupes and adds completion items for the specified -// keywords with the specified score. -func (c *completer) addKeywordItems(seen map[string]bool, score float64, kws ...string) { - for _, kw := range kws { - if seen[kw] { - continue - } - seen[kw] = true - - if matchScore := c.matcher.Score(kw); matchScore > 0 { - c.items = append(c.items, CompletionItem{ - Label: kw, - Kind: protocol.KeywordCompletion, - InsertText: kw, - Score: score * float64(matchScore), - }) - } - } -} diff --git a/internal/lsp/source/completion/literal.go b/internal/lsp/source/completion/literal.go deleted file mode 100644 index 139ec17dc05..00000000000 --- a/internal/lsp/source/completion/literal.go +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "context" - "fmt" - "go/types" - "strings" - "unicode" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/snippet" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/typeparams" -) - -// literal generates composite literal, function literal, and make() -// completion items. -func (c *completer) literal(ctx context.Context, literalType types.Type, imp *importInfo) { - if !c.opts.literal { - return - } - - expType := c.inference.objType - - if c.inference.matchesVariadic(literalType) { - // Don't offer literal slice candidates for variadic arguments. - // For example, don't offer "[]interface{}{}" in "fmt.Print(<>)". - return - } - - // Avoid literal candidates if the expected type is an empty - // interface. It isn't very useful to suggest a literal candidate of - // every possible type. - if expType != nil && isEmptyInterface(expType) { - return - } - - // We handle unnamed literal completions explicitly before searching - // for candidates. Avoid named-type literal completions for - // unnamed-type expected type since that results in duplicate - // candidates. For example, in - // - // type mySlice []int - // var []int = <> - // - // don't offer "mySlice{}" since we have already added a candidate - // of "[]int{}". - if _, named := literalType.(*types.Named); named && expType != nil { - if _, named := source.Deref(expType).(*types.Named); !named { - return - } - } - - // Check if an object of type literalType would match our expected type. - cand := candidate{ - obj: c.fakeObj(literalType), - } - - switch literalType.Underlying().(type) { - // These literal types are addressable (e.g. "&[]int{}"), others are - // not (e.g. can't do "&(func(){})"). - case *types.Struct, *types.Array, *types.Slice, *types.Map: - cand.addressable = true - } - - if !c.matchingCandidate(&cand) || cand.convertTo != nil { - return - } - - var ( - qf = c.qf - sel = enclosingSelector(c.path, c.pos) - ) - - // Don't qualify the type name if we are in a selector expression - // since the package name is already present. - if sel != nil { - qf = func(_ *types.Package) string { return "" } - } - - snip, typeName := c.typeNameSnippet(literalType, qf) - - // A type name of "[]int" doesn't work very will with the matcher - // since "[" isn't a valid identifier prefix. Here we strip off the - // slice (and array) prefix yielding just "int". - matchName := typeName - switch t := literalType.(type) { - case *types.Slice: - matchName = types.TypeString(t.Elem(), qf) - case *types.Array: - matchName = types.TypeString(t.Elem(), qf) - } - - addlEdits, err := c.importEdits(imp) - if err != nil { - event.Error(ctx, "error adding import for literal candidate", err) - return - } - - // If prefix matches the type name, client may want a composite literal. - if score := c.matcher.Score(matchName); score > 0 { - if cand.hasMod(reference) { - if sel != nil { - // If we are in a selector we must place the "&" before the selector. - // For example, "foo.B<>" must complete to "&foo.Bar{}", not - // "foo.&Bar{}". - edits, err := c.editText(sel.Pos(), sel.Pos(), "&") - if err != nil { - event.Error(ctx, "error making edit for literal pointer completion", err) - return - } - addlEdits = append(addlEdits, edits...) - } else { - // Otherwise we can stick the "&" directly before the type name. - typeName = "&" + typeName - snip.PrependText("&") - } - } - - switch t := literalType.Underlying().(type) { - case *types.Struct, *types.Array, *types.Slice, *types.Map: - c.compositeLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) - case *types.Signature: - // Add a literal completion for a signature type that implements - // an interface. For example, offer "http.HandlerFunc()" when - // expected type is "http.Handler". - if source.IsInterface(expType) { - c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) - } - case *types.Basic: - // Add a literal completion for basic types that implement our - // expected interface (e.g. named string type http.Dir - // implements http.FileSystem), or are identical to our expected - // type (i.e. yielding a type conversion such as "float64()"). - if source.IsInterface(expType) || types.Identical(expType, literalType) { - c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits) - } - } - } - - // If prefix matches "make", client may want a "make()" - // invocation. We also include the type name to allow for more - // flexible fuzzy matching. - if score := c.matcher.Score("make." + matchName); !cand.hasMod(reference) && score > 0 { - switch literalType.Underlying().(type) { - case *types.Slice: - // The second argument to "make()" for slices is required, so default to "0". - c.makeCall(snip.Clone(), typeName, "0", float64(score), addlEdits) - case *types.Map, *types.Chan: - // Maps and channels don't require the second argument, so omit - // to keep things simple for now. - c.makeCall(snip.Clone(), typeName, "", float64(score), addlEdits) - } - } - - // If prefix matches "func", client may want a function literal. - if score := c.matcher.Score("func"); !cand.hasMod(reference) && score > 0 && !source.IsInterface(expType) { - switch t := literalType.Underlying().(type) { - case *types.Signature: - c.functionLiteral(ctx, t, float64(score)) - } - } -} - -// literalCandidateScore is the base score for literal candidates. -// Literal candidates match the expected type so they should be high -// scoring, but we want them ranked below lexical objects of the -// correct type, so scale down highScore. -const literalCandidateScore = highScore / 2 - -// functionLiteral adds a function literal completion item for the -// given signature. -func (c *completer) functionLiteral(ctx context.Context, sig *types.Signature, matchScore float64) { - snip := &snippet.Builder{} - snip.WriteText("func(") - - // First we generate names for each param and keep a seen count so - // we know if we need to uniquify param names. For example, - // "func(int)" will become "func(i int)", but "func(int, int64)" - // will become "func(i1 int, i2 int64)". - var ( - paramNames = make([]string, sig.Params().Len()) - paramNameCount = make(map[string]int) - hasTypeParams bool - ) - for i := 0; i < sig.Params().Len(); i++ { - var ( - p = sig.Params().At(i) - name = p.Name() - ) - - if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { - hasTypeParams = true - } - - if name == "" { - // If the param has no name in the signature, guess a name based - // on the type. Use an empty qualifier to ignore the package. - // For example, we want to name "http.Request" "r", not "hr". - name = source.FormatVarType(ctx, c.snapshot, c.pkg, p, func(p *types.Package) string { - return "" - }) - name = abbreviateTypeName(name) - } - paramNames[i] = name - if name != "_" { - paramNameCount[name]++ - } - } - - for n, c := range paramNameCount { - // Any names we saw more than once will need a unique suffix added - // on. Reset the count to 1 to act as the suffix for the first - // name. - if c >= 2 { - paramNameCount[n] = 1 - } else { - delete(paramNameCount, n) - } - } - - for i := 0; i < sig.Params().Len(); i++ { - if hasTypeParams && !c.opts.placeholders { - // If there are type params in the args then the user must - // choose the concrete types. If placeholders are disabled just - // drop them between the parens and let them fill things in. - snip.WritePlaceholder(nil) - break - } - - if i > 0 { - snip.WriteText(", ") - } - - var ( - p = sig.Params().At(i) - name = paramNames[i] - ) - - // Uniquify names by adding on an incrementing numeric suffix. - if idx, found := paramNameCount[name]; found { - paramNameCount[name]++ - name = fmt.Sprintf("%s%d", name, idx) - } - - if name != p.Name() && c.opts.placeholders { - // If we didn't use the signature's param name verbatim then we - // may have chosen a poor name. Give the user a placeholder so - // they can easily fix the name. - snip.WritePlaceholder(func(b *snippet.Builder) { - b.WriteText(name) - }) - } else { - snip.WriteText(name) - } - - // If the following param's type is identical to this one, omit - // this param's type string. For example, emit "i, j int" instead - // of "i int, j int". - if i == sig.Params().Len()-1 || !types.Identical(p.Type(), sig.Params().At(i+1).Type()) { - snip.WriteText(" ") - typeStr := source.FormatVarType(ctx, c.snapshot, c.pkg, p, c.qf) - if sig.Variadic() && i == sig.Params().Len()-1 { - typeStr = strings.Replace(typeStr, "[]", "...", 1) - } - - if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { - snip.WritePlaceholder(func(snip *snippet.Builder) { - snip.WriteText(typeStr) - }) - } else { - snip.WriteText(typeStr) - } - } - } - snip.WriteText(")") - - results := sig.Results() - if results.Len() > 0 { - snip.WriteText(" ") - } - - resultsNeedParens := results.Len() > 1 || - results.Len() == 1 && results.At(0).Name() != "" - - var resultHasTypeParams bool - for i := 0; i < results.Len(); i++ { - if tp, _ := results.At(i).Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { - resultHasTypeParams = true - } - } - - if resultsNeedParens { - snip.WriteText("(") - } - for i := 0; i < results.Len(); i++ { - if resultHasTypeParams && !c.opts.placeholders { - // Leave an empty tabstop if placeholders are disabled and there - // are type args that need specificying. - snip.WritePlaceholder(nil) - break - } - - if i > 0 { - snip.WriteText(", ") - } - r := results.At(i) - if name := r.Name(); name != "" { - snip.WriteText(name + " ") - } - - text := source.FormatVarType(ctx, c.snapshot, c.pkg, r, c.qf) - if tp, _ := r.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) { - snip.WritePlaceholder(func(snip *snippet.Builder) { - snip.WriteText(text) - }) - } else { - snip.WriteText(text) - } - } - if resultsNeedParens { - snip.WriteText(")") - } - - snip.WriteText(" {") - snip.WriteFinalTabstop() - snip.WriteText("}") - - c.items = append(c.items, CompletionItem{ - Label: "func(...) {}", - Score: matchScore * literalCandidateScore, - Kind: protocol.VariableCompletion, - snippet: snip, - }) -} - -// conventionalAcronyms contains conventional acronyms for type names -// in lower case. For example, "ctx" for "context" and "err" for "error". -var conventionalAcronyms = map[string]string{ - "context": "ctx", - "error": "err", - "tx": "tx", - "responsewriter": "w", -} - -// abbreviateTypeName abbreviates type names into acronyms. For -// example, "fooBar" is abbreviated "fb". Care is taken to ignore -// non-identifier runes. For example, "[]int" becomes "i", and -// "struct { i int }" becomes "s". -func abbreviateTypeName(s string) string { - var ( - b strings.Builder - useNextUpper bool - ) - - // Trim off leading non-letters. We trim everything between "[" and - // "]" to handle array types like "[someConst]int". - var inBracket bool - s = strings.TrimFunc(s, func(r rune) bool { - if inBracket { - inBracket = r != ']' - return true - } - - if r == '[' { - inBracket = true - } - - return !unicode.IsLetter(r) - }) - - if acr, ok := conventionalAcronyms[strings.ToLower(s)]; ok { - return acr - } - - for i, r := range s { - // Stop if we encounter a non-identifier rune. - if !unicode.IsLetter(r) && !unicode.IsNumber(r) { - break - } - - if i == 0 { - b.WriteRune(unicode.ToLower(r)) - } - - if unicode.IsUpper(r) { - if useNextUpper { - b.WriteRune(unicode.ToLower(r)) - useNextUpper = false - } - } else { - useNextUpper = true - } - } - - return b.String() -} - -// compositeLiteral adds a composite literal completion item for the given typeName. -func (c *completer) compositeLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) { - snip.WriteText("{") - // Don't put the tab stop inside the composite literal curlies "{}" - // for structs that have no accessible fields. - if strct, ok := T.(*types.Struct); !ok || fieldsAccessible(strct, c.pkg.GetTypes()) { - snip.WriteFinalTabstop() - } - snip.WriteText("}") - - nonSnippet := typeName + "{}" - - c.items = append(c.items, CompletionItem{ - Label: nonSnippet, - InsertText: nonSnippet, - Score: matchScore * literalCandidateScore, - Kind: protocol.VariableCompletion, - AdditionalTextEdits: edits, - snippet: snip, - }) -} - -// basicLiteral adds a literal completion item for the given basic -// type name typeName. -func (c *completer) basicLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) { - // Never give type conversions like "untyped int()". - if isUntyped(T) { - return - } - - snip.WriteText("(") - snip.WriteFinalTabstop() - snip.WriteText(")") - - nonSnippet := typeName + "()" - - c.items = append(c.items, CompletionItem{ - Label: nonSnippet, - InsertText: nonSnippet, - Detail: T.String(), - Score: matchScore * literalCandidateScore, - Kind: protocol.VariableCompletion, - AdditionalTextEdits: edits, - snippet: snip, - }) -} - -// makeCall adds a completion item for a "make()" call given a specific type. -func (c *completer) makeCall(snip *snippet.Builder, typeName string, secondArg string, matchScore float64, edits []protocol.TextEdit) { - // Keep it simple and don't add any placeholders for optional "make()" arguments. - - snip.PrependText("make(") - if secondArg != "" { - snip.WriteText(", ") - snip.WritePlaceholder(func(b *snippet.Builder) { - if c.opts.placeholders { - b.WriteText(secondArg) - } - }) - } - snip.WriteText(")") - - var nonSnippet strings.Builder - nonSnippet.WriteString("make(" + typeName) - if secondArg != "" { - nonSnippet.WriteString(", ") - nonSnippet.WriteString(secondArg) - } - nonSnippet.WriteByte(')') - - c.items = append(c.items, CompletionItem{ - Label: nonSnippet.String(), - InsertText: nonSnippet.String(), - Score: matchScore * literalCandidateScore, - Kind: protocol.FunctionCompletion, - AdditionalTextEdits: edits, - snippet: snip, - }) -} - -// Create a snippet for a type name where type params become placeholders. -func (c *completer) typeNameSnippet(literalType types.Type, qf types.Qualifier) (*snippet.Builder, string) { - var ( - snip snippet.Builder - typeName string - named, _ = literalType.(*types.Named) - ) - - if named != nil && named.Obj() != nil && typeparams.ForNamed(named).Len() > 0 && !c.fullyInstantiated(named) { - // We are not "fully instantiated" meaning we have type params that must be specified. - if pkg := qf(named.Obj().Pkg()); pkg != "" { - typeName = pkg + "." - } - - // We do this to get "someType" instead of "someType[T]". - typeName += named.Obj().Name() - snip.WriteText(typeName + "[") - - if c.opts.placeholders { - for i := 0; i < typeparams.ForNamed(named).Len(); i++ { - if i > 0 { - snip.WriteText(", ") - } - snip.WritePlaceholder(func(snip *snippet.Builder) { - snip.WriteText(types.TypeString(typeparams.ForNamed(named).At(i), qf)) - }) - } - } else { - snip.WritePlaceholder(nil) - } - snip.WriteText("]") - typeName += "[...]" - } else { - // We don't have unspecified type params so use default type formatting. - typeName = types.TypeString(literalType, qf) - snip.WriteText(typeName) - } - - return &snip, typeName -} - -// fullyInstantiated reports whether all of t's type params have -// specified type args. -func (c *completer) fullyInstantiated(t *types.Named) bool { - tps := typeparams.ForNamed(t) - tas := typeparams.NamedTypeArgs(t) - - if tps.Len() != tas.Len() { - return false - } - - for i := 0; i < tas.Len(); i++ { - switch ta := tas.At(i).(type) { - case *typeparams.TypeParam: - // A *TypeParam only counts as specified if it is currently in - // scope (i.e. we are in a generic definition). - if !c.typeParamInScope(ta) { - return false - } - case *types.Named: - if !c.fullyInstantiated(ta) { - return false - } - } - } - return true -} - -// typeParamInScope returns whether tp's object is in scope at c.pos. -// This tells you whether you are in a generic definition and can -// assume tp has been specified. -func (c *completer) typeParamInScope(tp *typeparams.TypeParam) bool { - obj := tp.Obj() - if obj == nil { - return false - } - - scope := c.innermostScope() - if scope == nil { - return false - } - - _, foundObj := scope.LookupParent(obj.Name(), c.pos) - return obj == foundObj -} diff --git a/internal/lsp/source/completion/package.go b/internal/lsp/source/completion/package.go deleted file mode 100644 index 21244efb5ec..00000000000 --- a/internal/lsp/source/completion/package.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "bytes" - "context" - "errors" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/fuzzy" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -// packageClauseCompletions offers completions for a package declaration when -// one is not present in the given file. -func packageClauseCompletions(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) ([]CompletionItem, *Selection, error) { - // We know that the AST for this file will be empty due to the missing - // package declaration, but parse it anyway to get a mapper. - pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull) - if err != nil { - return nil, nil, err - } - - pos, err := pgf.Mapper.Pos(position) - if err != nil { - return nil, nil, err - } - - surrounding, err := packageCompletionSurrounding(snapshot.FileSet(), pgf, pos) - if err != nil { - return nil, nil, fmt.Errorf("invalid position for package completion: %w", err) - } - - packageSuggestions, err := packageSuggestions(ctx, snapshot, fh.URI(), "") - if err != nil { - return nil, nil, err - } - - var items []CompletionItem - for _, pkg := range packageSuggestions { - insertText := fmt.Sprintf("package %s", pkg.name) - items = append(items, CompletionItem{ - Label: insertText, - Kind: protocol.ModuleCompletion, - InsertText: insertText, - Score: pkg.score, - }) - } - - return items, surrounding, nil -} - -// packageCompletionSurrounding returns surrounding for package completion if a -// package completions can be suggested at a given position. A valid location -// for package completion is above any declarations or import statements. -func packageCompletionSurrounding(fset *token.FileSet, pgf *source.ParsedGoFile, pos token.Pos) (*Selection, error) { - // If the file lacks a package declaration, the parser will return an empty - // AST. As a work-around, try to parse an expression from the file contents. - filename := pgf.URI.Filename() - expr, _ := parser.ParseExprFrom(fset, filename, pgf.Src, parser.Mode(0)) - if expr == nil { - return nil, fmt.Errorf("unparseable file (%s)", pgf.URI) - } - tok := fset.File(expr.Pos()) - offset, err := safetoken.Offset(pgf.Tok, pos) - if err != nil { - return nil, err - } - if offset > tok.Size() { - // internal bug: we should never get an offset that exceeds the size of our - // file. - bug.Report("out of bounds cursor", bug.Data{ - "offset": offset, - "URI": pgf.URI, - "size": tok.Size(), - }) - return nil, fmt.Errorf("cursor out of bounds") - } - cursor := tok.Pos(offset) - - // If we were able to parse out an identifier as the first expression from - // the file, it may be the beginning of a package declaration ("pack "). - // We can offer package completions if the cursor is in the identifier. - if name, ok := expr.(*ast.Ident); ok { - if cursor >= name.Pos() && cursor <= name.End() { - if !strings.HasPrefix(PACKAGE, name.Name) { - return nil, fmt.Errorf("cursor in non-matching ident") - } - return &Selection{ - content: name.Name, - cursor: cursor, - rng: span.NewRange(fset, name.Pos(), name.End()), - }, nil - } - } - - // The file is invalid, but it contains an expression that we were able to - // parse. We will use this expression to construct the cursor's - // "surrounding". - - // First, consider the possibility that we have a valid "package" keyword - // with an empty package name ("package "). "package" is parsed as an - // *ast.BadDecl since it is a keyword. This logic would allow "package" to - // appear on any line of the file as long as it's the first code expression - // in the file. - lines := strings.Split(string(pgf.Src), "\n") - cursorLine := tok.Line(cursor) - if cursorLine <= 0 || cursorLine > len(lines) { - return nil, fmt.Errorf("invalid line number") - } - if fset.Position(expr.Pos()).Line == cursorLine { - words := strings.Fields(lines[cursorLine-1]) - if len(words) > 0 && words[0] == PACKAGE { - content := PACKAGE - // Account for spaces if there are any. - if len(words) > 1 { - content += " " - } - - start := expr.Pos() - end := token.Pos(int(expr.Pos()) + len(content) + 1) - // We have verified that we have a valid 'package' keyword as our - // first expression. Ensure that cursor is in this keyword or - // otherwise fallback to the general case. - if cursor >= start && cursor <= end { - return &Selection{ - content: content, - cursor: cursor, - rng: span.NewRange(fset, start, end), - }, nil - } - } - } - - // If the cursor is after the start of the expression, no package - // declaration will be valid. - if cursor > expr.Pos() { - return nil, fmt.Errorf("cursor after expression") - } - - // If the cursor is in a comment, don't offer any completions. - if cursorInComment(fset, cursor, pgf.Src) { - return nil, fmt.Errorf("cursor in comment") - } - - // The surrounding range in this case is the cursor except for empty file, - // in which case it's end of file - 1 - start, end := cursor, cursor - if tok.Size() == 0 { - start, end = tok.Pos(0)-1, tok.Pos(0)-1 - } - - return &Selection{ - content: "", - cursor: cursor, - rng: span.NewRange(fset, start, end), - }, nil -} - -func cursorInComment(fset *token.FileSet, cursor token.Pos, src []byte) bool { - var s scanner.Scanner - s.Init(fset.File(cursor), src, func(_ token.Position, _ string) {}, scanner.ScanComments) - for { - pos, tok, lit := s.Scan() - if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) { - return tok == token.COMMENT - } - if tok == token.EOF { - break - } - } - return false -} - -// packageNameCompletions returns name completions for a package clause using -// the current name as prefix. -func (c *completer) packageNameCompletions(ctx context.Context, fileURI span.URI, name *ast.Ident) error { - cursor := int(c.pos - name.NamePos) - if cursor < 0 || cursor > len(name.Name) { - return errors.New("cursor is not in package name identifier") - } - - c.completionContext.packageCompletion = true - - prefix := name.Name[:cursor] - packageSuggestions, err := packageSuggestions(ctx, c.snapshot, fileURI, prefix) - if err != nil { - return err - } - - for _, pkg := range packageSuggestions { - c.deepState.enqueue(pkg) - } - return nil -} - -// packageSuggestions returns a list of packages from workspace packages that -// have the given prefix and are used in the same directory as the given -// file. This also includes test packages for these packages (_test) and -// the directory name itself. -func packageSuggestions(ctx context.Context, snapshot source.Snapshot, fileURI span.URI, prefix string) (packages []candidate, err error) { - workspacePackages, err := snapshot.ActivePackages(ctx) - if err != nil { - return nil, err - } - - toCandidate := func(name string, score float64) candidate { - obj := types.NewPkgName(0, nil, name, types.NewPackage("", name)) - return candidate{obj: obj, name: name, detail: name, score: score} - } - - matcher := fuzzy.NewMatcher(prefix) - - // Always try to suggest a main package - defer func() { - if score := float64(matcher.Score("main")); score > 0 { - packages = append(packages, toCandidate("main", score*lowScore)) - } - }() - - dirPath := filepath.Dir(fileURI.Filename()) - dirName := filepath.Base(dirPath) - if !isValidDirName(dirName) { - return packages, nil - } - pkgName := convertDirNameToPkgName(dirName) - - seenPkgs := make(map[string]struct{}) - - // The `go` command by default only allows one package per directory but we - // support multiple package suggestions since gopls is build system agnostic. - for _, pkg := range workspacePackages { - if pkg.Name() == "main" || pkg.Name() == "" { - continue - } - if _, ok := seenPkgs[pkg.Name()]; ok { - continue - } - - // Only add packages that are previously used in the current directory. - var relevantPkg bool - for _, pgf := range pkg.CompiledGoFiles() { - if filepath.Dir(pgf.URI.Filename()) == dirPath { - relevantPkg = true - break - } - } - if !relevantPkg { - continue - } - - // Add a found package used in current directory as a high relevance - // suggestion and the test package for it as a medium relevance - // suggestion. - if score := float64(matcher.Score(pkg.Name())); score > 0 { - packages = append(packages, toCandidate(pkg.Name(), score*highScore)) - } - seenPkgs[pkg.Name()] = struct{}{} - - testPkgName := pkg.Name() + "_test" - if _, ok := seenPkgs[testPkgName]; ok || strings.HasSuffix(pkg.Name(), "_test") { - continue - } - if score := float64(matcher.Score(testPkgName)); score > 0 { - packages = append(packages, toCandidate(testPkgName, score*stdScore)) - } - seenPkgs[testPkgName] = struct{}{} - } - - // Add current directory name as a low relevance suggestion. - if _, ok := seenPkgs[pkgName]; !ok { - if score := float64(matcher.Score(pkgName)); score > 0 { - packages = append(packages, toCandidate(pkgName, score*lowScore)) - } - - testPkgName := pkgName + "_test" - if score := float64(matcher.Score(testPkgName)); score > 0 { - packages = append(packages, toCandidate(testPkgName, score*lowScore)) - } - } - - return packages, nil -} - -// isValidDirName checks whether the passed directory name can be used in -// a package path. Requirements for a package path can be found here: -// https://golang.org/ref/mod#go-mod-file-ident. -func isValidDirName(dirName string) bool { - if dirName == "" { - return false - } - - for i, ch := range dirName { - if isLetter(ch) || isDigit(ch) { - continue - } - if i == 0 { - // Directory name can start only with '_'. '.' is not allowed in module paths. - // '-' and '~' are not allowed because elements of package paths must be - // safe command-line arguments. - if ch == '_' { - continue - } - } else { - // Modules path elements can't end with '.' - if isAllowedPunctuation(ch) && (i != len(dirName)-1 || ch != '.') { - continue - } - } - - return false - } - return true -} - -// convertDirNameToPkgName converts a valid directory name to a valid package name. -// It leaves only letters and digits. All letters are mapped to lower case. -func convertDirNameToPkgName(dirName string) string { - var buf bytes.Buffer - for _, ch := range dirName { - switch { - case isLetter(ch): - buf.WriteRune(unicode.ToLower(ch)) - - case buf.Len() != 0 && isDigit(ch): - buf.WriteRune(ch) - } - } - return buf.String() -} - -// isLetter and isDigit allow only ASCII characters because -// "Each path element is a non-empty string made of up ASCII letters, -// ASCII digits, and limited ASCII punctuation" -// (see https://golang.org/ref/mod#go-mod-file-ident). - -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' -} - -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -func isAllowedPunctuation(ch rune) bool { - return ch == '_' || ch == '-' || ch == '~' || ch == '.' -} diff --git a/internal/lsp/source/completion/util.go b/internal/lsp/source/completion/util.go deleted file mode 100644 index cd7849af262..00000000000 --- a/internal/lsp/source/completion/util.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package completion - -import ( - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/typeparams" -) - -// exprAtPos returns the index of the expression containing pos. -func exprAtPos(pos token.Pos, args []ast.Expr) int { - for i, expr := range args { - if expr.Pos() <= pos && pos <= expr.End() { - return i - } - } - return len(args) -} - -// eachField invokes fn for each field that can be selected from a -// value of type T. -func eachField(T types.Type, fn func(*types.Var)) { - // TODO(adonovan): this algorithm doesn't exclude ambiguous - // selections that match more than one field/method. - // types.NewSelectionSet should do that for us. - - // for termination on recursive types - var seen typeutil.Map - - var visit func(T types.Type) - visit = func(T types.Type) { - if T, ok := source.Deref(T).Underlying().(*types.Struct); ok { - if seen.At(T) != nil { - return - } - - for i := 0; i < T.NumFields(); i++ { - f := T.Field(i) - fn(f) - if f.Anonymous() { - seen.Set(T, true) - visit(f.Type()) - } - } - } - } - visit(T) -} - -// typeIsValid reports whether typ doesn't contain any Invalid types. -func typeIsValid(typ types.Type) bool { - // Check named types separately, because we don't want - // to call Underlying() on them to avoid problems with recursive types. - if _, ok := typ.(*types.Named); ok { - return true - } - - switch typ := typ.Underlying().(type) { - case *types.Basic: - return typ.Kind() != types.Invalid - case *types.Array: - return typeIsValid(typ.Elem()) - case *types.Slice: - return typeIsValid(typ.Elem()) - case *types.Pointer: - return typeIsValid(typ.Elem()) - case *types.Map: - return typeIsValid(typ.Key()) && typeIsValid(typ.Elem()) - case *types.Chan: - return typeIsValid(typ.Elem()) - case *types.Signature: - return typeIsValid(typ.Params()) && typeIsValid(typ.Results()) - case *types.Tuple: - for i := 0; i < typ.Len(); i++ { - if !typeIsValid(typ.At(i).Type()) { - return false - } - } - return true - case *types.Struct, *types.Interface: - // Don't bother checking structs, interfaces for validity. - return true - default: - return false - } -} - -// resolveInvalid traverses the node of the AST that defines the scope -// containing the declaration of obj, and attempts to find a user-friendly -// name for its invalid type. The resulting Object and its Type are fake. -func resolveInvalid(fset *token.FileSet, obj types.Object, node ast.Node, info *types.Info) types.Object { - var resultExpr ast.Expr - ast.Inspect(node, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.ValueSpec: - for _, name := range n.Names { - if info.Defs[name] == obj { - resultExpr = n.Type - } - } - return false - case *ast.Field: // This case handles parameters and results of a FuncDecl or FuncLit. - for _, name := range n.Names { - if info.Defs[name] == obj { - resultExpr = n.Type - } - } - return false - default: - return true - } - }) - // Construct a fake type for the object and return a fake object with this type. - typename := source.FormatNode(fset, resultExpr) - typ := types.NewNamed(types.NewTypeName(token.NoPos, obj.Pkg(), typename, nil), types.Typ[types.Invalid], nil) - return types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), typ) -} - -func isPointer(T types.Type) bool { - _, ok := T.(*types.Pointer) - return ok -} - -func isVar(obj types.Object) bool { - _, ok := obj.(*types.Var) - return ok -} - -func isTypeName(obj types.Object) bool { - _, ok := obj.(*types.TypeName) - return ok -} - -func isFunc(obj types.Object) bool { - _, ok := obj.(*types.Func) - return ok -} - -func isEmptyInterface(T types.Type) bool { - intf, _ := T.(*types.Interface) - return intf != nil && intf.NumMethods() == 0 && typeparams.IsMethodSet(intf) -} - -func isUntyped(T types.Type) bool { - if basic, ok := T.(*types.Basic); ok { - return basic.Info()&types.IsUntyped > 0 - } - return false -} - -func isPkgName(obj types.Object) bool { - _, ok := obj.(*types.PkgName) - return ok -} - -func isASTFile(n ast.Node) bool { - _, ok := n.(*ast.File) - return ok -} - -func deslice(T types.Type) types.Type { - if slice, ok := T.Underlying().(*types.Slice); ok { - return slice.Elem() - } - return nil -} - -// isSelector returns the enclosing *ast.SelectorExpr when pos is in the -// selector. -func enclosingSelector(path []ast.Node, pos token.Pos) *ast.SelectorExpr { - if len(path) == 0 { - return nil - } - - if sel, ok := path[0].(*ast.SelectorExpr); ok { - return sel - } - - if _, ok := path[0].(*ast.Ident); ok && len(path) > 1 { - if sel, ok := path[1].(*ast.SelectorExpr); ok && pos >= sel.Sel.Pos() { - return sel - } - } - - return nil -} - -// enclosingDeclLHS returns LHS idents from containing value spec or -// assign statement. -func enclosingDeclLHS(path []ast.Node) []*ast.Ident { - for _, n := range path { - switch n := n.(type) { - case *ast.ValueSpec: - return n.Names - case *ast.AssignStmt: - ids := make([]*ast.Ident, 0, len(n.Lhs)) - for _, e := range n.Lhs { - if id, ok := e.(*ast.Ident); ok { - ids = append(ids, id) - } - } - return ids - } - } - - return nil -} - -// exprObj returns the types.Object associated with the *ast.Ident or -// *ast.SelectorExpr e. -func exprObj(info *types.Info, e ast.Expr) types.Object { - var ident *ast.Ident - switch expr := e.(type) { - case *ast.Ident: - ident = expr - case *ast.SelectorExpr: - ident = expr.Sel - default: - return nil - } - - return info.ObjectOf(ident) -} - -// typeConversion returns the type being converted to if call is a type -// conversion expression. -func typeConversion(call *ast.CallExpr, info *types.Info) types.Type { - // Type conversion (e.g. "float64(foo)"). - if fun, _ := exprObj(info, call.Fun).(*types.TypeName); fun != nil { - return fun.Type() - } - - return nil -} - -// fieldsAccessible returns whether s has at least one field accessible by p. -func fieldsAccessible(s *types.Struct, p *types.Package) bool { - for i := 0; i < s.NumFields(); i++ { - f := s.Field(i) - if f.Exported() || f.Pkg() == p { - return true - } - } - return false -} - -// prevStmt returns the statement that precedes the statement containing pos. -// For example: -// -// foo := 1 -// bar(1 + 2<>) -// -// If "<>" is pos, prevStmt returns "foo := 1" -func prevStmt(pos token.Pos, path []ast.Node) ast.Stmt { - var blockLines []ast.Stmt - for i := 0; i < len(path) && blockLines == nil; i++ { - switch n := path[i].(type) { - case *ast.BlockStmt: - blockLines = n.List - case *ast.CommClause: - blockLines = n.Body - case *ast.CaseClause: - blockLines = n.Body - } - } - - for i := len(blockLines) - 1; i >= 0; i-- { - if blockLines[i].End() < pos { - return blockLines[i] - } - } - - return nil -} - -// formatZeroValue produces Go code representing the zero value of T. It -// returns the empty string if T is invalid. -func formatZeroValue(T types.Type, qf types.Qualifier) string { - switch u := T.Underlying().(type) { - case *types.Basic: - switch { - case u.Info()&types.IsNumeric > 0: - return "0" - case u.Info()&types.IsString > 0: - return `""` - case u.Info()&types.IsBoolean > 0: - return "false" - default: - return "" - } - case *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Slice, *types.Signature: - return "nil" - default: - return types.TypeString(T, qf) + "{}" - } -} - -// isBasicKind returns whether t is a basic type of kind k. -func isBasicKind(t types.Type, k types.BasicInfo) bool { - b, _ := t.Underlying().(*types.Basic) - return b != nil && b.Info()&k > 0 -} - -func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) { - rng := source.NewMappedRange(c.snapshot.FileSet(), c.mapper, from, to) - spn, err := rng.Span() - if err != nil { - return nil, err - } - return source.ToProtocolEdits(c.mapper, []diff.TextEdit{{ - Span: spn, - NewText: newText, - }}) -} diff --git a/internal/lsp/source/diagnostics.go b/internal/lsp/source/diagnostics.go deleted file mode 100644 index e393c2f9426..00000000000 --- a/internal/lsp/source/diagnostics.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -type SuggestedFix struct { - Title string - Edits map[span.URI][]protocol.TextEdit - Command *protocol.Command - ActionKind protocol.CodeActionKind -} - -type RelatedInformation struct { - URI span.URI - Range protocol.Range - Message string -} - -func Analyze(ctx context.Context, snapshot Snapshot, pkg Package, includeConvenience bool) (map[span.URI][]*Diagnostic, error) { - // Exit early if the context has been canceled. This also protects us - // from a race on Options, see golang/go#36699. - if ctx.Err() != nil { - return nil, ctx.Err() - } - - categories := []map[string]*Analyzer{} - if includeConvenience { - categories = append(categories, snapshot.View().Options().ConvenienceAnalyzers) - } - // If we had type errors, don't run any other analyzers. - if !pkg.HasTypeErrors() { - categories = append(categories, snapshot.View().Options().DefaultAnalyzers, snapshot.View().Options().StaticcheckAnalyzers) - } - var analyzers []*Analyzer - for _, cat := range categories { - for _, a := range cat { - analyzers = append(analyzers, a) - } - } - - analysisDiagnostics, err := snapshot.Analyze(ctx, pkg.ID(), analyzers) - if err != nil { - return nil, err - } - - reports := map[span.URI][]*Diagnostic{} - // Report diagnostics and errors from root analyzers. - for _, diag := range analysisDiagnostics { - reports[diag.URI] = append(reports[diag.URI], diag) - } - return reports, nil -} - -func FileDiagnostics(ctx context.Context, snapshot Snapshot, uri span.URI) (VersionedFileIdentity, []*Diagnostic, error) { - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - return VersionedFileIdentity{}, nil, err - } - pkg, _, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return VersionedFileIdentity{}, nil, err - } - diagnostics, err := snapshot.DiagnosePackage(ctx, pkg) - if err != nil { - return VersionedFileIdentity{}, nil, err - } - fileDiags := diagnostics[fh.URI()] - if !pkg.HasListOrParseErrors() { - analysisDiags, err := Analyze(ctx, snapshot, pkg, false) - if err != nil { - return VersionedFileIdentity{}, nil, err - } - fileDiags = append(fileDiags, analysisDiags[fh.URI()]...) - } - return fh.VersionedFileIdentity(), fileDiags, nil -} diff --git a/internal/lsp/source/fix.go b/internal/lsp/source/fix.go deleted file mode 100644 index 6a7f77dab36..00000000000 --- a/internal/lsp/source/fix.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/lsp/analysis/fillstruct" - "golang.org/x/tools/internal/lsp/analysis/undeclaredname" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -type ( - // SuggestedFixFunc is a function used to get the suggested fixes for a given - // gopls command, some of which are provided by go/analysis.Analyzers. Some of - // the analyzers in internal/lsp/analysis are not efficient enough to include - // suggested fixes with their diagnostics, so we have to compute them - // separately. Such analyzers should provide a function with a signature of - // SuggestedFixFunc. - SuggestedFixFunc func(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) (*analysis.SuggestedFix, error) - singleFileFixFunc func(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) -) - -const ( - FillStruct = "fill_struct" - StubMethods = "stub_methods" - UndeclaredName = "undeclared_name" - ExtractVariable = "extract_variable" - ExtractFunction = "extract_function" - ExtractMethod = "extract_method" -) - -// suggestedFixes maps a suggested fix command id to its handler. -var suggestedFixes = map[string]SuggestedFixFunc{ - FillStruct: singleFile(fillstruct.SuggestedFix), - UndeclaredName: singleFile(undeclaredname.SuggestedFix), - ExtractVariable: singleFile(extractVariable), - ExtractFunction: singleFile(extractFunction), - ExtractMethod: singleFile(extractMethod), - StubMethods: stubSuggestedFixFunc, -} - -// singleFile calls analyzers that expect inputs for a single file -func singleFile(sf singleFileFixFunc) SuggestedFixFunc { - return func(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) (*analysis.SuggestedFix, error) { - fset, rng, src, file, pkg, info, err := getAllSuggestedFixInputs(ctx, snapshot, fh, pRng) - if err != nil { - return nil, err - } - return sf(fset, rng, src, file, pkg, info) - } -} - -func SuggestedFixFromCommand(cmd protocol.Command, kind protocol.CodeActionKind) SuggestedFix { - return SuggestedFix{ - Title: cmd.Title, - Command: &cmd, - ActionKind: kind, - } -} - -// ApplyFix applies the command's suggested fix to the given file and -// range, returning the resulting edits. -func ApplyFix(ctx context.Context, fix string, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) ([]protocol.TextDocumentEdit, error) { - handler, ok := suggestedFixes[fix] - if !ok { - return nil, fmt.Errorf("no suggested fix function for %s", fix) - } - suggestion, err := handler(ctx, snapshot, fh, pRng) - if err != nil { - return nil, err - } - if suggestion == nil { - return nil, nil - } - fset := snapshot.FileSet() - editsPerFile := map[span.URI]*protocol.TextDocumentEdit{} - for _, edit := range suggestion.TextEdits { - spn, err := span.NewRange(fset, edit.Pos, edit.End).Span() - if err != nil { - return nil, err - } - fh, err := snapshot.GetVersionedFile(ctx, spn.URI()) - if err != nil { - return nil, err - } - te, ok := editsPerFile[spn.URI()] - if !ok { - te = &protocol.TextDocumentEdit{ - TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{ - Version: fh.Version(), - TextDocumentIdentifier: protocol.TextDocumentIdentifier{ - URI: protocol.URIFromSpanURI(fh.URI()), - }, - }, - } - editsPerFile[spn.URI()] = te - } - _, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, err - } - rng, err := pgf.Mapper.Range(spn) - if err != nil { - return nil, err - } - te.Edits = append(te.Edits, protocol.TextEdit{ - Range: rng, - NewText: string(edit.NewText), - }) - } - var edits []protocol.TextDocumentEdit - for _, edit := range editsPerFile { - edits = append(edits, *edit) - } - return edits, nil -} - -// getAllSuggestedFixInputs is a helper function to collect all possible needed -// inputs for an AppliesFunc or SuggestedFixFunc. -func getAllSuggestedFixInputs(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, span.Range, []byte, *ast.File, *types.Package, *types.Info, error) { - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, span.Range{}, nil, nil, nil, nil, fmt.Errorf("getting file for Identifier: %w", err) - } - rng, err := pgf.Mapper.RangeToSpanRange(pRng) - if err != nil { - return nil, span.Range{}, nil, nil, nil, nil, err - } - return snapshot.FileSet(), rng, pgf.Src, pgf.File, pkg.GetTypes(), pkg.GetTypesInfo(), nil -} diff --git a/internal/lsp/source/folding_range.go b/internal/lsp/source/folding_range.go deleted file mode 100644 index 576308f9967..00000000000 --- a/internal/lsp/source/folding_range.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "go/ast" - "go/token" - "sort" - "strings" - - "golang.org/x/tools/internal/lsp/protocol" -) - -// FoldingRangeInfo holds range and kind info of folding for an ast.Node -type FoldingRangeInfo struct { - MappedRange - Kind protocol.FoldingRangeKind -} - -// FoldingRange gets all of the folding range for f. -func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFoldingOnly bool) (ranges []*FoldingRangeInfo, err error) { - // TODO(suzmue): consider limiting the number of folding ranges returned, and - // implement a way to prioritize folding ranges in that case. - pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) - if err != nil { - return nil, err - } - - // With parse errors, we wouldn't be able to produce accurate folding info. - // LSP protocol (3.16) currently does not have a way to handle this case - // (https://github.com/microsoft/language-server-protocol/issues/1200). - // We cannot return an error either because we are afraid some editors - // may not handle errors nicely. As a workaround, we now return an empty - // result and let the client handle this case by double check the file - // contents (i.e. if the file is not empty and the folding range result - // is empty, raise an internal error). - if pgf.ParseErr != nil { - return nil, nil - } - - fset := snapshot.FileSet() - - // Get folding ranges for comments separately as they are not walked by ast.Inspect. - ranges = append(ranges, commentsFoldingRange(fset, pgf.Mapper, pgf.File)...) - - visit := func(n ast.Node) bool { - rng := foldingRangeFunc(fset, pgf.Mapper, n, lineFoldingOnly) - if rng != nil { - ranges = append(ranges, rng) - } - return true - } - // Walk the ast and collect folding ranges. - ast.Inspect(pgf.File, visit) - - sort.Slice(ranges, func(i, j int) bool { - irng, _ := ranges[i].Range() - jrng, _ := ranges[j].Range() - return protocol.CompareRange(irng, jrng) < 0 - }) - - return ranges, nil -} - -// foldingRangeFunc calculates the line folding range for ast.Node n -func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo { - // TODO(suzmue): include trailing empty lines before the closing - // parenthesis/brace. - var kind protocol.FoldingRangeKind - var start, end token.Pos - switch n := n.(type) { - case *ast.BlockStmt: - // Fold between positions of or lines between "{" and "}". - var startList, endList token.Pos - if num := len(n.List); num != 0 { - startList, endList = n.List[0].Pos(), n.List[num-1].End() - } - start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly) - case *ast.CaseClause: - // Fold from position of ":" to end. - start, end = n.Colon+1, n.End() - case *ast.CommClause: - // Fold from position of ":" to end. - start, end = n.Colon+1, n.End() - case *ast.CallExpr: - // Fold from position of "(" to position of ")". - start, end = n.Lparen+1, n.Rparen - case *ast.FieldList: - // Fold between positions of or lines between opening parenthesis/brace and closing parenthesis/brace. - var startList, endList token.Pos - if num := len(n.List); num != 0 { - startList, endList = n.List[0].Pos(), n.List[num-1].End() - } - start, end = validLineFoldingRange(fset, n.Opening, n.Closing, startList, endList, lineFoldingOnly) - case *ast.GenDecl: - // If this is an import declaration, set the kind to be protocol.Imports. - if n.Tok == token.IMPORT { - kind = protocol.Imports - } - // Fold between positions of or lines between "(" and ")". - var startSpecs, endSpecs token.Pos - if num := len(n.Specs); num != 0 { - startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End() - } - start, end = validLineFoldingRange(fset, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly) - case *ast.BasicLit: - // Fold raw string literals from position of "`" to position of "`". - if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' { - start, end = n.Pos(), n.End() - } - case *ast.CompositeLit: - // Fold between positions of or lines between "{" and "}". - var startElts, endElts token.Pos - if num := len(n.Elts); num != 0 { - startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End() - } - start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly) - } - - // Check that folding positions are valid. - if !start.IsValid() || !end.IsValid() { - return nil - } - // in line folding mode, do not fold if the start and end lines are the same. - if lineFoldingOnly && fset.Position(start).Line == fset.Position(end).Line { - return nil - } - return &FoldingRangeInfo{ - MappedRange: NewMappedRange(fset, m, start, end), - Kind: kind, - } -} - -// validLineFoldingRange returns start and end token.Pos for folding range if the range is valid. -// returns token.NoPos otherwise, which fails token.IsValid check -func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) { - if lineFoldingOnly { - if !open.IsValid() || !close.IsValid() { - return token.NoPos, token.NoPos - } - - // Don't want to fold if the start/end is on the same line as the open/close - // as an example, the example below should *not* fold: - // var x = [2]string{"d", - // "e" } - if fset.Position(open).Line == fset.Position(start).Line || - fset.Position(close).Line == fset.Position(end).Line { - return token.NoPos, token.NoPos - } - - return open + 1, end - } - return open + 1, close -} - -// commentsFoldingRange returns the folding ranges for all comment blocks in file. -// The folding range starts at the end of the first line of the comment block, and ends at the end of the -// comment block and has kind protocol.Comment. -func commentsFoldingRange(fset *token.FileSet, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) { - for _, commentGrp := range file.Comments { - startGrp, endGrp := fset.Position(commentGrp.Pos()), fset.Position(commentGrp.End()) - if startGrp.Line == endGrp.Line { - // Don't fold single line comments. - continue - } - - firstComment := commentGrp.List[0] - startPos, endLinePos := firstComment.Pos(), firstComment.End() - startCmmnt, endCmmnt := fset.Position(startPos), fset.Position(endLinePos) - if startCmmnt.Line != endCmmnt.Line { - // If the first comment spans multiple lines, then we want to have the - // folding range start at the end of the first line. - endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0])) - } - comments = append(comments, &FoldingRangeInfo{ - // Fold from the end of the first line comment to the end of the comment block. - MappedRange: NewMappedRange(fset, m, endLinePos, commentGrp.End()), - Kind: protocol.Comment, - }) - } - return comments -} diff --git a/internal/lsp/source/format.go b/internal/lsp/source/format.go deleted file mode 100644 index 1dd914ec3ff..00000000000 --- a/internal/lsp/source/format.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package source provides core features for use by Go editors and tools. -package source - -import ( - "bytes" - "context" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "strings" - "text/scanner" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/lsppos" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" -) - -// Format formats a file with a given range. -func Format(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.TextEdit, error) { - ctx, done := event.Start(ctx, "source.Format") - defer done() - - // Generated files shouldn't be edited. So, don't format them - if IsGenerated(ctx, snapshot, fh.URI()) { - return nil, fmt.Errorf("can't format %q: file is generated", fh.URI().Filename()) - } - - pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) - if err != nil { - return nil, err - } - // Even if this file has parse errors, it might still be possible to format it. - // Using format.Node on an AST with errors may result in code being modified. - // Attempt to format the source of this file instead. - if pgf.ParseErr != nil { - formatted, err := formatSource(ctx, fh) - if err != nil { - return nil, err - } - return computeTextEdits(ctx, snapshot, pgf, string(formatted)) - } - - fset := snapshot.FileSet() - - // format.Node changes slightly from one release to another, so the version - // of Go used to build the LSP server will determine how it formats code. - // This should be acceptable for all users, who likely be prompted to rebuild - // the LSP server on each Go release. - buf := &bytes.Buffer{} - if err := format.Node(buf, fset, pgf.File); err != nil { - return nil, err - } - formatted := buf.String() - - // Apply additional formatting, if any is supported. Currently, the only - // supported additional formatter is gofumpt. - if format := snapshot.View().Options().GofumptFormat; snapshot.View().Options().Gofumpt && format != nil { - // gofumpt can customize formatting based on language version and module - // path, if available. - // - // Try to derive this information, but fall-back on the default behavior. - // - // TODO: under which circumstances can we fail to find module information? - // Can this, for example, result in inconsistent formatting across saves, - // due to pending calls to packages.Load? - var langVersion, modulePath string - mds, err := snapshot.MetadataForFile(ctx, fh.URI()) - if err == nil && len(mds) > 0 { - if mi := mds[0].ModuleInfo(); mi != nil { - langVersion = mi.GoVersion - modulePath = mi.Path - } - } - b, err := format(ctx, langVersion, modulePath, buf.Bytes()) - if err != nil { - return nil, err - } - formatted = string(b) - } - return computeTextEdits(ctx, snapshot, pgf, formatted) -} - -func formatSource(ctx context.Context, fh FileHandle) ([]byte, error) { - _, done := event.Start(ctx, "source.formatSource") - defer done() - - data, err := fh.Read() - if err != nil { - return nil, err - } - return format.Source(data) -} - -type ImportFix struct { - Fix *imports.ImportFix - Edits []protocol.TextEdit -} - -// AllImportsFixes formats f for each possible fix to the imports. -// In addition to returning the result of applying all edits, -// it returns a list of fixes that could be applied to the file, with the -// corresponding TextEdits that would be needed to apply that fix. -func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { - ctx, done := event.Start(ctx, "source.AllImportsFixes") - defer done() - - pgf, err := snapshot.ParseGo(ctx, fh, ParseFull) - if err != nil { - return nil, nil, err - } - if err := snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { - allFixEdits, editsPerFix, err = computeImportEdits(snapshot, pgf, opts) - return err - }); err != nil { - return nil, nil, fmt.Errorf("AllImportsFixes: %v", err) - } - return allFixEdits, editsPerFix, nil -} - -// computeImportEdits computes a set of edits that perform one or all of the -// necessary import fixes. -func computeImportEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { - filename := pgf.URI.Filename() - - // Build up basic information about the original file. - allFixes, err := imports.FixImports(filename, pgf.Src, options) - if err != nil { - return nil, nil, err - } - - allFixEdits, err = computeFixEdits(snapshot, pgf, options, allFixes) - if err != nil { - return nil, nil, err - } - - // Apply all of the import fixes to the file. - // Add the edits for each fix to the result. - for _, fix := range allFixes { - edits, err := computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix}) - if err != nil { - return nil, nil, err - } - editsPerFix = append(editsPerFix, &ImportFix{ - Fix: fix, - Edits: edits, - }) - } - return allFixEdits, editsPerFix, nil -} - -// ComputeOneImportFixEdits returns text edits for a single import fix. -func ComputeOneImportFixEdits(snapshot Snapshot, pgf *ParsedGoFile, fix *imports.ImportFix) ([]protocol.TextEdit, error) { - options := &imports.Options{ - LocalPrefix: snapshot.View().Options().Local, - // Defaults. - AllErrors: true, - Comments: true, - Fragment: true, - FormatOnly: false, - TabIndent: true, - TabWidth: 8, - } - return computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix}) -} - -func computeFixEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options, fixes []*imports.ImportFix) ([]protocol.TextEdit, error) { - // trim the original data to match fixedData - left, err := importPrefix(pgf.Src) - if err != nil { - return nil, err - } - extra := !strings.Contains(left, "\n") // one line may have more than imports - if extra { - left = string(pgf.Src) - } - if len(left) > 0 && left[len(left)-1] != '\n' { - left += "\n" - } - // Apply the fixes and re-parse the file so that we can locate the - // new imports. - flags := parser.ImportsOnly - if extra { - // used all of origData above, use all of it here too - flags = 0 - } - fixedData, err := imports.ApplyFixes(fixes, "", pgf.Src, options, flags) - if err != nil { - return nil, err - } - if fixedData == nil || fixedData[len(fixedData)-1] != '\n' { - fixedData = append(fixedData, '\n') // ApplyFixes may miss the newline, go figure. - } - edits, err := snapshot.View().Options().ComputeEdits(pgf.URI, left, string(fixedData)) - if err != nil { - return nil, err - } - return ProtocolEditsFromSource([]byte(left), edits, pgf.Mapper.TokFile) -} - -// importPrefix returns the prefix of the given file content through the final -// import statement. If there are no imports, the prefix is the package -// statement and any comment groups below it. -func importPrefix(src []byte) (string, error) { - fset := token.NewFileSet() - // do as little parsing as possible - f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly|parser.ParseComments) - if err != nil { // This can happen if 'package' is misspelled - return "", fmt.Errorf("importPrefix: failed to parse: %s", err) - } - tok := fset.File(f.Pos()) - var importEnd int - for _, d := range f.Decls { - if x, ok := d.(*ast.GenDecl); ok && x.Tok == token.IMPORT { - if e, err := safetoken.Offset(tok, d.End()); err != nil { - return "", fmt.Errorf("importPrefix: %s", err) - } else if e > importEnd { - importEnd = e - } - } - } - - maybeAdjustToLineEnd := func(pos token.Pos, isCommentNode bool) int { - offset, err := safetoken.Offset(tok, pos) - if err != nil { - return -1 - } - - // Don't go past the end of the file. - if offset > len(src) { - offset = len(src) - } - // The go/ast package does not account for different line endings, and - // specifically, in the text of a comment, it will strip out \r\n line - // endings in favor of \n. To account for these differences, we try to - // return a position on the next line whenever possible. - switch line := tok.Line(tok.Pos(offset)); { - case line < tok.LineCount(): - nextLineOffset, err := safetoken.Offset(tok, tok.LineStart(line+1)) - if err != nil { - return -1 - } - // If we found a position that is at the end of a line, move the - // offset to the start of the next line. - if offset+1 == nextLineOffset { - offset = nextLineOffset - } - case isCommentNode, offset+1 == tok.Size(): - // If the last line of the file is a comment, or we are at the end - // of the file, the prefix is the entire file. - offset = len(src) - } - return offset - } - if importEnd == 0 { - pkgEnd := f.Name.End() - importEnd = maybeAdjustToLineEnd(pkgEnd, false) - } - for _, cgroup := range f.Comments { - for _, c := range cgroup.List { - if end, err := safetoken.Offset(tok, c.End()); err != nil { - return "", err - } else if end > importEnd { - startLine := tok.Position(c.Pos()).Line - endLine := tok.Position(c.End()).Line - - // Work around golang/go#41197 by checking if the comment might - // contain "\r", and if so, find the actual end position of the - // comment by scanning the content of the file. - startOffset, err := safetoken.Offset(tok, c.Pos()) - if err != nil { - return "", err - } - if startLine != endLine && bytes.Contains(src[startOffset:], []byte("\r")) { - if commentEnd := scanForCommentEnd(src[startOffset:]); commentEnd > 0 { - end = startOffset + commentEnd - } - } - importEnd = maybeAdjustToLineEnd(tok.Pos(end), true) - } - } - } - if importEnd > len(src) { - importEnd = len(src) - } - return string(src[:importEnd]), nil -} - -// scanForCommentEnd returns the offset of the end of the multi-line comment -// at the start of the given byte slice. -func scanForCommentEnd(src []byte) int { - var s scanner.Scanner - s.Init(bytes.NewReader(src)) - s.Mode ^= scanner.SkipComments - - t := s.Scan() - if t == scanner.Comment { - return s.Pos().Offset - } - return 0 -} - -func computeTextEdits(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile, formatted string) ([]protocol.TextEdit, error) { - _, done := event.Start(ctx, "source.computeTextEdits") - defer done() - - edits, err := snapshot.View().Options().ComputeEdits(pgf.URI, string(pgf.Src), formatted) - if err != nil { - return nil, err - } - return ToProtocolEdits(pgf.Mapper, edits) -} - -// ProtocolEditsFromSource converts text edits to LSP edits using the original -// source. -func ProtocolEditsFromSource(src []byte, edits []diff.TextEdit, tf *token.File) ([]protocol.TextEdit, error) { - m := lsppos.NewMapper(src) - var result []protocol.TextEdit - for _, edit := range edits { - spn, err := edit.Span.WithOffset(tf) - if err != nil { - return nil, fmt.Errorf("computing offsets: %v", err) - } - rng, err := m.Range(spn.Start().Offset(), spn.End().Offset()) - if err != nil { - return nil, err - } - - if rng.Start == rng.End && edit.NewText == "" { - // Degenerate case, which may result from a diff tool wanting to delete - // '\r' in line endings. Filter it out. - continue - } - result = append(result, protocol.TextEdit{ - Range: rng, - NewText: edit.NewText, - }) - } - return result, nil -} - -func ToProtocolEdits(m *protocol.ColumnMapper, edits []diff.TextEdit) ([]protocol.TextEdit, error) { - if edits == nil { - return nil, nil - } - result := make([]protocol.TextEdit, len(edits)) - for i, edit := range edits { - rng, err := m.Range(edit.Span) - if err != nil { - return nil, err - } - result[i] = protocol.TextEdit{ - Range: rng, - NewText: edit.NewText, - } - } - return result, nil -} - -func FromProtocolEdits(m *protocol.ColumnMapper, edits []protocol.TextEdit) ([]diff.TextEdit, error) { - if edits == nil { - return nil, nil - } - result := make([]diff.TextEdit, len(edits)) - for i, edit := range edits { - spn, err := m.RangeSpan(edit.Range) - if err != nil { - return nil, err - } - result[i] = diff.TextEdit{ - Span: spn, - NewText: edit.NewText, - } - } - return result, nil -} diff --git a/internal/lsp/source/format_test.go b/internal/lsp/source/format_test.go deleted file mode 100644 index eac78d97989..00000000000 --- a/internal/lsp/source/format_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "fmt" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" -) - -func TestImportPrefix(t *testing.T) { - for i, tt := range []struct { - input, want string - }{ - {"package foo", "package foo"}, - {"package foo\n", "package foo\n"}, - {"package foo\n\nfunc f(){}\n", "package foo\n"}, - {"package foo\n\nimport \"fmt\"\n", "package foo\n\nimport \"fmt\""}, - {"package foo\nimport (\n\"fmt\"\n)\n", "package foo\nimport (\n\"fmt\"\n)"}, - {"\n\n\npackage foo\n", "\n\n\npackage foo\n"}, - {"// hi \n\npackage foo //xx\nfunc _(){}\n", "// hi \n\npackage foo //xx\n"}, - {"package foo //hi\n", "package foo //hi\n"}, - {"//hi\npackage foo\n//a\n\n//b\n", "//hi\npackage foo\n//a\n\n//b\n"}, - { - "package a\n\nimport (\n \"fmt\"\n)\n//hi\n", - "package a\n\nimport (\n \"fmt\"\n)\n//hi\n", - }, - {`package a /*hi*/`, `package a /*hi*/`}, - {"package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n", "package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n"}, - {"package x; import \"os\"; func f() {}\n\n", "package x; import \"os\""}, - {"package x; func f() {fmt.Println()}\n\n", "package x"}, - } { - got, err := importPrefix([]byte(tt.input)) - if err != nil { - t.Fatal(err) - } - if got != tt.want { - t.Errorf("%d: failed for %q:\n%s", i, tt.input, diffStr(t, tt.want, got)) - } - } -} - -func TestCRLFFile(t *testing.T) { - for i, tt := range []struct { - input, want string - }{ - { - input: `package main - -/* -Hi description -*/ -func Hi() { -} -`, - want: `package main - -/* -Hi description -*/`, - }, - } { - got, err := importPrefix([]byte(strings.ReplaceAll(tt.input, "\n", "\r\n"))) - if err != nil { - t.Fatal(err) - } - want := strings.ReplaceAll(tt.want, "\n", "\r\n") - if got != want { - t.Errorf("%d: failed for %q:\n%s", i, tt.input, diffStr(t, want, got)) - } - } -} - -func diffStr(t *testing.T, want, got string) string { - if want == got { - return "" - } - // Add newlines to avoid newline messages in diff. - want += "\n" - got += "\n" - d, err := myers.ComputeEdits("", want, got) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("%q", diff.ToUnified("want", "got", want, d)) -} diff --git a/internal/lsp/source/highlight.go b/internal/lsp/source/highlight.go deleted file mode 100644 index 4be078b7fce..00000000000 --- a/internal/lsp/source/highlight.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/ast" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" -) - -func Highlight(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Range, error) { - ctx, done := event.Start(ctx, "source.Highlight") - defer done() - - // Don't use GetParsedFile because it uses TypecheckWorkspace, and we - // always want fully parsed files for highlight, regardless of whether - // the file belongs to a workspace package. - pkg, err := snapshot.PackageForFile(ctx, fh.URI(), TypecheckFull, WidestPackage) - if err != nil { - return nil, fmt.Errorf("getting package for Highlight: %w", err) - } - pgf, err := pkg.File(fh.URI()) - if err != nil { - return nil, fmt.Errorf("getting file for Highlight: %w", err) - } - - pos, err := pgf.Mapper.Pos(position) - if err != nil { - return nil, err - } - path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) - if len(path) == 0 { - return nil, fmt.Errorf("no enclosing position found for %v:%v", position.Line, position.Character) - } - // If start == end for astutil.PathEnclosingInterval, the 1-char interval - // following start is used instead. As a result, we might not get an exact - // match so we should check the 1-char interval to the left of the passed - // in position to see if that is an exact match. - if _, ok := path[0].(*ast.Ident); !ok { - if p, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1); p != nil { - switch p[0].(type) { - case *ast.Ident, *ast.SelectorExpr: - path = p // use preceding ident/selector - } - } - } - result, err := highlightPath(pkg, path) - if err != nil { - return nil, err - } - var ranges []protocol.Range - for rng := range result { - mRng, err := posToMappedRange(snapshot, pkg, rng.start, rng.end) - if err != nil { - return nil, err - } - pRng, err := mRng.Range() - if err != nil { - return nil, err - } - ranges = append(ranges, pRng) - } - return ranges, nil -} - -func highlightPath(pkg Package, path []ast.Node) (map[posRange]struct{}, error) { - result := make(map[posRange]struct{}) - switch node := path[0].(type) { - case *ast.BasicLit: - if len(path) > 1 { - if _, ok := path[1].(*ast.ImportSpec); ok { - err := highlightImportUses(pkg, path, result) - return result, err - } - } - highlightFuncControlFlow(path, result) - case *ast.ReturnStmt, *ast.FuncDecl, *ast.FuncType: - highlightFuncControlFlow(path, result) - case *ast.Ident: - highlightIdentifiers(pkg, path, result) - case *ast.ForStmt, *ast.RangeStmt: - highlightLoopControlFlow(path, result) - case *ast.SwitchStmt: - highlightSwitchFlow(path, result) - case *ast.BranchStmt: - // BREAK can exit a loop, switch or select, while CONTINUE exit a loop so - // these need to be handled separately. They can also be embedded in any - // other loop/switch/select if they have a label. TODO: add support for - // GOTO and FALLTHROUGH as well. - if node.Label != nil { - highlightLabeledFlow(node, result) - } else { - switch node.Tok { - case token.BREAK: - highlightUnlabeledBreakFlow(path, result) - case token.CONTINUE: - highlightLoopControlFlow(path, result) - } - } - default: - // If the cursor is in an unidentified area, return empty results. - return nil, nil - } - return result, nil -} - -type posRange struct { - start, end token.Pos -} - -func highlightFuncControlFlow(path []ast.Node, result map[posRange]struct{}) { - var enclosingFunc ast.Node - var returnStmt *ast.ReturnStmt - var resultsList *ast.FieldList - inReturnList := false - -Outer: - // Reverse walk the path till we get to the func block. - for i, n := range path { - switch node := n.(type) { - case *ast.KeyValueExpr: - // If cursor is in a key: value expr, we don't want control flow highlighting - return - case *ast.CallExpr: - // If cursor is an arg in a callExpr, we don't want control flow highlighting. - if i > 0 { - for _, arg := range node.Args { - if arg == path[i-1] { - return - } - } - } - case *ast.Field: - inReturnList = true - case *ast.FuncLit: - enclosingFunc = n - resultsList = node.Type.Results - break Outer - case *ast.FuncDecl: - enclosingFunc = n - resultsList = node.Type.Results - break Outer - case *ast.ReturnStmt: - returnStmt = node - // If the cursor is not directly in a *ast.ReturnStmt, then - // we need to know if it is within one of the values that is being returned. - inReturnList = inReturnList || path[0] != returnStmt - } - } - // Cursor is not in a function. - if enclosingFunc == nil { - return - } - // If the cursor is on a "return" or "func" keyword, we should highlight all of the exit - // points of the function, including the "return" and "func" keywords. - highlightAllReturnsAndFunc := path[0] == returnStmt || path[0] == enclosingFunc - switch path[0].(type) { - case *ast.Ident, *ast.BasicLit: - // Cursor is in an identifier and not in a return statement or in the results list. - if returnStmt == nil && !inReturnList { - return - } - case *ast.FuncType: - highlightAllReturnsAndFunc = true - } - // The user's cursor may be within the return statement of a function, - // or within the result section of a function's signature. - // index := -1 - var nodes []ast.Node - if returnStmt != nil { - for _, n := range returnStmt.Results { - nodes = append(nodes, n) - } - } else if resultsList != nil { - for _, n := range resultsList.List { - nodes = append(nodes, n) - } - } - _, index := nodeAtPos(nodes, path[0].Pos()) - - // Highlight the correct argument in the function declaration return types. - if resultsList != nil && -1 < index && index < len(resultsList.List) { - rng := posRange{ - start: resultsList.List[index].Pos(), - end: resultsList.List[index].End(), - } - result[rng] = struct{}{} - } - // Add the "func" part of the func declaration. - if highlightAllReturnsAndFunc { - r := posRange{ - start: enclosingFunc.Pos(), - end: enclosingFunc.Pos() + token.Pos(len("func")), - } - result[r] = struct{}{} - } - ast.Inspect(enclosingFunc, func(n ast.Node) bool { - // Don't traverse any other functions. - switch n.(type) { - case *ast.FuncDecl, *ast.FuncLit: - return enclosingFunc == n - } - ret, ok := n.(*ast.ReturnStmt) - if !ok { - return true - } - var toAdd ast.Node - // Add the entire return statement, applies when highlight the word "return" or "func". - if highlightAllReturnsAndFunc { - toAdd = n - } - // Add the relevant field within the entire return statement. - if -1 < index && index < len(ret.Results) { - toAdd = ret.Results[index] - } - if toAdd != nil { - result[posRange{start: toAdd.Pos(), end: toAdd.End()}] = struct{}{} - } - return false - }) -} - -func highlightUnlabeledBreakFlow(path []ast.Node, result map[posRange]struct{}) { - // Reverse walk the path until we find closest loop, select, or switch. - for _, n := range path { - switch n.(type) { - case *ast.ForStmt, *ast.RangeStmt: - highlightLoopControlFlow(path, result) - return // only highlight the innermost statement - case *ast.SwitchStmt: - highlightSwitchFlow(path, result) - return - case *ast.SelectStmt: - // TODO: add highlight when breaking a select. - return - } - } -} - -func highlightLabeledFlow(node *ast.BranchStmt, result map[posRange]struct{}) { - obj := node.Label.Obj - if obj == nil || obj.Decl == nil { - return - } - label, ok := obj.Decl.(*ast.LabeledStmt) - if !ok { - return - } - switch label.Stmt.(type) { - case *ast.ForStmt, *ast.RangeStmt: - highlightLoopControlFlow([]ast.Node{label.Stmt, label}, result) - case *ast.SwitchStmt: - highlightSwitchFlow([]ast.Node{label.Stmt, label}, result) - } -} - -func labelFor(path []ast.Node) *ast.Ident { - if len(path) > 1 { - if n, ok := path[1].(*ast.LabeledStmt); ok { - return n.Label - } - } - return nil -} - -func highlightLoopControlFlow(path []ast.Node, result map[posRange]struct{}) { - var loop ast.Node - var loopLabel *ast.Ident - stmtLabel := labelFor(path) -Outer: - // Reverse walk the path till we get to the for loop. - for i := range path { - switch n := path[i].(type) { - case *ast.ForStmt, *ast.RangeStmt: - loopLabel = labelFor(path[i:]) - - if stmtLabel == nil || loopLabel == stmtLabel { - loop = n - break Outer - } - } - } - if loop == nil { - return - } - - // Add the for statement. - rng := posRange{ - start: loop.Pos(), - end: loop.Pos() + token.Pos(len("for")), - } - result[rng] = struct{}{} - - // Traverse AST to find branch statements within the same for-loop. - ast.Inspect(loop, func(n ast.Node) bool { - switch n.(type) { - case *ast.ForStmt, *ast.RangeStmt: - return loop == n - case *ast.SwitchStmt, *ast.SelectStmt: - return false - } - b, ok := n.(*ast.BranchStmt) - if !ok { - return true - } - if b.Label == nil || labelDecl(b.Label) == loopLabel { - result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} - } - return true - }) - - // Find continue statements in the same loop or switches/selects. - ast.Inspect(loop, func(n ast.Node) bool { - switch n.(type) { - case *ast.ForStmt, *ast.RangeStmt: - return loop == n - } - - if n, ok := n.(*ast.BranchStmt); ok && n.Tok == token.CONTINUE { - result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} - } - return true - }) - - // We don't need to check other for loops if we aren't looking for labeled statements. - if loopLabel == nil { - return - } - - // Find labeled branch statements in any loop. - ast.Inspect(loop, func(n ast.Node) bool { - b, ok := n.(*ast.BranchStmt) - if !ok { - return true - } - // statement with labels that matches the loop - if b.Label != nil && labelDecl(b.Label) == loopLabel { - result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} - } - return true - }) -} - -func highlightSwitchFlow(path []ast.Node, result map[posRange]struct{}) { - var switchNode ast.Node - var switchNodeLabel *ast.Ident - stmtLabel := labelFor(path) -Outer: - // Reverse walk the path till we get to the switch statement. - for i := range path { - switch n := path[i].(type) { - case *ast.SwitchStmt: - switchNodeLabel = labelFor(path[i:]) - if stmtLabel == nil || switchNodeLabel == stmtLabel { - switchNode = n - break Outer - } - } - } - // Cursor is not in a switch statement - if switchNode == nil { - return - } - - // Add the switch statement. - rng := posRange{ - start: switchNode.Pos(), - end: switchNode.Pos() + token.Pos(len("switch")), - } - result[rng] = struct{}{} - - // Traverse AST to find break statements within the same switch. - ast.Inspect(switchNode, func(n ast.Node) bool { - switch n.(type) { - case *ast.SwitchStmt: - return switchNode == n - case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt: - return false - } - - b, ok := n.(*ast.BranchStmt) - if !ok || b.Tok != token.BREAK { - return true - } - - if b.Label == nil || labelDecl(b.Label) == switchNodeLabel { - result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} - } - return true - }) - - // We don't need to check other switches if we aren't looking for labeled statements. - if switchNodeLabel == nil { - return - } - - // Find labeled break statements in any switch - ast.Inspect(switchNode, func(n ast.Node) bool { - b, ok := n.(*ast.BranchStmt) - if !ok || b.Tok != token.BREAK { - return true - } - - if b.Label != nil && labelDecl(b.Label) == switchNodeLabel { - result[posRange{start: b.Pos(), end: b.End()}] = struct{}{} - } - - return true - }) -} - -func labelDecl(n *ast.Ident) *ast.Ident { - if n == nil { - return nil - } - if n.Obj == nil { - return nil - } - if n.Obj.Decl == nil { - return nil - } - stmt, ok := n.Obj.Decl.(*ast.LabeledStmt) - if !ok { - return nil - } - return stmt.Label -} - -func highlightImportUses(pkg Package, path []ast.Node, result map[posRange]struct{}) error { - basicLit, ok := path[0].(*ast.BasicLit) - if !ok { - return fmt.Errorf("highlightImportUses called with an ast.Node of type %T", basicLit) - } - ast.Inspect(path[len(path)-1], func(node ast.Node) bool { - if imp, ok := node.(*ast.ImportSpec); ok && imp.Path == basicLit { - result[posRange{start: node.Pos(), end: node.End()}] = struct{}{} - return false - } - n, ok := node.(*ast.Ident) - if !ok { - return true - } - obj, ok := pkg.GetTypesInfo().ObjectOf(n).(*types.PkgName) - if !ok { - return true - } - if !strings.Contains(basicLit.Value, obj.Name()) { - return true - } - result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} - return false - }) - return nil -} - -func highlightIdentifiers(pkg Package, path []ast.Node, result map[posRange]struct{}) error { - id, ok := path[0].(*ast.Ident) - if !ok { - return fmt.Errorf("highlightIdentifiers called with an ast.Node of type %T", id) - } - // Check if ident is inside return or func decl. - highlightFuncControlFlow(path, result) - - // TODO: maybe check if ident is a reserved word, if true then don't continue and return results. - - idObj := pkg.GetTypesInfo().ObjectOf(id) - pkgObj, isImported := idObj.(*types.PkgName) - ast.Inspect(path[len(path)-1], func(node ast.Node) bool { - if imp, ok := node.(*ast.ImportSpec); ok && isImported { - highlightImport(pkgObj, imp, result) - } - n, ok := node.(*ast.Ident) - if !ok { - return true - } - if n.Name != id.Name { - return false - } - if nObj := pkg.GetTypesInfo().ObjectOf(n); nObj == idObj { - result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} - } - return false - }) - return nil -} - -func highlightImport(obj *types.PkgName, imp *ast.ImportSpec, result map[posRange]struct{}) { - if imp.Name != nil || imp.Path == nil { - return - } - if !strings.Contains(imp.Path.Value, obj.Name()) { - return - } - result[posRange{start: imp.Path.Pos(), end: imp.Path.End()}] = struct{}{} -} diff --git a/internal/lsp/source/hover.go b/internal/lsp/source/hover.go deleted file mode 100644 index 58ea9696203..00000000000 --- a/internal/lsp/source/hover.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "go/ast" - "go/constant" - "go/doc" - "go/format" - "go/token" - "go/types" - "strconv" - "strings" - "time" - "unicode/utf8" - - "golang.org/x/text/unicode/runenames" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/typeparams" -) - -// HoverContext contains context extracted from the syntax and type information -// of a given node, for use in various summaries (hover, autocomplete, -// signature help). -type HoverContext struct { - // signatureSource is the object or node use to derive the hover signature. - // - // It may also hold a precomputed string. - // TODO(rfindley): pre-compute all signatures to avoid this indirection. - signatureSource interface{} - - // comment is the most relevant comment group associated with the hovered object. - Comment *ast.CommentGroup -} - -// HoverJSON contains information used by hover. It is also the JSON returned -// for the "structured" hover format -type HoverJSON struct { - // Synopsis is a single sentence synopsis of the symbol's documentation. - Synopsis string `json:"synopsis"` - - // FullDocumentation is the symbol's full documentation. - FullDocumentation string `json:"fullDocumentation"` - - // Signature is the symbol's signature. - Signature string `json:"signature"` - - // SingleLine is a single line describing the symbol. - // This is recommended only for use in clients that show a single line for hover. - SingleLine string `json:"singleLine"` - - // SymbolName is the types.Object.Name for the given symbol. - SymbolName string `json:"symbolName"` - - // LinkPath is the pkg.go.dev link for the given symbol. - // For example, the "go/ast" part of "pkg.go.dev/go/ast#Node". - LinkPath string `json:"linkPath"` - - // LinkAnchor is the pkg.go.dev link anchor for the given symbol. - // For example, the "Node" part of "pkg.go.dev/go/ast#Node". - LinkAnchor string `json:"linkAnchor"` -} - -func Hover(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) { - ident, err := Identifier(ctx, snapshot, fh, position) - if err != nil { - if hover, innerErr := hoverRune(ctx, snapshot, fh, position); innerErr == nil { - return hover, nil - } - return nil, nil - } - h, err := HoverIdentifier(ctx, ident) - if err != nil { - return nil, err - } - rng, err := ident.Range() - if err != nil { - return nil, err - } - hover, err := FormatHover(h, snapshot.View().Options()) - if err != nil { - return nil, err - } - return &protocol.Hover{ - Contents: protocol.MarkupContent{ - Kind: snapshot.View().Options().PreferredContentFormat, - Value: hover, - }, - Range: rng, - }, nil -} - -func hoverRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) { - ctx, done := event.Start(ctx, "source.hoverRune") - defer done() - - r, mrng, err := findRune(ctx, snapshot, fh, position) - if err != nil { - return nil, err - } - rng, err := mrng.Range() - if err != nil { - return nil, err - } - - var desc string - runeName := runenames.Name(r) - if len(runeName) > 0 && runeName[0] == '<' { - // Check if the rune looks like an HTML tag. If so, trim the surrounding <> - // characters to work around https://github.com/microsoft/vscode/issues/124042. - runeName = strings.TrimRight(runeName[1:], ">") - } - if strconv.IsPrint(r) { - desc = fmt.Sprintf("'%s', U+%04X, %s", string(r), uint32(r), runeName) - } else { - desc = fmt.Sprintf("U+%04X, %s", uint32(r), runeName) - } - return &protocol.Hover{ - Contents: protocol.MarkupContent{ - Kind: snapshot.View().Options().PreferredContentFormat, - Value: desc, - }, - Range: rng, - }, nil -} - -// ErrNoRuneFound is the error returned when no rune is found at a particular position. -var ErrNoRuneFound = errors.New("no rune found") - -// findRune returns rune information for a position in a file. -func findRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (rune, MappedRange, error) { - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return 0, MappedRange{}, err - } - pos, err := pgf.Mapper.Pos(position) - if err != nil { - return 0, MappedRange{}, err - } - - // Find the basic literal enclosing the given position, if there is one. - var lit *ast.BasicLit - var found bool - ast.Inspect(pgf.File, func(n ast.Node) bool { - if found { - return false - } - if n, ok := n.(*ast.BasicLit); ok && pos >= n.Pos() && pos <= n.End() { - lit = n - found = true - } - return !found - }) - if !found { - return 0, MappedRange{}, ErrNoRuneFound - } - - var r rune - var start, end token.Pos - switch lit.Kind { - case token.CHAR: - s, err := strconv.Unquote(lit.Value) - if err != nil { - // If the conversion fails, it's because of an invalid syntax, therefore - // there is no rune to be found. - return 0, MappedRange{}, ErrNoRuneFound - } - r, _ = utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return 0, MappedRange{}, fmt.Errorf("rune error") - } - start, end = lit.Pos(), lit.End() - case token.INT: - // It's an integer, scan only if it is a hex litteral whose bitsize in - // ranging from 8 to 32. - if !(strings.HasPrefix(lit.Value, "0x") && len(lit.Value[2:]) >= 2 && len(lit.Value[2:]) <= 8) { - return 0, MappedRange{}, ErrNoRuneFound - } - v, err := strconv.ParseUint(lit.Value[2:], 16, 32) - if err != nil { - return 0, MappedRange{}, err - } - r = rune(v) - if r == utf8.RuneError { - return 0, MappedRange{}, fmt.Errorf("rune error") - } - start, end = lit.Pos(), lit.End() - case token.STRING: - // It's a string, scan only if it contains a unicode escape sequence under or before the - // current cursor position. - var found bool - litOffset, err := safetoken.Offset(pgf.Tok, lit.Pos()) - if err != nil { - return 0, MappedRange{}, err - } - offset, err := safetoken.Offset(pgf.Tok, pos) - if err != nil { - return 0, MappedRange{}, err - } - for i := offset - litOffset; i > 0; i-- { - // Start at the cursor position and search backward for the beginning of a rune escape sequence. - rr, _ := utf8.DecodeRuneInString(lit.Value[i:]) - if rr == utf8.RuneError { - return 0, MappedRange{}, fmt.Errorf("rune error") - } - if rr == '\\' { - // Got the beginning, decode it. - var tail string - r, _, tail, err = strconv.UnquoteChar(lit.Value[i:], '"') - if err != nil { - // If the conversion fails, it's because of an invalid syntax, therefore is no rune to be found. - return 0, MappedRange{}, ErrNoRuneFound - } - // Only the rune escape sequence part of the string has to be highlighted, recompute the range. - runeLen := len(lit.Value) - (int(i) + len(tail)) - start = token.Pos(int(lit.Pos()) + int(i)) - end = token.Pos(int(start) + runeLen) - found = true - break - } - } - if !found { - // No escape sequence found - return 0, MappedRange{}, ErrNoRuneFound - } - default: - return 0, MappedRange{}, ErrNoRuneFound - } - - mappedRange, err := posToMappedRange(snapshot, pkg, start, end) - if err != nil { - return 0, MappedRange{}, err - } - return r, mappedRange, nil -} - -func HoverIdentifier(ctx context.Context, i *IdentifierInfo) (*HoverJSON, error) { - ctx, done := event.Start(ctx, "source.Hover") - defer done() - - hoverCtx, err := FindHoverContext(ctx, i.Snapshot, i.pkg, i.Declaration.obj, i.Declaration.node, i.Declaration.fullDecl) - if err != nil { - return nil, err - } - - h := &HoverJSON{ - FullDocumentation: hoverCtx.Comment.Text(), - Synopsis: doc.Synopsis(hoverCtx.Comment.Text()), - } - - fset := i.Snapshot.FileSet() - // Determine the symbol's signature. - switch x := hoverCtx.signatureSource.(type) { - case string: - h.Signature = x // a pre-computed signature - - case *ast.TypeSpec: - x2 := *x - // Don't duplicate comments when formatting type specs. - x2.Doc = nil - x2.Comment = nil - var b strings.Builder - b.WriteString("type ") - if err := format.Node(&b, fset, &x2); err != nil { - return nil, err - } - h.Signature = b.String() - - case ast.Node: - var b strings.Builder - if err := format.Node(&b, fset, x); err != nil { - return nil, err - } - h.Signature = b.String() - - // Check if the variable is an integer whose value we can present in a more - // user-friendly way, i.e. `var hex = 0xe34e` becomes `var hex = 58190` - if spec, ok := x.(*ast.ValueSpec); ok && len(spec.Values) > 0 { - if lit, ok := spec.Values[0].(*ast.BasicLit); ok && len(spec.Names) > 0 { - val := constant.MakeFromLiteral(types.ExprString(lit), lit.Kind, 0) - h.Signature = fmt.Sprintf("var %s = %s", spec.Names[0], val) - } - } - - case types.Object: - // If the variable is implicitly declared in a type switch, we need to - // manually generate its object string. - if typ := i.Declaration.typeSwitchImplicit; typ != nil { - if v, ok := x.(*types.Var); ok { - h.Signature = fmt.Sprintf("var %s %s", v.Name(), types.TypeString(typ, i.qf)) - break - } - } - h.Signature = objectString(x, i.qf, i.Inferred) - } - if obj := i.Declaration.obj; obj != nil { - h.SingleLine = objectString(obj, i.qf, nil) - } - obj := i.Declaration.obj - if obj == nil { - return h, nil - } - - // Check if the identifier is test-only (and is therefore not part of a - // package's API). This is true if the request originated in a test package, - // and if the declaration is also found in the same test package. - if i.pkg != nil && obj.Pkg() != nil && i.pkg.ForTest() != "" { - if _, err := i.pkg.File(i.Declaration.MappedRange[0].URI()); err == nil { - return h, nil - } - } - - h.SymbolName, h.LinkPath, h.LinkAnchor = linkData(obj, i.enclosing) - - // See golang/go#36998: don't link to modules matching GOPRIVATE. - // - // The path returned by linkData is an import path. - if i.Snapshot.View().IsGoPrivatePath(h.LinkPath) { - h.LinkPath = "" - } else if mod, version, ok := moduleAtVersion(h.LinkPath, i); ok { - h.LinkPath = strings.Replace(h.LinkPath, mod, mod+"@"+version, 1) - } - - return h, nil -} - -// linkData returns the name, import path, and anchor to use in building links -// to obj. -// -// If obj is not visible in documentation, the returned name will be empty. -func linkData(obj types.Object, enclosing *types.TypeName) (name, importPath, anchor string) { - // Package names simply link to the package. - if obj, ok := obj.(*types.PkgName); ok { - return obj.Name(), obj.Imported().Path(), "" - } - - // Builtins link to the special builtin package. - if obj.Parent() == types.Universe { - return obj.Name(), "builtin", obj.Name() - } - - // In all other cases, the object must be exported. - if !obj.Exported() { - return "", "", "" - } - - var recv types.Object // If non-nil, the field or method receiver base. - - switch obj := obj.(type) { - case *types.Var: - // If the object is a field, and we have an associated selector - // composite literal, or struct, we can determine the link. - if obj.IsField() && enclosing != nil { - recv = enclosing - } - case *types.Func: - typ, ok := obj.Type().(*types.Signature) - if !ok { - // Note: this should never happen. go/types guarantees that the type of - // *Funcs are Signatures. - // - // TODO(rfindley): given a 'debug' mode, we should panic here. - return "", "", "" - } - if r := typ.Recv(); r != nil { - if rtyp, _ := Deref(r.Type()).(*types.Named); rtyp != nil { - // If we have an unexported type, see if the enclosing type is - // exported (we may have an interface or struct we can link - // to). If not, don't show any link. - if !rtyp.Obj().Exported() { - if enclosing != nil { - recv = enclosing - } else { - return "", "", "" - } - } else { - recv = rtyp.Obj() - } - } - } - } - - if recv != nil && !recv.Exported() { - return "", "", "" - } - - // Either the object or its receiver must be in the package scope. - scopeObj := obj - if recv != nil { - scopeObj = recv - } - if scopeObj.Pkg() == nil || scopeObj.Pkg().Scope().Lookup(scopeObj.Name()) != scopeObj { - return "", "", "" - } - - // golang/go#52211: somehow we get here with a nil obj.Pkg - if obj.Pkg() == nil { - bug.Report("object with nil pkg", bug.Data{ - "name": obj.Name(), - "type": fmt.Sprintf("%T", obj), - }) - return "", "", "" - } - - importPath = obj.Pkg().Path() - if recv != nil { - anchor = fmt.Sprintf("%s.%s", recv.Name(), obj.Name()) - name = fmt.Sprintf("(%s.%s).%s", obj.Pkg().Name(), recv.Name(), obj.Name()) - } else { - // For most cases, the link is "package/path#symbol". - anchor = obj.Name() - name = fmt.Sprintf("%s.%s", obj.Pkg().Name(), obj.Name()) - } - return name, importPath, anchor -} - -func moduleAtVersion(path string, i *IdentifierInfo) (string, string, bool) { - // TODO(rfindley): moduleAtVersion should not be responsible for deciding - // whether or not the link target supports module version links. - if strings.ToLower(i.Snapshot.View().Options().LinkTarget) != "pkg.go.dev" { - return "", "", false - } - impPkg, err := i.pkg.GetImport(path) - if err != nil { - return "", "", false - } - if impPkg.Version() == nil { - return "", "", false - } - version, modpath := impPkg.Version().Version, impPkg.Version().Path - if modpath == "" || version == "" { - return "", "", false - } - return modpath, version, true -} - -// objectString is a wrapper around the types.ObjectString function. -// It handles adding more information to the object string. -func objectString(obj types.Object, qf types.Qualifier, inferred *types.Signature) string { - // If the signature type was inferred, prefer the preferred signature with a - // comment showing the generic signature. - if sig, _ := obj.Type().(*types.Signature); sig != nil && typeparams.ForSignature(sig).Len() > 0 && inferred != nil { - obj2 := types.NewFunc(obj.Pos(), obj.Pkg(), obj.Name(), inferred) - str := types.ObjectString(obj2, qf) - // Try to avoid overly long lines. - if len(str) > 60 { - str += "\n" - } else { - str += " " - } - str += "// " + types.TypeString(sig, qf) - return str - } - str := types.ObjectString(obj, qf) - switch obj := obj.(type) { - case *types.Const: - str = fmt.Sprintf("%s = %s", str, obj.Val()) - - // Try to add a formatted duration as an inline comment - typ, ok := obj.Type().(*types.Named) - if !ok { - break - } - pkg := typ.Obj().Pkg() - if pkg.Path() == "time" && typ.Obj().Name() == "Duration" { - if d, ok := constant.Int64Val(obj.Val()); ok { - str += " // " + time.Duration(d).String() - } - } - } - return str -} - -// FindHoverContext returns a HoverContext struct for an AST node and its -// declaration object. node should be the actual node used in type checking, -// while fullNode could be a separate node with more complete syntactic -// information. -func FindHoverContext(ctx context.Context, s Snapshot, pkg Package, obj types.Object, pkgNode ast.Node, fullDecl ast.Decl) (*HoverContext, error) { - var info *HoverContext - - // Type parameters get their signature from their declaration object. - if _, isTypeName := obj.(*types.TypeName); isTypeName { - if _, isTypeParam := obj.Type().(*typeparams.TypeParam); isTypeParam { - return &HoverContext{signatureSource: obj}, nil - } - } - - // This is problematic for a number of reasons. We really need to have a more - // general mechanism to validate the coherency of AST with type information, - // but absent that we must do our best to ensure that we don't use fullNode - // when we actually need the node that was type checked. - // - // pkgNode may be nil, if it was eliminated from the type-checked syntax. In - // that case, use fullDecl if available. - node := pkgNode - if node == nil && fullDecl != nil { - node = fullDecl - } - - switch node := node.(type) { - case *ast.Ident: - // The package declaration. - for _, f := range pkg.GetSyntax() { - if f.Name == pkgNode { - info = &HoverContext{Comment: f.Doc} - } - } - case *ast.ImportSpec: - // Try to find the package documentation for an imported package. - pkgPath, err := strconv.Unquote(node.Path.Value) - if err != nil { - return nil, err - } - imp, err := pkg.GetImport(pkgPath) - if err != nil { - return nil, err - } - // Assume that only one file will contain package documentation, - // so pick the first file that has a doc comment. - for _, file := range imp.GetSyntax() { - if file.Doc != nil { - info = &HoverContext{Comment: file.Doc} - if file.Name != nil { - info.signatureSource = "package " + file.Name.Name - } - break - } - } - case *ast.GenDecl: - switch obj := obj.(type) { - case *types.TypeName, *types.Var, *types.Const, *types.Func: - // Always use the full declaration here if we have it, because the - // dependent code doesn't rely on pointer identity. This is fragile. - if d, _ := fullDecl.(*ast.GenDecl); d != nil { - node = d - } - // obj may not have been produced by type checking the AST containing - // node, so we need to be careful about using token.Pos. - tok := s.FileSet().File(obj.Pos()) - offset, err := safetoken.Offset(tok, obj.Pos()) - if err != nil { - return nil, err - } - - // fullTok and fullPos are the *token.File and object position in for the - // full AST. - fullTok := s.FileSet().File(node.Pos()) - fullPos, err := safetoken.Pos(fullTok, offset) - if err != nil { - return nil, err - } - - var spec ast.Spec - for _, s := range node.Specs { - // Avoid panics by guarding the calls to token.Offset (golang/go#48249). - start, err := safetoken.Offset(fullTok, s.Pos()) - if err != nil { - return nil, err - } - end, err := safetoken.Offset(fullTok, s.End()) - if err != nil { - return nil, err - } - if start <= offset && offset <= end { - spec = s - break - } - } - - info, err = hoverGenDecl(node, spec, fullPos, obj) - if err != nil { - return nil, err - } - } - case *ast.TypeSpec: - if obj.Parent() == types.Universe { - if genDecl, ok := fullDecl.(*ast.GenDecl); ok { - info = hoverTypeSpec(node, genDecl) - } - } - case *ast.FuncDecl: - switch obj.(type) { - case *types.Func: - info = &HoverContext{signatureSource: obj, Comment: node.Doc} - case *types.Builtin: - info = &HoverContext{Comment: node.Doc} - if sig, err := NewBuiltinSignature(ctx, s, obj.Name()); err == nil { - info.signatureSource = "func " + sig.name + sig.Format() - } else { - // Fall back on the object as a signature source. - bug.Report("invalid builtin hover", bug.Data{ - "err": err.Error(), - }) - info.signatureSource = obj - } - case *types.Var: - // Object is a function param or the field of an anonymous struct - // declared with ':='. Skip the first one because only fields - // can have docs. - if isFunctionParam(obj, node) { - break - } - - field, err := s.PosToField(ctx, pkg, obj.Pos()) - if err != nil { - return nil, err - } - - if field != nil { - comment := field.Doc - if comment.Text() == "" { - comment = field.Comment - } - info = &HoverContext{signatureSource: obj, Comment: comment} - } - } - } - - if info == nil { - info = &HoverContext{signatureSource: obj} - } - - return info, nil -} - -// isFunctionParam returns true if the passed object is either an incoming -// or an outgoing function param -func isFunctionParam(obj types.Object, node *ast.FuncDecl) bool { - for _, f := range node.Type.Params.List { - if f.Pos() == obj.Pos() { - return true - } - } - if node.Type.Results != nil { - for _, f := range node.Type.Results.List { - if f.Pos() == obj.Pos() { - return true - } - } - } - return false -} - -// hoverGenDecl returns hover information an object declared via spec inside -// of the GenDecl node. obj is the type-checked object corresponding to the -// declaration, but may have been type-checked using a different AST than the -// given nodes; fullPos is the position of obj in node's AST. -func hoverGenDecl(node *ast.GenDecl, spec ast.Spec, fullPos token.Pos, obj types.Object) (*HoverContext, error) { - if spec == nil { - return nil, fmt.Errorf("no spec for node %v at position %v", node, fullPos) - } - - // If we have a field or method. - switch obj.(type) { - case *types.Var, *types.Const, *types.Func: - return hoverVar(spec, fullPos, obj, node), nil - } - // Handle types. - switch spec := spec.(type) { - case *ast.TypeSpec: - return hoverTypeSpec(spec, node), nil - case *ast.ValueSpec: - return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil - case *ast.ImportSpec: - return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil - } - return nil, fmt.Errorf("unable to format spec %v (%T)", spec, spec) -} - -// TODO(rfindley): rename this function. -func hoverTypeSpec(spec *ast.TypeSpec, decl *ast.GenDecl) *HoverContext { - comment := spec.Doc - if comment == nil && decl != nil { - comment = decl.Doc - } - if comment == nil { - comment = spec.Comment - } - return &HoverContext{ - signatureSource: spec, - Comment: comment, - } -} - -func hoverVar(node ast.Spec, fullPos token.Pos, obj types.Object, decl *ast.GenDecl) *HoverContext { - var fieldList *ast.FieldList - switch spec := node.(type) { - case *ast.TypeSpec: - switch t := spec.Type.(type) { - case *ast.StructType: - fieldList = t.Fields - case *ast.InterfaceType: - fieldList = t.Methods - } - case *ast.ValueSpec: - // Try to extract the field list of an anonymous struct - if fieldList = extractFieldList(spec.Type); fieldList != nil { - break - } - - comment := spec.Doc - if comment == nil { - comment = decl.Doc - } - if comment == nil { - comment = spec.Comment - } - - // We need the AST nodes for variable declarations of basic literals with - // associated values so that we can augment their hover with more information. - if _, ok := obj.(*types.Var); ok && spec.Type == nil && len(spec.Values) > 0 { - if _, ok := spec.Values[0].(*ast.BasicLit); ok { - return &HoverContext{signatureSource: spec, Comment: comment} - } - } - - return &HoverContext{signatureSource: obj, Comment: comment} - } - - if fieldList != nil { - comment := findFieldComment(fullPos, fieldList) - return &HoverContext{signatureSource: obj, Comment: comment} - } - return &HoverContext{signatureSource: obj, Comment: decl.Doc} -} - -// extractFieldList recursively tries to extract a field list. -// If it is not found, nil is returned. -func extractFieldList(specType ast.Expr) *ast.FieldList { - switch t := specType.(type) { - case *ast.StructType: - return t.Fields - case *ast.InterfaceType: - return t.Methods - case *ast.ArrayType: - return extractFieldList(t.Elt) - case *ast.MapType: - // Map value has a greater chance to be a struct - if fields := extractFieldList(t.Value); fields != nil { - return fields - } - return extractFieldList(t.Key) - case *ast.ChanType: - return extractFieldList(t.Value) - } - return nil -} - -// findFieldComment visits all fields in depth-first order and returns -// the comment of a field with passed position. If no comment is found, -// nil is returned. -func findFieldComment(pos token.Pos, fieldList *ast.FieldList) *ast.CommentGroup { - for _, field := range fieldList.List { - if field.Pos() == pos { - if field.Doc.Text() != "" { - return field.Doc - } - return field.Comment - } - - if nestedFieldList := extractFieldList(field.Type); nestedFieldList != nil { - if c := findFieldComment(pos, nestedFieldList); c != nil { - return c - } - } - } - return nil -} - -func FormatHover(h *HoverJSON, options *Options) (string, error) { - signature := formatSignature(h, options) - - switch options.HoverKind { - case SingleLine: - return h.SingleLine, nil - case NoDocumentation: - return signature, nil - case Structured: - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return string(b), nil - } - - link := formatLink(h, options) - doc := formatDoc(h, options) - - var b strings.Builder - parts := []string{signature, doc, link} - for i, el := range parts { - if el != "" { - b.WriteString(el) - - // Don't write out final newline. - if i == len(parts) { - continue - } - // If any elements of the remainder of the list are non-empty, - // write a newline. - if anyNonEmpty(parts[i+1:]) { - if options.PreferredContentFormat == protocol.Markdown { - b.WriteString("\n\n") - } else { - b.WriteRune('\n') - } - } - } - } - return b.String(), nil -} - -func formatSignature(h *HoverJSON, options *Options) string { - signature := h.Signature - if signature != "" && options.PreferredContentFormat == protocol.Markdown { - signature = fmt.Sprintf("```go\n%s\n```", signature) - } - return signature -} - -func formatLink(h *HoverJSON, options *Options) string { - if !options.LinksInHover || options.LinkTarget == "" || h.LinkPath == "" { - return "" - } - plainLink := BuildLink(options.LinkTarget, h.LinkPath, h.LinkAnchor) - switch options.PreferredContentFormat { - case protocol.Markdown: - return fmt.Sprintf("[`%s` on %s](%s)", h.SymbolName, options.LinkTarget, plainLink) - case protocol.PlainText: - return "" - default: - return plainLink - } -} - -// BuildLink constructs a link with the given target, path, and anchor. -func BuildLink(target, path, anchor string) string { - link := fmt.Sprintf("https://%s/%s", target, path) - if target == "pkg.go.dev" { - link += "?utm_source=gopls" - } - if anchor == "" { - return link - } - return link + "#" + anchor -} - -func formatDoc(h *HoverJSON, options *Options) string { - var doc string - switch options.HoverKind { - case SynopsisDocumentation: - doc = h.Synopsis - case FullDocumentation: - doc = h.FullDocumentation - } - if options.PreferredContentFormat == protocol.Markdown { - return CommentToMarkdown(doc) - } - return doc -} - -func anyNonEmpty(x []string) bool { - for _, el := range x { - if el != "" { - return true - } - } - return false -} diff --git a/internal/lsp/source/implementation.go b/internal/lsp/source/implementation.go deleted file mode 100644 index 6666605a99a..00000000000 --- a/internal/lsp/source/implementation.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "errors" - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/span" -) - -func Implementation(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) ([]protocol.Location, error) { - ctx, done := event.Start(ctx, "source.Implementation") - defer done() - - impls, err := implementations(ctx, snapshot, f, pp) - if err != nil { - return nil, err - } - var locations []protocol.Location - for _, impl := range impls { - if impl.pkg == nil || len(impl.pkg.CompiledGoFiles()) == 0 { - continue - } - rng, err := objToMappedRange(snapshot, impl.pkg, impl.obj) - if err != nil { - return nil, err - } - pr, err := rng.Range() - if err != nil { - return nil, err - } - locations = append(locations, protocol.Location{ - URI: protocol.URIFromSpanURI(rng.URI()), - Range: pr, - }) - } - sort.Slice(locations, func(i, j int) bool { - li, lj := locations[i], locations[j] - if li.URI == lj.URI { - return protocol.CompareRange(li.Range, lj.Range) < 0 - } - return li.URI < lj.URI - }) - return locations, nil -} - -var ErrNotAType = errors.New("not a type name or method") - -// implementations returns the concrete implementations of the specified -// interface, or the interfaces implemented by the specified concrete type. -func implementations(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position) ([]qualifiedObject, error) { - var ( - impls []qualifiedObject - seen = make(map[token.Position]bool) - fset = s.FileSet() - ) - - qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp) - if err != nil { - return nil, err - } - for _, qo := range qos { - var ( - queryType types.Type - queryMethod *types.Func - ) - - switch obj := qo.obj.(type) { - case *types.Func: - queryMethod = obj - if recv := obj.Type().(*types.Signature).Recv(); recv != nil { - queryType = ensurePointer(recv.Type()) - } - case *types.TypeName: - queryType = ensurePointer(obj.Type()) - } - - if queryType == nil { - return nil, ErrNotAType - } - - if types.NewMethodSet(queryType).Len() == 0 { - return nil, nil - } - - // Find all named types, even local types (which can have methods - // due to promotion). - var ( - allNamed []*types.Named - pkgs = make(map[*types.Package]Package) - ) - knownPkgs, err := s.KnownPackages(ctx) - if err != nil { - return nil, err - } - for _, pkg := range knownPkgs { - pkgs[pkg.GetTypes()] = pkg - info := pkg.GetTypesInfo() - for _, obj := range info.Defs { - obj, ok := obj.(*types.TypeName) - // We ignore aliases 'type M = N' to avoid duplicate reporting - // of the Named type N. - if !ok || obj.IsAlias() { - continue - } - if named, ok := obj.Type().(*types.Named); ok { - allNamed = append(allNamed, named) - } - } - } - - // Find all the named types that match our query. - for _, named := range allNamed { - var ( - candObj types.Object = named.Obj() - candType = ensurePointer(named) - ) - - if !concreteImplementsIntf(candType, queryType) { - continue - } - - ms := types.NewMethodSet(candType) - if ms.Len() == 0 { - // Skip empty interfaces. - continue - } - - // If client queried a method, look up corresponding candType method. - if queryMethod != nil { - sel := ms.Lookup(queryMethod.Pkg(), queryMethod.Name()) - if sel == nil { - continue - } - candObj = sel.Obj() - } - - pos := fset.Position(candObj.Pos()) - if candObj == queryMethod || seen[pos] { - continue - } - - seen[pos] = true - - impls = append(impls, qualifiedObject{ - obj: candObj, - pkg: pkgs[candObj.Pkg()], - }) - } - } - - return impls, nil -} - -// concreteImplementsIntf returns true if a is an interface type implemented by -// concrete type b, or vice versa. -func concreteImplementsIntf(a, b types.Type) bool { - aIsIntf, bIsIntf := IsInterface(a), IsInterface(b) - - // Make sure exactly one is an interface type. - if aIsIntf == bIsIntf { - return false - } - - // Rearrange if needed so "a" is the concrete type. - if aIsIntf { - a, b = b, a - } - - return types.AssignableTo(a, b) -} - -// ensurePointer wraps T in a *types.Pointer if T is a named, non-interface -// type. This is useful to make sure you consider a named type's full method -// set. -func ensurePointer(T types.Type) types.Type { - if _, ok := T.(*types.Named); ok && !IsInterface(T) { - return types.NewPointer(T) - } - - return T -} - -type qualifiedObject struct { - obj types.Object - - // pkg is the Package that contains obj's definition. - pkg Package - - // node is the *ast.Ident or *ast.ImportSpec we followed to find obj, if any. - node ast.Node - - // sourcePkg is the Package that contains node, if any. - sourcePkg Package -} - -var ( - errBuiltin = errors.New("builtin object") - errNoObjectFound = errors.New("no object found") -) - -// qualifiedObjsAtProtocolPos returns info for all the type.Objects -// referenced at the given position. An object will be returned for -// every package that the file belongs to, in every typechecking mode -// applicable. -func qualifiedObjsAtProtocolPos(ctx context.Context, s Snapshot, uri span.URI, pp protocol.Position) ([]qualifiedObject, error) { - pkgs, err := s.PackagesForFile(ctx, uri, TypecheckAll, false) - if err != nil { - return nil, err - } - if len(pkgs) == 0 { - return nil, errNoObjectFound - } - pkg := pkgs[0] - pgf, err := pkg.File(uri) - if err != nil { - return nil, err - } - pos, err := pgf.Mapper.Pos(pp) - if err != nil { - return nil, err - } - offset, err := safetoken.Offset(pgf.Tok, pos) - if err != nil { - return nil, err - } - return qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, map[objSearchKey]bool{}) -} - -type objSearchKey struct { - uri span.URI - offset int -} - -// qualifiedObjsAtLocation finds all objects referenced at offset in uri, across -// all packages in the snapshot. -func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, seen map[objSearchKey]bool) ([]qualifiedObject, error) { - if seen[key] { - return nil, nil - } - seen[key] = true - - // We search for referenced objects starting with all packages containing the - // current location, and then repeating the search for every distinct object - // location discovered. - // - // In the common case, there should be at most one additional location to - // consider: the definition of the object referenced by the location. But we - // try to be comprehensive in case we ever support variations on build - // constraints. - - pkgs, err := s.PackagesForFile(ctx, key.uri, TypecheckAll, false) - if err != nil { - return nil, err - } - - // report objects in the order we encounter them. This ensures that the first - // result is at the cursor... - var qualifiedObjs []qualifiedObject - // ...but avoid duplicates. - seenObjs := map[types.Object]bool{} - - for _, searchpkg := range pkgs { - pgf, err := searchpkg.File(key.uri) - if err != nil { - return nil, err - } - pos := pgf.Tok.Pos(key.offset) - path := pathEnclosingObjNode(pgf.File, pos) - if path == nil { - continue - } - var objs []types.Object - switch leaf := path[0].(type) { - case *ast.Ident: - // If leaf represents an implicit type switch object or the type - // switch "assign" variable, expand to all of the type switch's - // implicit objects. - if implicits, _ := typeSwitchImplicits(searchpkg, path); len(implicits) > 0 { - objs = append(objs, implicits...) - } else { - obj := searchpkg.GetTypesInfo().ObjectOf(leaf) - if obj == nil { - return nil, fmt.Errorf("%w for %q", errNoObjectFound, leaf.Name) - } - objs = append(objs, obj) - } - case *ast.ImportSpec: - // Look up the implicit *types.PkgName. - obj := searchpkg.GetTypesInfo().Implicits[leaf] - if obj == nil { - return nil, fmt.Errorf("%w for import %q", errNoObjectFound, ImportPath(leaf)) - } - objs = append(objs, obj) - } - // Get all of the transitive dependencies of the search package. - pkgs := make(map[*types.Package]Package) - var addPkg func(pkg Package) - addPkg = func(pkg Package) { - pkgs[pkg.GetTypes()] = pkg - for _, imp := range pkg.Imports() { - if _, ok := pkgs[imp.GetTypes()]; !ok { - addPkg(imp) - } - } - } - addPkg(searchpkg) - for _, obj := range objs { - if obj.Parent() == types.Universe { - return nil, fmt.Errorf("%q: %w", obj.Name(), errBuiltin) - } - pkg, ok := pkgs[obj.Pkg()] - if !ok { - event.Error(ctx, fmt.Sprintf("no package for obj %s: %v", obj, obj.Pkg()), err) - continue - } - qualifiedObjs = append(qualifiedObjs, qualifiedObject{ - obj: obj, - pkg: pkg, - sourcePkg: searchpkg, - node: path[0], - }) - seenObjs[obj] = true - - // If the qualified object is in another file (or more likely, another - // package), it's possible that there is another copy of it in a package - // that we haven't searched, e.g. a test variant. See golang/go#47564. - // - // In order to be sure we've considered all packages, call - // qualifiedObjsAtLocation recursively for all locations we encounter. We - // could probably be more precise here, only continuing the search if obj - // is in another package, but this should be good enough to find all - // uses. - - pos := obj.Pos() - var uri span.URI - offset := -1 - for _, pgf := range pkg.CompiledGoFiles() { - if pgf.Tok.Base() <= int(pos) && int(pos) <= pgf.Tok.Base()+pgf.Tok.Size() { - var err error - offset, err = safetoken.Offset(pgf.Tok, pos) - if err != nil { - return nil, err - } - uri = pgf.URI - } - } - if offset >= 0 { - otherObjs, err := qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, seen) - if err != nil { - return nil, err - } - for _, other := range otherObjs { - if !seenObjs[other.obj] { - qualifiedObjs = append(qualifiedObjs, other) - seenObjs[other.obj] = true - } - } - } else { - return nil, fmt.Errorf("missing file for position of %q in %q", obj.Name(), obj.Pkg().Name()) - } - } - } - // Return an error if no objects were found since callers will assume that - // the slice has at least 1 element. - if len(qualifiedObjs) == 0 { - return nil, errNoObjectFound - } - return qualifiedObjs, nil -} - -// pathEnclosingObjNode returns the AST path to the object-defining -// node associated with pos. "Object-defining" means either an -// *ast.Ident mapped directly to a types.Object or an ast.Node mapped -// implicitly to a types.Object. -func pathEnclosingObjNode(f *ast.File, pos token.Pos) []ast.Node { - var ( - path []ast.Node - found bool - ) - - ast.Inspect(f, func(n ast.Node) bool { - if found { - return false - } - - if n == nil { - path = path[:len(path)-1] - return false - } - - path = append(path, n) - - switch n := n.(type) { - case *ast.Ident: - // Include the position directly after identifier. This handles - // the common case where the cursor is right after the - // identifier the user is currently typing. Previously we - // handled this by calling astutil.PathEnclosingInterval twice, - // once for "pos" and once for "pos-1". - found = n.Pos() <= pos && pos <= n.End() - case *ast.ImportSpec: - if n.Path.Pos() <= pos && pos < n.Path.End() { - found = true - // If import spec has a name, add name to path even though - // position isn't in the name. - if n.Name != nil { - path = append(path, n.Name) - } - } - case *ast.StarExpr: - // Follow star expressions to the inner identifier. - if pos == n.Star { - pos = n.X.Pos() - } - } - - return !found - }) - - if len(path) == 0 { - return nil - } - - // Reverse path so leaf is first element. - for i := 0; i < len(path)/2; i++ { - path[i], path[len(path)-1-i] = path[len(path)-1-i], path[i] - } - - return path -} diff --git a/internal/lsp/source/known_packages.go b/internal/lsp/source/known_packages.go deleted file mode 100644 index d7f229ecc80..00000000000 --- a/internal/lsp/source/known_packages.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/imports" -) - -// KnownPackages returns a list of all known packages -// in the package graph that could potentially be imported -// by the given file. -func KnownPackages(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle) ([]string, error) { - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, fmt.Errorf("GetParsedFile: %w", err) - } - alreadyImported := map[string]struct{}{} - for _, imp := range pgf.File.Imports { - alreadyImported[imp.Path.Value] = struct{}{} - } - pkgs, err := snapshot.CachedImportPaths(ctx) - if err != nil { - return nil, err - } - var ( - seen = make(map[string]struct{}) - paths []string - ) - for path, knownPkg := range pkgs { - gofiles := knownPkg.CompiledGoFiles() - if len(gofiles) == 0 || gofiles[0].File.Name == nil { - continue - } - pkgName := gofiles[0].File.Name.Name - // package main cannot be imported - if pkgName == "main" { - continue - } - // test packages cannot be imported - if knownPkg.ForTest() != "" { - continue - } - // no need to import what the file already imports - if _, ok := alreadyImported[path]; ok { - continue - } - // snapshot.KnownPackages could have multiple versions of a pkg - if _, ok := seen[path]; ok { - continue - } - seen[path] = struct{}{} - // make sure internal packages are importable by the file - if !IsValidImport(pkg.PkgPath(), path) { - continue - } - // naive check on cyclical imports - if isDirectlyCyclical(pkg, knownPkg) { - continue - } - paths = append(paths, path) - seen[path] = struct{}{} - } - err = snapshot.RunProcessEnvFunc(ctx, func(o *imports.Options) error { - var mu sync.Mutex - ctx, cancel := context.WithTimeout(ctx, time.Millisecond*80) - defer cancel() - return imports.GetAllCandidates(ctx, func(ifix imports.ImportFix) { - mu.Lock() - defer mu.Unlock() - if _, ok := seen[ifix.StmtInfo.ImportPath]; ok { - return - } - paths = append(paths, ifix.StmtInfo.ImportPath) - }, "", pgf.URI.Filename(), pkg.GetTypes().Name(), o.Env) - }) - if err != nil { - // if an error occurred, we still have a decent list we can - // show to the user through snapshot.CachedImportPaths - event.Error(ctx, "imports.GetAllCandidates", err) - } - sort.Slice(paths, func(i, j int) bool { - importI, importJ := paths[i], paths[j] - iHasDot := strings.Contains(importI, ".") - jHasDot := strings.Contains(importJ, ".") - if iHasDot && !jHasDot { - return false - } - if jHasDot && !iHasDot { - return true - } - return importI < importJ - }) - return paths, nil -} - -// isDirectlyCyclical checks if imported directly imports pkg. -// It does not (yet) offer a full cyclical check because showing a user -// a list of importable packages already generates a very large list -// and having a few false positives in there could be worth the -// performance snappiness. -func isDirectlyCyclical(pkg, imported Package) bool { - for _, imp := range imported.Imports() { - if imp.PkgPath() == pkg.PkgPath() { - return true - } - } - return false -} diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go deleted file mode 100644 index d1d34efe787..00000000000 --- a/internal/lsp/source/options.go +++ /dev/null @@ -1,1491 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "io" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/asmdecl" - "golang.org/x/tools/go/analysis/passes/assign" - "golang.org/x/tools/go/analysis/passes/atomic" - "golang.org/x/tools/go/analysis/passes/atomicalign" - "golang.org/x/tools/go/analysis/passes/bools" - "golang.org/x/tools/go/analysis/passes/buildtag" - "golang.org/x/tools/go/analysis/passes/cgocall" - "golang.org/x/tools/go/analysis/passes/composite" - "golang.org/x/tools/go/analysis/passes/copylock" - "golang.org/x/tools/go/analysis/passes/deepequalerrors" - "golang.org/x/tools/go/analysis/passes/errorsas" - "golang.org/x/tools/go/analysis/passes/fieldalignment" - "golang.org/x/tools/go/analysis/passes/httpresponse" - "golang.org/x/tools/go/analysis/passes/ifaceassert" - "golang.org/x/tools/go/analysis/passes/loopclosure" - "golang.org/x/tools/go/analysis/passes/lostcancel" - "golang.org/x/tools/go/analysis/passes/nilfunc" - "golang.org/x/tools/go/analysis/passes/nilness" - "golang.org/x/tools/go/analysis/passes/printf" - "golang.org/x/tools/go/analysis/passes/shadow" - "golang.org/x/tools/go/analysis/passes/shift" - "golang.org/x/tools/go/analysis/passes/sortslice" - "golang.org/x/tools/go/analysis/passes/stdmethods" - "golang.org/x/tools/go/analysis/passes/stringintconv" - "golang.org/x/tools/go/analysis/passes/structtag" - "golang.org/x/tools/go/analysis/passes/testinggoroutine" - "golang.org/x/tools/go/analysis/passes/tests" - "golang.org/x/tools/go/analysis/passes/unmarshal" - "golang.org/x/tools/go/analysis/passes/unreachable" - "golang.org/x/tools/go/analysis/passes/unsafeptr" - "golang.org/x/tools/go/analysis/passes/unusedresult" - "golang.org/x/tools/go/analysis/passes/unusedwrite" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/lsp/analysis/embeddirective" - "golang.org/x/tools/internal/lsp/analysis/fillreturns" - "golang.org/x/tools/internal/lsp/analysis/fillstruct" - "golang.org/x/tools/internal/lsp/analysis/infertypeargs" - "golang.org/x/tools/internal/lsp/analysis/nonewvars" - "golang.org/x/tools/internal/lsp/analysis/noresultvalues" - "golang.org/x/tools/internal/lsp/analysis/simplifycompositelit" - "golang.org/x/tools/internal/lsp/analysis/simplifyrange" - "golang.org/x/tools/internal/lsp/analysis/simplifyslice" - "golang.org/x/tools/internal/lsp/analysis/stubmethods" - "golang.org/x/tools/internal/lsp/analysis/undeclaredname" - "golang.org/x/tools/internal/lsp/analysis/unusedparams" - "golang.org/x/tools/internal/lsp/analysis/useany" - "golang.org/x/tools/internal/lsp/command" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/protocol" -) - -var ( - optionsOnce sync.Once - defaultOptions *Options -) - -// DefaultOptions is the options that are used for Gopls execution independent -// of any externally provided configuration (LSP initialization, command -// invocation, etc.). -func DefaultOptions() *Options { - optionsOnce.Do(func() { - var commands []string - for _, c := range command.Commands { - commands = append(commands, c.ID()) - } - defaultOptions = &Options{ - ClientOptions: ClientOptions{ - InsertTextFormat: protocol.PlainTextTextFormat, - PreferredContentFormat: protocol.Markdown, - ConfigurationSupported: true, - DynamicConfigurationSupported: true, - DynamicRegistrationSemanticTokensSupported: true, - DynamicWatchedFilesSupported: true, - LineFoldingOnly: false, - HierarchicalDocumentSymbolSupport: true, - }, - ServerOptions: ServerOptions{ - SupportedCodeActions: map[FileKind]map[protocol.CodeActionKind]bool{ - Go: { - protocol.SourceFixAll: true, - protocol.SourceOrganizeImports: true, - protocol.QuickFix: true, - protocol.RefactorRewrite: true, - protocol.RefactorExtract: true, - }, - Mod: { - protocol.SourceOrganizeImports: true, - protocol.QuickFix: true, - }, - Work: {}, - Sum: {}, - Tmpl: {}, - }, - SupportedCommands: commands, - }, - UserOptions: UserOptions{ - BuildOptions: BuildOptions{ - ExpandWorkspaceToModule: true, - ExperimentalPackageCacheKey: true, - MemoryMode: ModeNormal, - DirectoryFilters: []string{"-node_modules"}, - TemplateExtensions: []string{}, - }, - UIOptions: UIOptions{ - DiagnosticOptions: DiagnosticOptions{ - DiagnosticsDelay: 250 * time.Millisecond, - Annotations: map[Annotation]bool{ - Bounds: true, - Escape: true, - Inline: true, - Nil: true, - }, - }, - DocumentationOptions: DocumentationOptions{ - HoverKind: FullDocumentation, - LinkTarget: "pkg.go.dev", - LinksInHover: true, - }, - NavigationOptions: NavigationOptions{ - ImportShortcut: Both, - SymbolMatcher: SymbolFastFuzzy, - SymbolStyle: DynamicSymbols, - }, - CompletionOptions: CompletionOptions{ - Matcher: Fuzzy, - CompletionBudget: 100 * time.Millisecond, - ExperimentalPostfixCompletions: true, - }, - Codelenses: map[string]bool{ - string(command.Generate): true, - string(command.RegenerateCgo): true, - string(command.Tidy): true, - string(command.GCDetails): false, - string(command.UpgradeDependency): true, - string(command.Vendor): true, - }, - }, - }, - InternalOptions: InternalOptions{ - LiteralCompletions: true, - TempModfile: true, - CompleteUnimported: true, - CompletionDocumentation: true, - DeepCompletion: true, - }, - Hooks: Hooks{ - ComputeEdits: myers.ComputeEdits, - URLRegexp: urlRegexp(), - DefaultAnalyzers: defaultAnalyzers(), - TypeErrorAnalyzers: typeErrorAnalyzers(), - ConvenienceAnalyzers: convenienceAnalyzers(), - StaticcheckAnalyzers: map[string]*Analyzer{}, - GoDiff: true, - }, - } - }) - return defaultOptions -} - -// Options holds various configuration that affects Gopls execution, organized -// by the nature or origin of the settings. -type Options struct { - ClientOptions - ServerOptions - UserOptions - InternalOptions - Hooks -} - -// ClientOptions holds LSP-specific configuration that is provided by the -// client. -type ClientOptions struct { - InsertTextFormat protocol.InsertTextFormat - ConfigurationSupported bool - DynamicConfigurationSupported bool - DynamicRegistrationSemanticTokensSupported bool - DynamicWatchedFilesSupported bool - PreferredContentFormat protocol.MarkupKind - LineFoldingOnly bool - HierarchicalDocumentSymbolSupport bool - SemanticTypes []string - SemanticMods []string - RelatedInformationSupported bool - CompletionTags bool - CompletionDeprecated bool -} - -// ServerOptions holds LSP-specific configuration that is provided by the -// server. -type ServerOptions struct { - SupportedCodeActions map[FileKind]map[protocol.CodeActionKind]bool - SupportedCommands []string -} - -type BuildOptions struct { - // BuildFlags is the set of flags passed on to the build system when invoked. - // It is applied to queries like `go list`, which is used when discovering files. - // The most common use is to set `-tags`. - BuildFlags []string - - // Env adds environment variables to external commands run by `gopls`, most notably `go list`. - Env map[string]string - - // DirectoryFilters can be used to exclude unwanted directories from the - // workspace. By default, all directories are included. Filters are an - // operator, `+` to include and `-` to exclude, followed by a path prefix - // relative to the workspace folder. They are evaluated in order, and - // the last filter that applies to a path controls whether it is included. - // The path prefix can be empty, so an initial `-` excludes everything. - // - // Examples: - // - // Exclude node_modules: `-node_modules` - // - // Include only project_a: `-` (exclude everything), `+project_a` - // - // Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules` - DirectoryFilters []string - - // TemplateExtensions gives the extensions of file names that are treateed - // as template files. (The extension - // is the part of the file name after the final dot.) - TemplateExtensions []string - - // MemoryMode controls the tradeoff `gopls` makes between memory usage and - // correctness. - // - // Values other than `Normal` are untested and may break in surprising ways. - MemoryMode MemoryMode `status:"experimental"` - - // ExpandWorkspaceToModule instructs `gopls` to adjust the scope of the - // workspace to find the best available module root. `gopls` first looks for - // a go.mod file in any parent directory of the workspace folder, expanding - // the scope to that directory if it exists. If no viable parent directory is - // found, gopls will check if there is exactly one child directory containing - // a go.mod file, narrowing the scope to that directory if it exists. - ExpandWorkspaceToModule bool `status:"experimental"` - - // ExperimentalWorkspaceModule opts a user into the experimental support - // for multi-module workspaces. - ExperimentalWorkspaceModule bool `status:"experimental"` - - // ExperimentalPackageCacheKey controls whether to use a coarser cache key - // for package type information to increase cache hits. This setting removes - // the user's environment, build flags, and working directory from the cache - // key, which should be a safe change as all relevant inputs into the type - // checking pass are already hashed into the key. This is temporarily guarded - // by an experiment because caching behavior is subtle and difficult to - // comprehensively test. - ExperimentalPackageCacheKey bool `status:"experimental"` - - // AllowModfileModifications disables -mod=readonly, allowing imports from - // out-of-scope modules. This option will eventually be removed. - AllowModfileModifications bool `status:"experimental"` - - // AllowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module - // downloads rather than requiring user action. This option will eventually - // be removed. - AllowImplicitNetworkAccess bool `status:"experimental"` - - // ExperimentalUseInvalidMetadata enables gopls to fall back on outdated - // package metadata to provide editor features if the go command fails to - // load packages for some reason (like an invalid go.mod file). This will - // eventually be the default behavior, and this setting will be removed. - ExperimentalUseInvalidMetadata bool `status:"experimental"` -} - -type UIOptions struct { - DocumentationOptions - CompletionOptions - NavigationOptions - DiagnosticOptions - - // Codelenses overrides the enabled/disabled state of code lenses. See the - // "Code Lenses" section of the - // [Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses) - // for the list of supported lenses. - // - // Example Usage: - // - // ```json5 - // "gopls": { - // ... - // "codelenses": { - // "generate": false, // Don't show the `go generate` lens. - // "gc_details": true // Show a code lens toggling the display of gc's choices. - // } - // ... - // } - // ``` - Codelenses map[string]bool - - // SemanticTokens controls whether the LSP server will send - // semantic tokens to the client. - SemanticTokens bool `status:"experimental"` -} - -type CompletionOptions struct { - // Placeholders enables placeholders for function parameters or struct - // fields in completion responses. - UsePlaceholders bool - - // CompletionBudget is the soft latency goal for completion requests. Most - // requests finish in a couple milliseconds, but in some cases deep - // completions can take much longer. As we use up our budget we - // dynamically reduce the search scope to ensure we return timely - // results. Zero means unlimited. - CompletionBudget time.Duration `status:"debug"` - - // Matcher sets the algorithm that is used when calculating completion - // candidates. - Matcher Matcher `status:"advanced"` - - // ExperimentalPostfixCompletions enables artificial method snippets - // such as "someSlice.sort!". - ExperimentalPostfixCompletions bool `status:"experimental"` -} - -type DocumentationOptions struct { - // HoverKind controls the information that appears in the hover text. - // SingleLine and Structured are intended for use only by authors of editor plugins. - HoverKind HoverKind - - // LinkTarget controls where documentation links go. - // It might be one of: - // - // * `"godoc.org"` - // * `"pkg.go.dev"` - // - // If company chooses to use its own `godoc.org`, its address can be used as well. - LinkTarget string - - // LinksInHover toggles the presence of links to documentation in hover. - LinksInHover bool -} - -type FormattingOptions struct { - // Local is the equivalent of the `goimports -local` flag, which puts - // imports beginning with this string after third-party packages. It should - // be the prefix of the import path whose imports should be grouped - // separately. - Local string - - // Gofumpt indicates if we should run gofumpt formatting. - Gofumpt bool -} - -type DiagnosticOptions struct { - // Analyses specify analyses that the user would like to enable or disable. - // A map of the names of analysis passes that should be enabled/disabled. - // A full list of analyzers that gopls uses can be found - // [here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md). - // - // Example Usage: - // - // ```json5 - // ... - // "analyses": { - // "unreachable": false, // Disable the unreachable analyzer. - // "unusedparams": true // Enable the unusedparams analyzer. - // } - // ... - // ``` - Analyses map[string]bool - - // Staticcheck enables additional analyses from staticcheck.io. - Staticcheck bool `status:"experimental"` - - // Annotations specifies the various kinds of optimization diagnostics - // that should be reported by the gc_details command. - Annotations map[Annotation]bool `status:"experimental"` - - // DiagnosticsDelay controls the amount of time that gopls waits - // after the most recent file modification before computing deep diagnostics. - // Simple diagnostics (parsing and type-checking) are always run immediately - // on recently modified packages. - // - // This option must be set to a valid duration string, for example `"250ms"`. - DiagnosticsDelay time.Duration `status:"advanced"` - - // ExperimentalWatchedFileDelay controls the amount of time that gopls waits - // for additional workspace/didChangeWatchedFiles notifications to arrive, - // before processing all such notifications in a single batch. This is - // intended for use by LSP clients that don't support their own batching of - // file system notifications. - // - // This option must be set to a valid duration string, for example `"100ms"`. - ExperimentalWatchedFileDelay time.Duration `status:"experimental"` -} - -type NavigationOptions struct { - // ImportShortcut specifies whether import statements should link to - // documentation or go to definitions. - ImportShortcut ImportShortcut - - // SymbolMatcher sets the algorithm that is used when finding workspace symbols. - SymbolMatcher SymbolMatcher `status:"advanced"` - - // SymbolStyle controls how symbols are qualified in symbol responses. - // - // Example Usage: - // - // ```json5 - // "gopls": { - // ... - // "symbolStyle": "Dynamic", - // ... - // } - // ``` - SymbolStyle SymbolStyle `status:"advanced"` -} - -// UserOptions holds custom Gopls configuration (not part of the LSP) that is -// modified by the client. -type UserOptions struct { - BuildOptions - UIOptions - FormattingOptions - - // VerboseOutput enables additional debug logging. - VerboseOutput bool `status:"debug"` -} - -// EnvSlice returns Env as a slice of k=v strings. -func (u *UserOptions) EnvSlice() []string { - var result []string - for k, v := range u.Env { - result = append(result, fmt.Sprintf("%v=%v", k, v)) - } - return result -} - -// SetEnvSlice sets Env from a slice of k=v strings. -func (u *UserOptions) SetEnvSlice(env []string) { - u.Env = map[string]string{} - for _, kv := range env { - split := strings.SplitN(kv, "=", 2) - if len(split) != 2 { - continue - } - u.Env[split[0]] = split[1] - } -} - -// Hooks contains configuration that is provided to the Gopls command by the -// main package. -type Hooks struct { - // LicensesText holds third party licenses for software used by gopls. - LicensesText string - - // TODO(rfindley): is this even necessary? - GoDiff bool - - // Whether staticcheck is supported. - StaticcheckSupported bool - - // ComputeEdits is used to compute edits between file versions. - ComputeEdits diff.ComputeEdits - - // URLRegexp is used to find potential URLs in comments/strings. - // - // Not all matches are shown to the user: if the matched URL is not detected - // as valid, it will be skipped. - URLRegexp *regexp.Regexp - - // GofumptFormat allows the gopls module to wire-in a call to - // gofumpt/format.Source. langVersion and modulePath are used for some - // Gofumpt formatting rules -- see the Gofumpt documentation for details. - GofumptFormat func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) - - DefaultAnalyzers map[string]*Analyzer - TypeErrorAnalyzers map[string]*Analyzer - ConvenienceAnalyzers map[string]*Analyzer - StaticcheckAnalyzers map[string]*Analyzer - - // Govulncheck is the implementation of the Govulncheck gopls command. - Govulncheck func(context.Context, *packages.Config, command.VulncheckArgs) (command.VulncheckResult, error) -} - -// InternalOptions contains settings that are not intended for use by the -// average user. These may be settings used by tests or outdated settings that -// will soon be deprecated. Some of these settings may not even be configurable -// by the user. -type InternalOptions struct { - // LiteralCompletions controls whether literal candidates such as - // "&someStruct{}" are offered. Tests disable this flag to simplify - // their expected values. - LiteralCompletions bool - - // VerboseWorkDoneProgress controls whether the LSP server should send - // progress reports for all work done outside the scope of an RPC. - // Used by the regression tests. - VerboseWorkDoneProgress bool - - // The following options were previously available to users, but they - // really shouldn't be configured by anyone other than "power users". - - // CompletionDocumentation enables documentation with completion results. - CompletionDocumentation bool - - // CompleteUnimported enables completion for packages that you do not - // currently import. - CompleteUnimported bool - - // DeepCompletion enables the ability to return completions from deep - // inside relevant entities, rather than just the locally accessible ones. - // - // Consider this example: - // - // ```go - // package main - // - // import "fmt" - // - // type wrapString struct { - // str string - // } - // - // func main() { - // x := wrapString{"hello world"} - // fmt.Printf(<>) - // } - // ``` - // - // At the location of the `<>` in this program, deep completion would suggest - // the result `x.str`. - DeepCompletion bool - - // TempModfile controls the use of the -modfile flag in Go 1.14. - TempModfile bool - - // ShowBugReports causes a message to be shown when the first bug is reported - // on the server. - // This option applies only during initialization. - ShowBugReports bool -} - -type ImportShortcut string - -const ( - Both ImportShortcut = "Both" - Link ImportShortcut = "Link" - Definition ImportShortcut = "Definition" -) - -func (s ImportShortcut) ShowLinks() bool { - return s == Both || s == Link -} - -func (s ImportShortcut) ShowDefinition() bool { - return s == Both || s == Definition -} - -type Matcher string - -const ( - Fuzzy Matcher = "Fuzzy" - CaseInsensitive Matcher = "CaseInsensitive" - CaseSensitive Matcher = "CaseSensitive" -) - -type SymbolMatcher string - -const ( - SymbolFuzzy SymbolMatcher = "Fuzzy" - SymbolFastFuzzy SymbolMatcher = "FastFuzzy" - SymbolCaseInsensitive SymbolMatcher = "CaseInsensitive" - SymbolCaseSensitive SymbolMatcher = "CaseSensitive" -) - -type SymbolStyle string - -const ( - // PackageQualifiedSymbols is package qualified symbols i.e. - // "pkg.Foo.Field". - PackageQualifiedSymbols SymbolStyle = "Package" - // FullyQualifiedSymbols is fully qualified symbols, i.e. - // "path/to/pkg.Foo.Field". - FullyQualifiedSymbols SymbolStyle = "Full" - // DynamicSymbols uses whichever qualifier results in the highest scoring - // match for the given symbol query. Here a "qualifier" is any "/" or "." - // delimited suffix of the fully qualified symbol. i.e. "to/pkg.Foo.Field" or - // just "Foo.Field". - DynamicSymbols SymbolStyle = "Dynamic" -) - -type HoverKind string - -const ( - SingleLine HoverKind = "SingleLine" - NoDocumentation HoverKind = "NoDocumentation" - SynopsisDocumentation HoverKind = "SynopsisDocumentation" - FullDocumentation HoverKind = "FullDocumentation" - - // Structured is an experimental setting that returns a structured hover format. - // This format separates the signature from the documentation, so that the client - // can do more manipulation of these fields. - // - // This should only be used by clients that support this behavior. - Structured HoverKind = "Structured" -) - -type MemoryMode string - -const ( - ModeNormal MemoryMode = "Normal" - // In DegradeClosed mode, `gopls` will collect less information about - // packages without open files. As a result, features like Find - // References and Rename will miss results in such packages. - ModeDegradeClosed MemoryMode = "DegradeClosed" -) - -type OptionResults []OptionResult - -type OptionResult struct { - Name string - Value interface{} - Error error -} - -type OptionState int - -const ( - OptionHandled = OptionState(iota) - OptionDeprecated - OptionUnexpected -) - -type LinkTarget string - -func SetOptions(options *Options, opts interface{}) OptionResults { - var results OptionResults - switch opts := opts.(type) { - case nil: - case map[string]interface{}: - // If the user's settings contains "allExperiments", set that first, - // and then let them override individual settings independently. - var enableExperiments bool - for name, value := range opts { - if b, ok := value.(bool); name == "allExperiments" && ok && b { - enableExperiments = true - options.EnableAllExperiments() - } - } - seen := map[string]struct{}{} - for name, value := range opts { - results = append(results, options.set(name, value, seen)) - } - // Finally, enable any experimental features that are specified in - // maps, which allows users to individually toggle them on or off. - if enableExperiments { - options.enableAllExperimentMaps() - } - default: - results = append(results, OptionResult{ - Value: opts, - Error: fmt.Errorf("Invalid options type %T", opts), - }) - } - return results -} - -func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) { - // Check if the client supports snippets in completion items. - if c := caps.TextDocument.Completion; c.CompletionItem.SnippetSupport { - o.InsertTextFormat = protocol.SnippetTextFormat - } - // Check if the client supports configuration messages. - o.ConfigurationSupported = caps.Workspace.Configuration - o.DynamicConfigurationSupported = caps.Workspace.DidChangeConfiguration.DynamicRegistration - o.DynamicRegistrationSemanticTokensSupported = caps.TextDocument.SemanticTokens.DynamicRegistration - o.DynamicWatchedFilesSupported = caps.Workspace.DidChangeWatchedFiles.DynamicRegistration - - // Check which types of content format are supported by this client. - if hover := caps.TextDocument.Hover; len(hover.ContentFormat) > 0 { - o.PreferredContentFormat = hover.ContentFormat[0] - } - // Check if the client supports only line folding. - fr := caps.TextDocument.FoldingRange - o.LineFoldingOnly = fr.LineFoldingOnly - // Check if the client supports hierarchical document symbols. - o.HierarchicalDocumentSymbolSupport = caps.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport - // Check if the client supports semantic tokens - o.SemanticTypes = caps.TextDocument.SemanticTokens.TokenTypes - o.SemanticMods = caps.TextDocument.SemanticTokens.TokenModifiers - // we don't need Requests, as we support full functionality - // we don't need Formats, as there is only one, for now - - // Check if the client supports diagnostic related information. - o.RelatedInformationSupported = caps.TextDocument.PublishDiagnostics.RelatedInformation - // Check if the client completion support includes tags (preferred) or deprecation - if caps.TextDocument.Completion.CompletionItem.TagSupport.ValueSet != nil { - o.CompletionTags = true - } else if caps.TextDocument.Completion.CompletionItem.DeprecatedSupport { - o.CompletionDeprecated = true - } -} - -func (o *Options) Clone() *Options { - result := &Options{ - ClientOptions: o.ClientOptions, - InternalOptions: o.InternalOptions, - Hooks: Hooks{ - GoDiff: o.GoDiff, - StaticcheckSupported: o.StaticcheckSupported, - ComputeEdits: o.ComputeEdits, - GofumptFormat: o.GofumptFormat, - URLRegexp: o.URLRegexp, - Govulncheck: o.Govulncheck, - }, - ServerOptions: o.ServerOptions, - UserOptions: o.UserOptions, - } - // Fully clone any slice or map fields. Only Hooks, ExperimentalOptions, - // and UserOptions can be modified. - copyStringMap := func(src map[string]bool) map[string]bool { - dst := make(map[string]bool) - for k, v := range src { - dst[k] = v - } - return dst - } - result.Analyses = copyStringMap(o.Analyses) - result.Codelenses = copyStringMap(o.Codelenses) - - copySlice := func(src []string) []string { - dst := make([]string, len(src)) - copy(dst, src) - return dst - } - result.SetEnvSlice(o.EnvSlice()) - result.BuildFlags = copySlice(o.BuildFlags) - result.DirectoryFilters = copySlice(o.DirectoryFilters) - - copyAnalyzerMap := func(src map[string]*Analyzer) map[string]*Analyzer { - dst := make(map[string]*Analyzer) - for k, v := range src { - dst[k] = v - } - return dst - } - result.DefaultAnalyzers = copyAnalyzerMap(o.DefaultAnalyzers) - result.TypeErrorAnalyzers = copyAnalyzerMap(o.TypeErrorAnalyzers) - result.ConvenienceAnalyzers = copyAnalyzerMap(o.ConvenienceAnalyzers) - result.StaticcheckAnalyzers = copyAnalyzerMap(o.StaticcheckAnalyzers) - return result -} - -func (o *Options) AddStaticcheckAnalyzer(a *analysis.Analyzer, enabled bool, severity protocol.DiagnosticSeverity) { - o.StaticcheckAnalyzers[a.Name] = &Analyzer{ - Analyzer: a, - Enabled: enabled, - Severity: severity, - } -} - -// EnableAllExperiments turns on all of the experimental "off-by-default" -// features offered by gopls. Any experimental features specified in maps -// should be enabled in enableAllExperimentMaps. -func (o *Options) EnableAllExperiments() { - o.SemanticTokens = true - o.ExperimentalPostfixCompletions = true - o.ExperimentalUseInvalidMetadata = true - o.ExperimentalWatchedFileDelay = 50 * time.Millisecond - o.SymbolMatcher = SymbolFastFuzzy -} - -func (o *Options) enableAllExperimentMaps() { - if _, ok := o.Codelenses[string(command.GCDetails)]; !ok { - o.Codelenses[string(command.GCDetails)] = true - } - if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok { - o.Analyses[unusedparams.Analyzer.Name] = true - } -} - -func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult { - // Flatten the name in case we get options with a hierarchy. - split := strings.Split(name, ".") - name = split[len(split)-1] - - result := OptionResult{Name: name, Value: value} - if _, ok := seen[name]; ok { - result.errorf("duplicate configuration for %s", name) - } - seen[name] = struct{}{} - - switch name { - case "env": - menv, ok := value.(map[string]interface{}) - if !ok { - result.errorf("invalid type %T, expect map", value) - break - } - if o.Env == nil { - o.Env = make(map[string]string) - } - for k, v := range menv { - o.Env[k] = fmt.Sprint(v) - } - - case "buildFlags": - iflags, ok := value.([]interface{}) - if !ok { - result.errorf("invalid type %T, expect list", value) - break - } - flags := make([]string, 0, len(iflags)) - for _, flag := range iflags { - flags = append(flags, fmt.Sprintf("%s", flag)) - } - o.BuildFlags = flags - case "directoryFilters": - ifilters, ok := value.([]interface{}) - if !ok { - result.errorf("invalid type %T, expect list", value) - break - } - var filters []string - for _, ifilter := range ifilters { - filter := fmt.Sprint(ifilter) - if filter == "" || (filter[0] != '+' && filter[0] != '-') { - result.errorf("invalid filter %q, must start with + or -", filter) - return result - } - filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/")) - } - o.DirectoryFilters = filters - case "memoryMode": - if s, ok := result.asOneOf( - string(ModeNormal), - string(ModeDegradeClosed), - ); ok { - o.MemoryMode = MemoryMode(s) - } - case "completionDocumentation": - result.setBool(&o.CompletionDocumentation) - case "usePlaceholders": - result.setBool(&o.UsePlaceholders) - case "deepCompletion": - result.setBool(&o.DeepCompletion) - case "completeUnimported": - result.setBool(&o.CompleteUnimported) - case "completionBudget": - result.setDuration(&o.CompletionBudget) - case "matcher": - if s, ok := result.asOneOf( - string(Fuzzy), - string(CaseSensitive), - string(CaseInsensitive), - ); ok { - o.Matcher = Matcher(s) - } - - case "symbolMatcher": - if s, ok := result.asOneOf( - string(SymbolFuzzy), - string(SymbolFastFuzzy), - string(SymbolCaseInsensitive), - string(SymbolCaseSensitive), - ); ok { - o.SymbolMatcher = SymbolMatcher(s) - } - - case "symbolStyle": - if s, ok := result.asOneOf( - string(FullyQualifiedSymbols), - string(PackageQualifiedSymbols), - string(DynamicSymbols), - ); ok { - o.SymbolStyle = SymbolStyle(s) - } - - case "hoverKind": - if s, ok := result.asOneOf( - string(NoDocumentation), - string(SingleLine), - string(SynopsisDocumentation), - string(FullDocumentation), - string(Structured), - ); ok { - o.HoverKind = HoverKind(s) - } - - case "linkTarget": - result.setString(&o.LinkTarget) - - case "linksInHover": - result.setBool(&o.LinksInHover) - - case "importShortcut": - if s, ok := result.asOneOf(string(Both), string(Link), string(Definition)); ok { - o.ImportShortcut = ImportShortcut(s) - } - - case "analyses": - result.setBoolMap(&o.Analyses) - - case "annotations": - result.setAnnotationMap(&o.Annotations) - - case "codelenses", "codelens": - var lensOverrides map[string]bool - result.setBoolMap(&lensOverrides) - if result.Error == nil { - if o.Codelenses == nil { - o.Codelenses = make(map[string]bool) - } - for lens, enabled := range lensOverrides { - o.Codelenses[lens] = enabled - } - } - - // codelens is deprecated, but still works for now. - // TODO(rstambler): Remove this for the gopls/v0.7.0 release. - if name == "codelens" { - result.deprecated("codelenses") - } - - case "staticcheck": - if v, ok := result.asBool(); ok { - o.Staticcheck = v - if v && !o.StaticcheckSupported { - // Warn if the user is trying to enable staticcheck, but staticcheck is - // unsupported. - result.Error = fmt.Errorf("applying setting %q: staticcheck is not supported at %s\n"+ - "\trebuild gopls with a more recent version of Go", result.Name, runtime.Version()) - } - } - - case "local": - result.setString(&o.Local) - - case "verboseOutput": - result.setBool(&o.VerboseOutput) - - case "verboseWorkDoneProgress": - result.setBool(&o.VerboseWorkDoneProgress) - - case "tempModfile": - result.setBool(&o.TempModfile) - - case "showBugReports": - result.setBool(&o.ShowBugReports) - - case "gofumpt": - result.setBool(&o.Gofumpt) - - case "semanticTokens": - result.setBool(&o.SemanticTokens) - - case "expandWorkspaceToModule": - result.setBool(&o.ExpandWorkspaceToModule) - - case "experimentalPostfixCompletions": - result.setBool(&o.ExperimentalPostfixCompletions) - - case "experimentalWorkspaceModule": // TODO(rfindley): suggest go.work on go1.18+ - result.setBool(&o.ExperimentalWorkspaceModule) - - case "experimentalTemplateSupport": // TODO(pjw): remove after June 2022 - result.deprecated("") - - case "templateExtensions": - if iexts, ok := value.([]interface{}); ok { - ans := []string{} - for _, x := range iexts { - ans = append(ans, fmt.Sprint(x)) - } - o.TemplateExtensions = ans - break - } - if value == nil { - o.TemplateExtensions = nil - break - } - result.errorf(fmt.Sprintf("unexpected type %T not []string", value)) - case "experimentalDiagnosticsDelay", "diagnosticsDelay": - if name == "experimentalDiagnosticsDelay" { - result.deprecated("diagnosticsDelay") - } - result.setDuration(&o.DiagnosticsDelay) - - case "experimentalWatchedFileDelay": - result.setDuration(&o.ExperimentalWatchedFileDelay) - - case "experimentalPackageCacheKey": - result.setBool(&o.ExperimentalPackageCacheKey) - - case "allowModfileModifications": - result.setBool(&o.AllowModfileModifications) - - case "allowImplicitNetworkAccess": - result.setBool(&o.AllowImplicitNetworkAccess) - - case "experimentalUseInvalidMetadata": - result.setBool(&o.ExperimentalUseInvalidMetadata) - - case "allExperiments": - // This setting should be handled before all of the other options are - // processed, so do nothing here. - - // Replaced settings. - case "experimentalDisabledAnalyses": - result.deprecated("analyses") - - case "disableDeepCompletion": - result.deprecated("deepCompletion") - - case "disableFuzzyMatching": - result.deprecated("fuzzyMatching") - - case "wantCompletionDocumentation": - result.deprecated("completionDocumentation") - - case "wantUnimportedCompletions": - result.deprecated("completeUnimported") - - case "fuzzyMatching": - result.deprecated("matcher") - - case "caseSensitiveCompletion": - result.deprecated("matcher") - - // Deprecated settings. - case "wantSuggestedFixes": - result.deprecated("") - - case "noIncrementalSync": - result.deprecated("") - - case "watchFileChanges": - result.deprecated("") - - case "go-diff": - result.deprecated("") - - default: - result.unexpected() - } - return result -} - -func (r *OptionResult) errorf(msg string, values ...interface{}) { - prefix := fmt.Sprintf("parsing setting %q: ", r.Name) - r.Error = fmt.Errorf(prefix+msg, values...) -} - -// A SoftError is an error that does not affect the functionality of gopls. -type SoftError struct { - msg string -} - -func (e *SoftError) Error() string { - return e.msg -} - -func (r *OptionResult) deprecated(replacement string) { - msg := fmt.Sprintf("gopls setting %q is deprecated", r.Name) - if replacement != "" { - msg = fmt.Sprintf("%s, use %q instead", msg, replacement) - } - r.Error = &SoftError{msg} -} - -func (r *OptionResult) unexpected() { - r.Error = fmt.Errorf("unexpected gopls setting %q", r.Name) -} - -func (r *OptionResult) asBool() (bool, bool) { - b, ok := r.Value.(bool) - if !ok { - r.errorf("invalid type %T, expect bool", r.Value) - return false, false - } - return b, true -} - -func (r *OptionResult) setBool(b *bool) { - if v, ok := r.asBool(); ok { - *b = v - } -} - -func (r *OptionResult) setDuration(d *time.Duration) { - if v, ok := r.asString(); ok { - parsed, err := time.ParseDuration(v) - if err != nil { - r.errorf("failed to parse duration %q: %v", v, err) - return - } - *d = parsed - } -} - -func (r *OptionResult) setBoolMap(bm *map[string]bool) { - m := r.asBoolMap() - *bm = m -} - -func (r *OptionResult) setAnnotationMap(bm *map[Annotation]bool) { - all := r.asBoolMap() - if all == nil { - return - } - // Default to everything enabled by default. - m := make(map[Annotation]bool) - for k, enabled := range all { - a, err := asOneOf( - k, - string(Nil), - string(Escape), - string(Inline), - string(Bounds), - ) - if err != nil { - // In case of an error, process any legacy values. - switch k { - case "noEscape": - m[Escape] = false - r.errorf(`"noEscape" is deprecated, set "Escape: false" instead`) - case "noNilcheck": - m[Nil] = false - r.errorf(`"noNilcheck" is deprecated, set "Nil: false" instead`) - case "noInline": - m[Inline] = false - r.errorf(`"noInline" is deprecated, set "Inline: false" instead`) - case "noBounds": - m[Bounds] = false - r.errorf(`"noBounds" is deprecated, set "Bounds: false" instead`) - default: - r.errorf(err.Error()) - } - continue - } - m[Annotation(a)] = enabled - } - *bm = m -} - -func (r *OptionResult) asBoolMap() map[string]bool { - all, ok := r.Value.(map[string]interface{}) - if !ok { - r.errorf("invalid type %T for map[string]bool option", r.Value) - return nil - } - m := make(map[string]bool) - for a, enabled := range all { - if enabled, ok := enabled.(bool); ok { - m[a] = enabled - } else { - r.errorf("invalid type %T for map key %q", enabled, a) - return m - } - } - return m -} - -func (r *OptionResult) asString() (string, bool) { - b, ok := r.Value.(string) - if !ok { - r.errorf("invalid type %T, expect string", r.Value) - return "", false - } - return b, true -} - -func (r *OptionResult) asOneOf(options ...string) (string, bool) { - s, ok := r.asString() - if !ok { - return "", false - } - s, err := asOneOf(s, options...) - if err != nil { - r.errorf(err.Error()) - } - return s, err == nil -} - -func asOneOf(str string, options ...string) (string, error) { - lower := strings.ToLower(str) - for _, opt := range options { - if strings.ToLower(opt) == lower { - return opt, nil - } - } - return "", fmt.Errorf("invalid option %q for enum", str) -} - -func (r *OptionResult) setString(s *string) { - if v, ok := r.asString(); ok { - *s = v - } -} - -// EnabledAnalyzers returns all of the analyzers enabled for the given -// snapshot. -func EnabledAnalyzers(snapshot Snapshot) (analyzers []*Analyzer) { - for _, a := range snapshot.View().Options().DefaultAnalyzers { - if a.IsEnabled(snapshot.View()) { - analyzers = append(analyzers, a) - } - } - for _, a := range snapshot.View().Options().TypeErrorAnalyzers { - if a.IsEnabled(snapshot.View()) { - analyzers = append(analyzers, a) - } - } - for _, a := range snapshot.View().Options().ConvenienceAnalyzers { - if a.IsEnabled(snapshot.View()) { - analyzers = append(analyzers, a) - } - } - for _, a := range snapshot.View().Options().StaticcheckAnalyzers { - if a.IsEnabled(snapshot.View()) { - analyzers = append(analyzers, a) - } - } - return analyzers -} - -func typeErrorAnalyzers() map[string]*Analyzer { - return map[string]*Analyzer{ - fillreturns.Analyzer.Name: { - Analyzer: fillreturns.Analyzer, - ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, - Enabled: true, - }, - nonewvars.Analyzer.Name: { - Analyzer: nonewvars.Analyzer, - Enabled: true, - }, - noresultvalues.Analyzer.Name: { - Analyzer: noresultvalues.Analyzer, - Enabled: true, - }, - undeclaredname.Analyzer.Name: { - Analyzer: undeclaredname.Analyzer, - Fix: UndeclaredName, - Enabled: true, - }, - } -} - -func convenienceAnalyzers() map[string]*Analyzer { - return map[string]*Analyzer{ - fillstruct.Analyzer.Name: { - Analyzer: fillstruct.Analyzer, - Fix: FillStruct, - Enabled: true, - ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite}, - }, - stubmethods.Analyzer.Name: { - Analyzer: stubmethods.Analyzer, - ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite}, - Fix: StubMethods, - Enabled: true, - }, - } -} - -func defaultAnalyzers() map[string]*Analyzer { - return map[string]*Analyzer{ - // The traditional vet suite: - asmdecl.Analyzer.Name: {Analyzer: asmdecl.Analyzer, Enabled: true}, - assign.Analyzer.Name: {Analyzer: assign.Analyzer, Enabled: true}, - atomic.Analyzer.Name: {Analyzer: atomic.Analyzer, Enabled: true}, - bools.Analyzer.Name: {Analyzer: bools.Analyzer, Enabled: true}, - buildtag.Analyzer.Name: {Analyzer: buildtag.Analyzer, Enabled: true}, - cgocall.Analyzer.Name: {Analyzer: cgocall.Analyzer, Enabled: true}, - composite.Analyzer.Name: {Analyzer: composite.Analyzer, Enabled: true}, - copylock.Analyzer.Name: {Analyzer: copylock.Analyzer, Enabled: true}, - errorsas.Analyzer.Name: {Analyzer: errorsas.Analyzer, Enabled: true}, - httpresponse.Analyzer.Name: {Analyzer: httpresponse.Analyzer, Enabled: true}, - ifaceassert.Analyzer.Name: {Analyzer: ifaceassert.Analyzer, Enabled: true}, - loopclosure.Analyzer.Name: {Analyzer: loopclosure.Analyzer, Enabled: true}, - lostcancel.Analyzer.Name: {Analyzer: lostcancel.Analyzer, Enabled: true}, - nilfunc.Analyzer.Name: {Analyzer: nilfunc.Analyzer, Enabled: true}, - printf.Analyzer.Name: {Analyzer: printf.Analyzer, Enabled: true}, - shift.Analyzer.Name: {Analyzer: shift.Analyzer, Enabled: true}, - stdmethods.Analyzer.Name: {Analyzer: stdmethods.Analyzer, Enabled: true}, - stringintconv.Analyzer.Name: {Analyzer: stringintconv.Analyzer, Enabled: true}, - structtag.Analyzer.Name: {Analyzer: structtag.Analyzer, Enabled: true}, - tests.Analyzer.Name: {Analyzer: tests.Analyzer, Enabled: true}, - unmarshal.Analyzer.Name: {Analyzer: unmarshal.Analyzer, Enabled: true}, - unreachable.Analyzer.Name: {Analyzer: unreachable.Analyzer, Enabled: true}, - unsafeptr.Analyzer.Name: {Analyzer: unsafeptr.Analyzer, Enabled: true}, - unusedresult.Analyzer.Name: {Analyzer: unusedresult.Analyzer, Enabled: true}, - - // Non-vet analyzers: - atomicalign.Analyzer.Name: {Analyzer: atomicalign.Analyzer, Enabled: true}, - deepequalerrors.Analyzer.Name: {Analyzer: deepequalerrors.Analyzer, Enabled: true}, - fieldalignment.Analyzer.Name: {Analyzer: fieldalignment.Analyzer, Enabled: false}, - nilness.Analyzer.Name: {Analyzer: nilness.Analyzer, Enabled: false}, - shadow.Analyzer.Name: {Analyzer: shadow.Analyzer, Enabled: false}, - sortslice.Analyzer.Name: {Analyzer: sortslice.Analyzer, Enabled: true}, - testinggoroutine.Analyzer.Name: {Analyzer: testinggoroutine.Analyzer, Enabled: true}, - unusedparams.Analyzer.Name: {Analyzer: unusedparams.Analyzer, Enabled: false}, - unusedwrite.Analyzer.Name: {Analyzer: unusedwrite.Analyzer, Enabled: false}, - useany.Analyzer.Name: {Analyzer: useany.Analyzer, Enabled: false}, - infertypeargs.Analyzer.Name: {Analyzer: infertypeargs.Analyzer, Enabled: true}, - embeddirective.Analyzer.Name: {Analyzer: embeddirective.Analyzer, Enabled: true}, - - // gofmt -s suite: - simplifycompositelit.Analyzer.Name: { - Analyzer: simplifycompositelit.Analyzer, - Enabled: true, - ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, - }, - simplifyrange.Analyzer.Name: { - Analyzer: simplifyrange.Analyzer, - Enabled: true, - ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, - }, - simplifyslice.Analyzer.Name: { - Analyzer: simplifyslice.Analyzer, - Enabled: true, - ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix}, - }, - } -} - -func urlRegexp() *regexp.Regexp { - // Ensure links are matched as full words, not anywhere. - re := regexp.MustCompile(`\b(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?\b`) - re.Longest() - return re -} - -type APIJSON struct { - Options map[string][]*OptionJSON - Commands []*CommandJSON - Lenses []*LensJSON - Analyzers []*AnalyzerJSON -} - -type OptionJSON struct { - Name string - Type string - Doc string - EnumKeys EnumKeys - EnumValues []EnumValue - Default string - Status string - Hierarchy string -} - -func (o *OptionJSON) String() string { - return o.Name -} - -func (o *OptionJSON) Write(w io.Writer) { - fmt.Fprintf(w, "**%v** *%v*\n\n", o.Name, o.Type) - writeStatus(w, o.Status) - enumValues := collectEnums(o) - fmt.Fprintf(w, "%v%v\nDefault: `%v`.\n\n", o.Doc, enumValues, o.Default) -} - -func writeStatus(section io.Writer, status string) { - switch status { - case "": - case "advanced": - fmt.Fprint(section, "**This is an advanced setting and should not be configured by most `gopls` users.**\n\n") - case "debug": - fmt.Fprint(section, "**This setting is for debugging purposes only.**\n\n") - case "experimental": - fmt.Fprint(section, "**This setting is experimental and may be deleted.**\n\n") - default: - fmt.Fprintf(section, "**Status: %s.**\n\n", status) - } -} - -var parBreakRE = regexp.MustCompile("\n{2,}") - -func collectEnums(opt *OptionJSON) string { - var b strings.Builder - write := func(name, doc string, index, len int) { - if doc != "" { - unbroken := parBreakRE.ReplaceAllString(doc, "\\\n") - fmt.Fprintf(&b, "* %s\n", strings.TrimSpace(unbroken)) - } else { - fmt.Fprintf(&b, "* `%s`\n", name) - } - } - if len(opt.EnumValues) > 0 && opt.Type == "enum" { - b.WriteString("\nMust be one of:\n\n") - for i, val := range opt.EnumValues { - write(val.Value, val.Doc, i, len(opt.EnumValues)) - } - } else if len(opt.EnumKeys.Keys) > 0 && shouldShowEnumKeysInSettings(opt.Name) { - b.WriteString("\nCan contain any of:\n\n") - for i, val := range opt.EnumKeys.Keys { - write(val.Name, val.Doc, i, len(opt.EnumKeys.Keys)) - } - } - return b.String() -} - -func shouldShowEnumKeysInSettings(name string) bool { - // Both of these fields have too many possible options to print. - return !hardcodedEnumKeys(name) -} - -func hardcodedEnumKeys(name string) bool { - return name == "analyses" || name == "codelenses" -} - -type EnumKeys struct { - ValueType string - Keys []EnumKey -} - -type EnumKey struct { - Name string - Doc string - Default string -} - -type EnumValue struct { - Value string - Doc string -} - -type CommandJSON struct { - Command string - Title string - Doc string - ArgDoc string - ResultDoc string -} - -func (c *CommandJSON) String() string { - return c.Command -} - -func (c *CommandJSON) Write(w io.Writer) { - fmt.Fprintf(w, "### **%v**\nIdentifier: `%v`\n\n%v\n\n", c.Title, c.Command, c.Doc) - if c.ArgDoc != "" { - fmt.Fprintf(w, "Args:\n\n```\n%s\n```\n\n", c.ArgDoc) - } - if c.ResultDoc != "" { - fmt.Fprintf(w, "Result:\n\n```\n%s\n```\n\n", c.ResultDoc) - } -} - -type LensJSON struct { - Lens string - Title string - Doc string -} - -func (l *LensJSON) String() string { - return l.Title -} - -func (l *LensJSON) Write(w io.Writer) { - fmt.Fprintf(w, "%s (%s): %s", l.Title, l.Lens, l.Doc) -} - -type AnalyzerJSON struct { - Name string - Doc string - Default bool -} - -func (a *AnalyzerJSON) String() string { - return a.Name -} - -func (a *AnalyzerJSON) Write(w io.Writer) { - fmt.Fprintf(w, "%s (%s): %v", a.Name, a.Doc, a.Default) -} diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go deleted file mode 100644 index 3541600b207..00000000000 --- a/internal/lsp/source/references.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "errors" - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// ReferenceInfo holds information about reference to an identifier in Go source. -type ReferenceInfo struct { - Name string - MappedRange - ident *ast.Ident - obj types.Object - pkg Package - isDeclaration bool -} - -// References returns a list of references for a given identifier within the packages -// containing i.File. Declarations appear first in the result. -func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]*ReferenceInfo, error) { - ctx, done := event.Start(ctx, "source.References") - defer done() - - qualifiedObjs, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp) - // Don't return references for builtin types. - if errors.Is(err, errBuiltin) { - return nil, nil - } - if err != nil { - return nil, err - } - - refs, err := references(ctx, s, qualifiedObjs, includeDeclaration, true, false) - if err != nil { - return nil, err - } - - toSort := refs - if includeDeclaration { - toSort = refs[1:] - } - sort.Slice(toSort, func(i, j int) bool { - x := CompareURI(toSort[i].URI(), toSort[j].URI()) - if x == 0 { - return toSort[i].ident.Pos() < toSort[j].ident.Pos() - } - return x < 0 - }) - return refs, nil -} - -// references is a helper function to avoid recomputing qualifiedObjsAtProtocolPos. -func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject, includeDeclaration, includeInterfaceRefs, includeEmbeddedRefs bool) ([]*ReferenceInfo, error) { - var ( - references []*ReferenceInfo - seen = make(map[token.Pos]bool) - ) - - pos := qos[0].obj.Pos() - if pos == token.NoPos { - return nil, fmt.Errorf("no position for %s", qos[0].obj) - } - filename := snapshot.FileSet().Position(pos).Filename - pgf, err := qos[0].pkg.File(span.URIFromPath(filename)) - if err != nil { - return nil, err - } - declIdent, err := findIdentifier(ctx, snapshot, qos[0].pkg, pgf, qos[0].obj.Pos()) - if err != nil { - return nil, err - } - // Make sure declaration is the first item in the response. - if includeDeclaration { - references = append(references, &ReferenceInfo{ - MappedRange: declIdent.MappedRange, - Name: qos[0].obj.Name(), - ident: declIdent.ident, - obj: qos[0].obj, - pkg: declIdent.pkg, - isDeclaration: true, - }) - } - - for _, qo := range qos { - var searchPkgs []Package - - // Only search dependents if the object is exported. - if qo.obj.Exported() { - reverseDeps, err := snapshot.GetReverseDependencies(ctx, qo.pkg.ID()) - if err != nil { - return nil, err - } - searchPkgs = append(searchPkgs, reverseDeps...) - } - // Add the package in which the identifier is declared. - searchPkgs = append(searchPkgs, qo.pkg) - for _, pkg := range searchPkgs { - for ident, obj := range pkg.GetTypesInfo().Uses { - // For instantiated objects (as in methods or fields on instantiated - // types), we may not have pointer-identical objects but still want to - // consider them references. - if !equalOrigin(obj, qo.obj) { - // If ident is not a use of qo.obj, skip it, with one exception: - // uses of an embedded field can be considered references of the - // embedded type name - if !includeEmbeddedRefs { - continue - } - v, ok := obj.(*types.Var) - if !ok || !v.Embedded() { - continue - } - named, ok := v.Type().(*types.Named) - if !ok || named.Obj() != qo.obj { - continue - } - } - if seen[ident.Pos()] { - continue - } - seen[ident.Pos()] = true - rng, err := posToMappedRange(snapshot, pkg, ident.Pos(), ident.End()) - if err != nil { - return nil, err - } - references = append(references, &ReferenceInfo{ - Name: ident.Name, - ident: ident, - pkg: pkg, - obj: obj, - MappedRange: rng, - }) - } - } - } - - // When searching on type name, don't include interface references -- they - // would be things like all references to Stringer for any type that - // happened to have a String method. - _, isType := declIdent.Declaration.obj.(*types.TypeName) - if includeInterfaceRefs && !isType { - declRange, err := declIdent.Range() - if err != nil { - return nil, err - } - fh, err := snapshot.GetFile(ctx, declIdent.URI()) - if err != nil { - return nil, err - } - interfaceRefs, err := interfaceReferences(ctx, snapshot, fh, declRange.Start) - if err != nil { - return nil, err - } - references = append(references, interfaceRefs...) - } - - return references, nil -} - -// equalOrigin reports whether obj1 and obj2 have equivalent origin object. -// This may be the case even if obj1 != obj2, if one or both of them is -// instantiated. -func equalOrigin(obj1, obj2 types.Object) bool { - return obj1.Pkg() == obj2.Pkg() && obj1.Pos() == obj2.Pos() && obj1.Name() == obj2.Name() -} - -// interfaceReferences returns the references to the interfaces implemented by -// the type or method at the given position. -func interfaceReferences(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position) ([]*ReferenceInfo, error) { - implementations, err := implementations(ctx, s, f, pp) - if err != nil { - if errors.Is(err, ErrNotAType) { - return nil, nil - } - return nil, err - } - - var refs []*ReferenceInfo - for _, impl := range implementations { - implRefs, err := references(ctx, s, []qualifiedObject{impl}, false, false, false) - if err != nil { - return nil, err - } - refs = append(refs, implRefs...) - } - return refs, nil -} diff --git a/internal/lsp/source/rename.go b/internal/lsp/source/rename.go deleted file mode 100644 index 6312bcb1296..00000000000 --- a/internal/lsp/source/rename.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "context" - "errors" - "fmt" - "go/ast" - "go/format" - "go/token" - "go/types" - "regexp" - "strings" - - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/refactor/satisfy" -) - -type renamer struct { - ctx context.Context - fset *token.FileSet - refs []*ReferenceInfo - objsToUpdate map[types.Object]bool - hadConflicts bool - errors string - from, to string - satisfyConstraints map[satisfy.Constraint]bool - packages map[*types.Package]Package // may include additional packages that are a dep of pkg - msets typeutil.MethodSetCache - changeMethods bool -} - -type PrepareItem struct { - Range protocol.Range - Text string -} - -// PrepareRename searches for a valid renaming at position pp. -// -// The returned usererr is intended to be displayed to the user to explain why -// the prepare fails. Probably we could eliminate the redundancy in returning -// two errors, but for now this is done defensively. -func PrepareRename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) (_ *PrepareItem, usererr, err error) { - ctx, done := event.Start(ctx, "source.PrepareRename") - defer done() - - qos, err := qualifiedObjsAtProtocolPos(ctx, snapshot, f.URI(), pp) - if err != nil { - return nil, nil, err - } - node, obj, pkg := qos[0].node, qos[0].obj, qos[0].sourcePkg - if err := checkRenamable(obj); err != nil { - return nil, err, err - } - mr, err := posToMappedRange(snapshot, pkg, node.Pos(), node.End()) - if err != nil { - return nil, nil, err - } - rng, err := mr.Range() - if err != nil { - return nil, nil, err - } - if _, isImport := node.(*ast.ImportSpec); isImport { - // We're not really renaming the import path. - rng.End = rng.Start - } - return &PrepareItem{ - Range: rng, - Text: obj.Name(), - }, nil, nil -} - -// checkRenamable verifies if an obj may be renamed. -func checkRenamable(obj types.Object) error { - if v, ok := obj.(*types.Var); ok && v.Embedded() { - return errors.New("can't rename embedded fields: rename the type directly or name the field") - } - if obj.Name() == "_" { - return errors.New("can't rename \"_\"") - } - return nil -} - -// Rename returns a map of TextEdits for each file modified when renaming a -// given identifier within a package. -func Rename(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]protocol.TextEdit, error) { - ctx, done := event.Start(ctx, "source.Rename") - defer done() - - qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp) - if err != nil { - return nil, err - } - - obj, pkg := qos[0].obj, qos[0].pkg - - if err := checkRenamable(obj); err != nil { - return nil, err - } - if obj.Name() == newName { - return nil, fmt.Errorf("old and new names are the same: %s", newName) - } - if !isValidIdentifier(newName) { - return nil, fmt.Errorf("invalid identifier to rename: %q", newName) - } - if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("package for %s is ill typed", f.URI()) - } - refs, err := references(ctx, s, qos, true, false, true) - if err != nil { - return nil, err - } - r := renamer{ - ctx: ctx, - fset: s.FileSet(), - refs: refs, - objsToUpdate: make(map[types.Object]bool), - from: obj.Name(), - to: newName, - packages: make(map[*types.Package]Package), - } - - // A renaming initiated at an interface method indicates the - // intention to rename abstract and concrete methods as needed - // to preserve assignability. - for _, ref := range refs { - if obj, ok := ref.obj.(*types.Func); ok { - recv := obj.Type().(*types.Signature).Recv() - if recv != nil && IsInterface(recv.Type().Underlying()) { - r.changeMethods = true - break - } - } - } - for _, from := range refs { - r.packages[from.pkg.GetTypes()] = from.pkg - } - - // Check that the renaming of the identifier is ok. - for _, ref := range refs { - r.check(ref.obj) - if r.hadConflicts { // one error is enough. - break - } - } - if r.hadConflicts { - return nil, fmt.Errorf(r.errors) - } - - changes, err := r.update() - if err != nil { - return nil, err - } - result := make(map[span.URI][]protocol.TextEdit) - for uri, edits := range changes { - // These edits should really be associated with FileHandles for maximal correctness. - // For now, this is good enough. - fh, err := s.GetFile(ctx, uri) - if err != nil { - return nil, err - } - data, err := fh.Read() - if err != nil { - return nil, err - } - m := protocol.NewColumnMapper(uri, data) - // Sort the edits first. - diff.SortTextEdits(edits) - protocolEdits, err := ToProtocolEdits(m, edits) - if err != nil { - return nil, err - } - result[uri] = protocolEdits - } - return result, nil -} - -// Rename all references to the identifier. -func (r *renamer) update() (map[span.URI][]diff.TextEdit, error) { - result := make(map[span.URI][]diff.TextEdit) - seen := make(map[span.Span]bool) - - docRegexp, err := regexp.Compile(`\b` + r.from + `\b`) - if err != nil { - return nil, err - } - for _, ref := range r.refs { - refSpan, err := ref.Span() - if err != nil { - return nil, err - } - if seen[refSpan] { - continue - } - seen[refSpan] = true - - // Renaming a types.PkgName may result in the addition or removal of an identifier, - // so we deal with this separately. - if pkgName, ok := ref.obj.(*types.PkgName); ok && ref.isDeclaration { - edit, err := r.updatePkgName(pkgName) - if err != nil { - return nil, err - } - result[refSpan.URI()] = append(result[refSpan.URI()], *edit) - continue - } - - // Replace the identifier with r.to. - edit := diff.TextEdit{ - Span: refSpan, - NewText: r.to, - } - - result[refSpan.URI()] = append(result[refSpan.URI()], edit) - - if !ref.isDeclaration || ref.ident == nil { // uses do not have doc comments to update. - continue - } - - doc := r.docComment(ref.pkg, ref.ident) - if doc == nil { - continue - } - - // Perform the rename in doc comments declared in the original package. - // go/parser strips out \r\n returns from the comment text, so go - // line-by-line through the comment text to get the correct positions. - for _, comment := range doc.List { - if isDirective(comment.Text) { - continue - } - lines := strings.Split(comment.Text, "\n") - tok := r.fset.File(comment.Pos()) - commentLine := tok.Position(comment.Pos()).Line - for i, line := range lines { - lineStart := comment.Pos() - if i > 0 { - lineStart = tok.LineStart(commentLine + i) - } - for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) { - rng := span.NewRange(r.fset, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1])) - spn, err := rng.Span() - if err != nil { - return nil, err - } - result[spn.URI()] = append(result[spn.URI()], diff.TextEdit{ - Span: spn, - NewText: r.to, - }) - } - } - } - } - - return result, nil -} - -// docComment returns the doc for an identifier. -func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup { - _, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End()) - for _, node := range nodes { - switch decl := node.(type) { - case *ast.FuncDecl: - return decl.Doc - case *ast.Field: - return decl.Doc - case *ast.GenDecl: - return decl.Doc - // For {Type,Value}Spec, if the doc on the spec is absent, - // search for the enclosing GenDecl - case *ast.TypeSpec: - if decl.Doc != nil { - return decl.Doc - } - case *ast.ValueSpec: - if decl.Doc != nil { - return decl.Doc - } - case *ast.Ident: - case *ast.AssignStmt: - // *ast.AssignStmt doesn't have an associated comment group. - // So, we try to find a comment just before the identifier. - - // Try to find a comment group only for short variable declarations (:=). - if decl.Tok != token.DEFINE { - return nil - } - - var file *ast.File - for _, f := range pkg.GetSyntax() { - if f.Pos() <= id.Pos() && id.Pos() <= f.End() { - file = f - break - } - } - if file == nil { - return nil - } - - identLine := r.fset.Position(id.Pos()).Line - for _, comment := range file.Comments { - if comment.Pos() > id.Pos() { - // Comment is after the identifier. - continue - } - - lastCommentLine := r.fset.Position(comment.End()).Line - if lastCommentLine+1 == identLine { - return comment - } - } - default: - return nil - } - } - return nil -} - -// updatePkgName returns the updates to rename a pkgName in the import spec -func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.TextEdit, error) { - // Modify ImportSpec syntax to add or remove the Name as needed. - pkg := r.packages[pkgName.Pkg()] - _, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos()) - if len(path) < 2 { - return nil, fmt.Errorf("no path enclosing interval for %s", pkgName.Name()) - } - spec, ok := path[1].(*ast.ImportSpec) - if !ok { - return nil, fmt.Errorf("failed to update PkgName for %s", pkgName.Name()) - } - - var astIdent *ast.Ident // will be nil if ident is removed - if pkgName.Imported().Name() != r.to { - // ImportSpec.Name needed - astIdent = &ast.Ident{NamePos: spec.Path.Pos(), Name: r.to} - } - - // Make a copy of the ident that just has the name and path. - updated := &ast.ImportSpec{ - Name: astIdent, - Path: spec.Path, - EndPos: spec.EndPos, - } - - rng := span.NewRange(r.fset, spec.Pos(), spec.End()) - spn, err := rng.Span() - if err != nil { - return nil, err - } - - var buf bytes.Buffer - format.Node(&buf, r.fset, updated) - newText := buf.String() - - return &diff.TextEdit{ - Span: spn, - NewText: newText, - }, nil -} diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go deleted file mode 100644 index 813f67e7b3b..00000000000 --- a/internal/lsp/source/signature_help.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" -) - -func SignatureHelp(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.SignatureInformation, int, error) { - ctx, done := event.Start(ctx, "source.SignatureHelp") - defer done() - - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, 0, fmt.Errorf("getting file for SignatureHelp: %w", err) - } - pos, err := pgf.Mapper.Pos(position) - if err != nil { - return nil, 0, err - } - // Find a call expression surrounding the query position. - var callExpr *ast.CallExpr - path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos) - if path == nil { - return nil, 0, fmt.Errorf("cannot find node enclosing position") - } -FindCall: - for _, node := range path { - switch node := node.(type) { - case *ast.CallExpr: - if pos >= node.Lparen && pos <= node.Rparen { - callExpr = node - break FindCall - } - case *ast.FuncLit, *ast.FuncType: - // The user is within an anonymous function, - // which may be the parameter to the *ast.CallExpr. - // Don't show signature help in this case. - return nil, 0, fmt.Errorf("no signature help within a function declaration") - case *ast.BasicLit: - if node.Kind == token.STRING { - return nil, 0, fmt.Errorf("no signature help within a string literal") - } - } - - } - if callExpr == nil || callExpr.Fun == nil { - return nil, 0, fmt.Errorf("cannot find an enclosing function") - } - - qf := Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()) - - // Get the object representing the function, if available. - // There is no object in certain cases such as calling a function returned by - // a function (e.g. "foo()()"). - var obj types.Object - switch t := callExpr.Fun.(type) { - case *ast.Ident: - obj = pkg.GetTypesInfo().ObjectOf(t) - case *ast.SelectorExpr: - obj = pkg.GetTypesInfo().ObjectOf(t.Sel) - } - - // Handle builtin functions separately. - if obj, ok := obj.(*types.Builtin); ok { - return builtinSignature(ctx, snapshot, callExpr, obj.Name(), pos) - } - - // Get the type information for the function being called. - sigType := pkg.GetTypesInfo().TypeOf(callExpr.Fun) - if sigType == nil { - return nil, 0, fmt.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun) - } - - sig, _ := sigType.Underlying().(*types.Signature) - if sig == nil { - return nil, 0, fmt.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun) - } - - activeParam := activeParameter(callExpr, sig.Params().Len(), sig.Variadic(), pos) - - var ( - name string - comment *ast.CommentGroup - ) - if obj != nil { - declPkg, err := FindPackageFromPos(ctx, snapshot, obj.Pos()) - if err != nil { - return nil, 0, err - } - node, err := snapshot.PosToDecl(ctx, declPkg, obj.Pos()) - if err != nil { - return nil, 0, err - } - rng, err := objToMappedRange(snapshot, pkg, obj) - if err != nil { - return nil, 0, err - } - decl := Declaration{ - obj: obj, - node: node, - } - decl.MappedRange = append(decl.MappedRange, rng) - d, err := FindHoverContext(ctx, snapshot, pkg, decl.obj, decl.node, nil) - if err != nil { - return nil, 0, err - } - name = obj.Name() - comment = d.Comment - } else { - name = "func" - } - s := NewSignature(ctx, snapshot, pkg, sig, comment, qf) - paramInfo := make([]protocol.ParameterInformation, 0, len(s.params)) - for _, p := range s.params { - paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p}) - } - return &protocol.SignatureInformation{ - Label: name + s.Format(), - Documentation: s.doc, - Parameters: paramInfo, - }, activeParam, nil -} - -func builtinSignature(ctx context.Context, snapshot Snapshot, callExpr *ast.CallExpr, name string, pos token.Pos) (*protocol.SignatureInformation, int, error) { - sig, err := NewBuiltinSignature(ctx, snapshot, name) - if err != nil { - return nil, 0, err - } - paramInfo := make([]protocol.ParameterInformation, 0, len(sig.params)) - for _, p := range sig.params { - paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p}) - } - activeParam := activeParameter(callExpr, len(sig.params), sig.variadic, pos) - return &protocol.SignatureInformation{ - Label: sig.name + sig.Format(), - Documentation: sig.doc, - Parameters: paramInfo, - }, activeParam, nil - -} - -func activeParameter(callExpr *ast.CallExpr, numParams int, variadic bool, pos token.Pos) (activeParam int) { - if len(callExpr.Args) == 0 { - return 0 - } - // First, check if the position is even in the range of the arguments. - start, end := callExpr.Lparen, callExpr.Rparen - if !(start <= pos && pos <= end) { - return 0 - } - for _, expr := range callExpr.Args { - if start == token.NoPos { - start = expr.Pos() - } - end = expr.End() - if start <= pos && pos <= end { - break - } - // Don't advance the active parameter for the last parameter of a variadic function. - if !variadic || activeParam < numParams-1 { - activeParam++ - } - start = expr.Pos() + 1 // to account for commas - } - return activeParam -} diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go deleted file mode 100644 index 426bffc97b5..00000000000 --- a/internal/lsp/source/source_test.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source_test - -import ( - "context" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "sort" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/cache" - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/fuzzy" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/source/completion" - "golang.org/x/tools/internal/lsp/tests" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/testenv" -) - -func TestMain(m *testing.M) { - bug.PanicOnBugs = true - testenv.ExitIfSmallMachine() - os.Exit(m.Run()) -} - -func TestSource(t *testing.T) { - tests.RunTests(t, "../testdata", true, testSource) -} - -type runner struct { - snapshot source.Snapshot - view source.View - data *tests.Data - ctx context.Context - normalizers []tests.Normalizer -} - -func testSource(t *testing.T, datum *tests.Data) { - ctx := tests.Context(t) - - cache := cache.New(nil) - session := cache.NewSession(ctx) - options := source.DefaultOptions().Clone() - tests.DefaultOptions(options) - options.SetEnvSlice(datum.Config.Env) - view, _, release, err := session.NewView(ctx, "source_test", span.URIFromPath(datum.Config.Dir), options) - release() - if err != nil { - t.Fatal(err) - } - defer view.Shutdown(ctx) - - // Enable type error analyses for tests. - // TODO(golang/go#38212): Delete this once they are enabled by default. - tests.EnableAllAnalyzers(view, options) - view.SetOptions(ctx, options) - - var modifications []source.FileModification - for filename, content := range datum.Config.Overlay { - if filepath.Ext(filename) != ".go" { - continue - } - modifications = append(modifications, source.FileModification{ - URI: span.URIFromPath(filename), - Action: source.Open, - Version: -1, - Text: content, - LanguageID: "go", - }) - } - if err := session.ModifyFiles(ctx, modifications); err != nil { - t.Fatal(err) - } - snapshot, release := view.Snapshot(ctx) - defer release() - r := &runner{ - view: view, - snapshot: snapshot, - data: datum, - ctx: ctx, - normalizers: tests.CollectNormalizers(datum.Exported), - } - tests.Run(t, r, datum) -} - -func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) { - mapper, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := mapper.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - - items, err := source.PrepareCallHierarchy(r.ctx, r.snapshot, fh, loc.Range.Start) - if err != nil { - t.Fatal(err) - } - if len(items) == 0 { - t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range) - } - - callLocation := protocol.Location{ - URI: items[0].URI, - Range: items[0].Range, - } - if callLocation != loc { - t.Fatalf("expected source.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation) - } - - incomingCalls, err := source.IncomingCalls(r.ctx, r.snapshot, fh, loc.Range.Start) - if err != nil { - t.Error(err) - } - var incomingCallItems []protocol.CallHierarchyItem - for _, item := range incomingCalls { - incomingCallItems = append(incomingCallItems, item.From) - } - msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls) - if msg != "" { - t.Error(fmt.Sprintf("incoming calls differ: %s", msg)) - } - - outgoingCalls, err := source.OutgoingCalls(r.ctx, r.snapshot, fh, loc.Range.Start) - if err != nil { - t.Error(err) - } - var outgoingCallItems []protocol.CallHierarchyItem - for _, item := range outgoingCalls { - outgoingCallItems = append(outgoingCallItems, item.To) - } - msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls) - if msg != "" { - t.Error(fmt.Sprintf("outgoing calls differ: %s", msg)) - } -} - -func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) { - fileID, got, err := source.FileDiagnostics(r.ctx, r.snapshot, uri) - if err != nil { - t.Fatal(err) - } - // A special case to test that there are no diagnostics for a file. - if len(want) == 1 && want[0].Source == "no_diagnostics" { - if len(got) != 0 { - t.Errorf("expected no diagnostics for %s, got %v", uri, got) - } - return - } - if diff := tests.DiffDiagnostics(fileID.URI, want, got); diff != "" { - t.Error(diff) - } -} - -func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - _, got := r.callCompletion(t, src, func(opts *source.Options) { - opts.Matcher = source.CaseInsensitive - opts.DeepCompletion = false - opts.CompleteUnimported = false - opts.InsertTextFormat = protocol.SnippetTextFormat - opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal") - opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix") - }) - got = tests.FilterBuiltins(src, got) - if diff := tests.DiffCompletionItems(want, got); diff != "" { - t.Errorf("%s: %s", src, diff) - } -} - -func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) { - _, list := r.callCompletion(t, src, func(opts *source.Options) { - opts.UsePlaceholders = placeholders - opts.DeepCompletion = true - opts.CompleteUnimported = false - }) - got := tests.FindItem(list, *items[expected.CompletionItem]) - want := expected.PlainSnippet - if placeholders { - want = expected.PlaceholderSnippet - } - if diff := tests.DiffSnippets(want, got); diff != "" { - t.Errorf("%s: %s", src, diff) - } -} - -func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - _, got := r.callCompletion(t, src, func(opts *source.Options) {}) - got = tests.FilterBuiltins(src, got) - if diff := tests.CheckCompletionOrder(want, got, false); diff != "" { - t.Errorf("%s: %s", src, diff) - } -} - -func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - prefix, list := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.CaseInsensitive - opts.CompleteUnimported = false - }) - list = tests.FilterBuiltins(src, list) - fuzzyMatcher := fuzzy.NewMatcher(prefix) - var got []protocol.CompletionItem - for _, item := range list { - if fuzzyMatcher.Score(item.Label) <= 0 { - continue - } - got = append(got, item) - } - if msg := tests.DiffCompletionItems(want, got); msg != "" { - t.Errorf("%s: %s", src, msg) - } -} - -func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - _, got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.Fuzzy - opts.CompleteUnimported = false - }) - got = tests.FilterBuiltins(src, got) - if msg := tests.DiffCompletionItems(want, got); msg != "" { - t.Errorf("%s: %s", src, msg) - } -} - -func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - _, list := r.callCompletion(t, src, func(opts *source.Options) { - opts.Matcher = source.CaseSensitive - opts.CompleteUnimported = false - }) - list = tests.FilterBuiltins(src, list) - if diff := tests.DiffCompletionItems(want, list); diff != "" { - t.Errorf("%s: %s", src, diff) - } -} - -func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) { - var want []protocol.CompletionItem - for _, pos := range test.CompletionItems { - want = append(want, tests.ToProtocolCompletionItem(*items[pos])) - } - _, got := r.callCompletion(t, src, func(opts *source.Options) { - opts.DeepCompletion = true - opts.Matcher = source.Fuzzy - opts.ExperimentalPostfixCompletions = true - }) - if msg := tests.CheckCompletionOrder(want, got, true); msg != "" { - t.Errorf("%s: %s", src, msg) - } -} - -func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) (string, []protocol.CompletionItem) { - fh, err := r.snapshot.GetFile(r.ctx, src.URI()) - if err != nil { - t.Fatal(err) - } - original := r.view.Options() - modified := original.Clone() - options(modified) - newView, err := r.view.SetOptions(r.ctx, modified) - if newView != r.view { - t.Fatalf("options change unexpectedly created new view") - } - if err != nil { - t.Fatal(err) - } - defer r.view.SetOptions(r.ctx, original) - - list, surrounding, err := completion.Completion(r.ctx, r.snapshot, fh, protocol.Position{ - Line: uint32(src.Start().Line() - 1), - Character: uint32(src.Start().Column() - 1), - }, protocol.CompletionContext{}) - if err != nil && !errors.As(err, &completion.ErrIsDefinition{}) { - t.Fatalf("failed for %v: %v", src, err) - } - var prefix string - if surrounding != nil { - prefix = strings.ToLower(surrounding.Prefix()) - } - - var numDeepCompletionsSeen int - var items []completion.CompletionItem - // Apply deep completion filtering. - for _, item := range list { - if item.Depth > 0 { - if !modified.DeepCompletion { - continue - } - if numDeepCompletionsSeen >= completion.MaxDeepCompletions { - continue - } - numDeepCompletionsSeen++ - } - items = append(items, item) - } - return prefix, tests.ToProtocolCompletionItems(items) -} - -func (r *runner) FoldingRanges(t *testing.T, spn span.Span) { - uri := spn.URI() - - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - data, err := fh.Read() - if err != nil { - t.Error(err) - return - } - - // Test all folding ranges. - ranges, err := source.FoldingRange(r.ctx, r.snapshot, fh, false) - if err != nil { - t.Error(err) - return - } - r.foldingRanges(t, "foldingRange", uri, string(data), ranges) - - // Test folding ranges with lineFoldingOnly - ranges, err = source.FoldingRange(r.ctx, r.snapshot, fh, true) - if err != nil { - t.Error(err) - return - } - r.foldingRanges(t, "foldingRange-lineFolding", uri, string(data), ranges) -} - -func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, data string, ranges []*source.FoldingRangeInfo) { - t.Helper() - // Fold all ranges. - nonOverlapping := nonOverlappingRanges(t, ranges) - for i, rngs := range nonOverlapping { - got, err := foldRanges(string(data), rngs) - if err != nil { - t.Error(err) - continue - } - tag := fmt.Sprintf("%s-%d", prefix, i) - want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if diff := tests.Diff(t, want, got); diff != "" { - t.Errorf("%s: foldingRanges failed for %s, diff:\n%v", tag, uri.Filename(), diff) - } - } - - // Filter by kind. - kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment} - for _, kind := range kinds { - var kindOnly []*source.FoldingRangeInfo - for _, fRng := range ranges { - if fRng.Kind == kind { - kindOnly = append(kindOnly, fRng) - } - } - - nonOverlapping := nonOverlappingRanges(t, kindOnly) - for i, rngs := range nonOverlapping { - got, err := foldRanges(string(data), rngs) - if err != nil { - t.Error(err) - continue - } - tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i) - want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if diff := tests.Diff(t, want, got); diff != "" { - t.Errorf("%s: failed for %s, diff:\n%v", tag, uri.Filename(), diff) - } - } - - } -} - -func nonOverlappingRanges(t *testing.T, ranges []*source.FoldingRangeInfo) (res [][]*source.FoldingRangeInfo) { - for _, fRng := range ranges { - setNum := len(res) - for i := 0; i < len(res); i++ { - canInsert := true - for _, rng := range res[i] { - if conflict(t, rng, fRng) { - canInsert = false - break - } - } - if canInsert { - setNum = i - break - } - } - if setNum == len(res) { - res = append(res, []*source.FoldingRangeInfo{}) - } - res[setNum] = append(res[setNum], fRng) - } - return res -} - -func conflict(t *testing.T, a, b *source.FoldingRangeInfo) bool { - arng, err := a.Range() - if err != nil { - t.Fatal(err) - } - brng, err := b.Range() - if err != nil { - t.Fatal(err) - } - // a start position is <= b start positions - return protocol.ComparePosition(arng.Start, brng.Start) <= 0 && protocol.ComparePosition(arng.End, brng.Start) > 0 -} - -func foldRanges(contents string, ranges []*source.FoldingRangeInfo) (string, error) { - foldedText := "<>" - res := contents - // Apply the folds from the end of the file forward - // to preserve the offsets. - for i := len(ranges) - 1; i >= 0; i-- { - fRange := ranges[i] - spn, err := fRange.Span() - if err != nil { - return "", err - } - start := spn.Start().Offset() - end := spn.End().Offset() - - tmp := res[0:start] + foldedText - res = tmp + res[end:] - } - return res, nil -} - -func (r *runner) Format(t *testing.T, spn span.Span) { - gofmted := string(r.data.Golden("gofmt", spn.URI().Filename(), func() ([]byte, error) { - cmd := exec.Command("gofmt", spn.URI().Filename()) - out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files - return out, nil - })) - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - edits, err := source.Format(r.ctx, r.snapshot, fh) - if err != nil { - if gofmted != "" { - t.Error(err) - } - return - } - data, err := fh.Read() - if err != nil { - t.Fatal(err) - } - m, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - diffEdits, err := source.FromProtocolEdits(m, edits) - if err != nil { - t.Error(err) - } - got := diff.ApplyEdits(string(data), diffEdits) - if gofmted != got { - t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", spn.URI().Filename(), gofmted, got) - } -} - -func (r *runner) SemanticTokens(t *testing.T, spn span.Span) { - t.Skip("nothing to test in source") -} - -func (r *runner) Import(t *testing.T, spn span.Span) { - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - edits, _, err := source.AllImportsFixes(r.ctx, r.snapshot, fh) - if err != nil { - t.Error(err) - } - data, err := fh.Read() - if err != nil { - t.Fatal(err) - } - m, err := r.data.Mapper(fh.URI()) - if err != nil { - t.Fatal(err) - } - diffEdits, err := source.FromProtocolEdits(m, edits) - if err != nil { - t.Error(err) - } - got := diff.ApplyEdits(string(data), diffEdits) - want := string(r.data.Golden("goimports", spn.URI().Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if want != got { - d, err := myers.ComputeEdits(spn.URI(), want, got) - if err != nil { - t.Fatal(err) - } - t.Errorf("import failed for %s: %s", spn.URI().Filename(), diff.ToUnified("want", "got", want, d)) - } -} - -func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) { - _, srcRng, err := spanToRange(r.data, d.Src) - if err != nil { - t.Fatal(err) - } - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - ident, err := source.Identifier(r.ctx, r.snapshot, fh, srcRng.Start) - if err != nil { - t.Fatalf("failed for %v: %v", d.Src, err) - } - h, err := source.HoverIdentifier(r.ctx, ident) - if err != nil { - t.Fatalf("failed for %v: %v", d.Src, err) - } - hover, err := source.FormatHover(h, r.view.Options()) - if err != nil { - t.Fatal(err) - } - rng, err := ident.Declaration.MappedRange[0].Range() - if err != nil { - t.Fatal(err) - } - if d.IsType { - rng, err = ident.Type.Range() - if err != nil { - t.Fatal(err) - } - hover = "" - } - didSomething := false - if hover != "" { - didSomething = true - tag := fmt.Sprintf("%s-hoverdef", d.Name) - expectHover := string(r.data.Golden(tag, d.Src.URI().Filename(), func() ([]byte, error) { - return []byte(hover), nil - })) - hover = tests.StripSubscripts(hover) - expectHover = tests.StripSubscripts(expectHover) - if hover != expectHover { - t.Errorf("hoverdef for %s failed:\n%s", d.Src, tests.Diff(t, expectHover, hover)) - } - } - if !d.OnlyHover { - didSomething = true - if _, defRng, err := spanToRange(r.data, d.Def); err != nil { - t.Fatal(err) - } else if rng != defRng { - t.Errorf("for %v got %v want %v", d.Src, rng, defRng) - } - } - if !didSomething { - t.Errorf("no tests ran for %s", d.Src.URI()) - } -} - -func (r *runner) Implementation(t *testing.T, spn span.Span, impls []span.Span) { - sm, err := r.data.Mapper(spn.URI()) - if err != nil { - t.Fatal(err) - } - loc, err := sm.Location(spn) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - locs, err := source.Implementation(r.ctx, r.snapshot, fh, loc.Range.Start) - if err != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - if len(locs) != len(impls) { - t.Fatalf("got %d locations for implementation, expected %d", len(locs), len(impls)) - } - var results []span.Span - for i := range locs { - locURI := locs[i].URI.SpanURI() - lm, err := r.data.Mapper(locURI) - if err != nil { - t.Fatal(err) - } - imp, err := lm.Span(locs[i]) - if err != nil { - t.Fatalf("failed for %v: %v", locs[i], err) - } - results = append(results, imp) - } - // Sort results and expected to make tests deterministic. - sort.SliceStable(results, func(i, j int) bool { - return span.Compare(results[i], results[j]) == -1 - }) - sort.SliceStable(impls, func(i, j int) bool { - return span.Compare(impls[i], impls[j]) == -1 - }) - for i := range results { - if results[i] != impls[i] { - t.Errorf("for %dth implementation of %v got %v want %v", i, spn, results[i], impls[i]) - } - } -} - -func (r *runner) Highlight(t *testing.T, src span.Span, locations []span.Span) { - ctx := r.ctx - m, srcRng, err := spanToRange(r.data, src) - if err != nil { - t.Fatal(err) - } - fh, err := r.snapshot.GetFile(r.ctx, src.URI()) - if err != nil { - t.Fatal(err) - } - highlights, err := source.Highlight(ctx, r.snapshot, fh, srcRng.Start) - if err != nil { - t.Errorf("highlight failed for %s: %v", src.URI(), err) - } - if len(highlights) != len(locations) { - t.Fatalf("got %d highlights for highlight at %v:%v:%v, expected %d", len(highlights), src.URI().Filename(), src.Start().Line(), src.Start().Column(), len(locations)) - } - // Check to make sure highlights have a valid range. - var results []span.Span - for i := range highlights { - h, err := m.RangeSpan(highlights[i]) - if err != nil { - t.Fatalf("failed for %v: %v", highlights[i], err) - } - results = append(results, h) - } - // Sort results to make tests deterministic since DocumentHighlight uses a map. - sort.SliceStable(results, func(i, j int) bool { - return span.Compare(results[i], results[j]) == -1 - }) - // Check to make sure all the expected highlights are found. - for i := range results { - if results[i] != locations[i] { - t.Errorf("want %v, got %v\n", locations[i], results[i]) - } - } -} - -func (r *runner) Hover(t *testing.T, src span.Span, text string) { - ctx := r.ctx - _, srcRng, err := spanToRange(r.data, src) - if err != nil { - t.Fatal(err) - } - fh, err := r.snapshot.GetFile(r.ctx, src.URI()) - if err != nil { - t.Fatal(err) - } - hover, err := source.Hover(ctx, r.snapshot, fh, srcRng.Start) - if err != nil { - t.Errorf("hover failed for %s: %v", src.URI(), err) - } - if text == "" { - if hover != nil { - t.Errorf("want nil, got %v\n", hover) - } - } else { - if hover == nil { - t.Fatalf("want hover result to not be nil") - } - if got := hover.Contents.Value; got != text { - t.Errorf("want %v, got %v\n", got, text) - } - if want, got := srcRng, hover.Range; want != got { - t.Errorf("want range %v, got %v instead", want, got) - } - } -} - -func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) { - ctx := r.ctx - _, srcRng, err := spanToRange(r.data, src) - if err != nil { - t.Fatal(err) - } - snapshot := r.snapshot - fh, err := snapshot.GetFile(r.ctx, src.URI()) - if err != nil { - t.Fatal(err) - } - for _, includeDeclaration := range []bool{true, false} { - t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) { - want := make(map[span.Span]bool) - for i, pos := range itemList { - // We don't want the first result if we aren't including the declaration. - if i == 0 && !includeDeclaration { - continue - } - want[pos] = true - } - refs, err := source.References(ctx, snapshot, fh, srcRng.Start, includeDeclaration) - if err != nil { - t.Fatalf("failed for %s: %v", src, err) - } - got := make(map[span.Span]bool) - for _, refInfo := range refs { - refSpan, err := refInfo.Span() - if err != nil { - t.Fatal(err) - } - got[refSpan] = true - } - if len(got) != len(want) { - t.Errorf("references failed: different lengths got %v want %v", len(got), len(want)) - } - for spn := range got { - if !want[spn] { - t.Errorf("references failed: incorrect references got %v want locations %v", got, want) - } - } - }) - } -} - -func (r *runner) Rename(t *testing.T, spn span.Span, newText string) { - tag := fmt.Sprintf("%s-rename", newText) - - _, srcRng, err := spanToRange(r.data, spn) - if err != nil { - t.Fatal(err) - } - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - changes, err := source.Rename(r.ctx, r.snapshot, fh, srcRng.Start, newText) - if err != nil { - renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) { - return []byte(err.Error()), nil - })) - if err.Error() != renamed { - t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v\n", newText, renamed, err) - } - return - } - - var res []string - for editURI, edits := range changes { - fh, err := r.snapshot.GetFile(r.ctx, editURI) - if err != nil { - t.Fatal(err) - } - data, err := fh.Read() - if err != nil { - t.Fatal(err) - } - m, err := r.data.Mapper(fh.URI()) - if err != nil { - t.Fatal(err) - } - diffEdits, err := source.FromProtocolEdits(m, edits) - if err != nil { - t.Fatal(err) - } - contents := applyEdits(string(data), diffEdits) - if len(changes) > 1 { - filename := filepath.Base(editURI.Filename()) - contents = fmt.Sprintf("%s:\n%s", filename, contents) - } - res = append(res, contents) - } - - // Sort on filename - sort.Strings(res) - - var got string - for i, val := range res { - if i != 0 { - got += "\n" - } - got += val - } - - renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - - if renamed != got { - t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v", newText, renamed, got) - } -} - -func applyEdits(contents string, edits []diff.TextEdit) string { - res := contents - - // Apply the edits from the end of the file forward - // to preserve the offsets - for i := len(edits) - 1; i >= 0; i-- { - edit := edits[i] - start := edit.Span.Start().Offset() - end := edit.Span.End().Offset() - tmp := res[0:start] + edit.NewText - res = tmp + res[end:] - } - return res -} - -func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) { - _, srcRng, err := spanToRange(r.data, src) - if err != nil { - t.Fatal(err) - } - // Find the identifier at the position. - fh, err := r.snapshot.GetFile(r.ctx, src.URI()) - if err != nil { - t.Fatal(err) - } - item, _, err := source.PrepareRename(r.ctx, r.snapshot, fh, srcRng.Start) - if err != nil { - if want.Text != "" { // expected an ident. - t.Errorf("prepare rename failed for %v: got error: %v", src, err) - } - return - } - if item == nil { - if want.Text != "" { - t.Errorf("prepare rename failed for %v: got nil", src) - } - return - } - if want.Text == "" { - t.Errorf("prepare rename failed for %v: expected nil, got %v", src, item) - return - } - if item.Range.Start == item.Range.End { - // Special case for 0-length ranges. Marks can't specify a 0-length range, - // so just compare the start. - if item.Range.Start != want.Range.Start { - t.Errorf("prepare rename failed: incorrect point, got %v want %v", item.Range.Start, want.Range.Start) - } - } else { - if protocol.CompareRange(item.Range, want.Range) != 0 { - t.Errorf("prepare rename failed: incorrect range got %v want %v", item.Range, want.Range) - } - } -} - -func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) { - fh, err := r.snapshot.GetFile(r.ctx, uri) - if err != nil { - t.Fatal(err) - } - symbols, err := source.DocumentSymbols(r.ctx, r.snapshot, fh) - if err != nil { - t.Errorf("symbols failed for %s: %v", uri, err) - } - if len(symbols) != len(expectedSymbols) { - t.Errorf("want %d top-level symbols in %v, got %d", len(expectedSymbols), uri, len(symbols)) - return - } - if diff := tests.DiffSymbols(t, uri, expectedSymbols, symbols); diff != "" { - t.Error(diff) - } -} - -func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { - r.callWorkspaceSymbols(t, uri, query, typ) -} - -func (r *runner) callWorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) { - t.Helper() - - matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ) - gotSymbols, err := source.WorkspaceSymbols(r.ctx, matcher, r.view.Options().SymbolStyle, []source.View{r.view}, query) - if err != nil { - t.Fatal(err) - } - got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols) - if err != nil { - t.Fatal(err) - } - got = filepath.ToSlash(tests.Normalize(got, r.normalizers)) - want := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) { - return []byte(got), nil - })) - if diff := tests.Diff(t, want, got); diff != "" { - t.Error(diff) - } -} - -func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) { - _, rng, err := spanToRange(r.data, spn) - if err != nil { - t.Fatal(err) - } - fh, err := r.snapshot.GetFile(r.ctx, spn.URI()) - if err != nil { - t.Fatal(err) - } - gotSignature, gotActiveParameter, err := source.SignatureHelp(r.ctx, r.snapshot, fh, rng.Start) - if err != nil { - // Only fail if we got an error we did not expect. - if want != nil { - t.Fatalf("failed for %v: %v", spn, err) - } - return - } - if gotSignature == nil { - if want != nil { - t.Fatalf("got nil signature, but expected %v", want) - } - return - } - got := &protocol.SignatureHelp{ - Signatures: []protocol.SignatureInformation{*gotSignature}, - ActiveParameter: uint32(gotActiveParameter), - } - diff, err := tests.DiffSignatures(spn, want, got) - if err != nil { - t.Fatal(err) - } - if diff != "" { - t.Error(diff) - } -} - -// These are pure LSP features, no source level functionality to be tested. -func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {} - -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { -} -func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {} -func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {} -func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {} -func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {} - -func spanToRange(data *tests.Data, spn span.Span) (*protocol.ColumnMapper, protocol.Range, error) { - m, err := data.Mapper(spn.URI()) - if err != nil { - return nil, protocol.Range{}, err - } - srcRng, err := m.Range(spn) - if err != nil { - return nil, protocol.Range{}, err - } - return m, srcRng, nil -} diff --git a/internal/lsp/source/stub.go b/internal/lsp/source/stub.go deleted file mode 100644 index 0d1981795f2..00000000000 --- a/internal/lsp/source/stub.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "context" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/lsp/analysis/stubmethods" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/typeparams" -) - -func stubSuggestedFixFunc(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, rng protocol.Range) (*analysis.SuggestedFix, error) { - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, fmt.Errorf("GetParsedFile: %w", err) - } - nodes, pos, err := getStubNodes(pgf, rng) - if err != nil { - return nil, fmt.Errorf("getNodes: %w", err) - } - si := stubmethods.GetStubInfo(pkg.GetTypesInfo(), nodes, pos) - if si == nil { - return nil, fmt.Errorf("nil interface request") - } - parsedConcreteFile, concreteFH, err := getStubFile(ctx, si.Concrete.Obj(), snapshot) - if err != nil { - return nil, fmt.Errorf("getFile(concrete): %w", err) - } - var ( - methodsSrc []byte - stubImports []*stubImport // additional imports needed for method stubs - ) - if si.Interface.Pkg() == nil && si.Interface.Name() == "error" && si.Interface.Parent() == types.Universe { - methodsSrc = stubErr(ctx, parsedConcreteFile.File, si, snapshot) - } else { - methodsSrc, stubImports, err = stubMethods(ctx, parsedConcreteFile.File, si, snapshot) - } - if err != nil { - return nil, fmt.Errorf("stubMethods: %w", err) - } - nodes, _ = astutil.PathEnclosingInterval(parsedConcreteFile.File, si.Concrete.Obj().Pos(), si.Concrete.Obj().Pos()) - concreteSrc, err := concreteFH.Read() - if err != nil { - return nil, fmt.Errorf("error reading concrete file source: %w", err) - } - insertPos, err := safetoken.Offset(parsedConcreteFile.Tok, nodes[1].End()) - if err != nil || insertPos >= len(concreteSrc) { - return nil, fmt.Errorf("insertion position is past the end of the file") - } - var buf bytes.Buffer - buf.Write(concreteSrc[:insertPos]) - buf.WriteByte('\n') - buf.Write(methodsSrc) - buf.Write(concreteSrc[insertPos:]) - fset := token.NewFileSet() - newF, err := parser.ParseFile(fset, parsedConcreteFile.File.Name.Name, buf.Bytes(), parser.ParseComments) - if err != nil { - return nil, fmt.Errorf("could not reparse file: %w", err) - } - for _, imp := range stubImports { - astutil.AddNamedImport(fset, newF, imp.Name, imp.Path) - } - var source bytes.Buffer - err = format.Node(&source, fset, newF) - if err != nil { - return nil, fmt.Errorf("format.Node: %w", err) - } - diffEdits, err := snapshot.View().Options().ComputeEdits(parsedConcreteFile.URI, string(parsedConcreteFile.Src), source.String()) - if err != nil { - return nil, err - } - var edits []analysis.TextEdit - for _, edit := range diffEdits { - rng, err := edit.Span.Range(parsedConcreteFile.Mapper.TokFile) - if err != nil { - return nil, err - } - edits = append(edits, analysis.TextEdit{ - Pos: rng.Start, - End: rng.End, - NewText: []byte(edit.NewText), - }) - } - return &analysis.SuggestedFix{ - TextEdits: edits, - }, nil -} - -// stubMethods returns the Go code of all methods -// that implement the given interface -func stubMethods(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) ([]byte, []*stubImport, error) { - ifacePkg, err := deducePkgFromTypes(ctx, snapshot, si.Interface) - if err != nil { - return nil, nil, err - } - si.Concrete.Obj().Type() - concMS := types.NewMethodSet(types.NewPointer(si.Concrete.Obj().Type())) - missing, err := missingMethods(ctx, snapshot, concMS, si.Concrete.Obj().Pkg(), si.Interface, ifacePkg, map[string]struct{}{}) - if err != nil { - return nil, nil, fmt.Errorf("missingMethods: %w", err) - } - if len(missing) == 0 { - return nil, nil, fmt.Errorf("no missing methods found") - } - var ( - stubImports []*stubImport - methodsBuffer bytes.Buffer - ) - for _, mi := range missing { - for _, m := range mi.missing { - // TODO(marwan-at-work): this should share the same logic with source.FormatVarType - // as it also accounts for type aliases. - sig := types.TypeString(m.Type(), stubmethods.RelativeToFiles(si.Concrete.Obj().Pkg(), concreteFile, mi.file, func(name, path string) { - for _, imp := range stubImports { - if imp.Name == name && imp.Path == path { - return - } - } - stubImports = append(stubImports, &stubImport{name, path}) - })) - _, err = methodsBuffer.Write(printStubMethod(methodData{ - Method: m.Name(), - Concrete: getStubReceiver(si), - Interface: deduceIfaceName(si.Concrete.Obj().Pkg(), si.Interface.Pkg(), si.Interface), - Signature: strings.TrimPrefix(sig, "func"), - })) - if err != nil { - return nil, nil, fmt.Errorf("error printing method: %w", err) - } - methodsBuffer.WriteRune('\n') - } - } - return methodsBuffer.Bytes(), stubImports, nil -} - -// stubErr reurns the Go code implementation -// of an error interface relevant to the -// concrete type -func stubErr(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) []byte { - return printStubMethod(methodData{ - Method: "Error", - Interface: "error", - Concrete: getStubReceiver(si), - Signature: "() string", - }) -} - -// getStubReceiver returns the concrete type's name as a method receiver. -// It accounts for type parameters if they exist. -func getStubReceiver(si *stubmethods.StubInfo) string { - var concrete string - if si.Pointer { - concrete += "*" - } - concrete += si.Concrete.Obj().Name() - concrete += FormatTypeParams(typeparams.ForNamed(si.Concrete)) - return concrete -} - -type methodData struct { - Method string - Interface string - Concrete string - Signature string -} - -// printStubMethod takes methodData and returns Go code that represents the given method such as: -// -// // {{ .Method }} implements {{ .Interface }} -// func ({{ .Concrete }}) {{ .Method }}{{ .Signature }} { -// panic("unimplemented") -// } -func printStubMethod(md methodData) []byte { - var b bytes.Buffer - fmt.Fprintf(&b, "// %s implements %s\n", md.Method, md.Interface) - fmt.Fprintf(&b, "func (%s) %s%s {\n\t", md.Concrete, md.Method, md.Signature) - fmt.Fprintln(&b, `panic("unimplemented")`) - fmt.Fprintln(&b, "}") - return b.Bytes() -} - -func deducePkgFromTypes(ctx context.Context, snapshot Snapshot, ifaceObj types.Object) (Package, error) { - pkgs, err := snapshot.KnownPackages(ctx) - if err != nil { - return nil, err - } - for _, p := range pkgs { - if p.PkgPath() == ifaceObj.Pkg().Path() { - return p, nil - } - } - return nil, fmt.Errorf("pkg %q not found", ifaceObj.Pkg().Path()) -} - -func deduceIfaceName(concretePkg, ifacePkg *types.Package, ifaceObj types.Object) string { - if concretePkg.Path() == ifacePkg.Path() { - return ifaceObj.Name() - } - return fmt.Sprintf("%s.%s", ifacePkg.Name(), ifaceObj.Name()) -} - -func getStubNodes(pgf *ParsedGoFile, pRng protocol.Range) ([]ast.Node, token.Pos, error) { - spn, err := pgf.Mapper.RangeSpan(pRng) - if err != nil { - return nil, 0, err - } - rng, err := spn.Range(pgf.Mapper.TokFile) - if err != nil { - return nil, 0, err - } - nodes, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start, rng.End) - return nodes, rng.Start, nil -} - -/* -missingMethods takes a concrete type and returns any missing methods for the given interface as well as -any missing interface that might have been embedded to its parent. For example: - - type I interface { - io.Writer - Hello() - } - -returns - - []*missingInterface{ - { - iface: *types.Interface (io.Writer), - file: *ast.File: io.go, - missing []*types.Func{Write}, - }, - { - iface: *types.Interface (I), - file: *ast.File: myfile.go, - missing: []*types.Func{Hello} - }, - } -*/ -func missingMethods(ctx context.Context, snapshot Snapshot, concMS *types.MethodSet, concPkg *types.Package, ifaceObj types.Object, ifacePkg Package, visited map[string]struct{}) ([]*missingInterface, error) { - iface, ok := ifaceObj.Type().Underlying().(*types.Interface) - if !ok { - return nil, fmt.Errorf("expected %v to be an interface but got %T", iface, ifaceObj.Type().Underlying()) - } - missing := []*missingInterface{} - for i := 0; i < iface.NumEmbeddeds(); i++ { - eiface := iface.Embedded(i).Obj() - depPkg := ifacePkg - if eiface.Pkg().Path() != ifacePkg.PkgPath() { - var err error - depPkg, err = ifacePkg.GetImport(eiface.Pkg().Path()) - if err != nil { - return nil, err - } - } - em, err := missingMethods(ctx, snapshot, concMS, concPkg, eiface, depPkg, visited) - if err != nil { - return nil, err - } - missing = append(missing, em...) - } - parsedFile, _, err := getStubFile(ctx, ifaceObj, snapshot) - if err != nil { - return nil, fmt.Errorf("error getting iface file: %w", err) - } - mi := &missingInterface{ - pkg: ifacePkg, - iface: iface, - file: parsedFile.File, - } - if mi.file == nil { - return nil, fmt.Errorf("could not find ast.File for %v", ifaceObj.Name()) - } - for i := 0; i < iface.NumExplicitMethods(); i++ { - method := iface.ExplicitMethod(i) - // if the concrete type does not have the interface method - if concMS.Lookup(concPkg, method.Name()) == nil { - if _, ok := visited[method.Name()]; !ok { - mi.missing = append(mi.missing, method) - visited[method.Name()] = struct{}{} - } - } - if sel := concMS.Lookup(concPkg, method.Name()); sel != nil { - implSig := sel.Type().(*types.Signature) - ifaceSig := method.Type().(*types.Signature) - if !types.Identical(ifaceSig, implSig) { - return nil, fmt.Errorf("mimsatched %q function signatures:\nhave: %s\nwant: %s", method.Name(), implSig, ifaceSig) - } - } - } - if len(mi.missing) > 0 { - missing = append(missing, mi) - } - return missing, nil -} - -func getStubFile(ctx context.Context, obj types.Object, snapshot Snapshot) (*ParsedGoFile, VersionedFileHandle, error) { - objPos := snapshot.FileSet().Position(obj.Pos()) - objFile := span.URIFromPath(objPos.Filename) - objectFH := snapshot.FindFile(objFile) - _, goFile, err := GetParsedFile(ctx, snapshot, objectFH, WidestPackage) - if err != nil { - return nil, nil, fmt.Errorf("GetParsedFile: %w", err) - } - return goFile, objectFH, nil -} - -// missingInterface represents an interface -// that has all or some of its methods missing -// from the destination concrete type -type missingInterface struct { - iface *types.Interface - file *ast.File - pkg Package - missing []*types.Func -} - -// stubImport represents a newly added import -// statement to the concrete type. If name is not -// empty, then that import is required to have that name. -type stubImport struct{ Name, Path string } diff --git a/internal/lsp/source/symbols.go b/internal/lsp/source/symbols.go deleted file mode 100644 index 074b24eba01..00000000000 --- a/internal/lsp/source/symbols.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/ast" - "go/types" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" -) - -func DocumentSymbols(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.DocumentSymbol, error) { - ctx, done := event.Start(ctx, "source.DocumentSymbols") - defer done() - - pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) - if err != nil { - return nil, fmt.Errorf("getting file for DocumentSymbols: %w", err) - } - - info := pkg.GetTypesInfo() - q := Qualifier(pgf.File, pkg.GetTypes(), info) - - symbolsToReceiver := make(map[types.Type]int) - var symbols []protocol.DocumentSymbol - for _, decl := range pgf.File.Decls { - switch decl := decl.(type) { - case *ast.FuncDecl: - if decl.Name.Name == "_" { - continue - } - if obj := info.ObjectOf(decl.Name); obj != nil { - fs, err := funcSymbol(snapshot, pkg, decl, obj, q) - if err != nil { - return nil, err - } - // If function is a method, prepend the type of the method. - if fs.Kind == protocol.Method { - rtype := obj.Type().(*types.Signature).Recv().Type() - fs.Name = fmt.Sprintf("(%s).%s", types.TypeString(rtype, q), fs.Name) - } - symbols = append(symbols, fs) - } - case *ast.GenDecl: - for _, spec := range decl.Specs { - switch spec := spec.(type) { - case *ast.TypeSpec: - if spec.Name.Name == "_" { - continue - } - if obj := info.ObjectOf(spec.Name); obj != nil { - ts, err := typeSymbol(snapshot, pkg, info, spec, obj, q) - if err != nil { - return nil, err - } - symbols = append(symbols, ts) - symbolsToReceiver[obj.Type()] = len(symbols) - 1 - } - case *ast.ValueSpec: - for _, name := range spec.Names { - if name.Name == "_" { - continue - } - if obj := info.ObjectOf(name); obj != nil { - vs, err := varSymbol(snapshot, pkg, decl, name, obj, q) - if err != nil { - return nil, err - } - symbols = append(symbols, vs) - } - } - } - } - } - } - return symbols, nil -} - -func funcSymbol(snapshot Snapshot, pkg Package, decl *ast.FuncDecl, obj types.Object, q types.Qualifier) (protocol.DocumentSymbol, error) { - s := protocol.DocumentSymbol{ - Name: obj.Name(), - Kind: protocol.Function, - } - var err error - s.Range, err = nodeToProtocolRange(snapshot, pkg, decl) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, decl.Name) - if err != nil { - return protocol.DocumentSymbol{}, err - } - sig, _ := obj.Type().(*types.Signature) - if sig != nil { - if sig.Recv() != nil { - s.Kind = protocol.Method - } - s.Detail += "(" - for i := 0; i < sig.Params().Len(); i++ { - if i > 0 { - s.Detail += ", " - } - param := sig.Params().At(i) - label := types.TypeString(param.Type(), q) - if param.Name() != "" { - label = fmt.Sprintf("%s %s", param.Name(), label) - } - s.Detail += label - } - s.Detail += ")" - } - return s, nil -} - -func typeSymbol(snapshot Snapshot, pkg Package, info *types.Info, spec *ast.TypeSpec, obj types.Object, qf types.Qualifier) (protocol.DocumentSymbol, error) { - s := protocol.DocumentSymbol{ - Name: obj.Name(), - } - s.Detail, _ = FormatType(obj.Type(), qf) - s.Kind = typeToKind(obj.Type()) - - var err error - s.Range, err = nodeToProtocolRange(snapshot, pkg, spec) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, spec.Name) - if err != nil { - return protocol.DocumentSymbol{}, err - } - t, objIsStruct := obj.Type().Underlying().(*types.Struct) - st, specIsStruct := spec.Type.(*ast.StructType) - if objIsStruct && specIsStruct { - for i := 0; i < t.NumFields(); i++ { - f := t.Field(i) - child := protocol.DocumentSymbol{ - Name: f.Name(), - Kind: protocol.Field, - } - child.Detail, _ = FormatType(f.Type(), qf) - - spanNode, selectionNode := nodesForStructField(i, st) - if span, err := nodeToProtocolRange(snapshot, pkg, spanNode); err == nil { - child.Range = span - } - if span, err := nodeToProtocolRange(snapshot, pkg, selectionNode); err == nil { - child.SelectionRange = span - } - s.Children = append(s.Children, child) - } - } - - ti, objIsInterface := obj.Type().Underlying().(*types.Interface) - ai, specIsInterface := spec.Type.(*ast.InterfaceType) - if objIsInterface && specIsInterface { - for i := 0; i < ti.NumExplicitMethods(); i++ { - method := ti.ExplicitMethod(i) - child := protocol.DocumentSymbol{ - Name: method.Name(), - Kind: protocol.Method, - } - - var spanNode, selectionNode ast.Node - Methods: - for _, f := range ai.Methods.List { - for _, id := range f.Names { - if id.Name == method.Name() { - spanNode, selectionNode = f, id - break Methods - } - } - } - child.Range, err = nodeToProtocolRange(snapshot, pkg, spanNode) - if err != nil { - return protocol.DocumentSymbol{}, err - } - child.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, selectionNode) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.Children = append(s.Children, child) - } - - for i := 0; i < ti.NumEmbeddeds(); i++ { - embedded := ti.EmbeddedType(i) - nt, isNamed := embedded.(*types.Named) - if !isNamed { - continue - } - - child := protocol.DocumentSymbol{ - Name: types.TypeString(embedded, qf), - } - child.Kind = typeToKind(embedded) - var spanNode, selectionNode ast.Node - Embeddeds: - for _, f := range ai.Methods.List { - if len(f.Names) > 0 { - continue - } - - if t := info.TypeOf(f.Type); types.Identical(nt, t) { - spanNode, selectionNode = f, f.Type - break Embeddeds - } - } - child.Range, err = nodeToProtocolRange(snapshot, pkg, spanNode) - if err != nil { - return protocol.DocumentSymbol{}, err - } - child.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, selectionNode) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.Children = append(s.Children, child) - } - } - return s, nil -} - -func nodesForStructField(i int, st *ast.StructType) (span, selection ast.Node) { - j := 0 - for _, field := range st.Fields.List { - if len(field.Names) == 0 { - if i == j { - return field, field.Type - } - j++ - continue - } - for _, name := range field.Names { - if i == j { - return field, name - } - j++ - } - } - return nil, nil -} - -func varSymbol(snapshot Snapshot, pkg Package, decl ast.Node, name *ast.Ident, obj types.Object, q types.Qualifier) (protocol.DocumentSymbol, error) { - s := protocol.DocumentSymbol{ - Name: obj.Name(), - Kind: protocol.Variable, - } - if _, ok := obj.(*types.Const); ok { - s.Kind = protocol.Constant - } - var err error - s.Range, err = nodeToProtocolRange(snapshot, pkg, decl) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, name) - if err != nil { - return protocol.DocumentSymbol{}, err - } - s.Detail = types.TypeString(obj.Type(), q) - return s, nil -} diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go deleted file mode 100644 index 93344e08678..00000000000 --- a/internal/lsp/source/types_format.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "context" - "fmt" - "go/ast" - "go/doc" - "go/printer" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/typeparams" -) - -// FormatType returns the detail and kind for a types.Type. -func FormatType(typ types.Type, qf types.Qualifier) (detail string, kind protocol.CompletionItemKind) { - if types.IsInterface(typ) { - detail = "interface{...}" - kind = protocol.InterfaceCompletion - } else if _, ok := typ.(*types.Struct); ok { - detail = "struct{...}" - kind = protocol.StructCompletion - } else if typ != typ.Underlying() { - detail, kind = FormatType(typ.Underlying(), qf) - } else { - detail = types.TypeString(typ, qf) - kind = protocol.ClassCompletion - } - return detail, kind -} - -type signature struct { - name, doc string - typeParams, params, results []string - variadic bool - needResultParens bool -} - -func (s *signature) Format() string { - var b strings.Builder - b.WriteByte('(') - for i, p := range s.params { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(p) - } - b.WriteByte(')') - - // Add space between parameters and results. - if len(s.results) > 0 { - b.WriteByte(' ') - } - if s.needResultParens { - b.WriteByte('(') - } - for i, r := range s.results { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(r) - } - if s.needResultParens { - b.WriteByte(')') - } - return b.String() -} - -func (s *signature) TypeParams() []string { - return s.typeParams -} - -func (s *signature) Params() []string { - return s.params -} - -// NewBuiltinSignature returns signature for the builtin object with a given -// name, if a builtin object with the name exists. -func NewBuiltinSignature(ctx context.Context, s Snapshot, name string) (*signature, error) { - builtin, err := s.BuiltinFile(ctx) - if err != nil { - return nil, err - } - obj := builtin.File.Scope.Lookup(name) - if obj == nil { - return nil, fmt.Errorf("no builtin object for %s", name) - } - decl, ok := obj.Decl.(*ast.FuncDecl) - if !ok { - return nil, fmt.Errorf("no function declaration for builtin: %s", name) - } - if decl.Type == nil { - return nil, fmt.Errorf("no type for builtin decl %s", decl.Name) - } - var variadic bool - if decl.Type.Params.List != nil { - numParams := len(decl.Type.Params.List) - lastParam := decl.Type.Params.List[numParams-1] - if _, ok := lastParam.Type.(*ast.Ellipsis); ok { - variadic = true - } - } - params, _ := formatFieldList(ctx, s, decl.Type.Params, variadic) - results, needResultParens := formatFieldList(ctx, s, decl.Type.Results, false) - d := decl.Doc.Text() - switch s.View().Options().HoverKind { - case SynopsisDocumentation: - d = doc.Synopsis(d) - case NoDocumentation: - d = "" - } - return &signature{ - doc: d, - name: name, - needResultParens: needResultParens, - params: params, - results: results, - variadic: variadic, - }, nil -} - -var replacer = strings.NewReplacer( - `ComplexType`, `complex128`, - `FloatType`, `float64`, - `IntegerType`, `int`, -) - -func formatFieldList(ctx context.Context, snapshot Snapshot, list *ast.FieldList, variadic bool) ([]string, bool) { - if list == nil { - return nil, false - } - var writeResultParens bool - var result []string - for i := 0; i < len(list.List); i++ { - if i >= 1 { - writeResultParens = true - } - p := list.List[i] - cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4} - b := &bytes.Buffer{} - if err := cfg.Fprint(b, snapshot.FileSet(), p.Type); err != nil { - event.Error(ctx, "unable to print type", nil, tag.Type.Of(p.Type)) - continue - } - typ := replacer.Replace(b.String()) - if len(p.Names) == 0 { - result = append(result, typ) - } - for _, name := range p.Names { - if name.Name != "" { - if i == 0 { - writeResultParens = true - } - result = append(result, fmt.Sprintf("%s %s", name.Name, typ)) - } else { - result = append(result, typ) - } - } - } - if variadic { - result[len(result)-1] = strings.Replace(result[len(result)-1], "[]", "...", 1) - } - return result, writeResultParens -} - -// FormatTypeParams turns TypeParamList into its Go representation, such as: -// [T, Y]. Note that it does not print constraints as this is mainly used for -// formatting type params in method receivers. -func FormatTypeParams(tparams *typeparams.TypeParamList) string { - if tparams == nil || tparams.Len() == 0 { - return "" - } - var buf bytes.Buffer - buf.WriteByte('[') - for i := 0; i < tparams.Len(); i++ { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(tparams.At(i).Obj().Name()) - } - buf.WriteByte(']') - return buf.String() -} - -// NewSignature returns formatted signature for a types.Signature struct. -func NewSignature(ctx context.Context, s Snapshot, pkg Package, sig *types.Signature, comment *ast.CommentGroup, qf types.Qualifier) *signature { - var tparams []string - tpList := typeparams.ForSignature(sig) - for i := 0; i < tpList.Len(); i++ { - tparam := tpList.At(i) - // TODO: is it possible to reuse the logic from FormatVarType here? - s := tparam.Obj().Name() + " " + tparam.Constraint().String() - tparams = append(tparams, s) - } - - params := make([]string, 0, sig.Params().Len()) - for i := 0; i < sig.Params().Len(); i++ { - el := sig.Params().At(i) - typ := FormatVarType(ctx, s, pkg, el, qf) - p := typ - if el.Name() != "" { - p = el.Name() + " " + typ - } - params = append(params, p) - } - - var needResultParens bool - results := make([]string, 0, sig.Results().Len()) - for i := 0; i < sig.Results().Len(); i++ { - if i >= 1 { - needResultParens = true - } - el := sig.Results().At(i) - typ := FormatVarType(ctx, s, pkg, el, qf) - if el.Name() == "" { - results = append(results, typ) - } else { - if i == 0 { - needResultParens = true - } - results = append(results, el.Name()+" "+typ) - } - } - var d string - if comment != nil { - d = comment.Text() - } - switch s.View().Options().HoverKind { - case SynopsisDocumentation: - d = doc.Synopsis(d) - case NoDocumentation: - d = "" - } - return &signature{ - doc: d, - typeParams: tparams, - params: params, - results: results, - variadic: sig.Variadic(), - needResultParens: needResultParens, - } -} - -// FormatVarType formats a *types.Var, accounting for type aliases. -// To do this, it looks in the AST of the file in which the object is declared. -// On any errors, it always falls back to types.TypeString. -func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj *types.Var, qf types.Qualifier) string { - pkg, err := FindPackageFromPos(ctx, snapshot, obj.Pos()) - if err != nil { - return types.TypeString(obj.Type(), qf) - } - - expr, err := varType(ctx, snapshot, pkg, obj) - if err != nil { - return types.TypeString(obj.Type(), qf) - } - - // If the given expr refers to a type parameter, then use the - // object's Type instead of the type parameter declaration. This helps - // format the instantiated type as opposed to the original undeclared - // generic type. - if typeparams.IsTypeParam(pkg.GetTypesInfo().Types[expr].Type) { - return types.TypeString(obj.Type(), qf) - } - - // The type names in the AST may not be correctly qualified. - // Determine the package name to use based on the package that originated - // the query and the package in which the type is declared. - // We then qualify the value by cloning the AST node and editing it. - clonedInfo := make(map[token.Pos]*types.PkgName) - qualified := cloneExpr(expr, pkg.GetTypesInfo(), clonedInfo) - - // If the request came from a different package than the one in which the - // types are defined, we may need to modify the qualifiers. - qualified = qualifyExpr(qualified, srcpkg, pkg, clonedInfo, qf) - fmted := FormatNode(snapshot.FileSet(), qualified) - return fmted -} - -// varType returns the type expression for a *types.Var. -func varType(ctx context.Context, snapshot Snapshot, pkg Package, obj *types.Var) (ast.Expr, error) { - field, err := snapshot.PosToField(ctx, pkg, obj.Pos()) - if err != nil { - return nil, err - } - if field == nil { - return nil, fmt.Errorf("no declaration for object %s", obj.Name()) - } - return field.Type, nil -} - -// qualifyExpr applies the "pkgName." prefix to any *ast.Ident in the expr. -func qualifyExpr(expr ast.Expr, srcpkg, pkg Package, clonedInfo map[token.Pos]*types.PkgName, qf types.Qualifier) ast.Expr { - ast.Inspect(expr, func(n ast.Node) bool { - switch n := n.(type) { - case *ast.ArrayType, *ast.ChanType, *ast.Ellipsis, - *ast.FuncType, *ast.MapType, *ast.ParenExpr, - *ast.StarExpr, *ast.StructType, *ast.FieldList, *ast.Field: - // These are the only types that are cloned by cloneExpr below, - // so these are the only types that we can traverse and potentially - // modify. This is not an ideal approach, but it works for now. - - // TODO(rFindley): can we eliminate this filtering entirely? This caused - // bugs in the past (golang/go#50539) - return true - case *ast.SelectorExpr: - // We may need to change any selectors in which the X is a package - // name and the Sel is exported. - x, ok := n.X.(*ast.Ident) - if !ok { - return false - } - obj, ok := clonedInfo[x.Pos()] - if !ok { - return false - } - x.Name = qf(obj.Imported()) - return false - case *ast.Ident: - if srcpkg == pkg { - return false - } - // Only add the qualifier if the identifier is exported. - if ast.IsExported(n.Name) { - pkgName := qf(pkg.GetTypes()) - n.Name = pkgName + "." + n.Name - } - } - return false - }) - return expr -} - -// cloneExpr only clones expressions that appear in the parameters or return -// values of a function declaration. The original expression may be returned -// to the caller in 2 cases: -// -// 1. The expression has no pointer fields. -// 2. The expression cannot appear in an *ast.FuncType, making it -// unnecessary to clone. -// -// This function also keeps track of selector expressions in which the X is a -// package name and marks them in a map along with their type information, so -// that this information can be used when rewriting the expression. -// -// NOTE: This function is tailored to the use case of qualifyExpr, and should -// be used with caution. -func cloneExpr(expr ast.Expr, info *types.Info, clonedInfo map[token.Pos]*types.PkgName) ast.Expr { - switch expr := expr.(type) { - case *ast.ArrayType: - return &ast.ArrayType{ - Lbrack: expr.Lbrack, - Elt: cloneExpr(expr.Elt, info, clonedInfo), - Len: expr.Len, - } - case *ast.ChanType: - return &ast.ChanType{ - Arrow: expr.Arrow, - Begin: expr.Begin, - Dir: expr.Dir, - Value: cloneExpr(expr.Value, info, clonedInfo), - } - case *ast.Ellipsis: - return &ast.Ellipsis{ - Ellipsis: expr.Ellipsis, - Elt: cloneExpr(expr.Elt, info, clonedInfo), - } - case *ast.FuncType: - return &ast.FuncType{ - Func: expr.Func, - Params: cloneFieldList(expr.Params, info, clonedInfo), - Results: cloneFieldList(expr.Results, info, clonedInfo), - } - case *ast.Ident: - return cloneIdent(expr) - case *ast.MapType: - return &ast.MapType{ - Map: expr.Map, - Key: cloneExpr(expr.Key, info, clonedInfo), - Value: cloneExpr(expr.Value, info, clonedInfo), - } - case *ast.ParenExpr: - return &ast.ParenExpr{ - Lparen: expr.Lparen, - Rparen: expr.Rparen, - X: cloneExpr(expr.X, info, clonedInfo), - } - case *ast.SelectorExpr: - s := &ast.SelectorExpr{ - Sel: cloneIdent(expr.Sel), - X: cloneExpr(expr.X, info, clonedInfo), - } - if x, ok := expr.X.(*ast.Ident); ok && ast.IsExported(expr.Sel.Name) { - if obj, ok := info.ObjectOf(x).(*types.PkgName); ok { - clonedInfo[s.X.Pos()] = obj - } - } - return s - case *ast.StarExpr: - return &ast.StarExpr{ - Star: expr.Star, - X: cloneExpr(expr.X, info, clonedInfo), - } - case *ast.StructType: - return &ast.StructType{ - Struct: expr.Struct, - Fields: cloneFieldList(expr.Fields, info, clonedInfo), - Incomplete: expr.Incomplete, - } - default: - return expr - } -} - -func cloneFieldList(fl *ast.FieldList, info *types.Info, clonedInfo map[token.Pos]*types.PkgName) *ast.FieldList { - if fl == nil { - return nil - } - if fl.List == nil { - return &ast.FieldList{ - Closing: fl.Closing, - Opening: fl.Opening, - } - } - list := make([]*ast.Field, 0, len(fl.List)) - for _, f := range fl.List { - var names []*ast.Ident - for _, n := range f.Names { - names = append(names, cloneIdent(n)) - } - list = append(list, &ast.Field{ - Comment: f.Comment, - Doc: f.Doc, - Names: names, - Tag: f.Tag, - Type: cloneExpr(f.Type, info, clonedInfo), - }) - } - return &ast.FieldList{ - Closing: fl.Closing, - Opening: fl.Opening, - List: list, - } -} - -func cloneIdent(ident *ast.Ident) *ast.Ident { - return &ast.Ident{ - NamePos: ident.NamePos, - Name: ident.Name, - Obj: ident.Obj, - } -} diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go deleted file mode 100644 index 9cb2ee69482..00000000000 --- a/internal/lsp/source/util.go +++ /dev/null @@ -1,580 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/ast" - "go/printer" - "go/token" - "go/types" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// MappedRange provides mapped protocol.Range for a span.Range, accounting for -// UTF-16 code points. -type MappedRange struct { - spanRange span.Range // the range in the compiled source (package.CompiledGoFiles) - m *protocol.ColumnMapper // a mapper of the edited source (package.GoFiles) -} - -// NewMappedRange returns a MappedRange for the given start and end token.Pos. -// -// By convention, start and end are assumed to be positions in the compiled (== -// type checked) source, whereas the column mapper m maps positions in the -// user-edited source. Note that these may not be the same, as when using CGo: -// CompiledGoFiles contains generated files, whose positions (via -// token.File.Position) point to locations in the edited file -- the file -// containing `import "C"`. -func NewMappedRange(fset *token.FileSet, m *protocol.ColumnMapper, start, end token.Pos) MappedRange { - if tf := fset.File(start); tf == nil { - bug.Report("nil file", nil) - } else { - mapped := m.TokFile.Name() - adjusted := tf.PositionFor(start, true) // adjusted position - if adjusted.Filename != mapped { - bug.Reportf("mapped file %q does not match start position file %q", mapped, adjusted.Filename) - } - } - return MappedRange{ - spanRange: span.NewRange(fset, start, end), - m: m, - } -} - -// Range returns the LSP range in the edited source. -// -// See the documentation of NewMappedRange for information on edited vs -// compiled source. -func (s MappedRange) Range() (protocol.Range, error) { - if s.m == nil { - return protocol.Range{}, bug.Errorf("invalid range") - } - spn, err := s.Span() - if err != nil { - return protocol.Range{}, err - } - return s.m.Range(spn) -} - -// Span returns the span corresponding to the mapped range in the edited -// source. -// -// See the documentation of NewMappedRange for information on edited vs -// compiled source. -func (s MappedRange) Span() (span.Span, error) { - // In the past, some code-paths have relied on Span returning an error if s - // is the zero value (i.e. s.m is nil). But this should be treated as a bug: - // observe that s.URI() would panic in this case. - if s.m == nil { - return span.Span{}, bug.Errorf("invalid range") - } - return span.FileSpan(s.spanRange.TokFile, s.m.TokFile, s.spanRange.Start, s.spanRange.End) -} - -// URI returns the URI of the edited file. -// -// See the documentation of NewMappedRange for information on edited vs -// compiled source. -func (s MappedRange) URI() span.URI { - return s.m.URI -} - -// GetParsedFile is a convenience function that extracts the Package and -// ParsedGoFile for a file in a Snapshot. pkgPolicy is one of NarrowestPackage/ -// WidestPackage. -func GetParsedFile(ctx context.Context, snapshot Snapshot, fh FileHandle, pkgPolicy PackageFilter) (Package, *ParsedGoFile, error) { - pkg, err := snapshot.PackageForFile(ctx, fh.URI(), TypecheckWorkspace, pkgPolicy) - if err != nil { - return nil, nil, err - } - pgh, err := pkg.File(fh.URI()) - return pkg, pgh, err -} - -func IsGenerated(ctx context.Context, snapshot Snapshot, uri span.URI) bool { - fh, err := snapshot.GetFile(ctx, uri) - if err != nil { - return false - } - pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader) - if err != nil { - return false - } - for _, commentGroup := range pgf.File.Comments { - for _, comment := range commentGroup.List { - if matched := generatedRx.MatchString(comment.Text); matched { - // Check if comment is at the beginning of the line in source. - if pgf.Tok.Position(comment.Slash).Column == 1 { - return true - } - } - } - } - return false -} - -func nodeToProtocolRange(snapshot Snapshot, pkg Package, n ast.Node) (protocol.Range, error) { - mrng, err := posToMappedRange(snapshot, pkg, n.Pos(), n.End()) - if err != nil { - return protocol.Range{}, err - } - return mrng.Range() -} - -func objToMappedRange(snapshot Snapshot, pkg Package, obj types.Object) (MappedRange, error) { - if pkgName, ok := obj.(*types.PkgName); ok { - // An imported Go package has a package-local, unqualified name. - // When the name matches the imported package name, there is no - // identifier in the import spec with the local package name. - // - // For example: - // import "go/ast" // name "ast" matches package name - // import a "go/ast" // name "a" does not match package name - // - // When the identifier does not appear in the source, have the range - // of the object be the import path, including quotes. - if pkgName.Imported().Name() == pkgName.Name() { - return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(len(pkgName.Imported().Path())+2)) - } - } - return nameToMappedRange(snapshot, pkg, obj.Pos(), obj.Name()) -} - -func nameToMappedRange(snapshot Snapshot, pkg Package, pos token.Pos, name string) (MappedRange, error) { - return posToMappedRange(snapshot, pkg, pos, pos+token.Pos(len(name))) -} - -func posToMappedRange(snapshot Snapshot, pkg Package, pos, end token.Pos) (MappedRange, error) { - logicalFilename := snapshot.FileSet().File(pos).Position(pos).Filename - pgf, _, err := findFileInDeps(pkg, span.URIFromPath(logicalFilename)) - if err != nil { - return MappedRange{}, err - } - if !pos.IsValid() { - return MappedRange{}, fmt.Errorf("invalid position for %v", pos) - } - if !end.IsValid() { - return MappedRange{}, fmt.Errorf("invalid position for %v", end) - } - return NewMappedRange(snapshot.FileSet(), pgf.Mapper, pos, end), nil -} - -// Matches cgo generated comment as well as the proposed standard: -// -// https://golang.org/s/generatedcode -var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) - -// FileKindForLang returns the file kind associated with the given language ID, -// or UnknownKind if the language ID is not recognized. -func FileKindForLang(langID string) FileKind { - switch langID { - case "go": - return Go - case "go.mod": - return Mod - case "go.sum": - return Sum - case "tmpl", "gotmpl": - return Tmpl - case "go.work": - return Work - default: - return UnknownKind - } -} - -func (k FileKind) String() string { - switch k { - case Go: - return "go" - case Mod: - return "go.mod" - case Sum: - return "go.sum" - case Tmpl: - return "tmpl" - case Work: - return "go.work" - default: - return fmt.Sprintf("unk%d", k) - } -} - -// nodeAtPos returns the index and the node whose position is contained inside -// the node list. -func nodeAtPos(nodes []ast.Node, pos token.Pos) (ast.Node, int) { - if nodes == nil { - return nil, -1 - } - for i, node := range nodes { - if node.Pos() <= pos && pos <= node.End() { - return node, i - } - } - return nil, -1 -} - -// IsInterface returns if a types.Type is an interface -func IsInterface(T types.Type) bool { - return T != nil && types.IsInterface(T) -} - -// FormatNode returns the "pretty-print" output for an ast node. -func FormatNode(fset *token.FileSet, n ast.Node) string { - var buf strings.Builder - if err := printer.Fprint(&buf, fset, n); err != nil { - return "" - } - return buf.String() -} - -// Deref returns a pointer's element type, traversing as many levels as needed. -// Otherwise it returns typ. -// -// It can return a pointer type for cyclic types (see golang/go#45510). -func Deref(typ types.Type) types.Type { - var seen map[types.Type]struct{} - for { - p, ok := typ.Underlying().(*types.Pointer) - if !ok { - return typ - } - if _, ok := seen[p.Elem()]; ok { - return typ - } - - typ = p.Elem() - - if seen == nil { - seen = make(map[types.Type]struct{}) - } - seen[typ] = struct{}{} - } -} - -func SortDiagnostics(d []*Diagnostic) { - sort.Slice(d, func(i int, j int) bool { - return CompareDiagnostic(d[i], d[j]) < 0 - }) -} - -func CompareDiagnostic(a, b *Diagnostic) int { - if r := protocol.CompareRange(a.Range, b.Range); r != 0 { - return r - } - if a.Source < b.Source { - return -1 - } - if a.Source > b.Source { - return +1 - } - if a.Message < b.Message { - return -1 - } - if a.Message > b.Message { - return +1 - } - return 0 -} - -// FindPackageFromPos finds the first package containing pos in its -// type-checked AST. -func FindPackageFromPos(ctx context.Context, snapshot Snapshot, pos token.Pos) (Package, error) { - tok := snapshot.FileSet().File(pos) - if tok == nil { - return nil, fmt.Errorf("no file for pos %v", pos) - } - uri := span.URIFromPath(tok.Name()) - pkgs, err := snapshot.PackagesForFile(ctx, uri, TypecheckAll, true) - if err != nil { - return nil, err - } - // Only return the package if it actually type-checked the given position. - for _, pkg := range pkgs { - parsed, err := pkg.File(uri) - if err != nil { - return nil, err - } - if parsed == nil { - continue - } - if parsed.Tok.Base() != tok.Base() { - continue - } - return pkg, nil - } - return nil, fmt.Errorf("no package for given file position") -} - -// findFileInDeps finds uri in pkg or its dependencies. -func findFileInDeps(pkg Package, uri span.URI) (*ParsedGoFile, Package, error) { - queue := []Package{pkg} - seen := make(map[string]bool) - - for len(queue) > 0 { - pkg := queue[0] - queue = queue[1:] - seen[pkg.ID()] = true - - if pgf, err := pkg.File(uri); err == nil { - return pgf, pkg, nil - } - for _, dep := range pkg.Imports() { - if !seen[dep.ID()] { - queue = append(queue, dep) - } - } - } - return nil, nil, fmt.Errorf("no file for %s in package %s", uri, pkg.ID()) -} - -// ImportPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func ImportPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err != nil { - return "" - } - return t -} - -// NodeContains returns true if a node encloses a given position pos. -func NodeContains(n ast.Node, pos token.Pos) bool { - return n != nil && n.Pos() <= pos && pos <= n.End() -} - -// CollectScopes returns all scopes in an ast path, ordered as innermost scope -// first. -func CollectScopes(info *types.Info, path []ast.Node, pos token.Pos) []*types.Scope { - // scopes[i], where i= len(c) { - return false - } - for i := 0; i <= colon+1; i++ { - if i == colon { - continue - } - b := c[i] - if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { - return false - } - } - return true -} - -// honorSymlinks toggles whether or not we consider symlinks when comparing -// file or directory URIs. -const honorSymlinks = false - -func CompareURI(left, right span.URI) int { - if honorSymlinks { - return span.CompareURI(left, right) - } - if left == right { - return 0 - } - if left < right { - return -1 - } - return 1 -} - -// InDir checks whether path is in the file tree rooted at dir. -// InDir makes some effort to succeed even in the presence of symbolic links. -// -// Copied and slightly adjusted from go/src/cmd/go/internal/search/search.go. -func InDir(dir, path string) bool { - if inDirLex(dir, path) { - return true - } - if !honorSymlinks { - return false - } - xpath, err := filepath.EvalSymlinks(path) - if err != nil || xpath == path { - xpath = "" - } else { - if inDirLex(dir, xpath) { - return true - } - } - - xdir, err := filepath.EvalSymlinks(dir) - if err == nil && xdir != dir { - if inDirLex(xdir, path) { - return true - } - if xpath != "" { - if inDirLex(xdir, xpath) { - return true - } - } - } - return false -} - -// inDirLex is like inDir but only checks the lexical form of the file names. -// It does not consider symbolic links. -// -// Copied from go/src/cmd/go/internal/search/search.go. -func inDirLex(dir, path string) bool { - pv := strings.ToUpper(filepath.VolumeName(path)) - dv := strings.ToUpper(filepath.VolumeName(dir)) - path = path[len(pv):] - dir = dir[len(dv):] - switch { - default: - return false - case pv != dv: - return false - case len(path) == len(dir): - if path == dir { - return true - } - return false - case dir == "": - return path != "" - case len(path) > len(dir): - if dir[len(dir)-1] == filepath.Separator { - if path[:len(dir)] == dir { - return path[len(dir):] != "" - } - return false - } - if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir { - if len(path) == len(dir)+1 { - return true - } - return path[len(dir)+1:] != "" - } - return false - } -} - -// IsValidImport returns whether importPkgPath is importable -// by pkgPath -func IsValidImport(pkgPath, importPkgPath string) bool { - i := strings.LastIndex(string(importPkgPath), "/internal/") - if i == -1 { - return true - } - if IsCommandLineArguments(string(pkgPath)) { - return true - } - return strings.HasPrefix(string(pkgPath), string(importPkgPath[:i])) -} - -// IsCommandLineArguments reports whether a given value denotes -// "command-line-arguments" package, which is a package with an unknown ID -// created by the go command. It can have a test variant, which is why callers -// should not check that a value equals "command-line-arguments" directly. -func IsCommandLineArguments(s string) bool { - return strings.Contains(s, "command-line-arguments") -} - -// LineToRange creates a Range spanning start and end. -func LineToRange(m *protocol.ColumnMapper, uri span.URI, start, end modfile.Position) (protocol.Range, error) { - return ByteOffsetsToRange(m, uri, start.Byte, end.Byte) -} - -// ByteOffsetsToRange creates a range spanning start and end. -func ByteOffsetsToRange(m *protocol.ColumnMapper, uri span.URI, start, end int) (protocol.Range, error) { - line, col, err := span.ToPosition(m.TokFile, start) - if err != nil { - return protocol.Range{}, err - } - s := span.NewPoint(line, col, start) - line, col, err = span.ToPosition(m.TokFile, end) - if err != nil { - return protocol.Range{}, err - } - e := span.NewPoint(line, col, end) - return m.Range(span.New(uri, s, e)) -} diff --git a/internal/lsp/source/util_test.go b/internal/lsp/source/util_test.go deleted file mode 100644 index 5d4e98f151c..00000000000 --- a/internal/lsp/source/util_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "go/scanner" - "go/token" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -func TestMappedRangeAdjustment(t *testing.T) { - // Test that mapped range adjusts positions in compiled files to positions in - // the corresponding edited file. - - compiled := []byte(`// Generated. DO NOT EDIT. - -package p - -//line edited.go:3:1 -const a𐐀b = 42`) - edited := []byte(`package p - -const a𐐀b = 42`) - - fset := token.NewFileSet() - cf := scanFile(fset, "compiled.go", compiled) - ef := scanFile(fset, "edited.go", edited) - eURI := span.URIFromPath(ef.Name()) - - mapper := &protocol.ColumnMapper{ - URI: eURI, - TokFile: ef, - Content: edited, - } - - start := cf.Pos(bytes.Index(compiled, []byte("a𐐀b"))) - end := start + token.Pos(len("a𐐀b")) - mr := NewMappedRange(fset, mapper, start, end) - gotRange, err := mr.Range() - if err != nil { - t.Fatal(err) - } - wantRange := protocol.Range{ - Start: protocol.Position{Line: 2, Character: 6}, - End: protocol.Position{Line: 2, Character: 10}, - } - if gotRange != wantRange { - t.Errorf("NewMappedRange(...).Range(): got %v, want %v", gotRange, wantRange) - } - - // Verify that the mapped span is also in the edited file. - gotSpan, err := mr.Span() - if err != nil { - t.Fatal(err) - } - if gotURI := gotSpan.URI(); gotURI != eURI { - t.Errorf("mr.Span().URI() = %v, want %v", gotURI, eURI) - } - wantOffset := bytes.Index(edited, []byte("a𐐀b")) - if gotOffset := gotSpan.Start().Offset(); gotOffset != wantOffset { - t.Errorf("mr.Span().Start().Offset() = %d, want %d", gotOffset, wantOffset) - } -} - -// scanFile scans the a file into fset, in order to honor line directives. -func scanFile(fset *token.FileSet, name string, content []byte) *token.File { - f := fset.AddFile(name, -1, len(content)) - var s scanner.Scanner - s.Init(f, content, nil, scanner.ScanComments) - for { - _, tok, _ := s.Scan() - if tok == token.EOF { - break - } - } - return f -} diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go deleted file mode 100644 index 94037f33fe3..00000000000 --- a/internal/lsp/source/view.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "bytes" - "context" - "errors" - "fmt" - "go/ast" - "go/scanner" - "go/token" - "go/types" - "io" - "strings" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/module" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/imports" - "golang.org/x/tools/internal/lsp/progress" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// Snapshot represents the current state for the given view. -type Snapshot interface { - ID() uint64 - - // View returns the View associated with this snapshot. - View() View - - // BackgroundContext returns a context used for all background processing - // on behalf of this snapshot. - BackgroundContext() context.Context - - // Fileset returns the Fileset used to parse all the Go files in this snapshot. - FileSet() *token.FileSet - - // ValidBuildConfiguration returns true if there is some error in the - // user's workspace. In particular, if they are both outside of a module - // and their GOPATH. - ValidBuildConfiguration() bool - - // WriteEnv writes the view-specific environment to the io.Writer. - WriteEnv(ctx context.Context, w io.Writer) error - - // FindFile returns the FileHandle for the given URI, if it is already - // in the given snapshot. - FindFile(uri span.URI) VersionedFileHandle - - // GetVersionedFile returns the VersionedFileHandle for a given URI, - // initializing it if it is not already part of the snapshot. - GetVersionedFile(ctx context.Context, uri span.URI) (VersionedFileHandle, error) - - // GetFile returns the FileHandle for a given URI, initializing it if it is - // not already part of the snapshot. - GetFile(ctx context.Context, uri span.URI) (FileHandle, error) - - // AwaitInitialized waits until the snapshot's view is initialized. - AwaitInitialized(ctx context.Context) - - // IsOpen returns whether the editor currently has a file open. - IsOpen(uri span.URI) bool - - // IgnoredFile reports if a file would be ignored by a `go list` of the whole - // workspace. - IgnoredFile(uri span.URI) bool - - // Templates returns the .tmpl files - Templates() map[span.URI]VersionedFileHandle - - // ParseGo returns the parsed AST for the file. - // If the file is not available, returns nil and an error. - ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error) - - // PosToField is a cache of *ast.Fields by token.Pos. This allows us - // to quickly find corresponding *ast.Field node given a *types.Var. - // We must refer to the AST to render type aliases properly when - // formatting signatures and other types. - PosToField(ctx context.Context, pkg Package, pos token.Pos) (*ast.Field, error) - - // PosToDecl maps certain objects' positions to their surrounding - // ast.Decl. This mapping is used when building the documentation - // string for the objects. - PosToDecl(ctx context.Context, pkg Package, pos token.Pos) (ast.Decl, error) - - // DiagnosePackage returns basic diagnostics, including list, parse, and type errors - // for pkg, grouped by file. - DiagnosePackage(ctx context.Context, pkg Package) (map[span.URI][]*Diagnostic, error) - - // Analyze runs the analyses for the given package at this snapshot. - Analyze(ctx context.Context, pkgID string, analyzers []*Analyzer) ([]*Diagnostic, error) - - // RunGoCommandPiped runs the given `go` command, writing its output - // to stdout and stderr. Verb, Args, and WorkingDir must be specified. - // - // RunGoCommandPiped runs the command serially using gocommand.RunPiped, - // enforcing that this command executes exclusively to other commands on the - // server. - RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error - - // RunGoCommandDirect runs the given `go` command. Verb, Args, and - // WorkingDir must be specified. - RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) - - // RunGoCommands runs a series of `go` commands that updates the go.mod - // and go.sum file for wd, and returns their updated contents. - RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) - - // RunProcessEnvFunc runs fn with the process env for this snapshot's view. - // Note: the process env contains cached module and filesystem state. - RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error - - // ModFiles are the go.mod files enclosed in the snapshot's view and known - // to the snapshot. - ModFiles() []span.URI - - // ParseMod is used to parse go.mod files. - ParseMod(ctx context.Context, fh FileHandle) (*ParsedModule, error) - - // ModWhy returns the results of `go mod why` for the module specified by - // the given go.mod file. - ModWhy(ctx context.Context, fh FileHandle) (map[string]string, error) - - // ModTidy returns the results of `go mod tidy` for the module specified by - // the given go.mod file. - ModTidy(ctx context.Context, pm *ParsedModule) (*TidiedModule, error) - - // GoModForFile returns the URI of the go.mod file for the given URI. - GoModForFile(uri span.URI) span.URI - - // WorkFile, if non-empty, is the go.work file for the workspace. - WorkFile() span.URI - - // ParseWork is used to parse go.work files. - ParseWork(ctx context.Context, fh FileHandle) (*ParsedWorkFile, error) - - // BuiltinFile returns information about the special builtin package. - BuiltinFile(ctx context.Context) (*ParsedGoFile, error) - - // IsBuiltin reports whether uri is part of the builtin package. - IsBuiltin(ctx context.Context, uri span.URI) bool - - // PackagesForFile returns the packages that this file belongs to, checked - // in mode. - PackagesForFile(ctx context.Context, uri span.URI, mode TypecheckMode, includeTestVariants bool) ([]Package, error) - - // PackageForFile returns a single package that this file belongs to, - // checked in mode and filtered by the package policy. - PackageForFile(ctx context.Context, uri span.URI, mode TypecheckMode, selectPackage PackageFilter) (Package, error) - - // GetActiveReverseDeps returns the active files belonging to the reverse - // dependencies of this file's package, checked in TypecheckWorkspace mode. - GetReverseDependencies(ctx context.Context, id string) ([]Package, error) - - // CachedImportPaths returns all the imported packages loaded in this - // snapshot, indexed by their import path and checked in TypecheckWorkspace - // mode. - CachedImportPaths(ctx context.Context) (map[string]Package, error) - - // KnownPackages returns all the packages loaded in this snapshot, checked - // in TypecheckWorkspace mode. - KnownPackages(ctx context.Context) ([]Package, error) - - // ActivePackages returns the packages considered 'active' in the workspace. - // - // In normal memory mode, this is all workspace packages. In degraded memory - // mode, this is just the reverse transitive closure of open packages. - ActivePackages(ctx context.Context) ([]Package, error) - - // Symbols returns all symbols in the snapshot. - Symbols(ctx context.Context) (map[span.URI][]Symbol, error) - - // Metadata returns package metadata associated with the given file URI. - MetadataForFile(ctx context.Context, uri span.URI) ([]Metadata, error) - - // GetCriticalError returns any critical errors in the workspace. - GetCriticalError(ctx context.Context) *CriticalError - - // BuildGoplsMod generates a go.mod file for all modules in the workspace. - // It bypasses any existing gopls.mod. - BuildGoplsMod(ctx context.Context) (*modfile.File, error) -} - -// PackageFilter sets how a package is filtered out from a set of packages -// containing a given file. -type PackageFilter int - -const ( - // NarrowestPackage picks the "narrowest" package for a given file. - // By "narrowest" package, we mean the package with the fewest number of - // files that includes the given file. This solves the problem of test - // variants, as the test will have more files than the non-test package. - NarrowestPackage PackageFilter = iota - - // WidestPackage returns the Package containing the most files. - // This is useful for something like diagnostics, where we'd prefer to - // offer diagnostics for as many files as possible. - WidestPackage -) - -// InvocationFlags represents the settings of a particular go command invocation. -// It is a mode, plus a set of flag bits. -type InvocationFlags int - -const ( - // Normal is appropriate for commands that might be run by a user and don't - // deliberately modify go.mod files, e.g. `go test`. - Normal InvocationFlags = iota - // WriteTemporaryModFile is for commands that need information from a - // modified version of the user's go.mod file, e.g. `go mod tidy` used to - // generate diagnostics. - WriteTemporaryModFile - // LoadWorkspace is for packages.Load, and other operations that should - // consider the whole workspace at once. - LoadWorkspace - - // AllowNetwork is a flag bit that indicates the invocation should be - // allowed to access the network. - AllowNetwork InvocationFlags = 1 << 10 -) - -func (m InvocationFlags) Mode() InvocationFlags { - return m & (AllowNetwork - 1) -} - -func (m InvocationFlags) AllowNetwork() bool { - return m&AllowNetwork != 0 -} - -// View represents a single workspace. -// This is the level at which we maintain configuration like working directory -// and build tags. -type View interface { - // Name returns the name this view was constructed with. - Name() string - - // Folder returns the folder with which this view was created. - Folder() span.URI - - // Shutdown closes this view, and detaches it from its session. - Shutdown(ctx context.Context) - - // Options returns a copy of the Options for this view. - Options() *Options - - // SetOptions sets the options of this view to new values. - // Calling this may cause the view to be invalidated and a replacement view - // added to the session. If so the new view will be returned, otherwise the - // original one will be. - SetOptions(context.Context, *Options) (View, error) - - // Snapshot returns the current snapshot for the view. - Snapshot(ctx context.Context) (Snapshot, func()) - - // Rebuild rebuilds the current view, replacing the original view in its session. - Rebuild(ctx context.Context) (Snapshot, func(), error) - - // IsGoPrivatePath reports whether target is a private import path, as identified - // by the GOPRIVATE environment variable. - IsGoPrivatePath(path string) bool - - // ModuleUpgrades returns known module upgrades. - ModuleUpgrades() map[string]string - - // RegisterModuleUpgrades registers that upgrades exist for the given modules. - RegisterModuleUpgrades(upgrades map[string]string) - - // FileKind returns the type of a file - FileKind(FileHandle) FileKind -} - -// A FileSource maps uris to FileHandles. This abstraction exists both for -// testability, and so that algorithms can be run equally on session and -// snapshot files. -type FileSource interface { - // GetFile returns the FileHandle for a given URI. - GetFile(ctx context.Context, uri span.URI) (FileHandle, error) -} - -// A ParsedGoFile contains the results of parsing a Go file. -type ParsedGoFile struct { - URI span.URI - Mode ParseMode - File *ast.File - Tok *token.File - // Source code used to build the AST. It may be different from the - // actual content of the file if we have fixed the AST. - Src []byte - Mapper *protocol.ColumnMapper - ParseErr scanner.ErrorList -} - -// A ParsedModule contains the results of parsing a go.mod file. -type ParsedModule struct { - URI span.URI - File *modfile.File - Mapper *protocol.ColumnMapper - ParseErrors []*Diagnostic -} - -// A ParsedWorkFile contains the results of parsing a go.work file. -type ParsedWorkFile struct { - URI span.URI - File *modfile.WorkFile - Mapper *protocol.ColumnMapper - ParseErrors []*Diagnostic -} - -// A TidiedModule contains the results of running `go mod tidy` on a module. -type TidiedModule struct { - // Diagnostics representing changes made by `go mod tidy`. - Diagnostics []*Diagnostic - // The bytes of the go.mod file after it was tidied. - TidiedContent []byte -} - -// Metadata represents package metadata retrieved from go/packages. -type Metadata interface { - // PackageName is the package name. - PackageName() string - - // PackagePath is the package path. - PackagePath() string - - // ModuleInfo returns the go/packages module information for the given package. - ModuleInfo() *packages.Module -} - -// Session represents a single connection from a client. -// This is the level at which things like open files are maintained on behalf -// of the client. -// A session may have many active views at any given time. -type Session interface { - // ID returns the unique identifier for this session on this server. - ID() string - // NewView creates a new View, returning it and its first snapshot. If a - // non-empty tempWorkspace directory is provided, the View will record a copy - // of its gopls workspace module in that directory, so that client tooling - // can execute in the same main module. - NewView(ctx context.Context, name string, folder span.URI, options *Options) (View, Snapshot, func(), error) - - // Cache returns the cache that created this session, for debugging only. - Cache() interface{} - - // View returns a view with a matching name, if the session has one. - View(name string) View - - // ViewOf returns a view corresponding to the given URI. - ViewOf(uri span.URI) (View, error) - - // Views returns the set of active views built by this session. - Views() []View - - // Shutdown the session and all views it has created. - Shutdown(ctx context.Context) - - // GetFile returns a handle for the specified file. - GetFile(ctx context.Context, uri span.URI) (FileHandle, error) - - // DidModifyFile reports a file modification to the session. It returns - // the new snapshots after the modifications have been applied, paired with - // the affected file URIs for those snapshots. - DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, []func(), error) - - // ExpandModificationsToDirectories returns the set of changes with the - // directory changes removed and expanded to include all of the files in - // the directory. - ExpandModificationsToDirectories(ctx context.Context, changes []FileModification) []FileModification - - // Overlays returns a slice of file overlays for the session. - Overlays() []Overlay - - // Options returns a copy of the SessionOptions for this session. - Options() *Options - - // SetOptions sets the options of this session to new values. - SetOptions(*Options) - - // FileWatchingGlobPatterns returns glob patterns to watch every directory - // known by the view. For views within a module, this is the module root, - // any directory in the module root, and any replace targets. - FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} - - // SetProgressTracker sets the progress tracker for the session. - SetProgressTracker(tracker *progress.Tracker) -} - -var ErrViewExists = errors.New("view already exists for session") - -// Overlay is the type for a file held in memory on a session. -type Overlay interface { - Kind() FileKind - VersionedFileHandle -} - -// FileModification represents a modification to a file. -type FileModification struct { - URI span.URI - Action FileAction - - // OnDisk is true if a watched file is changed on disk. - // If true, Version will be -1 and Text will be nil. - OnDisk bool - - // Version will be -1 and Text will be nil when they are not supplied, - // specifically on textDocument/didClose and for on-disk changes. - Version int32 - Text []byte - - // LanguageID is only sent from the language client on textDocument/didOpen. - LanguageID string -} - -type FileAction int - -const ( - UnknownFileAction = FileAction(iota) - Open - Change - Close - Save - Create - Delete - InvalidateMetadata -) - -func (a FileAction) String() string { - switch a { - case Open: - return "Open" - case Change: - return "Change" - case Close: - return "Close" - case Save: - return "Save" - case Create: - return "Create" - case Delete: - return "Delete" - case InvalidateMetadata: - return "InvalidateMetadata" - default: - return "Unknown" - } -} - -var ErrTmpModfileUnsupported = errors.New("-modfile is unsupported for this Go version") -var ErrNoModOnDisk = errors.New("go.mod file is not on disk") - -func IsNonFatalGoModError(err error) bool { - return err == ErrTmpModfileUnsupported || err == ErrNoModOnDisk -} - -// ParseMode controls the content of the AST produced when parsing a source file. -type ParseMode int - -const ( - // ParseHeader specifies that the main package declaration and imports are needed. - // This is the mode used when attempting to examine the package graph structure. - ParseHeader ParseMode = iota - - // ParseExported specifies that the package is used only as a dependency, - // and only its exported declarations are needed. More may be included if - // necessary to avoid type errors. - ParseExported - - // ParseFull specifies the full AST is needed. - // This is used for files of direct interest where the entire contents must - // be considered. - ParseFull -) - -// TypecheckMode controls what kind of parsing should be done (see ParseMode) -// while type checking a package. -type TypecheckMode int - -const ( - // Invalid default value. - TypecheckUnknown TypecheckMode = iota - // TypecheckFull means to use ParseFull. - TypecheckFull - // TypecheckWorkspace means to use ParseFull for workspace packages, and - // ParseExported for others. - TypecheckWorkspace - // TypecheckAll means ParseFull for workspace packages, and both Full and - // Exported for others. Only valid for some functions. - TypecheckAll -) - -type VersionedFileHandle interface { - FileHandle - Version() int32 - Session() string - - // LSPIdentity returns the version identity of a file. - VersionedFileIdentity() VersionedFileIdentity -} - -type VersionedFileIdentity struct { - URI span.URI - - // SessionID is the ID of the LSP session. - SessionID string - - // Version is the version of the file, as specified by the client. It should - // only be set in combination with SessionID. - Version int32 -} - -// FileHandle represents a handle to a specific version of a single file. -type FileHandle interface { - URI() span.URI - - // FileIdentity returns a FileIdentity for the file, even if there was an - // error reading it. - FileIdentity() FileIdentity - // Read reads the contents of a file. - // If the file is not available, returns a nil slice and an error. - Read() ([]byte, error) - // Saved reports whether the file has the same content on disk. - Saved() bool -} - -// FileIdentity uniquely identifies a file at a version from a FileSystem. -type FileIdentity struct { - URI span.URI - - // Identifier represents a unique identifier for the file's content. - Hash string -} - -func (id FileIdentity) String() string { - return fmt.Sprintf("%s%s", id.URI, id.Hash) -} - -// FileKind describes the kind of the file in question. -// It can be one of Go,mod, Sum, or Tmpl. -type FileKind int - -const ( - // UnknownKind is a file type we don't know about. - UnknownKind = FileKind(iota) - - // Go is a normal go source file. - Go - // Mod is a go.mod file. - Mod - // Sum is a go.sum file. - Sum - // Tmpl is a template file. - Tmpl - // Work is a go.work file. - Work -) - -// Analyzer represents a go/analysis analyzer with some boolean properties -// that let the user know how to use the analyzer. -type Analyzer struct { - Analyzer *analysis.Analyzer - - // Enabled reports whether the analyzer is enabled. This value can be - // configured per-analysis in user settings. For staticcheck analyzers, - // the value of the Staticcheck setting overrides this field. - Enabled bool - - // Fix is the name of the suggested fix name used to invoke the suggested - // fixes for the analyzer. It is non-empty if we expect this analyzer to - // provide its fix separately from its diagnostics. That is, we should apply - // the analyzer's suggested fixes through a Command, not a TextEdit. - Fix string - - // ActionKind is the kind of code action this analyzer produces. If - // unspecified the type defaults to quickfix. - ActionKind []protocol.CodeActionKind - - // Severity is the severity set for diagnostics reported by this - // analyzer. If left unset it defaults to Warning. - Severity protocol.DiagnosticSeverity -} - -func (a Analyzer) IsEnabled(view View) bool { - // Staticcheck analyzers can only be enabled when staticcheck is on. - if _, ok := view.Options().StaticcheckAnalyzers[a.Analyzer.Name]; ok { - if !view.Options().Staticcheck { - return false - } - } - if enabled, ok := view.Options().Analyses[a.Analyzer.Name]; ok { - return enabled - } - return a.Enabled -} - -// Package represents a Go package that has been type-checked. It maintains -// only the relevant fields of a *go/packages.Package. -type Package interface { - ID() string - Name() string - PkgPath() string - CompiledGoFiles() []*ParsedGoFile - File(uri span.URI) (*ParsedGoFile, error) - GetSyntax() []*ast.File - GetTypes() *types.Package - GetTypesInfo() *types.Info - GetTypesSizes() types.Sizes - IsIllTyped() bool - ForTest() string - GetImport(pkgPath string) (Package, error) - MissingDependencies() []string - Imports() []Package - Version() *module.Version - HasListOrParseErrors() bool - HasTypeErrors() bool - ParseMode() ParseMode -} - -type CriticalError struct { - // MainError is the primary error. Must be non-nil. - MainError error - // DiagList contains any supplemental (structured) diagnostics. - DiagList []*Diagnostic -} - -// An Diagnostic corresponds to an LSP Diagnostic. -// https://microsoft.github.io/language-server-protocol/specification#diagnostic -type Diagnostic struct { - URI span.URI - Range protocol.Range - Severity protocol.DiagnosticSeverity - Code string - CodeHref string - - // Source is a human-readable description of the source of the error. - // Diagnostics generated by an analysis.Analyzer set it to Analyzer.Name. - Source DiagnosticSource - - Message string - - Tags []protocol.DiagnosticTag - Related []RelatedInformation - - // Fields below are used internally to generate quick fixes. They aren't - // part of the LSP spec and don't leave the server. - SuggestedFixes []SuggestedFix - Analyzer *Analyzer -} - -type DiagnosticSource string - -const ( - UnknownError DiagnosticSource = "" - ListError DiagnosticSource = "go list" - ParseError DiagnosticSource = "syntax" - TypeError DiagnosticSource = "compiler" - ModTidyError DiagnosticSource = "go mod tidy" - OptimizationDetailsError DiagnosticSource = "optimizer details" - UpgradeNotification DiagnosticSource = "upgrade available" - TemplateError DiagnosticSource = "template" - WorkFileError DiagnosticSource = "go.work file" -) - -func AnalyzerErrorKind(name string) DiagnosticSource { - return DiagnosticSource(name) -} - -var ( - PackagesLoadError = errors.New("packages.Load error") -) - -// WorkspaceModuleVersion is the nonexistent pseudoversion suffix used in the -// construction of the workspace module. It is exported so that we can make -// sure not to show this version to end users in error messages, to avoid -// confusion. -// The major version is not included, as that depends on the module path. -// -// If workspace module A is dependent on workspace module B, we need our -// nonexistent version to be greater than the version A mentions. -// Otherwise, the go command will try to update to that version. Use a very -// high minor version to make that more likely. -const workspaceModuleVersion = ".9999999.0-goplsworkspace" - -func IsWorkspaceModuleVersion(version string) bool { - return strings.HasSuffix(version, workspaceModuleVersion) -} - -func WorkspaceModuleVersion(majorVersion string) string { - // Use the highest compatible major version to avoid unwanted upgrades. - // See the comment on workspaceModuleVersion. - if majorVersion == "v0" { - majorVersion = "v1" - } - return majorVersion + workspaceModuleVersion -} diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go deleted file mode 100644 index 11e22d17bea..00000000000 --- a/internal/lsp/source/workspace_symbol.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "context" - "fmt" - "go/types" - "path/filepath" - "runtime" - "sort" - "strings" - "unicode" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/fuzzy" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/span" -) - -// Symbol holds a precomputed symbol value. Note: we avoid using the -// protocol.SymbolInformation struct here in order to reduce the size of each -// symbol. -type Symbol struct { - Name string - Kind protocol.SymbolKind - Range protocol.Range -} - -// maxSymbols defines the maximum number of symbol results that should ever be -// sent in response to a client. -const maxSymbols = 100 - -// WorkspaceSymbols matches symbols across all views using the given query, -// according to the match semantics parameterized by matcherType and style. -// -// The workspace symbol method is defined in the spec as follows: -// -// The workspace symbol request is sent from the client to the server to -// list project-wide symbols matching the query string. -// -// It is unclear what "project-wide" means here, but given the parameters of -// workspace/symbol do not include any workspace identifier, then it has to be -// assumed that "project-wide" means "across all workspaces". Hence why -// WorkspaceSymbols receives the views []View. -// -// However, it then becomes unclear what it would mean to call WorkspaceSymbols -// with a different configured SymbolMatcher per View. Therefore we assume that -// Session level configuration will define the SymbolMatcher to be used for the -// WorkspaceSymbols method. -func WorkspaceSymbols(ctx context.Context, matcherType SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) { - ctx, done := event.Start(ctx, "source.WorkspaceSymbols") - defer done() - if query == "" { - return nil, nil - } - sc := newSymbolCollector(matcherType, style, query) - return sc.walk(ctx, views) -} - -// A matcherFunc returns the index and score of a symbol match. -// -// See the comment for symbolCollector for more information. -type matcherFunc func(chunks []string) (int, float64) - -// A symbolizer returns the best symbol match for a name with pkg, according to -// some heuristic. The symbol name is passed as the slice nameParts of logical -// name pieces. For example, for myType.field the caller can pass either -// []string{"myType.field"} or []string{"myType.", "field"}. -// -// See the comment for symbolCollector for more information. -type symbolizer func(name string, pkg Metadata, m matcherFunc) ([]string, float64) - -func fullyQualifiedSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { - _, score := dynamicSymbolMatch(name, pkg, matcher) - if score > 0 { - return []string{pkg.PackagePath(), ".", name}, score - } - return nil, 0 -} - -func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { - var score float64 - - endsInPkgName := strings.HasSuffix(pkg.PackagePath(), pkg.PackageName()) - - // If the package path does not end in the package name, we need to check the - // package-qualified symbol as an extra pass first. - if !endsInPkgName { - pkgQualified := []string{pkg.PackageName(), ".", name} - idx, score := matcher(pkgQualified) - nameStart := len(pkg.PackageName()) + 1 - if score > 0 { - // If our match is contained entirely within the unqualified portion, - // just return that. - if idx >= nameStart { - return []string{name}, score - } - // Lower the score for matches that include the package name. - return pkgQualified, score * 0.8 - } - } - - // Now try matching the fully qualified symbol. - fullyQualified := []string{pkg.PackagePath(), ".", name} - idx, score := matcher(fullyQualified) - - // As above, check if we matched just the unqualified symbol name. - nameStart := len(pkg.PackagePath()) + 1 - if idx >= nameStart { - return []string{name}, score - } - - // If our package path ends in the package name, we'll have skipped the - // initial pass above, so check if we matched just the package-qualified - // name. - if endsInPkgName && idx >= 0 { - pkgStart := len(pkg.PackagePath()) - len(pkg.PackageName()) - if idx >= pkgStart { - return []string{pkg.PackageName(), ".", name}, score - } - } - - // Our match was not contained within the unqualified or package qualified - // symbol. Return the fully qualified symbol but discount the score. - return fullyQualified, score * 0.6 -} - -func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { - qualified := []string{pkg.PackageName(), ".", name} - if _, s := matcher(qualified); s > 0 { - return qualified, s - } - return nil, 0 -} - -// symbolCollector holds context as we walk Packages, gathering symbols that -// match a given query. -// -// How we match symbols is parameterized by two interfaces: -// - A matcherFunc determines how well a string symbol matches a query. It -// returns a non-negative score indicating the quality of the match. A score -// of zero indicates no match. -// - A symbolizer determines how we extract the symbol for an object. This -// enables the 'symbolStyle' configuration option. -type symbolCollector struct { - // These types parameterize the symbol-matching pass. - matchers []matcherFunc - symbolizer symbolizer - - symbolStore -} - -func newSymbolCollector(matcher SymbolMatcher, style SymbolStyle, query string) *symbolCollector { - var s symbolizer - switch style { - case DynamicSymbols: - s = dynamicSymbolMatch - case FullyQualifiedSymbols: - s = fullyQualifiedSymbolMatch - case PackageQualifiedSymbols: - s = packageSymbolMatch - default: - panic(fmt.Errorf("unknown symbol style: %v", style)) - } - sc := &symbolCollector{symbolizer: s} - sc.matchers = make([]matcherFunc, runtime.GOMAXPROCS(-1)) - for i := range sc.matchers { - sc.matchers[i] = buildMatcher(matcher, query) - } - return sc -} - -func buildMatcher(matcher SymbolMatcher, query string) matcherFunc { - switch matcher { - case SymbolFuzzy: - return parseQuery(query, newFuzzyMatcher) - case SymbolFastFuzzy: - return parseQuery(query, func(query string) matcherFunc { - return fuzzy.NewSymbolMatcher(query).Match - }) - case SymbolCaseSensitive: - return matchExact(query) - case SymbolCaseInsensitive: - q := strings.ToLower(query) - exact := matchExact(q) - wrapper := []string{""} - return func(chunks []string) (int, float64) { - s := strings.Join(chunks, "") - wrapper[0] = strings.ToLower(s) - return exact(wrapper) - } - } - panic(fmt.Errorf("unknown symbol matcher: %v", matcher)) -} - -func newFuzzyMatcher(query string) matcherFunc { - fm := fuzzy.NewMatcher(query) - return func(chunks []string) (int, float64) { - score := float64(fm.ScoreChunks(chunks)) - ranges := fm.MatchedRanges() - if len(ranges) > 0 { - return ranges[0], score - } - return -1, score - } -} - -// parseQuery parses a field-separated symbol query, extracting the special -// characters listed below, and returns a matcherFunc corresponding to the AND -// of all field queries. -// -// Special characters: -// -// ^ match exact prefix -// $ match exact suffix -// ' match exact -// -// In all three of these special queries, matches are 'smart-cased', meaning -// they are case sensitive if the symbol query contains any upper-case -// characters, and case insensitive otherwise. -func parseQuery(q string, newMatcher func(string) matcherFunc) matcherFunc { - fields := strings.Fields(q) - if len(fields) == 0 { - return func([]string) (int, float64) { return -1, 0 } - } - var funcs []matcherFunc - for _, field := range fields { - var f matcherFunc - switch { - case strings.HasPrefix(field, "^"): - prefix := field[1:] - f = smartCase(prefix, func(chunks []string) (int, float64) { - s := strings.Join(chunks, "") - if strings.HasPrefix(s, prefix) { - return 0, 1 - } - return -1, 0 - }) - case strings.HasPrefix(field, "'"): - exact := field[1:] - f = smartCase(exact, matchExact(exact)) - case strings.HasSuffix(field, "$"): - suffix := field[0 : len(field)-1] - f = smartCase(suffix, func(chunks []string) (int, float64) { - s := strings.Join(chunks, "") - if strings.HasSuffix(s, suffix) { - return len(s) - len(suffix), 1 - } - return -1, 0 - }) - default: - f = newMatcher(field) - } - funcs = append(funcs, f) - } - if len(funcs) == 1 { - return funcs[0] - } - return comboMatcher(funcs).match -} - -func matchExact(exact string) matcherFunc { - return func(chunks []string) (int, float64) { - s := strings.Join(chunks, "") - if idx := strings.LastIndex(s, exact); idx >= 0 { - return idx, 1 - } - return -1, 0 - } -} - -// smartCase returns a matcherFunc that is case-sensitive if q contains any -// upper-case characters, and case-insensitive otherwise. -func smartCase(q string, m matcherFunc) matcherFunc { - insensitive := strings.ToLower(q) == q - wrapper := []string{""} - return func(chunks []string) (int, float64) { - s := strings.Join(chunks, "") - if insensitive { - s = strings.ToLower(s) - } - wrapper[0] = s - return m(wrapper) - } -} - -type comboMatcher []matcherFunc - -func (c comboMatcher) match(chunks []string) (int, float64) { - score := 1.0 - first := 0 - for _, f := range c { - idx, s := f(chunks) - if idx < first { - first = idx - } - score *= s - } - return first, score -} - -func (sc *symbolCollector) walk(ctx context.Context, views []View) ([]protocol.SymbolInformation, error) { - // Use the root view URIs for determining (lexically) whether a uri is in any - // open workspace. - var roots []string - for _, v := range views { - roots = append(roots, strings.TrimRight(string(v.Folder()), "/")) - } - - results := make(chan *symbolStore) - matcherlen := len(sc.matchers) - files := make(map[span.URI]symbolFile) - - for _, v := range views { - snapshot, release := v.Snapshot(ctx) - defer release() - psyms, err := snapshot.Symbols(ctx) - if err != nil { - return nil, err - } - - filters := v.Options().DirectoryFilters - folder := filepath.ToSlash(v.Folder().Filename()) - for uri, syms := range psyms { - norm := filepath.ToSlash(uri.Filename()) - nm := strings.TrimPrefix(norm, folder) - if FiltersDisallow(nm, filters) { - continue - } - // Only scan each file once. - if _, ok := files[uri]; ok { - continue - } - mds, err := snapshot.MetadataForFile(ctx, uri) - if err != nil { - event.Error(ctx, fmt.Sprintf("missing metadata for %q", uri), err) - continue - } - if len(mds) == 0 { - // TODO: should use the bug reporting API - continue - } - files[uri] = symbolFile{uri, mds[0], syms} - } - } - - var work []symbolFile - for _, f := range files { - work = append(work, f) - } - - // Compute matches concurrently. Each symbolWorker has its own symbolStore, - // which we merge at the end. - for i, matcher := range sc.matchers { - go func(i int, matcher matcherFunc) { - w := &symbolWorker{ - symbolizer: sc.symbolizer, - matcher: matcher, - ss: &symbolStore{}, - roots: roots, - } - for j := i; j < len(work); j += matcherlen { - w.matchFile(work[j]) - } - results <- w.ss - }(i, matcher) - } - - for i := 0; i < matcherlen; i++ { - ss := <-results - for _, si := range ss.res { - sc.store(si) - } - } - return sc.results(), nil -} - -// FilterDisallow is code from the body of cache.pathExcludedByFilter in cache/view.go -// Exporting and using that function would cause an import cycle. -// Moving it here and exporting it would leave behind view_test.go. -// (This code is exported and used in the body of cache.pathExcludedByFilter) -func FiltersDisallow(path string, filters []string) bool { - path = strings.TrimPrefix(path, "/") - var excluded bool - for _, filter := range filters { - op, prefix := filter[0], filter[1:] - // Non-empty prefixes have to be precise directory matches. - if prefix != "" { - prefix = prefix + "/" - path = path + "/" - } - if !strings.HasPrefix(path, prefix) { - continue - } - excluded = op == '-' - } - return excluded -} - -// symbolFile holds symbol information for a single file. -type symbolFile struct { - uri span.URI - md Metadata - syms []Symbol -} - -// symbolWorker matches symbols and captures the highest scoring results. -type symbolWorker struct { - symbolizer symbolizer - matcher matcherFunc - ss *symbolStore - roots []string -} - -func (w *symbolWorker) matchFile(i symbolFile) { - for _, sym := range i.syms { - symbolParts, score := w.symbolizer(sym.Name, i.md, w.matcher) - - // Check if the score is too low before applying any downranking. - if w.ss.tooLow(score) { - continue - } - - // Factors to apply to the match score for the purpose of downranking - // results. - // - // These numbers were crudely calibrated based on trial-and-error using a - // small number of sample queries. Adjust as necessary. - // - // All factors are multiplicative, meaning if more than one applies they are - // multiplied together. - const ( - // nonWorkspaceFactor is applied to symbols outside of any active - // workspace. Developers are less likely to want to jump to code that they - // are not actively working on. - nonWorkspaceFactor = 0.5 - // nonWorkspaceUnexportedFactor is applied to unexported symbols outside of - // any active workspace. Since one wouldn't usually jump to unexported - // symbols to understand a package API, they are particularly irrelevant. - nonWorkspaceUnexportedFactor = 0.5 - // every field or method nesting level to access the field decreases - // the score by a factor of 1.0 - depth*depthFactor, up to a depth of - // 3. - depthFactor = 0.2 - ) - - startWord := true - exported := true - depth := 0.0 - for _, r := range sym.Name { - if startWord && !unicode.IsUpper(r) { - exported = false - } - if r == '.' { - startWord = true - depth++ - } else { - startWord = false - } - } - - inWorkspace := false - for _, root := range w.roots { - if strings.HasPrefix(string(i.uri), root) { - inWorkspace = true - break - } - } - - // Apply downranking based on workspace position. - if !inWorkspace { - score *= nonWorkspaceFactor - if !exported { - score *= nonWorkspaceUnexportedFactor - } - } - - // Apply downranking based on symbol depth. - if depth > 3 { - depth = 3 - } - score *= 1.0 - depth*depthFactor - - if w.ss.tooLow(score) { - continue - } - - si := symbolInformation{ - score: score, - symbol: strings.Join(symbolParts, ""), - kind: sym.Kind, - uri: i.uri, - rng: sym.Range, - container: i.md.PackagePath(), - } - w.ss.store(si) - } -} - -type symbolStore struct { - res [maxSymbols]symbolInformation -} - -// store inserts si into the sorted results, if si has a high enough score. -func (sc *symbolStore) store(si symbolInformation) { - if sc.tooLow(si.score) { - return - } - insertAt := sort.Search(len(sc.res), func(i int) bool { - // Sort by score, then symbol length, and finally lexically. - if sc.res[i].score != si.score { - return sc.res[i].score < si.score - } - if len(sc.res[i].symbol) != len(si.symbol) { - return len(sc.res[i].symbol) > len(si.symbol) - } - return sc.res[i].symbol > si.symbol - }) - if insertAt < len(sc.res)-1 { - copy(sc.res[insertAt+1:], sc.res[insertAt:len(sc.res)-1]) - } - sc.res[insertAt] = si -} - -func (sc *symbolStore) tooLow(score float64) bool { - return score <= sc.res[len(sc.res)-1].score -} - -func (sc *symbolStore) results() []protocol.SymbolInformation { - var res []protocol.SymbolInformation - for _, si := range sc.res { - if si.score <= 0 { - return res - } - res = append(res, si.asProtocolSymbolInformation()) - } - return res -} - -func typeToKind(typ types.Type) protocol.SymbolKind { - switch typ := typ.Underlying().(type) { - case *types.Interface: - return protocol.Interface - case *types.Struct: - return protocol.Struct - case *types.Signature: - if typ.Recv() != nil { - return protocol.Method - } - return protocol.Function - case *types.Named: - return typeToKind(typ.Underlying()) - case *types.Basic: - i := typ.Info() - switch { - case i&types.IsNumeric != 0: - return protocol.Number - case i&types.IsBoolean != 0: - return protocol.Boolean - case i&types.IsString != 0: - return protocol.String - } - } - return protocol.Variable -} - -// symbolInformation is a cut-down version of protocol.SymbolInformation that -// allows struct values of this type to be used as map keys. -type symbolInformation struct { - score float64 - symbol string - container string - kind protocol.SymbolKind - uri span.URI - rng protocol.Range -} - -// asProtocolSymbolInformation converts s to a protocol.SymbolInformation value. -// -// TODO: work out how to handle tags if/when they are needed. -func (s symbolInformation) asProtocolSymbolInformation() protocol.SymbolInformation { - return protocol.SymbolInformation{ - Name: s.symbol, - Kind: s.kind, - Location: protocol.Location{ - URI: protocol.URIFromSpanURI(s.uri), - Range: s.rng, - }, - ContainerName: s.container, - } -} diff --git a/internal/lsp/source/workspace_symbol_test.go b/internal/lsp/source/workspace_symbol_test.go deleted file mode 100644 index 314ef785df3..00000000000 --- a/internal/lsp/source/workspace_symbol_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package source - -import ( - "testing" -) - -func TestParseQuery(t *testing.T) { - tests := []struct { - query, s string - wantMatch bool - }{ - {"", "anything", false}, - {"any", "anything", true}, - {"any$", "anything", false}, - {"ing$", "anything", true}, - {"ing$", "anythinG", true}, - {"inG$", "anything", false}, - {"^any", "anything", true}, - {"^any", "Anything", true}, - {"^Any", "anything", false}, - {"at", "anything", true}, - // TODO: this appears to be a bug in the fuzzy matching algorithm. 'At' - // should cause a case-sensitive match. - // {"At", "anything", false}, - {"At", "Anything", true}, - {"'yth", "Anything", true}, - {"'yti", "Anything", false}, - {"'any 'thing", "Anything", true}, - {"anythn nythg", "Anything", true}, - {"ntx", "Anything", false}, - {"anythn", "anything", true}, - {"ing", "anything", true}, - {"anythn nythgx", "anything", false}, - } - - for _, test := range tests { - matcher := parseQuery(test.query, newFuzzyMatcher) - if _, score := matcher([]string{test.s}); score > 0 != test.wantMatch { - t.Errorf("parseQuery(%q) match for %q: %.2g, want match: %t", test.query, test.s, score, test.wantMatch) - } - } -} diff --git a/internal/lsp/symbols.go b/internal/lsp/symbols.go deleted file mode 100644 index f04e4572dba..00000000000 --- a/internal/lsp/symbols.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/template" -) - -func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) { - ctx, done := event.Start(ctx, "lsp.Server.documentSymbol") - defer done() - - snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind) - defer release() - if !ok { - return []interface{}{}, err - } - var docSymbols []protocol.DocumentSymbol - if snapshot.View().FileKind(fh) == source.Tmpl { - docSymbols, err = template.DocumentSymbols(snapshot, fh) - } else { - docSymbols, err = source.DocumentSymbols(ctx, snapshot, fh) - } - if err != nil { - event.Error(ctx, "DocumentSymbols failed", err, tag.URI.Of(fh.URI())) - return []interface{}{}, nil - } - // Convert the symbols to an interface array. - // TODO: Remove this once the lsp deprecates SymbolInformation. - symbols := make([]interface{}, len(docSymbols)) - for i, s := range docSymbols { - if snapshot.View().Options().HierarchicalDocumentSymbolSupport { - symbols[i] = s - continue - } - // If the client does not support hierarchical document symbols, then - // we need to be backwards compatible for now and return SymbolInformation. - symbols[i] = protocol.SymbolInformation{ - Name: s.Name, - Kind: s.Kind, - Deprecated: s.Deprecated, - Location: protocol.Location{ - URI: params.TextDocument.URI, - Range: s.Range, - }, - } - } - return symbols, nil -} diff --git a/internal/lsp/template/completion.go b/internal/lsp/template/completion.go deleted file mode 100644 index 13dbdf1e525..00000000000 --- a/internal/lsp/template/completion.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "context" - "fmt" - "go/scanner" - "go/token" - "strings" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -// information needed for completion -type completer struct { - p *Parsed - pos protocol.Position - offset int // offset of the start of the Token - ctx protocol.CompletionContext - syms map[string]symbol -} - -func Completion(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle, pos protocol.Position, context protocol.CompletionContext) (*protocol.CompletionList, error) { - all := New(snapshot.Templates()) - var start int // the beginning of the Token (completed or not) - syms := make(map[string]symbol) - var p *Parsed - for fn, fc := range all.files { - // collect symbols from all template files - filterSyms(syms, fc.symbols) - if fn.Filename() != fh.URI().Filename() { - continue - } - if start = inTemplate(fc, pos); start == -1 { - return nil, nil - } - p = fc - } - if p == nil { - // this cannot happen unless the search missed a template file - return nil, fmt.Errorf("%s not found", fh.FileIdentity().URI.Filename()) - } - c := completer{ - p: p, - pos: pos, - offset: start + len(Left), - ctx: context, - syms: syms, - } - return c.complete() -} - -func filterSyms(syms map[string]symbol, ns []symbol) { - for _, xsym := range ns { - switch xsym.kind { - case protocol.Method, protocol.Package, protocol.Boolean, protocol.Namespace, - protocol.Function: - syms[xsym.name] = xsym // we don't care which symbol we get - case protocol.Variable: - if xsym.name != "dot" { - syms[xsym.name] = xsym - } - case protocol.Constant: - if xsym.name == "nil" { - syms[xsym.name] = xsym - } - } - } -} - -// return the starting position of the enclosing token, or -1 if none -func inTemplate(fc *Parsed, pos protocol.Position) int { - // pos is the pos-th character. if the cursor is at the beginning - // of the file, pos is 0. That is, we've only seen characters before pos - // 1. pos might be in a Token, return tk.Start - // 2. pos might be after an elided but before a Token, return elided - // 3. return -1 for false - offset := fc.FromPosition(pos) - // this could be a binary search, as the tokens are ordered - for _, tk := range fc.tokens { - if tk.Start < offset && offset <= tk.End { - return tk.Start - } - } - for _, x := range fc.elided { - if x > offset { - // fc.elided is sorted - break - } - // If the interval [x,offset] does not contain Left or Right - // then provide completions. (do we need the test for Right?) - if !bytes.Contains(fc.buf[x:offset], []byte(Left)) && !bytes.Contains(fc.buf[x:offset], []byte(Right)) { - return x - } - } - return -1 -} - -var ( - keywords = []string{"if", "with", "else", "block", "range", "template", "end}}", "end"} - globals = []string{"and", "call", "html", "index", "slice", "js", "len", "not", "or", - "urlquery", "printf", "println", "print", "eq", "ne", "le", "lt", "ge", "gt"} -) - -// find the completions. start is the offset of either the Token enclosing pos, or where -// the incomplete token starts. -// The error return is always nil. -func (c *completer) complete() (*protocol.CompletionList, error) { - ans := &protocol.CompletionList{IsIncomplete: true, Items: []protocol.CompletionItem{}} - start := c.p.FromPosition(c.pos) - sofar := c.p.buf[c.offset:start] - if len(sofar) == 0 || sofar[len(sofar)-1] == ' ' || sofar[len(sofar)-1] == '\t' { - return ans, nil - } - // sofar could be parsed by either c.analyzer() or scan(). The latter is precise - // and slower, but fast enough - words := scan(sofar) - // 1. if pattern starts $, show variables - // 2. if pattern starts ., show methods (and . by itself?) - // 3. if len(words) == 1, show firstWords (but if it were a |, show functions and globals) - // 4. ...? (parenthetical expressions, arguments, ...) (packages, namespaces, nil?) - if len(words) == 0 { - return nil, nil // if this happens, why were we called? - } - pattern := string(words[len(words)-1]) - if pattern[0] == '$' { - // should we also return a raw "$"? - for _, s := range c.syms { - if s.kind == protocol.Variable && weakMatch(s.name, pattern) > 0 { - ans.Items = append(ans.Items, protocol.CompletionItem{ - Label: s.name, - Kind: protocol.VariableCompletion, - Detail: "Variable", - }) - } - } - return ans, nil - } - if pattern[0] == '.' { - for _, s := range c.syms { - if s.kind == protocol.Method && weakMatch("."+s.name, pattern) > 0 { - ans.Items = append(ans.Items, protocol.CompletionItem{ - Label: s.name, - Kind: protocol.MethodCompletion, - Detail: "Method/member", - }) - } - } - return ans, nil - } - // could we get completion attempts in strings or numbers, and if so, do we care? - // globals - for _, kw := range globals { - if weakMatch(kw, string(pattern)) != 0 { - ans.Items = append(ans.Items, protocol.CompletionItem{ - Label: kw, - Kind: protocol.KeywordCompletion, - Detail: "Function", - }) - } - } - // and functions - for _, s := range c.syms { - if s.kind == protocol.Function && weakMatch(s.name, pattern) != 0 { - ans.Items = append(ans.Items, protocol.CompletionItem{ - Label: s.name, - Kind: protocol.FunctionCompletion, - Detail: "Function", - }) - } - } - // keywords if we're at the beginning - if len(words) <= 1 || len(words[len(words)-2]) == 1 && words[len(words)-2][0] == '|' { - for _, kw := range keywords { - if weakMatch(kw, string(pattern)) != 0 { - ans.Items = append(ans.Items, protocol.CompletionItem{ - Label: kw, - Kind: protocol.KeywordCompletion, - Detail: "keyword", - }) - } - } - } - return ans, nil -} - -// someday think about comments, strings, backslashes, etc -// this would repeat some of the template parsing, but because the user is typing -// there may be no parse tree here. -// (go/scanner will report 2 tokens for $a, as $ is not a legal go identifier character) -// (go/scanner is about 2.7 times more expensive) -func (c *completer) analyze(buf []byte) [][]byte { - // we want to split on whitespace and before dots - var working []byte - var ans [][]byte - for _, ch := range buf { - if ch == '.' && len(working) > 0 { - ans = append(ans, working) - working = []byte{'.'} - continue - } - if ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' { - if len(working) > 0 { - ans = append(ans, working) - working = []byte{} - continue - } - } - working = append(working, ch) - } - if len(working) > 0 { - ans = append(ans, working) - } - ch := buf[len(buf)-1] - if ch == ' ' || ch == '\t' { - // avoid completing on whitespace - ans = append(ans, []byte{ch}) - } - return ans -} - -// version of c.analyze that uses go/scanner. -func scan(buf []byte) []string { - fset := token.NewFileSet() - fp := fset.AddFile("", -1, len(buf)) - var sc scanner.Scanner - sc.Init(fp, buf, func(pos token.Position, msg string) {}, scanner.ScanComments) - ans := make([]string, 0, 10) // preallocating gives a measurable savings - for { - _, tok, lit := sc.Scan() // tok is an int - if tok == token.EOF { - break // done - } else if tok == token.SEMICOLON && lit == "\n" { - continue // don't care, but probably can't happen - } else if tok == token.PERIOD { - ans = append(ans, ".") // lit is empty - } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "." { - ans[len(ans)-1] = "." + lit - } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "$" { - ans[len(ans)-1] = "$" + lit - } else if lit != "" { - ans = append(ans, lit) - } - } - return ans -} - -// pattern is what the user has typed -func weakMatch(choice, pattern string) float64 { - lower := strings.ToLower(choice) - // for now, use only lower-case everywhere - pattern = strings.ToLower(pattern) - // The first char has to match - if pattern[0] != lower[0] { - return 0 - } - // If they start with ., then the second char has to match - from := 1 - if pattern[0] == '.' { - if len(pattern) < 2 { - return 1 // pattern just a ., so it matches - } - if pattern[1] != lower[1] { - return 0 - } - from = 2 - } - // check that all the characters of pattern occur as a subsequence of choice - i, j := from, from - for ; i < len(lower) && j < len(pattern); j++ { - if pattern[j] == lower[i] { - i++ - if i >= len(lower) { - return 0 - } - } - } - if j < len(pattern) { - return 0 - } - return 1 -} - -// for debug printing -func strContext(c protocol.CompletionContext) string { - switch c.TriggerKind { - case protocol.Invoked: - return "invoked" - case protocol.TriggerCharacter: - return fmt.Sprintf("triggered(%s)", c.TriggerCharacter) - case protocol.TriggerForIncompleteCompletions: - // gopls doesn't seem to handle these explicitly anywhere - return "incomplete" - } - return fmt.Sprintf("?%v", c) -} diff --git a/internal/lsp/template/completion_test.go b/internal/lsp/template/completion_test.go deleted file mode 100644 index bfcdb537202..00000000000 --- a/internal/lsp/template/completion_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "log" - "sort" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/protocol" -) - -func init() { - log.SetFlags(log.Lshortfile) -} - -type tparse struct { - marked string // ^ shows where to ask for completions. (The user just typed the following character.) - wanted []string // expected completions -} - -// Test completions in templates that parse enough (if completion needs symbols) -// Seen characters up to the ^ -func TestParsed(t *testing.T) { - var tests = []tparse{ - {"{{x}}{{12. xx^", nil}, // https://github.com/golang/go/issues/50430 - {``, nil}, - {"{{i^f}}", []string{"index", "if"}}, - {"{{if .}}{{e^ {{end}}", []string{"eq", "end}}", "else", "end"}}, - {"{{foo}}{{f^", []string{"foo"}}, - {"{{$^}}", []string{"$"}}, - {"{{$x:=4}}{{$^", []string{"$x"}}, - {"{{$x:=4}}{{$ ^ ", []string{}}, - {"{{len .Modified}}{{.^Mo", []string{"Modified"}}, - {"{{len .Modified}}{{.mf^", []string{"Modified"}}, - {"{{$^ }}", []string{"$"}}, - {"{{$a =3}}{{$^", []string{"$a"}}, - // .two is not good here: fix someday - {`{{.Modified}}{{.^{{if $.one.two}}xxx{{end}}`, []string{"Modified", "one", "two"}}, - {`{{.Modified}}{{.o^{{if $.one.two}}xxx{{end}}`, []string{"one"}}, - {"{{.Modiifed}}{{.one.t^{{if $.one.two}}xxx{{end}}", []string{"two"}}, - {`{{block "foo" .}}{{i^`, []string{"index", "if"}}, - {"{{in^{{Internal}}", []string{"index", "Internal", "if"}}, - // simple number has no completions - {"{{4^e", []string{}}, - // simple string has no completions - {"{{`e^", []string{}}, - {"{{`No i^", []string{}}, // example of why go/scanner is used - {"{{xavier}}{{12. x^", []string{"xavier"}}, - } - for _, tx := range tests { - c := testCompleter(t, tx) - var v []string - if c != nil { - ans, _ := c.complete() - for _, a := range ans.Items { - v = append(v, a.Label) - } - } - if len(v) != len(tx.wanted) { - t.Errorf("%q: got %q, wanted %q %d,%d", tx.marked, v, tx.wanted, len(v), len(tx.wanted)) - continue - } - sort.Strings(tx.wanted) - sort.Strings(v) - for i := 0; i < len(v); i++ { - if tx.wanted[i] != v[i] { - t.Errorf("%q at %d: got %v, wanted %v", tx.marked, i, v, tx.wanted) - break - } - } - } -} - -func testCompleter(t *testing.T, tx tparse) *completer { - t.Helper() - // seen chars up to ^ - col := strings.Index(tx.marked, "^") - buf := strings.Replace(tx.marked, "^", "", 1) - p := parseBuffer([]byte(buf)) - pos := protocol.Position{Line: 0, Character: uint32(col)} - if p.ParseErr != nil { - log.Printf("%q: %v", tx.marked, p.ParseErr) - } - offset := inTemplate(p, pos) - if offset == -1 { - return nil - } - syms := make(map[string]symbol) - filterSyms(syms, p.symbols) - c := &completer{ - p: p, - pos: protocol.Position{Line: 0, Character: uint32(col)}, - offset: offset + len(Left), - ctx: protocol.CompletionContext{TriggerKind: protocol.Invoked}, - syms: syms, - } - return c -} diff --git a/internal/lsp/template/highlight.go b/internal/lsp/template/highlight.go deleted file mode 100644 index a45abaf5020..00000000000 --- a/internal/lsp/template/highlight.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "context" - "fmt" - "regexp" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Highlight(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.DocumentHighlight, error) { - buf, err := fh.Read() - if err != nil { - return nil, err - } - p := parseBuffer(buf) - pos := p.FromPosition(loc) - var ans []protocol.DocumentHighlight - if p.ParseErr == nil { - for _, s := range p.symbols { - if s.start <= pos && pos < s.start+s.length { - return markSymbols(p, s) - } - } - } - // these tokens exist whether or not there was a parse error - // (symbols require a successful parse) - for _, tok := range p.tokens { - if tok.Start <= pos && pos < tok.End { - wordAt := findWordAt(p, pos) - if len(wordAt) > 0 { - return markWordInToken(p, wordAt) - } - } - } - // find the 'word' at pos, etc: someday - // until then we get the default action, which doesn't respect word boundaries - return ans, nil -} - -func markSymbols(p *Parsed, sym symbol) ([]protocol.DocumentHighlight, error) { - var ans []protocol.DocumentHighlight - for _, s := range p.symbols { - if s.name == sym.name { - kind := protocol.Read - if s.vardef { - kind = protocol.Write - } - ans = append(ans, protocol.DocumentHighlight{ - Range: p.Range(s.start, s.length), - Kind: kind, - }) - } - } - return ans, nil -} - -// A token is {{...}}, and this marks words in the token that equal the give word -func markWordInToken(p *Parsed, wordAt string) ([]protocol.DocumentHighlight, error) { - var ans []protocol.DocumentHighlight - pat, err := regexp.Compile(fmt.Sprintf(`\b%s\b`, wordAt)) - if err != nil { - return nil, fmt.Errorf("%q: unmatchable word (%v)", wordAt, err) - } - for _, tok := range p.tokens { - got := pat.FindAllIndex(p.buf[tok.Start:tok.End], -1) - for i := 0; i < len(got); i++ { - ans = append(ans, protocol.DocumentHighlight{ - Range: p.Range(got[i][0], got[i][1]-got[i][0]), - Kind: protocol.Text, - }) - } - } - return ans, nil -} - -var wordRe = regexp.MustCompile(`[$]?\w+$`) -var moreRe = regexp.MustCompile(`^[$]?\w+`) - -// findWordAt finds the word the cursor is in (meaning in or just before) -func findWordAt(p *Parsed, pos int) string { - if pos >= len(p.buf) { - return "" // can't happen, as we are called with pos < tok.End - } - after := moreRe.Find(p.buf[pos:]) - if len(after) == 0 { - return "" // end of the word - } - got := wordRe.Find(p.buf[:pos+len(after)]) - return string(got) -} diff --git a/internal/lsp/template/parse.go b/internal/lsp/template/parse.go deleted file mode 100644 index 181a5228fd2..00000000000 --- a/internal/lsp/template/parse.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package template contains code for dealing with templates -package template - -// template files are small enough that the code reprocesses them each time -// this may be a bad choice for projects with lots of template files. - -// This file contains the parsing code, some debugging printing, and -// implementations for Diagnose, Definition, Hover, References - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "regexp" - "runtime" - "sort" - "text/template" - "text/template/parse" - "unicode/utf8" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -var ( - Left = []byte("{{") - Right = []byte("}}") -) - -type Parsed struct { - buf []byte //contents - lines [][]byte // needed?, other than for debugging? - elided []int // offsets where Left was replaced by blanks - - // tokens are matched Left-Right pairs, computed before trying to parse - tokens []Token - - // result of parsing - named []*template.Template // the template and embedded templates - ParseErr error - symbols []symbol - stack []parse.Node // used while computing symbols - - // for mapping from offsets in buf to LSP coordinates - // See FromPosition() and LineCol() - nls []int // offset of newlines before each line (nls[0]==-1) - lastnl int // last line seen - check int // used to decide whether to use lastnl or search through nls - nonASCII bool // are there any non-ascii runes in buf? -} - -// Token is a single {{...}}. More precisely, Left...Right -type Token struct { - Start, End int // offset from start of template - Multiline bool -} - -// All contains the Parse of all the template files -type All struct { - files map[span.URI]*Parsed -} - -// New returns the Parses of the snapshot's tmpl files -// (maybe cache these, but then avoiding import cycles needs code rearrangements) -func New(tmpls map[span.URI]source.VersionedFileHandle) *All { - all := make(map[span.URI]*Parsed) - for k, v := range tmpls { - buf, err := v.Read() - if err != nil { // PJW: decide what to do with these errors - log.Printf("failed to read %s (%v)", v.URI().Filename(), err) - continue - } - all[k] = parseBuffer(buf) - } - return &All{files: all} -} - -func parseBuffer(buf []byte) *Parsed { - ans := &Parsed{ - buf: buf, - check: -1, - nls: []int{-1}, - } - if len(buf) == 0 { - return ans - } - // how to compute allAscii... - for _, b := range buf { - if b >= utf8.RuneSelf { - ans.nonASCII = true - break - } - } - if buf[len(buf)-1] != '\n' { - ans.buf = append(buf, '\n') - } - for i, p := range ans.buf { - if p == '\n' { - ans.nls = append(ans.nls, i) - } - } - ans.setTokens() // ans.buf may be a new []byte - ans.lines = bytes.Split(ans.buf, []byte{'\n'}) - t, err := template.New("").Parse(string(ans.buf)) - if err != nil { - funcs := make(template.FuncMap) - for t == nil && ans.ParseErr == nil { - // in 1.17 it may be possible to avoid getting this error - // template: :2: function "foo" not defined - matches := parseErrR.FindStringSubmatch(err.Error()) - if len(matches) == 2 { - // suppress the error by giving it a function with the right name - funcs[matches[1]] = func() interface{} { return nil } - t, err = template.New("").Funcs(funcs).Parse(string(ans.buf)) - continue - } - ans.ParseErr = err // unfixed error - return ans - } - } - ans.named = t.Templates() - // set the symbols - for _, t := range ans.named { - ans.stack = append(ans.stack, t.Root) - ans.findSymbols() - if t.Name() != "" { - // defining a template. The pos is just after {{define...}} (or {{block...}}?) - at, sz := ans.FindLiteralBefore(int(t.Root.Pos)) - s := symbol{start: at, length: sz, name: t.Name(), kind: protocol.Namespace, vardef: true} - ans.symbols = append(ans.symbols, s) - } - } - - sort.Slice(ans.symbols, func(i, j int) bool { - left, right := ans.symbols[i], ans.symbols[j] - if left.start != right.start { - return left.start < right.start - } - if left.vardef != right.vardef { - return left.vardef - } - return left.kind < right.kind - }) - return ans -} - -// FindLiteralBefore locates the first preceding string literal -// returning its position and length in buf -// or returns -1 if there is none. -// Assume double-quoted string rather than backquoted string for now. -func (p *Parsed) FindLiteralBefore(pos int) (int, int) { - left, right := -1, -1 - for i := pos - 1; i >= 0; i-- { - if p.buf[i] != '"' { - continue - } - if right == -1 { - right = i - continue - } - left = i - break - } - if left == -1 { - return -1, 0 - } - return left + 1, right - left - 1 -} - -var ( - parseErrR = regexp.MustCompile(`template:.*function "([^"]+)" not defined`) -) - -func (p *Parsed) setTokens() { - const ( - // InRaw and InString only occur inside an action (SeenLeft) - Start = iota - InRaw - InString - SeenLeft - ) - state := Start - var left, oldState int - for n := 0; n < len(p.buf); n++ { - c := p.buf[n] - switch state { - case InRaw: - if c == '`' { - state = oldState - } - case InString: - if c == '"' && !isEscaped(p.buf[:n]) { - state = oldState - } - case SeenLeft: - if c == '`' { - oldState = state // it's SeenLeft, but a little clearer this way - state = InRaw - continue - } - if c == '"' { - oldState = state - state = InString - continue - } - if bytes.HasPrefix(p.buf[n:], Right) { - right := n + len(Right) - tok := Token{Start: left, - End: right, - Multiline: bytes.Contains(p.buf[left:right], []byte{'\n'}), - } - p.tokens = append(p.tokens, tok) - state = Start - } - // If we see (unquoted) Left then the original left is probably the user - // typing. Suppress the original left - if bytes.HasPrefix(p.buf[n:], Left) { - p.elideAt(left) - left = n - n += len(Left) - 1 // skip the rest - } - case Start: - if bytes.HasPrefix(p.buf[n:], Left) { - left = n - state = SeenLeft - n += len(Left) - 1 // skip the rest (avoids {{{ bug) - } - } - } - // this error occurs after typing {{ at the end of the file - if state != Start { - // Unclosed Left. remove the Left at left - p.elideAt(left) - } -} - -func (p *Parsed) elideAt(left int) { - if p.elided == nil { - // p.buf is the same buffer that v.Read() returns, so copy it. - // (otherwise the next time it's parsed, elided information is lost) - b := make([]byte, len(p.buf)) - copy(b, p.buf) - p.buf = b - } - for i := 0; i < len(Left); i++ { - p.buf[left+i] = ' ' - } - p.elided = append(p.elided, left) -} - -// isEscaped reports whether the byte after buf is escaped -func isEscaped(buf []byte) bool { - backSlashes := 0 - for j := len(buf) - 1; j >= 0 && buf[j] == '\\'; j-- { - backSlashes++ - } - return backSlashes%2 == 1 -} - -func (p *Parsed) Tokens() []Token { - return p.tokens -} - -func (p *Parsed) utf16len(buf []byte) int { - cnt := 0 - if !p.nonASCII { - return len(buf) - } - // we need a utf16len(rune), but we don't have it - for _, r := range string(buf) { - cnt++ - if r >= 1<<16 { - cnt++ - } - } - return cnt -} - -func (p *Parsed) TokenSize(t Token) (int, error) { - if t.Multiline { - return -1, fmt.Errorf("TokenSize called with Multiline token %#v", t) - } - ans := p.utf16len(p.buf[t.Start:t.End]) - return ans, nil -} - -// RuneCount counts runes in line l, from col s to e -// (e==0 for end of line. called only for multiline tokens) -func (p *Parsed) RuneCount(l, s, e uint32) uint32 { - start := p.nls[l] + 1 + int(s) - end := p.nls[l] + 1 + int(e) - if e == 0 || end > p.nls[l+1] { - end = p.nls[l+1] - } - return uint32(utf8.RuneCount(p.buf[start:end])) -} - -// LineCol converts from a 0-based byte offset to 0-based line, col. col in runes -func (p *Parsed) LineCol(x int) (uint32, uint32) { - if x < p.check { - p.lastnl = 0 - } - p.check = x - for i := p.lastnl; i < len(p.nls); i++ { - if p.nls[i] <= x { - continue - } - p.lastnl = i - var count int - if i > 0 && x == p.nls[i-1] { // \n - count = 0 - } else { - count = p.utf16len(p.buf[p.nls[i-1]+1 : x]) - } - return uint32(i - 1), uint32(count) - } - if x == len(p.buf)-1 { // trailing \n - return uint32(len(p.nls) - 1), 0 - } - // shouldn't happen - for i := 1; i < 4; i++ { - _, f, l, ok := runtime.Caller(i) - if !ok { - break - } - log.Printf("%d: %s:%d", i, f, l) - } - - msg := fmt.Errorf("LineCol off the end, %d of %d, nls=%v, %q", x, len(p.buf), p.nls, p.buf[x:]) - event.Error(context.Background(), "internal error", msg) - return 0, 0 -} - -// Position produces a protocol.Position from an offset in the template -func (p *Parsed) Position(pos int) protocol.Position { - line, col := p.LineCol(pos) - return protocol.Position{Line: line, Character: col} -} - -func (p *Parsed) Range(x, length int) protocol.Range { - line, col := p.LineCol(x) - ans := protocol.Range{ - Start: protocol.Position{Line: line, Character: col}, - End: protocol.Position{Line: line, Character: col + uint32(length)}, - } - return ans -} - -// FromPosition translates a protocol.Position into an offset into the template -func (p *Parsed) FromPosition(x protocol.Position) int { - l, c := int(x.Line), int(x.Character) - if l >= len(p.nls) || p.nls[l]+1 >= len(p.buf) { - // paranoia to avoid panic. return the largest offset - return len(p.buf) - } - line := p.buf[p.nls[l]+1:] - cnt := 0 - for w := range string(line) { - if cnt >= c { - return w + p.nls[l] + 1 - } - cnt++ - } - // do we get here? NO - pos := int(x.Character) + p.nls[int(x.Line)] + 1 - event.Error(context.Background(), "internal error", fmt.Errorf("surprise %#v", x)) - return pos -} - -func symAtPosition(fh source.FileHandle, loc protocol.Position) (*symbol, *Parsed, error) { - buf, err := fh.Read() - if err != nil { - return nil, nil, err - } - p := parseBuffer(buf) - pos := p.FromPosition(loc) - syms := p.SymsAtPos(pos) - if len(syms) == 0 { - return nil, p, fmt.Errorf("no symbol found") - } - if len(syms) > 1 { - log.Printf("Hover: %d syms, not 1 %v", len(syms), syms) - } - sym := syms[0] - return &sym, p, nil -} - -func (p *Parsed) SymsAtPos(pos int) []symbol { - ans := []symbol{} - for _, s := range p.symbols { - if s.start <= pos && pos < s.start+s.length { - ans = append(ans, s) - } - } - return ans -} - -type wrNode struct { - p *Parsed - w io.Writer -} - -// WriteNode is for debugging -func (p *Parsed) WriteNode(w io.Writer, n parse.Node) { - wr := wrNode{p: p, w: w} - wr.writeNode(n, "") -} - -func (wr wrNode) writeNode(n parse.Node, indent string) { - if n == nil { - return - } - at := func(pos parse.Pos) string { - line, col := wr.p.LineCol(int(pos)) - return fmt.Sprintf("(%d)%v:%v", pos, line, col) - } - switch x := n.(type) { - case *parse.ActionNode: - fmt.Fprintf(wr.w, "%sActionNode at %s\n", indent, at(x.Pos)) - wr.writeNode(x.Pipe, indent+". ") - case *parse.BoolNode: - fmt.Fprintf(wr.w, "%sBoolNode at %s, %v\n", indent, at(x.Pos), x.True) - case *parse.BranchNode: - fmt.Fprintf(wr.w, "%sBranchNode at %s\n", indent, at(x.Pos)) - wr.writeNode(x.Pipe, indent+"Pipe. ") - wr.writeNode(x.List, indent+"List. ") - wr.writeNode(x.ElseList, indent+"Else. ") - case *parse.ChainNode: - fmt.Fprintf(wr.w, "%sChainNode at %s, %v\n", indent, at(x.Pos), x.Field) - case *parse.CommandNode: - fmt.Fprintf(wr.w, "%sCommandNode at %s, %d children\n", indent, at(x.Pos), len(x.Args)) - for _, a := range x.Args { - wr.writeNode(a, indent+". ") - } - //case *parse.CommentNode: // 1.16 - case *parse.DotNode: - fmt.Fprintf(wr.w, "%sDotNode at %s\n", indent, at(x.Pos)) - case *parse.FieldNode: - fmt.Fprintf(wr.w, "%sFieldNode at %s, %v\n", indent, at(x.Pos), x.Ident) - case *parse.IdentifierNode: - fmt.Fprintf(wr.w, "%sIdentifierNode at %s, %v\n", indent, at(x.Pos), x.Ident) - case *parse.IfNode: - fmt.Fprintf(wr.w, "%sIfNode at %s\n", indent, at(x.Pos)) - wr.writeNode(&x.BranchNode, indent+". ") - case *parse.ListNode: - if x == nil { - return // nil BranchNode.ElseList - } - fmt.Fprintf(wr.w, "%sListNode at %s, %d children\n", indent, at(x.Pos), len(x.Nodes)) - for _, n := range x.Nodes { - wr.writeNode(n, indent+". ") - } - case *parse.NilNode: - fmt.Fprintf(wr.w, "%sNilNode at %s\n", indent, at(x.Pos)) - case *parse.NumberNode: - fmt.Fprintf(wr.w, "%sNumberNode at %s, %s\n", indent, at(x.Pos), x.Text) - case *parse.PipeNode: - if x == nil { - return // {{template "xxx"}} - } - fmt.Fprintf(wr.w, "%sPipeNode at %s, %d vars, %d cmds, IsAssign:%v\n", - indent, at(x.Pos), len(x.Decl), len(x.Cmds), x.IsAssign) - for _, d := range x.Decl { - wr.writeNode(d, indent+"Decl. ") - } - for _, c := range x.Cmds { - wr.writeNode(c, indent+"Cmd. ") - } - case *parse.RangeNode: - fmt.Fprintf(wr.w, "%sRangeNode at %s\n", indent, at(x.Pos)) - wr.writeNode(&x.BranchNode, indent+". ") - case *parse.StringNode: - fmt.Fprintf(wr.w, "%sStringNode at %s, %s\n", indent, at(x.Pos), x.Quoted) - case *parse.TemplateNode: - fmt.Fprintf(wr.w, "%sTemplateNode at %s, %s\n", indent, at(x.Pos), x.Name) - wr.writeNode(x.Pipe, indent+". ") - case *parse.TextNode: - fmt.Fprintf(wr.w, "%sTextNode at %s, len %d\n", indent, at(x.Pos), len(x.Text)) - case *parse.VariableNode: - fmt.Fprintf(wr.w, "%sVariableNode at %s, %v\n", indent, at(x.Pos), x.Ident) - case *parse.WithNode: - fmt.Fprintf(wr.w, "%sWithNode at %s\n", indent, at(x.Pos)) - wr.writeNode(&x.BranchNode, indent+". ") - } -} - -// short prints at most 40 bytes of node.String(), for debugging -func short(n parse.Node) string { - s := fmt.Sprint(n) // recovers from panic - if len(s) > 40 { - return s[:40] + "..." - } - return s -} - -var kindNames = []string{"", "File", "Module", "Namespace", "Package", "Class", "Method", "Property", - "Field", "Constructor", "Enum", "Interface", "Function", "Variable", "Constant", "String", - "Number", "Boolean", "Array", "Object", "Key", "Null", "EnumMember", "Struct", "Event", - "Operator", "TypeParameter"} - -func kindStr(k protocol.SymbolKind) string { - n := int(k) - if n < 1 || n >= len(kindNames) { - return fmt.Sprintf("?SymbolKind %d?", n) - } - return kindNames[n] -} diff --git a/internal/lsp/template/symbols.go b/internal/lsp/template/symbols.go deleted file mode 100644 index ce5a1e799b7..00000000000 --- a/internal/lsp/template/symbols.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "context" - "fmt" - "text/template/parse" - "unicode/utf8" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -// in local coordinates, to be translated to protocol.DocumentSymbol -type symbol struct { - start int // for sorting - length int // in runes (unicode code points) - name string - kind protocol.SymbolKind - vardef bool // is this a variable definition? - // do we care about selection range, or children? - // no children yet, and selection range is the same as range -} - -func (s symbol) String() string { - return fmt.Sprintf("{%d,%d,%s,%s,%v}", s.start, s.length, s.name, s.kind, s.vardef) -} - -// for FieldNode or VariableNode (or ChainNode?) -func (p *Parsed) fields(flds []string, x parse.Node) []symbol { - ans := []symbol{} - // guessing that there are no embedded blanks allowed. The doc is unclear - lookfor := "" - switch x.(type) { - case *parse.FieldNode: - for _, f := range flds { - lookfor += "." + f // quadratic, but probably ok - } - case *parse.VariableNode: - lookfor = flds[0] - for i := 1; i < len(flds); i++ { - lookfor += "." + flds[i] - } - case *parse.ChainNode: // PJW, what are these? - for _, f := range flds { - lookfor += "." + f // quadratic, but probably ok - } - default: - // If these happen they will happen even if gopls is restarted - // and the users does the same thing, so it is better not to panic. - // context.Background() is used because we don't have access - // to any other context. [we could, but it would be complicated] - event.Log(context.Background(), fmt.Sprintf("%T unexpected in fields()", x)) - return nil - } - if len(lookfor) == 0 { - event.Log(context.Background(), fmt.Sprintf("no strings in fields() %#v", x)) - return nil - } - startsAt := int(x.Position()) - ix := bytes.Index(p.buf[startsAt:], []byte(lookfor)) // HasPrefix? PJW? - if ix < 0 || ix > len(lookfor) { // lookfor expected to be at start (or so) - // probably golang.go/#43388, so back up - startsAt -= len(flds[0]) + 1 - ix = bytes.Index(p.buf[startsAt:], []byte(lookfor)) // ix might be 1? PJW - if ix < 0 { - return ans - } - } - at := ix + startsAt - for _, f := range flds { - at += 1 // . - kind := protocol.Method - if f[0] == '$' { - kind = protocol.Variable - } - sym := symbol{name: f, kind: kind, start: at, length: utf8.RuneCount([]byte(f))} - if kind == protocol.Variable && len(p.stack) > 1 { - if pipe, ok := p.stack[len(p.stack)-2].(*parse.PipeNode); ok { - for _, y := range pipe.Decl { - if x == y { - sym.vardef = true - } - } - } - } - ans = append(ans, sym) - at += len(f) - } - return ans -} - -func (p *Parsed) findSymbols() { - if len(p.stack) == 0 { - return - } - n := p.stack[len(p.stack)-1] - pop := func() { - p.stack = p.stack[:len(p.stack)-1] - } - if n == nil { // allowing nil simplifies the code - pop() - return - } - nxt := func(nd parse.Node) { - p.stack = append(p.stack, nd) - p.findSymbols() - } - switch x := n.(type) { - case *parse.ActionNode: - nxt(x.Pipe) - case *parse.BoolNode: - // need to compute the length from the value - msg := fmt.Sprintf("%v", x.True) - p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(msg), kind: protocol.Boolean}) - case *parse.BranchNode: - nxt(x.Pipe) - nxt(x.List) - nxt(x.ElseList) - case *parse.ChainNode: - p.symbols = append(p.symbols, p.fields(x.Field, x)...) - nxt(x.Node) - case *parse.CommandNode: - for _, a := range x.Args { - nxt(a) - } - //case *parse.CommentNode: // go 1.16 - // log.Printf("implement %d", x.Type()) - case *parse.DotNode: - sym := symbol{name: "dot", kind: protocol.Variable, start: int(x.Pos), length: 1} - p.symbols = append(p.symbols, sym) - case *parse.FieldNode: - p.symbols = append(p.symbols, p.fields(x.Ident, x)...) - case *parse.IdentifierNode: - sym := symbol{name: x.Ident, kind: protocol.Function, start: int(x.Pos), - length: utf8.RuneCount([]byte(x.Ident))} - p.symbols = append(p.symbols, sym) - case *parse.IfNode: - nxt(&x.BranchNode) - case *parse.ListNode: - if x != nil { // wretched typed nils. Node should have an IfNil - for _, nd := range x.Nodes { - nxt(nd) - } - } - case *parse.NilNode: - sym := symbol{name: "nil", kind: protocol.Constant, start: int(x.Pos), length: 3} - p.symbols = append(p.symbols, sym) - case *parse.NumberNode: - // no name; ascii - p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(x.Text), kind: protocol.Number}) - case *parse.PipeNode: - if x == nil { // {{template "foo"}} - return - } - for _, d := range x.Decl { - nxt(d) - } - for _, c := range x.Cmds { - nxt(c) - } - case *parse.RangeNode: - nxt(&x.BranchNode) - case *parse.StringNode: - // no name - sz := utf8.RuneCount([]byte(x.Text)) - p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.String}) - case *parse.TemplateNode: // invoking a template - // x.Pos points to the quote before the name - p.symbols = append(p.symbols, symbol{name: x.Name, kind: protocol.Package, start: int(x.Pos) + 1, - length: utf8.RuneCount([]byte(x.Name))}) - nxt(x.Pipe) - case *parse.TextNode: - if len(x.Text) == 1 && x.Text[0] == '\n' { - break - } - // nothing to report, but build one for hover - sz := utf8.RuneCount([]byte(x.Text)) - p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.Constant}) - case *parse.VariableNode: - p.symbols = append(p.symbols, p.fields(x.Ident, x)...) - case *parse.WithNode: - nxt(&x.BranchNode) - - } - pop() -} - -// DocumentSymbols returns a hierarchy of the symbols defined in a template file. -// (The hierarchy is flat. SymbolInformation might be better.) -func DocumentSymbols(snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentSymbol, error) { - buf, err := fh.Read() - if err != nil { - return nil, err - } - p := parseBuffer(buf) - if p.ParseErr != nil { - return nil, p.ParseErr - } - var ans []protocol.DocumentSymbol - for _, s := range p.symbols { - if s.kind == protocol.Constant { - continue - } - d := kindStr(s.kind) - if d == "Namespace" { - d = "Template" - } - if s.vardef { - d += "(def)" - } else { - d += "(use)" - } - r := p.Range(s.start, s.length) - y := protocol.DocumentSymbol{ - Name: s.name, - Detail: d, - Kind: s.kind, - Range: r, - SelectionRange: r, // or should this be the entire {{...}}? - } - ans = append(ans, y) - } - return ans, nil -} diff --git a/internal/lsp/testdata/analyzer/bad_test.go b/internal/lsp/testdata/analyzer/bad_test.go deleted file mode 100644 index c819cbc0111..00000000000 --- a/internal/lsp/testdata/analyzer/bad_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package analyzer - -import ( - "fmt" - "sync" - "testing" -) - -func Testbad(t *testing.T) { //@diag("", "tests", "Testbad has malformed name: first letter after 'Test' must not be lowercase", "warning") - var x sync.Mutex - _ = x //@diag("x", "copylocks", "assignment copies lock value to _: sync.Mutex", "warning") - - printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, "printf", "golang.org/x/tools/internal/lsp/analyzer.printfWrapper format %s reads arg #1, but call has 0 args", "warning") -} - -func printfWrapper(format string, args ...interface{}) { - fmt.Printf(format, args...) -} diff --git a/internal/lsp/testdata/bad/bad0.go b/internal/lsp/testdata/bad/bad0.go deleted file mode 100644 index 36a4e6b95f7..00000000000 --- a/internal/lsp/testdata/bad/bad0.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build go1.11 - -package bad - -import _ "golang.org/x/tools/internal/lsp/assign/internal/secret" //@diag("\"golang.org/x/tools/internal/lsp/assign/internal/secret\"", "compiler", "could not import golang.org/x/tools/internal/lsp/assign/internal/secret (invalid use of internal package golang.org/x/tools/internal/lsp/assign/internal/secret)", "error") - -func stuff() { //@item(stuff, "stuff", "func()", "func") - x := "heeeeyyyy" - random2(x) //@diag("x", "compiler", "cannot use x (variable of type string) as int value in argument to random2", "error") - random2(1) //@complete("dom", random, random2, random3) - y := 3 //@diag("y", "compiler", "y declared but not used", "error") -} - -type bob struct { //@item(bob, "bob", "struct{...}", "struct") - x int -} - -func _() { - var q int - _ = &bob{ - f: q, //@diag("f: q", "compiler", "unknown field f in struct literal", "error") - } -} diff --git a/internal/lsp/testdata/bad/bad1.go b/internal/lsp/testdata/bad/bad1.go deleted file mode 100644 index 512f2d9869b..00000000000 --- a/internal/lsp/testdata/bad/bad1.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.11 - -package bad - -// See #36637 -type stateFunc func() stateFunc //@item(stateFunc, "stateFunc", "func() stateFunc", "type") - -var a unknown //@item(global_a, "a", "unknown", "var"),diag("unknown", "compiler", "undeclared name: unknown", "error") - -func random() int { //@item(random, "random", "func() int", "func") - //@complete("", global_a, bob, random, random2, random3, stateFunc, stuff) - return 0 -} - -func random2(y int) int { //@item(random2, "random2", "func(y int) int", "func"),item(bad_y_param, "y", "int", "var") - x := 6 //@item(x, "x", "int", "var"),diag("x", "compiler", "x declared but not used", "error") - var q blah //@item(q, "q", "blah", "var"),diag("q", "compiler", "q declared but not used", "error"),diag("blah", "compiler", "undeclared name: blah", "error") - var t **blob //@item(t, "t", "**blob", "var"),diag("t", "compiler", "t declared but not used", "error"),diag("blob", "compiler", "undeclared name: blob", "error") - //@complete("", q, t, x, bad_y_param, global_a, bob, random, random2, random3, stateFunc, stuff) - - return y -} - -func random3(y ...int) { //@item(random3, "random3", "func(y ...int)", "func"),item(y_variadic_param, "y", "[]int", "var") - //@complete("", y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff) - - var ch chan (favType1) //@item(ch, "ch", "chan (favType1)", "var"),diag("ch", "compiler", "ch declared but not used", "error"),diag("favType1", "compiler", "undeclared name: favType1", "error") - var m map[keyType]int //@item(m, "m", "map[keyType]int", "var"),diag("m", "compiler", "m declared but not used", "error"),diag("keyType", "compiler", "undeclared name: keyType", "error") - var arr []favType2 //@item(arr, "arr", "[]favType2", "var"),diag("arr", "compiler", "arr declared but not used", "error"),diag("favType2", "compiler", "undeclared name: favType2", "error") - var fn1 func() badResult //@item(fn1, "fn1", "func() badResult", "var"),diag("fn1", "compiler", "fn1 declared but not used", "error"),diag("badResult", "compiler", "undeclared name: badResult", "error") - var fn2 func(badParam) //@item(fn2, "fn2", "func(badParam)", "var"),diag("fn2", "compiler", "fn2 declared but not used", "error"),diag("badParam", "compiler", "undeclared name: badParam", "error") - //@complete("", arr, ch, fn1, fn2, m, y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff) -} diff --git a/internal/lsp/testdata/badstmt/badstmt.go.in b/internal/lsp/testdata/badstmt/badstmt.go.in deleted file mode 100644 index 5a560791086..00000000000 --- a/internal/lsp/testdata/badstmt/badstmt.go.in +++ /dev/null @@ -1,26 +0,0 @@ -package badstmt - -import ( - "golang.org/x/tools/internal/lsp/foo" -) - -func _() { - defer foo.F //@complete(" //", Foo),diag(" //", "syntax", "function must be invoked in defer statement", "error") - y := 1 - defer foo.F //@complete(" //", Foo) -} - -func _() { - switch true { - case true: - go foo.F //@complete(" //", Foo) - } -} - -func _() { - defer func() { - foo.F //@complete(" //", Foo),snippet(" //", Foo, "Foo()", "Foo()") - - foo. //@rank(" //", Foo) - } -} diff --git a/internal/lsp/testdata/badstmt/badstmt_2.go.in b/internal/lsp/testdata/badstmt/badstmt_2.go.in deleted file mode 100644 index f754b46aaac..00000000000 --- a/internal/lsp/testdata/badstmt/badstmt_2.go.in +++ /dev/null @@ -1,9 +0,0 @@ -package badstmt - -import ( - "golang.org/x/tools/internal/lsp/foo" -) - -func _() { - defer func() { foo. } //@rank(" }", Foo) -} diff --git a/internal/lsp/testdata/badstmt/badstmt_4.go.in b/internal/lsp/testdata/badstmt/badstmt_4.go.in deleted file mode 100644 index a9b46fb021b..00000000000 --- a/internal/lsp/testdata/badstmt/badstmt_4.go.in +++ /dev/null @@ -1,11 +0,0 @@ -package badstmt - -import ( - "golang.org/x/tools/internal/lsp/foo" -) - -func _() { - go func() { - defer foo. //@rank(" //", Foo, IntFoo) - } -} diff --git a/internal/lsp/testdata/basiclit/basiclit.go b/internal/lsp/testdata/basiclit/basiclit.go deleted file mode 100644 index 9829003d357..00000000000 --- a/internal/lsp/testdata/basiclit/basiclit.go +++ /dev/null @@ -1,56 +0,0 @@ -package basiclit - -func _() { - var a int // something for lexical completions - - _ = "hello." //@complete(".") - - _ = 1 //@complete(" //") - - _ = 1. //@complete(".") - - _ = 'a' //@complete("' ") - - _ = 'a' //@hover("'a'", "'a', U+0061, LATIN SMALL LETTER A") - _ = 0x61 //@hover("0x61", "'a', U+0061, LATIN SMALL LETTER A") - - _ = '\u2211' //@hover("'\\u2211'", "'āˆ‘', U+2211, N-ARY SUMMATION") - _ = 0x2211 //@hover("0x2211", "'āˆ‘', U+2211, N-ARY SUMMATION") - _ = "foo \u2211 bar" //@hover("\\u2211", "'āˆ‘', U+2211, N-ARY SUMMATION") - - _ = '\a' //@hover("'\\a'", "U+0007, control") - _ = "foo \a bar" //@hover("\\a", "U+0007, control") - - _ = '\U0001F30A' //@hover("'\\U0001F30A'", "'🌊', U+1F30A, WATER WAVE") - _ = 0x0001F30A //@hover("0x0001F30A", "'🌊', U+1F30A, WATER WAVE") - _ = "foo \U0001F30A bar" //@hover("\\U0001F30A", "'🌊', U+1F30A, WATER WAVE") - - _ = '\x7E' //@hover("'\\x7E'", "'~', U+007E, TILDE") - _ = "foo \x7E bar" //@hover("\\x7E", "'~', U+007E, TILDE") - _ = "foo \a bar" //@hover("\\a", "U+0007, control") - - _ = '\173' //@hover("'\\173'", "'{', U+007B, LEFT CURLY BRACKET") - _ = "foo \173 bar" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET") - _ = "foo \173 bar \u2211 baz" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET") - _ = "foo \173 bar \u2211 baz" //@hover("\\u2211", "'āˆ‘', U+2211, N-ARY SUMMATION") - _ = "foo\173bar\u2211baz" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET") - _ = "foo\173bar\u2211baz" //@hover("\\u2211", "'āˆ‘', U+2211, N-ARY SUMMATION") - - // search for runes in string only if there is an escaped sequence - _ = "hello" //@hover("\"hello\"", "") - - // incorrect escaped rune sequences - _ = '\0' //@hover("'\\0'", "") - _ = '\u22111' //@hover("'\\u22111'", "") - _ = '\U00110000' //@hover("'\\U00110000'", "") - _ = '\u12e45'//@hover("'\\u12e45'", "") - _ = '\xa' //@hover("'\\xa'", "") - _ = 'aa' //@hover("'aa'", "") - - // other basic lits - _ = 1 //@hover("1", "") - _ = 1.2 //@hover("1.2", "") - _ = 1.2i //@hover("1.2i", "") - _ = 0123 //@hover("0123", "") - _ = 0x1234567890 //@hover("0x1234567890", "") -} diff --git a/internal/lsp/testdata/cgo/declarecgo.go.golden b/internal/lsp/testdata/cgo/declarecgo.go.golden deleted file mode 100644 index b6d94d0c6c6..00000000000 --- a/internal/lsp/testdata/cgo/declarecgo.go.golden +++ /dev/null @@ -1,30 +0,0 @@ --- funccgoexample-definition -- -cgo/declarecgo.go:18:6-13: defined here as ```go -func Example() -``` - -[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example) --- funccgoexample-definition-json -- -{ - "span": { - "uri": "file://cgo/declarecgo.go", - "start": { - "line": 18, - "column": 6, - "offset": 151 - }, - "end": { - "line": 18, - "column": 13, - "offset": 158 - } - }, - "description": "```go\nfunc Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)" -} - --- funccgoexample-hoverdef -- -```go -func Example() -``` - -[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example) diff --git a/internal/lsp/testdata/cgoimport/usecgo.go.golden b/internal/lsp/testdata/cgoimport/usecgo.go.golden deleted file mode 100644 index f33f94f84a6..00000000000 --- a/internal/lsp/testdata/cgoimport/usecgo.go.golden +++ /dev/null @@ -1,30 +0,0 @@ --- funccgoexample-definition -- -cgo/declarecgo.go:18:6-13: defined here as ```go -func cgo.Example() -``` - -[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example) --- funccgoexample-definition-json -- -{ - "span": { - "uri": "file://cgo/declarecgo.go", - "start": { - "line": 18, - "column": 6, - "offset": 151 - }, - "end": { - "line": 18, - "column": 13, - "offset": 158 - } - }, - "description": "```go\nfunc cgo.Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)" -} - --- funccgoexample-hoverdef -- -```go -func cgo.Example() -``` - -[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example) diff --git a/internal/lsp/testdata/danglingstmt/dangling_selector_2.go b/internal/lsp/testdata/danglingstmt/dangling_selector_2.go deleted file mode 100644 index a9e75e82a57..00000000000 --- a/internal/lsp/testdata/danglingstmt/dangling_selector_2.go +++ /dev/null @@ -1,8 +0,0 @@ -package danglingstmt - -import "golang.org/x/tools/internal/lsp/foo" - -func _() { - foo. //@rank(" //", Foo) - var _ = []string{foo.} //@rank("}", Foo) -} diff --git a/internal/lsp/testdata/errors/errors.go b/internal/lsp/testdata/errors/errors.go deleted file mode 100644 index 42105629eaa..00000000000 --- a/internal/lsp/testdata/errors/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package errors - -import ( - "golang.org/x/tools/internal/lsp/types" -) - -func _() { - bob.Bob() //@complete(".") - types.b //@complete(" //", Bob_interface) -} diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go deleted file mode 100644 index 4e2b12fbcd2..00000000000 --- a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go +++ /dev/null @@ -1,8 +0,0 @@ -package extract - -func _() { - a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a") - // Comment on its own line - _ = 3 + 4 //@mark(exEn18, "4") - //@extractfunc(exSt18, exEn18) -} diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden deleted file mode 100644 index a43822a90b0..00000000000 --- a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden +++ /dev/null @@ -1,17 +0,0 @@ --- functionextraction_extract_basic_comment_4_2 -- -package extract - -func _() { - /* comment in the middle of a line */ - //@mark(exSt18, "a") - // Comment on its own line - newFunction() //@mark(exEn18, "4") - //@extractfunc(exSt18, exEn18) -} - -func newFunction() { - a := 1 - - _ = 3 + 4 -} - diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden b/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden deleted file mode 100644 index eab22a673c1..00000000000 --- a/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden +++ /dev/null @@ -1,728 +0,0 @@ --- functionextraction_extract_basic_13_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(a *A) int { - sum := a.x + a.y - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_basic_14_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(sum int) int { - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_basic_18_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func newFunction(a A) bool { - return a.x < a.y -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_basic_22_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(a A) int { - sum := a.x + a.y - return sum -} - --- functionextraction_extract_basic_23_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(sum int) int { - return sum -} - --- functionextraction_extract_basic_9_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func newFunction(a *A) bool { - return a.x < a.y -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_method_13_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(a *A) int { - sum := a.x + a.y - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_method_14_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(sum int) int { - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_method_18_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func newFunction(a A) bool { - return a.x < a.y -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- functionextraction_extract_method_22_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(a A) int { - sum := a.x + a.y - return sum -} - --- functionextraction_extract_method_23_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func newFunction(sum int) int { - return sum -} - --- functionextraction_extract_method_9_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func newFunction(a *A) bool { - return a.x < a.y -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_basic_13_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a *A) newMethod() int { - sum := a.x + a.y - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_basic_14_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (*A) newMethod(sum int) int { - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_basic_18_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) newMethod() bool { - return a.x < a.y -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_basic_22_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) newMethod() int { - sum := a.x + a.y - return sum -} - --- methodextraction_extract_basic_23_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (A) newMethod(sum int) int { - return sum -} - --- methodextraction_extract_basic_9_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) newMethod() bool { - return a.x < a.y -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_method_13_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a *A) newMethod() int { - sum := a.x + a.y - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_method_14_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (*A) newMethod(sum int) int { - return sum -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_method_18_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) newMethod() bool { - return a.x < a.y -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - --- methodextraction_extract_method_22_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) newMethod() int { - sum := a.x + a.y - return sum -} - --- methodextraction_extract_method_23_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (A) newMethod(sum int) int { - return sum -} - --- methodextraction_extract_method_9_2 -- -package extract - -type A struct { - x int - y int -} - -func (a *A) XLessThanYP() bool { - return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a *A) newMethod() bool { - return a.x < a.y -} - -func (a *A) AddP() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - -func (a A) XLessThanY() bool { - return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y") -} - -func (a A) Add() int { - sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y") - return sum //@extractmethod("return", "sum"),extractfunc("return", "sum") -} - diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go deleted file mode 100644 index c49e5d6a017..00000000000 --- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go +++ /dev/null @@ -1,6 +0,0 @@ -package extract - -func _() { - var _ = 1 + 2 //@suggestedfix("1", "refactor.extract") - var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract") -} diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden deleted file mode 100644 index 00ee7b4f94d..00000000000 --- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden +++ /dev/null @@ -1,18 +0,0 @@ --- suggestedfix_extract_basic_lit_4_10 -- -package extract - -func _() { - x := 1 - var _ = x + 2 //@suggestedfix("1", "refactor.extract") - var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract") -} - --- suggestedfix_extract_basic_lit_5_10 -- -package extract - -func _() { - var _ = 1 + 2 //@suggestedfix("1", "refactor.extract") - x := 3 + 4 - var _ = x //@suggestedfix("3 + 4", "refactor.extract") -} - diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden deleted file mode 100644 index 74df67ee65f..00000000000 --- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden +++ /dev/null @@ -1,36 +0,0 @@ --- suggestedfix_extract_func_call_6_7 -- -package extract - -import "strconv" - -func _() { - x0 := append([]int{}, 1) - a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract") - str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") -} - --- suggestedfix_extract_func_call_6_8 -- -package extract - -import "strconv" - -func _() { - x := append([]int{}, 1) - x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract") - str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") -} - --- suggestedfix_extract_func_call_8_12 -- -package extract - -import "strconv" - -func _() { - x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract") - str := "1" - x, x1 := strconv.Atoi(str) - b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract") -} - diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/internal/lsp/testdata/extract/extract_variable/extract_scope.go deleted file mode 100644 index 5dfcc36203b..00000000000 --- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go +++ /dev/null @@ -1,13 +0,0 @@ -package extract - -import "go/ast" - -func _() { - x0 := 0 - if true { - y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract") - } - if true { - x1 := !false //@suggestedfix("!false", "refactor.extract") - } -} diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden deleted file mode 100644 index e0e6464b59a..00000000000 --- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden +++ /dev/null @@ -1,32 +0,0 @@ --- suggestedfix_extract_scope_11_9 -- -package extract - -import "go/ast" - -func _() { - x0 := 0 - if true { - y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract") - } - if true { - x := !false - x1 := x //@suggestedfix("!false", "refactor.extract") - } -} - --- suggestedfix_extract_scope_8_8 -- -package extract - -import "go/ast" - -func _() { - x0 := 0 - if true { - x := ast.CompositeLit{} - y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract") - } - if true { - x1 := !false //@suggestedfix("!false", "refactor.extract") - } -} - diff --git a/internal/lsp/testdata/fillstruct/a.go b/internal/lsp/testdata/fillstruct/a.go deleted file mode 100644 index 5c6df6c4a7c..00000000000 --- a/internal/lsp/testdata/fillstruct/a.go +++ /dev/null @@ -1,27 +0,0 @@ -package fillstruct - -import ( - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -type basicStruct struct { - foo int -} - -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") - -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") diff --git a/internal/lsp/testdata/fillstruct/a.go.golden b/internal/lsp/testdata/fillstruct/a.go.golden deleted file mode 100644 index 5d6dbceb279..00000000000 --- a/internal/lsp/testdata/fillstruct/a.go.golden +++ /dev/null @@ -1,126 +0,0 @@ --- suggestedfix_a_11_21 -- -package fillstruct - -import ( - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -type basicStruct struct { - foo int -} - -var _ = basicStruct{ - foo: 0, -} //@suggestedfix("}", "refactor.rewrite") - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") - -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a_18_22 -- -package fillstruct - -import ( - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -type basicStruct struct { - foo int -} - -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{ - foo: 0, - bar: "", -} //@suggestedfix("}", "refactor.rewrite") - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") - -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a_25_22 -- -package fillstruct - -import ( - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -type basicStruct struct { - foo int -} - -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{ - bar: "", - basic: basicStruct{}, -} //@suggestedfix("}", "refactor.rewrite") - -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a_27_16 -- -package fillstruct - -import ( - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -type basicStruct struct { - foo int -} - -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") - -type twoArgStruct struct { - foo int - bar string -} - -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") - -type nestedStruct struct { - bar string - basic basicStruct -} - -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") - -var _ = data.B{ - ExportedInt: 0, -} //@suggestedfix("}", "refactor.rewrite") - diff --git a/internal/lsp/testdata/fillstruct/a2.go b/internal/lsp/testdata/fillstruct/a2.go deleted file mode 100644 index 8e12a6b54ba..00000000000 --- a/internal/lsp/testdata/fillstruct/a2.go +++ /dev/null @@ -1,29 +0,0 @@ -package fillstruct - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") diff --git a/internal/lsp/testdata/fillstruct/a2.go.golden b/internal/lsp/testdata/fillstruct/a2.go.golden deleted file mode 100644 index 78a6ee2b691..00000000000 --- a/internal/lsp/testdata/fillstruct/a2.go.golden +++ /dev/null @@ -1,139 +0,0 @@ --- suggestedfix_a2_11_21 -- -package fillstruct - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{ - m: map[string]int{}, - s: []int{}, - c: make(chan int), - c1: make(<-chan int), - a: [2]string{}, -} //@suggestedfix("}", "refactor.rewrite") - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a2_17_19 -- -package fillstruct - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{ - fn: func(i int) int { - }, -} //@suggestedfix("}", "refactor.rewrite") - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a2_23_25 -- -package fillstruct - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{ - fn: func(i int, s string) (string, int) { - }, -} //@suggestedfix("}", "refactor.rewrite") - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") - --- suggestedfix_a2_29_24 -- -package fillstruct - -type typedStruct struct { - m map[string]int - s []int - c chan int - c1 <-chan int - a [2]string -} - -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStruct struct { - fn func(i int) int -} - -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") - -type funStructCompex struct { - fn func(i int, s string) (string, int) -} - -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") - -type funStructEmpty struct { - fn func() -} - -var _ = funStructEmpty{ - fn: func() { - }, -} //@suggestedfix("}", "refactor.rewrite") - diff --git a/internal/lsp/testdata/fillstruct/a4.go b/internal/lsp/testdata/fillstruct/a4.go deleted file mode 100644 index 7833d338c64..00000000000 --- a/internal/lsp/testdata/fillstruct/a4.go +++ /dev/null @@ -1,39 +0,0 @@ -package fillstruct - -import "go/ast" - -type iStruct struct { - X int -} - -type sStruct struct { - str string -} - -type multiFill struct { - num int - strin string - arr []int -} - -type assignStruct struct { - n ast.Node -} - -func fill() { - var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") - - var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") - - var n int - _ = []int{} - if true { - arr := []int{1, 2} - } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") - - var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") -} diff --git a/internal/lsp/testdata/fillstruct/a4.go.golden b/internal/lsp/testdata/fillstruct/a4.go.golden deleted file mode 100644 index 109c6b5ea47..00000000000 --- a/internal/lsp/testdata/fillstruct/a4.go.golden +++ /dev/null @@ -1,174 +0,0 @@ --- suggestedfix_a4_25_18 -- -package fillstruct - -import "go/ast" - -type iStruct struct { - X int -} - -type sStruct struct { - str string -} - -type multiFill struct { - num int - strin string - arr []int -} - -type assignStruct struct { - n ast.Node -} - -func fill() { - var x int - var _ = iStruct{ - X: x, - } //@suggestedfix("}", "refactor.rewrite") - - var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") - - var n int - _ = []int{} - if true { - arr := []int{1, 2} - } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") - - var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") -} - --- suggestedfix_a4_28_18 -- -package fillstruct - -import "go/ast" - -type iStruct struct { - X int -} - -type sStruct struct { - str string -} - -type multiFill struct { - num int - strin string - arr []int -} - -type assignStruct struct { - n ast.Node -} - -func fill() { - var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") - - var s string - var _ = sStruct{ - str: s, - } //@suggestedfix("}", "refactor.rewrite") - - var n int - _ = []int{} - if true { - arr := []int{1, 2} - } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") - - var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") -} - --- suggestedfix_a4_35_20 -- -package fillstruct - -import "go/ast" - -type iStruct struct { - X int -} - -type sStruct struct { - str string -} - -type multiFill struct { - num int - strin string - arr []int -} - -type assignStruct struct { - n ast.Node -} - -func fill() { - var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") - - var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") - - var n int - _ = []int{} - if true { - arr := []int{1, 2} - } - var _ = multiFill{ - num: n, - strin: s, - arr: []int{}, - } //@suggestedfix("}", "refactor.rewrite") - - var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") -} - --- suggestedfix_a4_38_23 -- -package fillstruct - -import "go/ast" - -type iStruct struct { - X int -} - -type sStruct struct { - str string -} - -type multiFill struct { - num int - strin string - arr []int -} - -type assignStruct struct { - n ast.Node -} - -func fill() { - var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") - - var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") - - var n int - _ = []int{} - if true { - arr := []int{1, 2} - } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") - - var node *ast.CompositeLit - var _ = assignStruct{ - n: node, - } //@suggestedfix("}", "refactor.rewrite") -} - diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go b/internal/lsp/testdata/fillstruct/fill_struct.go deleted file mode 100644 index fccec135321..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct.go +++ /dev/null @@ -1,26 +0,0 @@ -package fillstruct - -type StructA struct { - unexportedIntField int - ExportedIntField int - MapA map[int]string - Array []int - StructB -} - -type StructA2 struct { - B *StructB -} - -type StructA3 struct { - B StructB -} - -func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") - if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") - } -} diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/internal/lsp/testdata/fillstruct/fill_struct.go.golden deleted file mode 100644 index 8d997031516..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct.go.golden +++ /dev/null @@ -1,124 +0,0 @@ --- suggestedfix_fill_struct_20_15 -- -package fillstruct - -type StructA struct { - unexportedIntField int - ExportedIntField int - MapA map[int]string - Array []int - StructB -} - -type StructA2 struct { - B *StructB -} - -type StructA3 struct { - B StructB -} - -func fill() { - a := StructA{ - unexportedIntField: 0, - ExportedIntField: 0, - MapA: map[int]string{}, - Array: []int{}, - StructB: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") - if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") - } -} - --- suggestedfix_fill_struct_21_16 -- -package fillstruct - -type StructA struct { - unexportedIntField int - ExportedIntField int - MapA map[int]string - Array []int - StructB -} - -type StructA2 struct { - B *StructB -} - -type StructA3 struct { - B StructB -} - -func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{ - B: &StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") - if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") - } -} - --- suggestedfix_fill_struct_22_16 -- -package fillstruct - -type StructA struct { - unexportedIntField int - ExportedIntField int - MapA map[int]string - Array []int - StructB -} - -type StructA2 struct { - B *StructB -} - -type StructA3 struct { - B StructB -} - -func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{ - B: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") - } -} - --- suggestedfix_fill_struct_24_16 -- -package fillstruct - -type StructA struct { - unexportedIntField int - ExportedIntField int - MapA map[int]string - Array []int - StructB -} - -type StructA2 struct { - B *StructB -} - -type StructA3 struct { - B StructB -} - -func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") - if true { - _ = StructA3{ - B: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - } -} - diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/internal/lsp/testdata/fillstruct/fill_struct_anon.go deleted file mode 100644 index b5d2337fd9d..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go +++ /dev/null @@ -1,14 +0,0 @@ -package fillstruct - -type StructAnon struct { - a struct{} - b map[string]interface{} - c map[string]struct { - d int - e bool - } -} - -func fill() { - _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite") -} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/internal/lsp/testdata/fillstruct/fill_struct_nested.go deleted file mode 100644 index 79eb84b7478..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go +++ /dev/null @@ -1,15 +0,0 @@ -package fillstruct - -type StructB struct { - StructC -} - -type StructC struct { - unexportedInt int -} - -func nested() { - c := StructB{ - StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite") - } -} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go b/internal/lsp/testdata/fillstruct/fill_struct_package.go deleted file mode 100644 index 71f124858b3..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_package.go +++ /dev/null @@ -1,12 +0,0 @@ -package fillstruct - -import ( - h2 "net/http" - - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -func unexported() { - a := data.B{} //@suggestedfix("}", "refactor.rewrite") - _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite") -} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden deleted file mode 100644 index 13c85702527..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden +++ /dev/null @@ -1,36 +0,0 @@ --- suggestedfix_fill_struct_package_10_14 -- -package fillstruct - -import ( - h2 "net/http" - - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -func unexported() { - a := data.B{ - ExportedInt: 0, - } //@suggestedfix("}", "refactor.rewrite") - _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite") -} - --- suggestedfix_fill_struct_package_11_16 -- -package fillstruct - -import ( - h2 "net/http" - - "golang.org/x/tools/internal/lsp/fillstruct/data" -) - -func unexported() { - a := data.B{} //@suggestedfix("}", "refactor.rewrite") - _ = h2.Client{ - Transport: nil, - CheckRedirect: func(req *h2.Request, via []*h2.Request) error { - }, - Jar: nil, - Timeout: 0, - } //@suggestedfix("}", "refactor.rewrite") -} - diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go deleted file mode 100644 index d5d1bbba5c3..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go +++ /dev/null @@ -1,9 +0,0 @@ -package fillstruct - -type StructD struct { - ExportedIntField int -} - -func spaces() { - d := StructD{} //@suggestedfix("}", "refactor.rewrite") -} diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go deleted file mode 100644 index 50877e9005c..00000000000 --- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go +++ /dev/null @@ -1,12 +0,0 @@ -package fillstruct - -import "unsafe" - -type unsafeStruct struct { - x int - p unsafe.Pointer -} - -func fill() { - _ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite") -} diff --git a/internal/lsp/testdata/folding/a.go.golden b/internal/lsp/testdata/folding/a.go.golden deleted file mode 100644 index ce691023361..00000000000 --- a/internal/lsp/testdata/folding/a.go.golden +++ /dev/null @@ -1,759 +0,0 @@ --- foldingRange-0 -- -package folding //@fold("package") - -import (<>) - -import _ "os" - -// bar is a function.<> -func bar(<>) string {<>} - --- foldingRange-1 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch {<>} - /* This is a multiline<> - - /* This is a multiline<> - _ = []int{<>} - _ = [2]string{<>} - _ = map[string]int{<>} - type T struct {<>} - _ = T{<>} - x, y := make(<>), make(<>) - select {<>} - // This is a multiline comment<> - return <> -} - --- foldingRange-2 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true:<> - case false:<> - default:<> - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x:<> - case <-y:<> - default:<> - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-3 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true {<>} else {<>} - case false: - fmt.Println(<>) - default: - fmt.Println(<>) - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val {<>} else {<>} - case <-y: - fmt.Println(<>) - default: - fmt.Println(<>) - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-4 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true { - fmt.Println(<>) - } else { - fmt.Println(<>) - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val { - fmt.Println(<>) - } else { - fmt.Println(<>) - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-cmd -- -3:9-6:0 -10:22-11:32 -12:10-12:9 -12:20-75:0 -14:10-25:1 -15:12-20:3 -16:12-18:2 -17:16-17:21 -18:11-20:2 -19:16-19:22 -21:13-22:22 -22:15-22:21 -23:10-24:24 -24:15-24:23 -26:24-28:11 -30:24-33:32 -34:12-38:1 -39:16-41:1 -42:21-46:1 -47:17-51:1 -52:8-56:1 -57:15-57:23 -57:32-57:40 -58:10-69:1 -59:18-64:3 -60:11-62:2 -61:16-61:28 -62:11-64:2 -63:16-63:29 -65:11-66:18 -66:15-66:17 -67:10-68:24 -68:15-68:23 -70:32-71:30 -72:9-74:16 - --- foldingRange-comment-0 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function.<> -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true { - fmt.Println("true") - } else { - fmt.Println("false") - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline<> - - /* This is a multiline<> - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val { - fmt.Println("true from x") - } else { - fmt.Println("false from x") - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment<> - return ` -this string -is not indented` -} - --- foldingRange-imports-0 -- -package folding //@fold("package") - -import (<>) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true { - fmt.Println("true") - } else { - fmt.Println("false") - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val { - fmt.Println("true from x") - } else { - fmt.Println("false from x") - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-lineFolding-0 -- -package folding //@fold("package") - -import (<> -) - -import _ "os" - -// bar is a function.<> -func bar() string {<> -} - --- foldingRange-lineFolding-1 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch {<> - } - /* This is a multiline<> - - /* This is a multiline<> - _ = []int{<>, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{<>, - } - type T struct {<> - } - _ = T{<>, - } - x, y := make(chan bool), make(chan bool) - select {<> - } - // This is a multiline comment<> - return <> -} - --- foldingRange-lineFolding-2 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true:<> - case false:<> - default:<> - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x:<> - case <-y:<> - default:<> - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-lineFolding-3 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true {<> - } else {<> - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val {<> - } else {<> - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - --- foldingRange-lineFolding-comment-0 -- -package folding //@fold("package") - -import ( - "fmt" - _ "log" -) - -import _ "os" - -// bar is a function.<> -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true { - fmt.Println("true") - } else { - fmt.Println("false") - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline<> - - /* This is a multiline<> - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val { - fmt.Println("true from x") - } else { - fmt.Println("false from x") - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment<> - return ` -this string -is not indented` -} - --- foldingRange-lineFolding-imports-0 -- -package folding //@fold("package") - -import (<> -) - -import _ "os" - -// bar is a function. -// With a multiline doc comment. -func bar() string { - /* This is a single line comment */ - switch { - case true: - if true { - fmt.Println("true") - } else { - fmt.Println("false") - } - case false: - fmt.Println("false") - default: - fmt.Println("default") - } - /* This is a multiline - block - comment */ - - /* This is a multiline - block - comment */ - // Followed by another comment. - _ = []int{ - 1, - 2, - 3, - } - _ = [2]string{"d", - "e", - } - _ = map[string]int{ - "a": 1, - "b": 2, - "c": 3, - } - type T struct { - f string - g int - h string - } - _ = T{ - f: "j", - g: 4, - h: "i", - } - x, y := make(chan bool), make(chan bool) - select { - case val := <-x: - if val { - fmt.Println("true from x") - } else { - fmt.Println("false from x") - } - case <-y: - fmt.Println("y") - default: - fmt.Println("default") - } - // This is a multiline comment - // that is not a doc comment. - return ` -this string -is not indented` -} - diff --git a/internal/lsp/testdata/folding/bad.go.golden b/internal/lsp/testdata/folding/bad.go.golden deleted file mode 100644 index d1bdfec60cd..00000000000 --- a/internal/lsp/testdata/folding/bad.go.golden +++ /dev/null @@ -1,91 +0,0 @@ --- foldingRange-0 -- -package folding //@fold("package") - -import (<>) - -import (<>) - -// badBar is a function. -func badBar(<>) string {<>} - --- foldingRange-1 -- -package folding //@fold("package") - -import ( "fmt" - _ "log" -) - -import ( - _ "os" ) - -// badBar is a function. -func badBar() string { x := true - if x {<>} else {<>} - return -} - --- foldingRange-2 -- -package folding //@fold("package") - -import ( "fmt" - _ "log" -) - -import ( - _ "os" ) - -// badBar is a function. -func badBar() string { x := true - if x { - // This is the only foldable thing in this file when lineFoldingOnly - fmt.Println(<>) - } else { - fmt.Println(<>) } - return -} - --- foldingRange-cmd -- -3:9-5:0 -7:9-8:8 -11:13-11:12 -11:23-18:0 -12:8-15:1 -14:15-14:20 -15:10-16:23 -16:15-16:21 - --- foldingRange-imports-0 -- -package folding //@fold("package") - -import (<>) - -import (<>) - -// badBar is a function. -func badBar() string { x := true - if x { - // This is the only foldable thing in this file when lineFoldingOnly - fmt.Println("true") - } else { - fmt.Println("false") } - return -} - --- foldingRange-lineFolding-0 -- -package folding //@fold("package") - -import ( "fmt" - _ "log" -) - -import ( - _ "os" ) - -// badBar is a function. -func badBar() string { x := true - if x {<> - } else { - fmt.Println("false") } - return -} - diff --git a/internal/lsp/testdata/foo/foo.go b/internal/lsp/testdata/foo/foo.go deleted file mode 100644 index 20ea183e5d9..00000000000 --- a/internal/lsp/testdata/foo/foo.go +++ /dev/null @@ -1,30 +0,0 @@ -package foo //@mark(PackageFoo, "foo"),item(PackageFoo, "foo", "\"golang.org/x/tools/internal/lsp/foo\"", "package") - -type StructFoo struct { //@item(StructFoo, "StructFoo", "struct{...}", "struct") - Value int //@item(Value, "Value", "int", "field") -} - -// Pre-set this marker, as we don't have a "source" for it in this package. -/* Error() */ //@item(Error, "Error", "func() string", "method") - -func Foo() { //@item(Foo, "Foo", "func()", "func") - var err error - err.Error() //@complete("E", Error) -} - -func _() { - var sFoo StructFoo //@mark(sFoo1, "sFoo"),complete("t", StructFoo) - if x := sFoo; x.Value == 1 { //@mark(sFoo2, "sFoo"),complete("V", Value),typdef("sFoo", StructFoo),refs("sFo", sFoo1, sFoo2) - return - } -} - -func _() { - shadowed := 123 - { - shadowed := "hi" //@item(shadowed, "shadowed", "string", "var"),refs("shadowed", shadowed) - sha //@complete("a", shadowed) - } -} - -type IntFoo int //@item(IntFoo, "IntFoo", "int", "type") diff --git a/internal/lsp/testdata/format/bad_format.go.golden b/internal/lsp/testdata/format/bad_format.go.golden deleted file mode 100644 index c2ac5a1a13e..00000000000 --- a/internal/lsp/testdata/format/bad_format.go.golden +++ /dev/null @@ -1,21 +0,0 @@ --- gofmt -- -package format //@format("package") - -import ( - "fmt" - "log" - "runtime" -) - -func hello() { - - var x int //@diag("x", "compiler", "x declared but not used", "error") -} - -func hi() { - runtime.GOROOT() - fmt.Printf("") - - log.Printf("") -} - diff --git a/internal/lsp/testdata/format/bad_format.go.in b/internal/lsp/testdata/format/bad_format.go.in deleted file mode 100644 index 06187238ebe..00000000000 --- a/internal/lsp/testdata/format/bad_format.go.in +++ /dev/null @@ -1,22 +0,0 @@ -package format //@format("package") - -import ( - "runtime" - "fmt" - "log" -) - -func hello() { - - - - - var x int //@diag("x", "compiler", "x declared but not used", "error") -} - -func hi() { - runtime.GOROOT() - fmt.Printf("") - - log.Printf("") -} diff --git a/internal/lsp/testdata/generated/generated.go b/internal/lsp/testdata/generated/generated.go deleted file mode 100644 index c92bd9eb8c3..00000000000 --- a/internal/lsp/testdata/generated/generated.go +++ /dev/null @@ -1,7 +0,0 @@ -package generated - -// Code generated by generator.go. DO NOT EDIT. - -func _() { - var y int //@diag("y", "compiler", "y declared but not used", "error") -} diff --git a/internal/lsp/testdata/generated/generator.go b/internal/lsp/testdata/generated/generator.go deleted file mode 100644 index f26e33c8064..00000000000 --- a/internal/lsp/testdata/generated/generator.go +++ /dev/null @@ -1,5 +0,0 @@ -package generated - -func _() { - var x int //@diag("x", "compiler", "x declared but not used", "error") -} diff --git a/internal/lsp/testdata/godef/a/a.go b/internal/lsp/testdata/godef/a/a.go deleted file mode 100644 index 5cc85527aeb..00000000000 --- a/internal/lsp/testdata/godef/a/a.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package a is a package for testing go to definition. -package a //@mark(aPackage, "a "),hoverdef("a ", aPackage) - -import ( - "fmt" - "go/types" - "sync" -) - -var ( - // x is a variable. - x string //@x,hoverdef("x", x) -) - -// Constant block. When I hover on h, I should see this comment. -const ( - // When I hover on g, I should see this comment. - g = 1 //@g,hoverdef("g", g) - - h = 2 //@h,hoverdef("h", h) -) - -// z is a variable too. -var z string //@z,hoverdef("z", z) - -type A string //@mark(AString, "A") - -func AStuff() { //@AStuff - x := 5 - Random2(x) //@godef("dom2", Random2) - Random() //@godef("()", Random) - - var err error //@err - fmt.Printf("%v", err) //@godef("err", err) - - var y string //@string,hoverdef("string", string) - _ = make([]int, 0) //@make,hoverdef("make", make) - - var mu sync.Mutex - mu.Lock() //@Lock,hoverdef("Lock", Lock) - - var typ *types.Named //@mark(typesImport, "types"),hoverdef("types", typesImport) - typ.Obj().Name() //@Name,hoverdef("Name", Name) -} - -type A struct { -} - -func (_ A) Hi() {} //@mark(AHi, "Hi") - -type S struct { - Field int //@mark(AField, "Field") - R // embed a struct - H // embed an interface -} - -type R struct { - Field2 int //@mark(AField2, "Field2") -} - -func (_ R) Hey() {} //@mark(AHey, "Hey") - -type H interface { - Goodbye() //@mark(AGoodbye, "Goodbye") -} - -type I interface { - B() //@mark(AB, "B") - J -} - -type J interface { - Hello() //@mark(AHello, "Hello") -} - -func _() { - // 1st type declaration block - type ( - a struct { //@mark(declBlockA, "a"),hoverdef("a", declBlockA) - x string - } - ) - - // 2nd type declaration block - type ( - // b has a comment - b struct{} //@mark(declBlockB, "b"),hoverdef("b", declBlockB) - ) - - // 3rd type declaration block - type ( - // c is a struct - c struct { //@mark(declBlockC, "c"),hoverdef("c", declBlockC) - f string - } - - d string //@mark(declBlockD, "d"),hoverdef("d", declBlockD) - ) - - type ( - e struct { //@mark(declBlockE, "e"),hoverdef("e", declBlockE) - f float64 - } // e has a comment - ) -} diff --git a/internal/lsp/testdata/godef/a/a.go.golden b/internal/lsp/testdata/godef/a/a.go.golden deleted file mode 100644 index 9f67a147d14..00000000000 --- a/internal/lsp/testdata/godef/a/a.go.golden +++ /dev/null @@ -1,190 +0,0 @@ --- Lock-hoverdef -- -```go -func (*sync.Mutex).Lock() -``` - -Lock locks m\. - -[`(sync.Mutex).Lock` on pkg.go.dev](https://pkg.go.dev/sync?utm_source=gopls#Mutex.Lock) --- Name-hoverdef -- -```go -func (*types.object).Name() string -``` - -Name returns the object\'s \(package\-local, unqualified\) name\. - -[`(types.TypeName).Name` on pkg.go.dev](https://pkg.go.dev/go/types?utm_source=gopls#TypeName.Name) --- Random-definition -- -godef/a/random.go:3:6-12: defined here as ```go -func Random() int -``` - -[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random) --- Random-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 3, - "column": 6, - "offset": 16 - }, - "end": { - "line": 3, - "column": 12, - "offset": 22 - } - }, - "description": "```go\nfunc Random() int\n```\n\n[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random)" -} - --- Random-hoverdef -- -```go -func Random() int -``` - -[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random) --- Random2-definition -- -godef/a/random.go:8:6-13: defined here as ```go -func Random2(y int) int -``` - -[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2) --- Random2-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 8, - "column": 6, - "offset": 71 - }, - "end": { - "line": 8, - "column": 13, - "offset": 78 - } - }, - "description": "```go\nfunc Random2(y int) int\n```\n\n[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2)" -} - --- Random2-hoverdef -- -```go -func Random2(y int) int -``` - -[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2) --- aPackage-hoverdef -- -Package a is a package for testing go to definition\. --- declBlockA-hoverdef -- -```go -type a struct { - x string -} -``` - -1st type declaration block --- declBlockB-hoverdef -- -```go -type b struct{} -``` - -b has a comment --- declBlockC-hoverdef -- -```go -type c struct { - f string -} -``` - -c is a struct --- declBlockD-hoverdef -- -```go -type d string -``` - -3rd type declaration block --- declBlockE-hoverdef -- -```go -type e struct { - f float64 -} -``` - -e has a comment --- err-definition -- -godef/a/a.go:33:6-9: defined here as ```go -var err error -``` - -\@err --- err-definition-json -- -{ - "span": { - "uri": "file://godef/a/a.go", - "start": { - "line": 33, - "column": 6, - "offset": 612 - }, - "end": { - "line": 33, - "column": 9, - "offset": 615 - } - }, - "description": "```go\nvar err error\n```\n\n\\@err" -} - --- err-hoverdef -- -```go -var err error -``` - -\@err --- g-hoverdef -- -```go -const g untyped int = 1 -``` - -When I hover on g, I should see this comment\. --- h-hoverdef -- -```go -const h untyped int = 2 -``` - -Constant block\. --- make-hoverdef -- -```go -func make(t Type, size ...int) Type -``` - -The make built\-in function allocates and initializes an object of type slice, map, or chan \(only\)\. - -[`make` on pkg.go.dev](https://pkg.go.dev/builtin?utm_source=gopls#make) --- string-hoverdef -- -```go -type string string -``` - -string is the set of all strings of 8\-bit bytes, conventionally but not necessarily representing UTF\-8\-encoded text\. - -[`string` on pkg.go.dev](https://pkg.go.dev/builtin?utm_source=gopls#string) --- typesImport-hoverdef -- -```go -package types ("go/types") -``` - -[`types` on pkg.go.dev](https://pkg.go.dev/go/types?utm_source=gopls) --- x-hoverdef -- -```go -var x string -``` - -x is a variable\. --- z-hoverdef -- -```go -var z string -``` - -z is a variable too\. diff --git a/internal/lsp/testdata/godef/a/a_test.go b/internal/lsp/testdata/godef/a/a_test.go deleted file mode 100644 index 77bd633b6c0..00000000000 --- a/internal/lsp/testdata/godef/a/a_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package a - -import ( - "testing" -) - -func TestA(t *testing.T) { //@TestA,godef(TestA, TestA) -} diff --git a/internal/lsp/testdata/godef/a/a_test.go.golden b/internal/lsp/testdata/godef/a/a_test.go.golden deleted file mode 100644 index e5cb3d799cc..00000000000 --- a/internal/lsp/testdata/godef/a/a_test.go.golden +++ /dev/null @@ -1,26 +0,0 @@ --- TestA-definition -- -godef/a/a_test.go:7:6-11: defined here as ```go -func TestA(t *testing.T) -``` --- TestA-definition-json -- -{ - "span": { - "uri": "file://godef/a/a_test.go", - "start": { - "line": 7, - "column": 6, - "offset": 39 - }, - "end": { - "line": 7, - "column": 11, - "offset": 44 - } - }, - "description": "```go\nfunc TestA(t *testing.T)\n```" -} - --- TestA-hoverdef -- -```go -func TestA(t *testing.T) -``` diff --git a/internal/lsp/testdata/godef/a/a_x_test.go b/internal/lsp/testdata/godef/a/a_x_test.go deleted file mode 100644 index 4631eba2c0a..00000000000 --- a/internal/lsp/testdata/godef/a/a_x_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package a_test - -import ( - "testing" -) - -func TestA2(t *testing.T) { //@TestA2,godef(TestA2, TestA2) - Nonexistant() //@diag("Nonexistant", "compiler", "undeclared name: Nonexistant", "error") -} diff --git a/internal/lsp/testdata/godef/a/d.go b/internal/lsp/testdata/godef/a/d.go deleted file mode 100644 index 2da8d058edf..00000000000 --- a/internal/lsp/testdata/godef/a/d.go +++ /dev/null @@ -1,43 +0,0 @@ -package a //@mark(a, "a "),hoverdef("a ", a) - -import "fmt" - -type Thing struct { //@Thing - Member string //@Member -} - -var Other Thing //@Other - -func Things(val []string) []Thing { //@Things - return nil -} - -func (t Thing) Method(i int) string { //@Method - return t.Member -} - -func useThings() { - t := Thing{ //@mark(aStructType, "ing") - Member: "string", //@mark(fMember, "ember") - } - fmt.Print(t.Member) //@mark(aMember, "ember") - fmt.Print(Other) //@mark(aVar, "ther") - Things() //@mark(aFunc, "ings") - t.Method() //@mark(aMethod, "eth") -} - -/*@ -godef(aStructType, Thing) -godef(aMember, Member) -godef(aVar, Other) -godef(aFunc, Things) -godef(aMethod, Method) -godef(fMember, Member) -godef(Member, Member) - -//param -//package name -//const -//anon field - -*/ diff --git a/internal/lsp/testdata/godef/a/d.go.golden b/internal/lsp/testdata/godef/a/d.go.golden deleted file mode 100644 index 47723b0453c..00000000000 --- a/internal/lsp/testdata/godef/a/d.go.golden +++ /dev/null @@ -1,164 +0,0 @@ --- Member-definition -- -godef/a/d.go:6:2-8: defined here as ```go -field Member string -``` - -\@Member - -[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member) --- Member-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 6, - "column": 2, - "offset": 90 - }, - "end": { - "line": 6, - "column": 8, - "offset": 96 - } - }, - "description": "```go\nfield Member string\n```\n\n\\@Member\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)" -} - --- Member-hoverdef -- -```go -field Member string -``` - -\@Member - -[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member) --- Method-definition -- -godef/a/d.go:15:16-22: defined here as ```go -func (Thing).Method(i int) string -``` - -[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method) --- Method-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 15, - "column": 16, - "offset": 219 - }, - "end": { - "line": 15, - "column": 22, - "offset": 225 - } - }, - "description": "```go\nfunc (Thing).Method(i int) string\n```\n\n[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method)" -} - --- Method-hoverdef -- -```go -func (Thing).Method(i int) string -``` - -[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method) --- Other-definition -- -godef/a/d.go:9:5-10: defined here as ```go -var Other Thing -``` - -\@Other - -[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other) --- Other-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 9, - "column": 5, - "offset": 121 - }, - "end": { - "line": 9, - "column": 10, - "offset": 126 - } - }, - "description": "```go\nvar Other Thing\n```\n\n\\@Other\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)" -} - --- Other-hoverdef -- -```go -var Other Thing -``` - -\@Other - -[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other) --- Thing-definition -- -godef/a/d.go:5:6-11: defined here as ```go -type Thing struct { - Member string //@Member -} -``` - -[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing) --- Thing-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 5, - "column": 6, - "offset": 65 - }, - "end": { - "line": 5, - "column": 11, - "offset": 70 - } - }, - "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)" -} - --- Thing-hoverdef -- -```go -type Thing struct { - Member string //@Member -} -``` - -[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing) --- Things-definition -- -godef/a/d.go:11:6-12: defined here as ```go -func Things(val []string) []Thing -``` - -[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things) --- Things-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 11, - "column": 6, - "offset": 148 - }, - "end": { - "line": 11, - "column": 12, - "offset": 154 - } - }, - "description": "```go\nfunc Things(val []string) []Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)" -} - --- Things-hoverdef -- -```go -func Things(val []string) []Thing -``` - -[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things) --- a-hoverdef -- -Package a is a package for testing go to definition\. diff --git a/internal/lsp/testdata/godef/a/g.go.golden b/internal/lsp/testdata/godef/a/g.go.golden deleted file mode 100644 index b7ed7392806..00000000000 --- a/internal/lsp/testdata/godef/a/g.go.golden +++ /dev/null @@ -1,6 +0,0 @@ --- dur-hoverdef -- -```go -const dur time.Duration = 910350000000 // 15m10.35s -``` - -dur is a constant of type time\.Duration\. diff --git a/internal/lsp/testdata/godef/a/h.go.golden b/internal/lsp/testdata/godef/a/h.go.golden deleted file mode 100644 index 4b27211e9aa..00000000000 --- a/internal/lsp/testdata/godef/a/h.go.golden +++ /dev/null @@ -1,136 +0,0 @@ --- arrD-hoverdef -- -```go -field d int -``` - -d field --- arrE-hoverdef -- -```go -field e struct{f int} -``` - -e nested struct --- arrF-hoverdef -- -```go -field f int -``` - -f field of nested struct --- complexH-hoverdef -- -```go -field h int -``` - -h field --- complexI-hoverdef -- -```go -field i struct{j int} -``` - -i nested struct --- complexJ-hoverdef -- -```go -field j int -``` - -j field of nested struct --- mapStructKeyX-hoverdef -- -```go -field x []string -``` - -X key field --- mapStructKeyY-hoverdef -- -```go -field y string -``` --- mapStructValueX-hoverdef -- -```go -field x string -``` - -X value field --- nestedMap-hoverdef -- -```go -field m map[string]float64 -``` - -nested map --- nestedNumber-hoverdef -- -```go -field number int64 -``` - -nested number --- nestedString-hoverdef -- -```go -field str string -``` - -nested string --- openMethod-hoverdef -- -```go -func (interface).open() error -``` - -open method comment --- returnX-hoverdef -- -```go -field x int -``` - -X coord --- returnY-hoverdef -- -```go -field y int -``` - -Y coord --- structA-hoverdef -- -```go -field a int -``` - -a field --- structB-hoverdef -- -```go -field b struct{c int} -``` - -b nested struct --- structC-hoverdef -- -```go -field c int -``` - -c field of nested struct --- testDescription-hoverdef -- -```go -field desc string -``` - -test description --- testInput-hoverdef -- -```go -field in map[string][]struct{key string; value interface{}} -``` - -test input --- testInputKey-hoverdef -- -```go -field key string -``` - -test key --- testInputValue-hoverdef -- -```go -field value interface{} -``` - -test value --- testResultValue-hoverdef -- -```go -field value int -``` - -expected test value diff --git a/internal/lsp/testdata/godef/a/random.go b/internal/lsp/testdata/godef/a/random.go deleted file mode 100644 index 62055c1fcec..00000000000 --- a/internal/lsp/testdata/godef/a/random.go +++ /dev/null @@ -1,31 +0,0 @@ -package a - -func Random() int { //@Random - y := 6 + 7 - return y -} - -func Random2(y int) int { //@Random2,mark(RandomParamY, "y") - return y //@godef("y", RandomParamY) -} - -type Pos struct { - x, y int //@mark(PosX, "x"),mark(PosY, "y") -} - -// Typ has a comment. Its fields do not. -type Typ struct{ field string } //@mark(TypField, "field") - -func _() { - x := &Typ{} - x.field //@godef("field", TypField) -} - -func (p *Pos) Sum() int { //@mark(PosSum, "Sum") - return p.x + p.y //@godef("x", PosX) -} - -func _() { - var p Pos - _ = p.Sum() //@godef("()", PosSum) -} diff --git a/internal/lsp/testdata/godef/a/random.go.golden b/internal/lsp/testdata/godef/a/random.go.golden deleted file mode 100644 index 381a11acee8..00000000000 --- a/internal/lsp/testdata/godef/a/random.go.golden +++ /dev/null @@ -1,112 +0,0 @@ --- PosSum-definition -- -godef/a/random.go:24:15-18: defined here as ```go -func (*Pos).Sum() int -``` - -[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum) --- PosSum-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 24, - "column": 15, - "offset": 413 - }, - "end": { - "line": 24, - "column": 18, - "offset": 416 - } - }, - "description": "```go\nfunc (*Pos).Sum() int\n```\n\n[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum)" -} - --- PosSum-hoverdef -- -```go -func (*Pos).Sum() int -``` - -[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum) --- PosX-definition -- -godef/a/random.go:13:2-3: defined here as ```go -field x int -``` - -\@mark\(PosX, \"x\"\),mark\(PosY, \"y\"\) --- PosX-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 13, - "column": 2, - "offset": 187 - }, - "end": { - "line": 13, - "column": 3, - "offset": 188 - } - }, - "description": "```go\nfield x int\n```\n\n\\@mark\\(PosX, \\\"x\\\"\\),mark\\(PosY, \\\"y\\\"\\)" -} - --- PosX-hoverdef -- -```go -field x int -``` - -\@mark\(PosX, \"x\"\),mark\(PosY, \"y\"\) --- RandomParamY-definition -- -godef/a/random.go:8:14-15: defined here as ```go -var y int -``` --- RandomParamY-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 8, - "column": 14, - "offset": 79 - }, - "end": { - "line": 8, - "column": 15, - "offset": 80 - } - }, - "description": "```go\nvar y int\n```" -} - --- RandomParamY-hoverdef -- -```go -var y int -``` --- TypField-definition -- -godef/a/random.go:17:18-23: defined here as ```go -field field string -``` --- TypField-definition-json -- -{ - "span": { - "uri": "file://godef/a/random.go", - "start": { - "line": 17, - "column": 18, - "offset": 292 - }, - "end": { - "line": 17, - "column": 23, - "offset": 297 - } - }, - "description": "```go\nfield field string\n```" -} - --- TypField-hoverdef -- -```go -field field string -``` diff --git a/internal/lsp/testdata/godef/b/b.go b/internal/lsp/testdata/godef/b/b.go deleted file mode 100644 index f9c1d64024b..00000000000 --- a/internal/lsp/testdata/godef/b/b.go +++ /dev/null @@ -1,57 +0,0 @@ -package b - -import ( - myFoo "golang.org/x/tools/internal/lsp/foo" //@mark(myFoo, "myFoo"),godef("myFoo", myFoo) - "golang.org/x/tools/internal/lsp/godef/a" //@mark(AImport, re"\".*\"") -) - -type Embed struct { - *a.A - a.I - a.S -} - -func _() { - e := Embed{} - e.Hi() //@hoverdef("Hi", AHi) - e.B() //@hoverdef("B", AB) - e.Field //@hoverdef("Field", AField) - e.Field2 //@hoverdef("Field2", AField2) - e.Hello() //@hoverdef("Hello", AHello) - e.Hey() //@hoverdef("Hey", AHey) - e.Goodbye() //@hoverdef("Goodbye", AGoodbye) -} - -type aAlias = a.A //@mark(aAlias, "aAlias") - -type S1 struct { //@S1 - F1 int //@mark(S1F1, "F1") - S2 //@godef("S2", S2),mark(S1S2, "S2") - a.A //@godef("A", AString) - aAlias //@godef("a", aAlias) -} - -type S2 struct { //@S2 - F1 string //@mark(S2F1, "F1") - F2 int //@mark(S2F2, "F2") - *a.A //@godef("A", AString),godef("a",AImport) -} - -type S3 struct { - F1 struct { - a.A //@godef("A", AString) - } -} - -func Bar() { - a.AStuff() //@godef("AStuff", AStuff) - var x S1 //@godef("S1", S1) - _ = x.S2 //@godef("S2", S1S2) - _ = x.F1 //@godef("F1", S1F1) - _ = x.F2 //@godef("F2", S2F2) - _ = x.S2.F1 //@godef("F1", S2F1) - - var _ *myFoo.StructFoo //@godef("myFoo", myFoo) -} - -const X = 0 //@mark(bX, "X"),godef("X", bX) diff --git a/internal/lsp/testdata/godef/b/b.go.golden b/internal/lsp/testdata/godef/b/b.go.golden deleted file mode 100644 index 5f7669b77ca..00000000000 --- a/internal/lsp/testdata/godef/b/b.go.golden +++ /dev/null @@ -1,454 +0,0 @@ --- AB-hoverdef -- -```go -func (a.I).B() -``` - -\@mark\(AB, \"B\"\) - -[`(a.I).B` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#I.B) --- AField-hoverdef -- -```go -field Field int -``` - -\@mark\(AField, \"Field\"\) - -[`(a.S).Field` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#S.Field) --- AField2-hoverdef -- -```go -field Field2 int -``` - -\@mark\(AField2, \"Field2\"\) - -[`(a.R).Field2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#R.Field2) --- AGoodbye-hoverdef -- -```go -func (a.H).Goodbye() -``` - -\@mark\(AGoodbye, \"Goodbye\"\) - -[`(a.H).Goodbye` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#H.Goodbye) --- AHello-hoverdef -- -```go -func (a.J).Hello() -``` - -\@mark\(AHello, \"Hello\"\) - -[`(a.J).Hello` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#J.Hello) --- AHey-hoverdef -- -```go -func (a.R).Hey() -``` - -[`(a.R).Hey` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#R.Hey) --- AHi-hoverdef -- -```go -func (a.A).Hi() -``` - -[`(a.A).Hi` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A.Hi) --- AImport-definition -- -godef/b/b.go:5:2-43: defined here as ```go -package a ("golang.org/x/tools/internal/lsp/godef/a") -``` - -[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls) --- AImport-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 5, - "column": 2, - "offset": 112 - }, - "end": { - "line": 5, - "column": 43, - "offset": 153 - } - }, - "description": "```go\npackage a (\"golang.org/x/tools/internal/lsp/godef/a\")\n```\n\n[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls)" -} - --- AImport-hoverdef -- -```go -package a ("golang.org/x/tools/internal/lsp/godef/a") -``` - -[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls) --- AString-definition -- -godef/a/a.go:26:6-7: defined here as ```go -type A string -``` - -\@mark\(AString, \"A\"\) - -[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A) --- AString-definition-json -- -{ - "span": { - "uri": "file://godef/a/a.go", - "start": { - "line": 26, - "column": 6, - "offset": 467 - }, - "end": { - "line": 26, - "column": 7, - "offset": 468 - } - }, - "description": "```go\ntype A string\n```\n\n\\@mark\\(AString, \\\"A\\\"\\)\n\n[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A)" -} - --- AString-hoverdef -- -```go -type A string -``` - -\@mark\(AString, \"A\"\) - -[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A) --- AStuff-definition -- -godef/a/a.go:28:6-12: defined here as ```go -func a.AStuff() -``` - -[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff) --- AStuff-definition-json -- -{ - "span": { - "uri": "file://godef/a/a.go", - "start": { - "line": 28, - "column": 6, - "offset": 504 - }, - "end": { - "line": 28, - "column": 12, - "offset": 510 - } - }, - "description": "```go\nfunc a.AStuff()\n```\n\n[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)" -} - --- AStuff-hoverdef -- -```go -func a.AStuff() -``` - -[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff) --- S1-definition -- -godef/b/b.go:27:6-8: defined here as ```go -type S1 struct { - F1 int //@mark(S1F1, "F1") - S2 //@godef("S2", S2),mark(S1S2, "S2") - a.A //@godef("A", AString) - aAlias //@godef("a", aAlias) -} -``` - -[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1) --- S1-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 27, - "column": 6, - "offset": 587 - }, - "end": { - "line": 27, - "column": 8, - "offset": 589 - } - }, - "description": "```go\ntype S1 struct {\n\tF1 int //@mark(S1F1, \"F1\")\n\tS2 //@godef(\"S2\", S2),mark(S1S2, \"S2\")\n\ta.A //@godef(\"A\", AString)\n\taAlias //@godef(\"a\", aAlias)\n}\n```\n\n[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)" -} - --- S1-hoverdef -- -```go -type S1 struct { - F1 int //@mark(S1F1, "F1") - S2 //@godef("S2", S2),mark(S1S2, "S2") - a.A //@godef("A", AString) - aAlias //@godef("a", aAlias) -} -``` - -[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1) --- S1F1-definition -- -godef/b/b.go:28:2-4: defined here as ```go -field F1 int -``` - -\@mark\(S1F1, \"F1\"\) - -[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1) --- S1F1-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 28, - "column": 2, - "offset": 606 - }, - "end": { - "line": 28, - "column": 4, - "offset": 608 - } - }, - "description": "```go\nfield F1 int\n```\n\n\\@mark\\(S1F1, \\\"F1\\\"\\)\n\n[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)" -} - --- S1F1-hoverdef -- -```go -field F1 int -``` - -\@mark\(S1F1, \"F1\"\) - -[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1) --- S1S2-definition -- -godef/b/b.go:29:2-4: defined here as ```go -field S2 S2 -``` - -\@godef\(\"S2\", S2\),mark\(S1S2, \"S2\"\) - -[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2) --- S1S2-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 29, - "column": 2, - "offset": 638 - }, - "end": { - "line": 29, - "column": 4, - "offset": 640 - } - }, - "description": "```go\nfield S2 S2\n```\n\n\\@godef\\(\\\"S2\\\", S2\\),mark\\(S1S2, \\\"S2\\\"\\)\n\n[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2)" -} - --- S1S2-hoverdef -- -```go -field S2 S2 -``` - -\@godef\(\"S2\", S2\),mark\(S1S2, \"S2\"\) - -[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2) --- S2-definition -- -godef/b/b.go:34:6-8: defined here as ```go -type S2 struct { - F1 string //@mark(S2F1, "F1") - F2 int //@mark(S2F2, "F2") - *a.A //@godef("A", AString),godef("a",AImport) -} -``` - -[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2) --- S2-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 34, - "column": 6, - "offset": 762 - }, - "end": { - "line": 34, - "column": 8, - "offset": 764 - } - }, - "description": "```go\ntype S2 struct {\n\tF1 string //@mark(S2F1, \"F1\")\n\tF2 int //@mark(S2F2, \"F2\")\n\t*a.A //@godef(\"A\", AString),godef(\"a\",AImport)\n}\n```\n\n[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2)" -} - --- S2-hoverdef -- -```go -type S2 struct { - F1 string //@mark(S2F1, "F1") - F2 int //@mark(S2F2, "F2") - *a.A //@godef("A", AString),godef("a",AImport) -} -``` - -[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2) --- S2F1-definition -- -godef/b/b.go:35:2-4: defined here as ```go -field F1 string -``` - -\@mark\(S2F1, \"F1\"\) - -[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1) --- S2F1-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 35, - "column": 2, - "offset": 781 - }, - "end": { - "line": 35, - "column": 4, - "offset": 783 - } - }, - "description": "```go\nfield F1 string\n```\n\n\\@mark\\(S2F1, \\\"F1\\\"\\)\n\n[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1)" -} - --- S2F1-hoverdef -- -```go -field F1 string -``` - -\@mark\(S2F1, \"F1\"\) - -[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1) --- S2F2-definition -- -godef/b/b.go:36:2-4: defined here as ```go -field F2 int -``` - -\@mark\(S2F2, \"F2\"\) - -[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2) --- S2F2-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 36, - "column": 2, - "offset": 814 - }, - "end": { - "line": 36, - "column": 4, - "offset": 816 - } - }, - "description": "```go\nfield F2 int\n```\n\n\\@mark\\(S2F2, \\\"F2\\\"\\)\n\n[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2)" -} - --- S2F2-hoverdef -- -```go -field F2 int -``` - -\@mark\(S2F2, \"F2\"\) - -[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2) --- aAlias-definition -- -godef/b/b.go:25:6-12: defined here as ```go -type aAlias = a.A -``` - -\@mark\(aAlias, \"aAlias\"\) --- aAlias-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 25, - "column": 6, - "offset": 542 - }, - "end": { - "line": 25, - "column": 12, - "offset": 548 - } - }, - "description": "```go\ntype aAlias = a.A\n```\n\n\\@mark\\(aAlias, \\\"aAlias\\\"\\)" -} - --- aAlias-hoverdef -- -```go -type aAlias = a.A -``` - -\@mark\(aAlias, \"aAlias\"\) --- bX-definition -- -godef/b/b.go:57:7-8: defined here as ```go -const X untyped int = 0 -``` - -\@mark\(bX, \"X\"\),godef\(\"X\", bX\) - -[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X) --- bX-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 57, - "column": 7, - "offset": 1249 - }, - "end": { - "line": 57, - "column": 8, - "offset": 1250 - } - }, - "description": "```go\nconst X untyped int = 0\n```\n\n\\@mark\\(bX, \\\"X\\\"\\),godef\\(\\\"X\\\", bX\\)\n\n[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X)" -} - --- bX-hoverdef -- -```go -const X untyped int = 0 -``` - -\@mark\(bX, \"X\"\),godef\(\"X\", bX\) - -[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X) --- myFoo-definition -- -godef/b/b.go:4:2-7: defined here as ```go -package myFoo ("golang.org/x/tools/internal/lsp/foo") -``` - -[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls) --- myFoo-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 4, - "column": 2, - "offset": 21 - }, - "end": { - "line": 4, - "column": 7, - "offset": 26 - } - }, - "description": "```go\npackage myFoo (\"golang.org/x/tools/internal/lsp/foo\")\n```\n\n[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls)" -} - --- myFoo-hoverdef -- -```go -package myFoo ("golang.org/x/tools/internal/lsp/foo") -``` - -[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls) diff --git a/internal/lsp/testdata/godef/b/c.go b/internal/lsp/testdata/godef/b/c.go deleted file mode 100644 index c8daf62422a..00000000000 --- a/internal/lsp/testdata/godef/b/c.go +++ /dev/null @@ -1,8 +0,0 @@ -package b - -// This is the in-editor version of the file. -// The on-disk version is in c.go.saved. - -var _ = S1{ //@godef("S1", S1) - F1: 99, //@godef("F1", S1F1) -} diff --git a/internal/lsp/testdata/godef/b/c.go.golden b/internal/lsp/testdata/godef/b/c.go.golden deleted file mode 100644 index e6205b7265c..00000000000 --- a/internal/lsp/testdata/godef/b/c.go.golden +++ /dev/null @@ -1,74 +0,0 @@ --- S1-definition -- -godef/b/b.go:27:6-8: defined here as ```go -type S1 struct { - F1 int //@mark(S1F1, "F1") - S2 //@godef("S2", S2),mark(S1S2, "S2") - a.A //@godef("A", AString) - aAlias //@godef("a", aAlias) -} -``` - -[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1) --- S1-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 27, - "column": 6, - "offset": 587 - }, - "end": { - "line": 27, - "column": 8, - "offset": 589 - } - }, - "description": "```go\ntype S1 struct {\n\tF1 int //@mark(S1F1, \"F1\")\n\tS2 //@godef(\"S2\", S2),mark(S1S2, \"S2\")\n\ta.A //@godef(\"A\", AString)\n\taAlias //@godef(\"a\", aAlias)\n}\n```\n\n[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)" -} - --- S1-hoverdef -- -```go -type S1 struct { - F1 int //@mark(S1F1, "F1") - S2 //@godef("S2", S2),mark(S1S2, "S2") - a.A //@godef("A", AString) - aAlias //@godef("a", aAlias) -} -``` - -[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1) --- S1F1-definition -- -godef/b/b.go:28:2-4: defined here as ```go -field F1 int -``` - -\@mark\(S1F1, \"F1\"\) - -[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1) --- S1F1-definition-json -- -{ - "span": { - "uri": "file://godef/b/b.go", - "start": { - "line": 28, - "column": 2, - "offset": 606 - }, - "end": { - "line": 28, - "column": 4, - "offset": 608 - } - }, - "description": "```go\nfield F1 int\n```\n\n\\@mark\\(S1F1, \\\"F1\\\"\\)\n\n[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)" -} - --- S1F1-hoverdef -- -```go -field F1 int -``` - -\@mark\(S1F1, \"F1\"\) - -[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1) diff --git a/internal/lsp/testdata/godef/b/c.go.saved b/internal/lsp/testdata/godef/b/c.go.saved deleted file mode 100644 index ff1a8794b48..00000000000 --- a/internal/lsp/testdata/godef/b/c.go.saved +++ /dev/null @@ -1,7 +0,0 @@ -package b - -// This is the on-disk version of c.go, which represents -// the in-editor version of the file. - -} - diff --git a/internal/lsp/testdata/godef/b/e.go.golden b/internal/lsp/testdata/godef/b/e.go.golden deleted file mode 100644 index f9af7b74317..00000000000 --- a/internal/lsp/testdata/godef/b/e.go.golden +++ /dev/null @@ -1,144 +0,0 @@ --- Member-definition -- -godef/a/d.go:6:2-8: defined here as ```go -field Member string -``` - -\@Member - -[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member) --- Member-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 6, - "column": 2, - "offset": 90 - }, - "end": { - "line": 6, - "column": 8, - "offset": 96 - } - }, - "description": "```go\nfield Member string\n```\n\n\\@Member\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)" -} - --- Member-hoverdef -- -```go -field Member string -``` - -\@Member - -[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member) --- Other-definition -- -godef/a/d.go:9:5-10: defined here as ```go -var a.Other a.Thing -``` - -\@Other - -[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other) --- Other-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 9, - "column": 5, - "offset": 121 - }, - "end": { - "line": 9, - "column": 10, - "offset": 126 - } - }, - "description": "```go\nvar a.Other a.Thing\n```\n\n\\@Other\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)" -} - --- Other-hoverdef -- -```go -var a.Other a.Thing -``` - -\@Other - -[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other) --- Thing-definition -- -godef/a/d.go:5:6-11: defined here as ```go -type Thing struct { - Member string //@Member -} -``` - -[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing) --- Thing-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 5, - "column": 6, - "offset": 65 - }, - "end": { - "line": 5, - "column": 11, - "offset": 70 - } - }, - "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)" -} - --- Thing-hoverdef -- -```go -type Thing struct { - Member string //@Member -} -``` - -[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing) --- Things-definition -- -godef/a/d.go:11:6-12: defined here as ```go -func a.Things(val []string) []a.Thing -``` - -[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things) --- Things-definition-json -- -{ - "span": { - "uri": "file://godef/a/d.go", - "start": { - "line": 11, - "column": 6, - "offset": 148 - }, - "end": { - "line": 11, - "column": 12, - "offset": 154 - } - }, - "description": "```go\nfunc a.Things(val []string) []a.Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)" -} - --- Things-hoverdef -- -```go -func a.Things(val []string) []a.Thing -``` - -[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things) --- eInt-hoverdef -- -```go -var x int -``` --- eInterface-hoverdef -- -```go -var x interface{} -``` --- eString-hoverdef -- -```go -var x string -``` diff --git a/internal/lsp/testdata/godef/b/h.go b/internal/lsp/testdata/godef/b/h.go deleted file mode 100644 index c8cbe850f9c..00000000000 --- a/internal/lsp/testdata/godef/b/h.go +++ /dev/null @@ -1,10 +0,0 @@ -package b - -import . "golang.org/x/tools/internal/lsp/godef/a" - -func _() { - // variable of type a.A - var _ A //@mark(AVariable, "_"),hoverdef("_", AVariable) - - AStuff() //@hoverdef("AStuff", AStuff) -} diff --git a/internal/lsp/testdata/godef/b/h.go.golden b/internal/lsp/testdata/godef/b/h.go.golden deleted file mode 100644 index f32f0264f8f..00000000000 --- a/internal/lsp/testdata/godef/b/h.go.golden +++ /dev/null @@ -1,12 +0,0 @@ --- AStuff-hoverdef -- -```go -func AStuff() -``` - -[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff) --- AVariable-hoverdef -- -```go -var _ A -``` - -variable of type a\.A diff --git a/internal/lsp/testdata/godef/hover_generics/hover.go b/internal/lsp/testdata/godef/hover_generics/hover.go deleted file mode 100644 index 7400e1acdd8..00000000000 --- a/internal/lsp/testdata/godef/hover_generics/hover.go +++ /dev/null @@ -1,15 +0,0 @@ -package hover - -type value[T any] struct { //@mark(value, "value"),hoverdef("value", value),mark(valueTdecl, "T"),hoverdef("T",valueTdecl) - val T //@mark(valueTparam, "T"),hoverdef("T", valueTparam) - Q int //@mark(valueQfield, "Q"),hoverdef("Q", valueQfield) -} - -type Value[T any] struct { //@mark(ValueTdecl, "T"),hoverdef("T",ValueTdecl) - val T //@mark(ValueTparam, "T"),hoverdef("T", ValueTparam) - Q int //@mark(ValueQfield, "Q"),hoverdef("Q", ValueQfield) -} - -func F[P interface{ ~int | string }]() { //@mark(Pparam, "P"),hoverdef("P",Pparam) - var _ P //@mark(Pvar, "P"),hoverdef("P",Pvar) -} diff --git a/internal/lsp/testdata/godef/hover_generics/hover.go.golden b/internal/lsp/testdata/godef/hover_generics/hover.go.golden deleted file mode 100644 index cfebcc472c9..00000000000 --- a/internal/lsp/testdata/godef/hover_generics/hover.go.golden +++ /dev/null @@ -1,45 +0,0 @@ --- Pparam-hoverdef -- -```go -type parameter P interface{~int|string} -``` --- Pvar-hoverdef -- -```go -type parameter P interface{~int|string} -``` --- ValueQfield-hoverdef -- -```go -field Q int -``` - -\@mark\(ValueQfield, \"Q\"\),hoverdef\(\"Q\", ValueQfield\) - -[`(hover.Value).Q` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/hover_generics?utm_source=gopls#Value.Q) --- ValueTdecl-hoverdef -- -```go -type parameter T any -``` --- ValueTparam-hoverdef -- -```go -type parameter T any -``` --- value-hoverdef -- -```go -type value[T any] struct { - val T //@mark(valueTparam, "T"),hoverdef("T", valueTparam) - Q int //@mark(valueQfield, "Q"),hoverdef("Q", valueQfield) -} -``` --- valueQfield-hoverdef -- -```go -field Q int -``` - -\@mark\(valueQfield, \"Q\"\),hoverdef\(\"Q\", valueQfield\) --- valueTdecl-hoverdef -- -```go -type parameter T any -``` --- valueTparam-hoverdef -- -```go -type parameter T any -``` diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go b/internal/lsp/testdata/godef/infer_generics/inferred.go deleted file mode 100644 index 2d92a959083..00000000000 --- a/internal/lsp/testdata/godef/infer_generics/inferred.go +++ /dev/null @@ -1,12 +0,0 @@ -package inferred - -func app[S interface{ ~[]E }, E interface{}](s S, e E) S { - return append(s, e) -} - -func _() { - _ = app[[]int] //@mark(constrInfer, "app"),hoverdef("app", constrInfer) - _ = app[[]int, int] //@mark(instance, "app"),hoverdef("app", instance) - _ = app[[]int]([]int{}, 0) //@mark(partialInfer, "app"),hoverdef("app", partialInfer) - _ = app([]int{}, 0) //@mark(argInfer, "app"),hoverdef("app", argInfer) -} diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go.golden b/internal/lsp/testdata/godef/infer_generics/inferred.go.golden deleted file mode 100644 index 4a36ff460b6..00000000000 --- a/internal/lsp/testdata/godef/infer_generics/inferred.go.golden +++ /dev/null @@ -1,20 +0,0 @@ --- argInfer-hoverdef -- -```go -func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S -``` --- constrInf-hoverdef -- -```go -func app(s []int, e int) []int // func[S₁ interface{~[]Eā‚‚}, Eā‚‚ interface{}](s S₁, e Eā‚‚) S₁ -``` --- constrInfer-hoverdef -- -```go -func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S -``` --- instance-hoverdef -- -```go -func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S -``` --- partialInfer-hoverdef -- -```go -func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S -``` diff --git a/internal/lsp/testdata/implementation/implementation.go b/internal/lsp/testdata/implementation/implementation.go deleted file mode 100644 index c3229121a3d..00000000000 --- a/internal/lsp/testdata/implementation/implementation.go +++ /dev/null @@ -1,31 +0,0 @@ -package implementation - -import "golang.org/x/tools/internal/lsp/implementation/other" - -type ImpP struct{} //@ImpP,implementations("ImpP", Laugher, OtherLaugher) - -func (*ImpP) Laugh() { //@mark(LaughP, "Laugh"),implementations("Laugh", Laugh, OtherLaugh) -} - -type ImpS struct{} //@ImpS,implementations("ImpS", Laugher, OtherLaugher) - -func (ImpS) Laugh() { //@mark(LaughS, "Laugh"),implementations("Laugh", Laugh, OtherLaugh) -} - -type Laugher interface { //@Laugher,implementations("Laugher", ImpP, OtherImpP, ImpS, OtherImpS) - Laugh() //@Laugh,implementations("Laugh", LaughP, OtherLaughP, LaughS, OtherLaughS) -} - -type Foo struct { //@implementations("Foo", Joker) - other.Foo -} - -type Joker interface { //@Joker - Joke() //@Joke,implementations("Joke", ImpJoker) -} - -type cryer int //@implementations("cryer", Cryer) - -func (cryer) Cry(other.CryType) {} //@mark(CryImpl, "Cry"),implementations("Cry", Cry) - -type Empty interface{} //@implementations("Empty") diff --git a/internal/lsp/testdata/importedcomplit/imported_complit.go.in b/internal/lsp/testdata/importedcomplit/imported_complit.go.in deleted file mode 100644 index 80d85245cb4..00000000000 --- a/internal/lsp/testdata/importedcomplit/imported_complit.go.in +++ /dev/null @@ -1,42 +0,0 @@ -package importedcomplit - -import ( - "golang.org/x/tools/internal/lsp/foo" - - // import completions - "fm" //@complete("\" //", fmtImport) - "go/pars" //@complete("\" //", parserImport) - "golang.org/x/tools/internal/lsp/signa" //@complete("na\" //", signatureImport) - "golang.org/x/too" //@complete("\" //", toolsImport) - "crypto/elli" //@complete("\" //", cryptoImport) - "golang.org/x/tools/internal/lsp/sign" //@complete("\" //", signatureImport) - "golang.org/x/tools/internal/lsp/sign" //@complete("ols", toolsImport) - namedParser "go/pars" //@complete("\" //", parserImport) -) - -func _() { - var V int //@item(icVVar, "V", "int", "var") - _ = foo.StructFoo{V} //@complete("}", Value, icVVar) -} - -func _() { - var ( - aa string //@item(icAAVar, "aa", "string", "var") - ab int //@item(icABVar, "ab", "int", "var") - ) - - _ = foo.StructFoo{a} //@complete("}", abVar, aaVar) - - var s struct { - AA string //@item(icFieldAA, "AA", "string", "field") - AB int //@item(icFieldAB, "AB", "int", "field") - } - - _ = foo.StructFoo{s.} //@complete("}", icFieldAB, icFieldAA) -} - -/* "fmt" */ //@item(fmtImport, "fmt", "\"fmt\"", "package") -/* "go/parser" */ //@item(parserImport, "parser", "\"go/parser\"", "package") -/* "golang.org/x/tools/internal/lsp/signature" */ //@item(signatureImport, "signature", "\"golang.org/x/tools/internal/lsp/signature\"", "package") -/* "golang.org/x/tools/" */ //@item(toolsImport, "tools/", "\"golang.org/x/tools/\"", "package") -/* "crypto/elliptic" */ //@item(cryptoImport, "elliptic", "\"crypto/elliptic\"", "package") diff --git a/internal/lsp/testdata/keywords/keywords.go b/internal/lsp/testdata/keywords/keywords.go deleted file mode 100644 index 1fa2c12baa1..00000000000 --- a/internal/lsp/testdata/keywords/keywords.go +++ /dev/null @@ -1,100 +0,0 @@ -package keywords - -//@rank("", type),rank("", func),rank("", var),rank("", const),rank("", import) - -func _() { - var test int //@rank(" //", int, interface) - var tChan chan int - var _ m //@complete(" //", map) - var _ f //@complete(" //", func) - var _ c //@complete(" //", chan) - - var _ str //@rank(" //", string, struct) - - type _ int //@rank(" //", interface, int) - - type _ str //@rank(" //", struct, string) - - switch test { - case 1: // TODO: trying to complete case here will break because the parser wont return *ast.Ident - b //@complete(" //", break) - case 2: - f //@complete(" //", fallthrough, for) - r //@complete(" //", return) - d //@complete(" //", default, defer) - c //@complete(" //", case, const) - } - - switch test.(type) { - case fo: //@complete(":") - case int: - b //@complete(" //", break) - case int32: - f //@complete(" //", for) - d //@complete(" //", default, defer) - r //@complete(" //", return) - c //@complete(" //", case, const) - } - - select { - case <-tChan: - b //@complete(" //", break) - c //@complete(" //", case, const) - } - - for index := 0; index < test; index++ { - c //@complete(" //", const, continue) - b //@complete(" //", break) - } - - for range []int{} { - c //@complete(" //", const, continue) - b //@complete(" //", break) - } - - // Test function level keywords - - //Using 2 characters to test because map output order is random - sw //@complete(" //", switch) - se //@complete(" //", select) - - f //@complete(" //", for) - d //@complete(" //", defer) - g //@rank(" //", go),rank(" //", goto) - r //@complete(" //", return) - i //@complete(" //", if) - e //@complete(" //", else) - v //@complete(" //", var) - c //@complete(" //", const) - - for i := r //@complete(" //", range) -} - -/* package */ //@item(package, "package", "", "keyword") -/* import */ //@item(import, "import", "", "keyword") -/* func */ //@item(func, "func", "", "keyword") -/* type */ //@item(type, "type", "", "keyword") -/* var */ //@item(var, "var", "", "keyword") -/* const */ //@item(const, "const", "", "keyword") -/* break */ //@item(break, "break", "", "keyword") -/* default */ //@item(default, "default", "", "keyword") -/* case */ //@item(case, "case", "", "keyword") -/* defer */ //@item(defer, "defer", "", "keyword") -/* go */ //@item(go, "go", "", "keyword") -/* for */ //@item(for, "for", "", "keyword") -/* if */ //@item(if, "if", "", "keyword") -/* else */ //@item(else, "else", "", "keyword") -/* switch */ //@item(switch, "switch", "", "keyword") -/* select */ //@item(select, "select", "", "keyword") -/* fallthrough */ //@item(fallthrough, "fallthrough", "", "keyword") -/* continue */ //@item(continue, "continue", "", "keyword") -/* return */ //@item(return, "return", "", "keyword") -/* var */ //@item(var, "var", "", "keyword") -/* const */ //@item(const, "const", "", "keyword") -/* goto */ //@item(goto, "goto", "", "keyword") -/* struct */ //@item(struct, "struct", "", "keyword") -/* interface */ //@item(interface, "interface", "", "keyword") -/* map */ //@item(map, "map", "", "keyword") -/* func */ //@item(func, "func", "", "keyword") -/* chan */ //@item(chan, "chan", "", "keyword") -/* range */ //@item(range, "range", "", "keyword") diff --git a/internal/lsp/testdata/links/links.go b/internal/lsp/testdata/links/links.go deleted file mode 100644 index 89492bafebf..00000000000 --- a/internal/lsp/testdata/links/links.go +++ /dev/null @@ -1,26 +0,0 @@ -package links - -import ( - "fmt" //@link(`fmt`,"https://pkg.go.dev/fmt?utm_source=gopls") - - "golang.org/x/tools/internal/lsp/foo" //@link(`golang.org/x/tools/internal/lsp/foo`,`https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls`) - - _ "database/sql" //@link(`database/sql`, `https://pkg.go.dev/database/sql?utm_source=gopls`) -) - -var ( - _ fmt.Formatter - _ foo.StructFoo - _ errors.Formatter -) - -// Foo function -func Foo() string { - /*https://example.com/comment */ //@link("https://example.com/comment","https://example.com/comment") - - url := "https://example.com/string_literal" //@link("https://example.com/string_literal","https://example.com/string_literal") - return url - - // TODO(golang/go#1234): Link the relevant issue. //@link("golang/go#1234", "https://github.com/golang/go/issues/1234") - // TODO(microsoft/vscode-go#12): Another issue. //@link("microsoft/vscode-go#12", "https://github.com/microsoft/vscode-go/issues/12") -} diff --git a/internal/lsp/testdata/missingfunction/channels.go b/internal/lsp/testdata/missingfunction/channels.go deleted file mode 100644 index 436491c1949..00000000000 --- a/internal/lsp/testdata/missingfunction/channels.go +++ /dev/null @@ -1,9 +0,0 @@ -package missingfunction - -func channels(s string) { - undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix") -} - -func c() (<-chan string, chan string) { - return make(<-chan string), make(chan string) -} diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go b/internal/lsp/testdata/missingfunction/consecutive_params.go deleted file mode 100644 index d2ec3be3232..00000000000 --- a/internal/lsp/testdata/missingfunction/consecutive_params.go +++ /dev/null @@ -1,6 +0,0 @@ -package missingfunction - -func consecutiveParams() { - var s string - undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/error_param.go b/internal/lsp/testdata/missingfunction/error_param.go deleted file mode 100644 index 9fd943ffb6d..00000000000 --- a/internal/lsp/testdata/missingfunction/error_param.go +++ /dev/null @@ -1,6 +0,0 @@ -package missingfunction - -func errorParam() { - var err error - undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/literals.go b/internal/lsp/testdata/missingfunction/literals.go deleted file mode 100644 index e276eae79ec..00000000000 --- a/internal/lsp/testdata/missingfunction/literals.go +++ /dev/null @@ -1,7 +0,0 @@ -package missingfunction - -type T struct{} - -func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/literals.go.golden b/internal/lsp/testdata/missingfunction/literals.go.golden deleted file mode 100644 index 04782b9bf50..00000000000 --- a/internal/lsp/testdata/missingfunction/literals.go.golden +++ /dev/null @@ -1,29 +0,0 @@ --- suggestedfix_literals_10_2 -- -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package missingfunction - -type T struct{} - -func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") -} - -func undefinedLiterals(s string, t1 T, t2 *T) { - panic("implement me!") -} --- suggestedfix_literals_6_2 -- -package missingfunction - -type T struct{} - -func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") -} - -func undefinedLiterals(s string, t1 T, t2 *T) { - panic("unimplemented") -} - diff --git a/internal/lsp/testdata/missingfunction/operation.go b/internal/lsp/testdata/missingfunction/operation.go deleted file mode 100644 index 0408219fe37..00000000000 --- a/internal/lsp/testdata/missingfunction/operation.go +++ /dev/null @@ -1,7 +0,0 @@ -package missingfunction - -import "time" - -func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/operation.go.golden b/internal/lsp/testdata/missingfunction/operation.go.golden deleted file mode 100644 index 5e35f300534..00000000000 --- a/internal/lsp/testdata/missingfunction/operation.go.golden +++ /dev/null @@ -1,29 +0,0 @@ --- suggestedfix_operation_10_2 -- -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package missingfunction - -import "time" - -func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") -} - -func undefinedOperation(duration time.Duration) { - panic("implement me!") -} --- suggestedfix_operation_6_2 -- -package missingfunction - -import "time" - -func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") -} - -func undefinedOperation(duration time.Duration) { - panic("unimplemented") -} - diff --git a/internal/lsp/testdata/missingfunction/selector.go b/internal/lsp/testdata/missingfunction/selector.go deleted file mode 100644 index afd1ab61f3a..00000000000 --- a/internal/lsp/testdata/missingfunction/selector.go +++ /dev/null @@ -1,6 +0,0 @@ -package missingfunction - -func selector() { - m := map[int]bool{} - undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/slice.go b/internal/lsp/testdata/missingfunction/slice.go deleted file mode 100644 index 4a562a2e762..00000000000 --- a/internal/lsp/testdata/missingfunction/slice.go +++ /dev/null @@ -1,5 +0,0 @@ -package missingfunction - -func slice() { - undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/tuple.go b/internal/lsp/testdata/missingfunction/tuple.go deleted file mode 100644 index 1c4782c15dd..00000000000 --- a/internal/lsp/testdata/missingfunction/tuple.go +++ /dev/null @@ -1,9 +0,0 @@ -package missingfunction - -func tuple() { - undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix") -} - -func b() (string, error) { - return "", nil -} diff --git a/internal/lsp/testdata/missingfunction/unique_params.go b/internal/lsp/testdata/missingfunction/unique_params.go deleted file mode 100644 index ffaba3f9cb9..00000000000 --- a/internal/lsp/testdata/missingfunction/unique_params.go +++ /dev/null @@ -1,7 +0,0 @@ -package missingfunction - -func uniqueArguments() { - var s string - var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") -} diff --git a/internal/lsp/testdata/missingfunction/unique_params.go.golden b/internal/lsp/testdata/missingfunction/unique_params.go.golden deleted file mode 100644 index 74fb91a8eb2..00000000000 --- a/internal/lsp/testdata/missingfunction/unique_params.go.golden +++ /dev/null @@ -1,30 +0,0 @@ --- suggestedfix_unique_params_10_2 -- -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package missingfunction - -func uniqueArguments() { - var s string - var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") -} - -func undefinedUniqueArguments(s1 string, i int, s2 string) { - panic("implement me!") -} - --- suggestedfix_unique_params_6_2 -- -package missingfunction - -func uniqueArguments() { - var s string - var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") -} - -func undefinedUniqueArguments(s1 string, i int, s2 string) { - panic("unimplemented") -} - diff --git a/internal/lsp/testdata/nodisk/nodisk.overlay.go b/internal/lsp/testdata/nodisk/nodisk.overlay.go deleted file mode 100644 index f9194be569c..00000000000 --- a/internal/lsp/testdata/nodisk/nodisk.overlay.go +++ /dev/null @@ -1,9 +0,0 @@ -package nodisk - -import ( - "golang.org/x/tools/internal/lsp/foo" -) - -func _() { - foo.Foo() //@complete("F", Foo, IntFoo, StructFoo) -} diff --git a/internal/lsp/testdata/noparse/noparse.go.in b/internal/lsp/testdata/noparse/noparse.go.in deleted file mode 100644 index 7dc23e02562..00000000000 --- a/internal/lsp/testdata/noparse/noparse.go.in +++ /dev/null @@ -1,11 +0,0 @@ -package noparse - -func bye(x int) { - hi() -} - -func stuff() { - x := 5 -} - -func .() {} //@diag(".", "syntax", "expected 'IDENT', found '.'", "error") diff --git a/internal/lsp/testdata/noparse_format/noparse_format.go.in b/internal/lsp/testdata/noparse_format/noparse_format.go.in deleted file mode 100644 index 4fc3824d9b8..00000000000 --- a/internal/lsp/testdata/noparse_format/noparse_format.go.in +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.11 - -package noparse_format //@format("package") - -func what() { - var b int - if { hi() //@diag("{", "syntax", "missing condition in if statement", "error") - } -} \ No newline at end of file diff --git a/internal/lsp/testdata/references/other/other.go b/internal/lsp/testdata/references/other/other.go deleted file mode 100644 index de35cc81a9e..00000000000 --- a/internal/lsp/testdata/references/other/other.go +++ /dev/null @@ -1,19 +0,0 @@ -package other - -import ( - references "golang.org/x/tools/internal/lsp/references" -) - -func GetXes() []references.X { - return []references.X{ - { - Y: 1, //@mark(GetXesY, "Y"),refs("Y", typeXY, GetXesY, anotherXY) - }, - } -} - -func _() { - references.Q = "hello" //@mark(assignExpQ, "Q") - bob := func(_ string) {} - bob(references.Q) //@mark(bobExpQ, "Q") -} diff --git a/internal/lsp/testdata/references/refs.go b/internal/lsp/testdata/references/refs.go deleted file mode 100644 index 933a36f54e9..00000000000 --- a/internal/lsp/testdata/references/refs.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package refs is a package used to test find references. -package refs - -type i int //@mark(typeI, "i"),refs("i", typeI, argI, returnI, embeddedI) - -type X struct { - Y int //@mark(typeXY, "Y") -} - -func _(_ i) []bool { //@mark(argI, "i") - return nil -} - -func _(_ []byte) i { //@mark(returnI, "i") - return 0 -} - -var q string //@mark(declQ, "q"),refs("q", declQ, assignQ, bobQ) - -var Q string //@mark(declExpQ, "Q"),refs("Q", declExpQ, assignExpQ, bobExpQ) - -func _() { - q = "hello" //@mark(assignQ, "q") - bob := func(_ string) {} - bob(q) //@mark(bobQ, "q") -} - -type e struct { - i //@mark(embeddedI, "i"),refs("i", embeddedI, embeddedIUse) -} - -func _() { - _ = e{}.i //@mark(embeddedIUse, "i") -} - -const ( - foo = iota //@refs("iota") -) diff --git a/internal/lsp/testdata/rename/b/b.go.golden b/internal/lsp/testdata/rename/b/b.go.golden deleted file mode 100644 index 9cdc5677fd4..00000000000 --- a/internal/lsp/testdata/rename/b/b.go.golden +++ /dev/null @@ -1,78 +0,0 @@ --- Bob-rename -- -package b - -var c int //@rename("int", "uint") - -func _() { - a := 1 //@rename("a", "error") - a = 2 - _ = a -} - -var ( - // Hello there. - // Bob does the thing. - Bob int //@rename("Foo", "Bob") -) - -/* -Hello description -*/ -func Hello() {} //@rename("Hello", "Goodbye") - --- Goodbye-rename -- -b.go: -package b - -var c int //@rename("int", "uint") - -func _() { - a := 1 //@rename("a", "error") - a = 2 - _ = a -} - -var ( - // Hello there. - // Foo does the thing. - Foo int //@rename("Foo", "Bob") -) - -/* -Goodbye description -*/ -func Goodbye() {} //@rename("Hello", "Goodbye") - -c.go: -package c - -import "golang.org/x/tools/internal/lsp/rename/b" - -func _() { - b.Goodbye() //@rename("Hello", "Goodbye") -} - --- error-rename -- -package b - -var c int //@rename("int", "uint") - -func _() { - error := 1 //@rename("a", "error") - error = 2 - _ = error -} - -var ( - // Hello there. - // Foo does the thing. - Foo int //@rename("Foo", "Bob") -) - -/* -Hello description -*/ -func Hello() {} //@rename("Hello", "Goodbye") - --- uint-rename -- -"int": builtin object diff --git a/internal/lsp/testdata/rename/bad/bad.go.golden b/internal/lsp/testdata/rename/bad/bad.go.golden deleted file mode 100644 index 7f45813926a..00000000000 --- a/internal/lsp/testdata/rename/bad/bad.go.golden +++ /dev/null @@ -1,2 +0,0 @@ --- rFunc-rename -- -renaming "sFunc" to "rFunc" not possible because "golang.org/x/tools/internal/lsp/rename/bad" has errors diff --git a/internal/lsp/testdata/rename/c/c.go b/internal/lsp/testdata/rename/c/c.go deleted file mode 100644 index 519d2f6fcdf..00000000000 --- a/internal/lsp/testdata/rename/c/c.go +++ /dev/null @@ -1,7 +0,0 @@ -package c - -import "golang.org/x/tools/internal/lsp/rename/b" - -func _() { - b.Hello() //@rename("Hello", "Goodbye") -} diff --git a/internal/lsp/testdata/rename/c/c.go.golden b/internal/lsp/testdata/rename/c/c.go.golden deleted file mode 100644 index 56937420c59..00000000000 --- a/internal/lsp/testdata/rename/c/c.go.golden +++ /dev/null @@ -1,32 +0,0 @@ --- Goodbye-rename -- -b.go: -package b - -var c int //@rename("int", "uint") - -func _() { - a := 1 //@rename("a", "error") - a = 2 - _ = a -} - -var ( - // Hello there. - // Foo does the thing. - Foo int //@rename("Foo", "Bob") -) - -/* -Goodbye description -*/ -func Goodbye() {} //@rename("Hello", "Goodbye") - -c.go: -package c - -import "golang.org/x/tools/internal/lsp/rename/b" - -func _() { - b.Goodbye() //@rename("Hello", "Goodbye") -} - diff --git a/internal/lsp/testdata/rename/crosspkg/other/other.go b/internal/lsp/testdata/rename/crosspkg/other/other.go deleted file mode 100644 index 10d17cd34b5..00000000000 --- a/internal/lsp/testdata/rename/crosspkg/other/other.go +++ /dev/null @@ -1,8 +0,0 @@ -package other - -import "golang.org/x/tools/internal/lsp/rename/crosspkg" - -func Other() { - crosspkg.Bar - crosspkg.Foo() //@rename("Foo", "Flamingo") -} diff --git a/internal/lsp/testdata/semantic/a.go.golden b/internal/lsp/testdata/semantic/a.go.golden deleted file mode 100644 index 19dd412407d..00000000000 --- a/internal/lsp/testdata/semantic/a.go.golden +++ /dev/null @@ -1,83 +0,0 @@ --- semantic -- -/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("") - -/*⇒6,keyword,[]*/import ( - _ "encoding/utf8" - /*⇒3,namespace,[]*/utf "encoding/utf8" - "fmt"/*⇐3,namespace,[]*/ /*⇒19,comment,[]*///@ semantic("fmt") - . "fmt" - "unicode/utf8"/*⇐4,namespace,[]*/ -) - -/*⇒3,keyword,[]*/var ( - /*⇒1,variable,[definition]*/a = /*⇒3,namespace,[]*/fmt./*⇒5,function,[]*/Print - /*⇒1,variable,[definition]*/b []/*⇒6,type,[defaultLibrary]*/string = []/*⇒6,type,[defaultLibrary]*/string{/*⇒5,string,[]*/"foo"} - /*⇒2,variable,[definition]*/c1 /*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int - /*⇒2,variable,[definition]*/c2 /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int - /*⇒2,variable,[definition]*/c3 = /*⇒4,function,[defaultLibrary]*/make([]/*⇒4,keyword,[]*/chan/*⇒2,operator,[]*/<- /*⇒3,type,[defaultLibrary]*/int) - /*⇒1,variable,[definition]*/b = /*⇒1,type,[]*/A{/*⇒1,variable,[]*/X: /*⇒2,number,[]*/23} - /*⇒1,variable,[definition]*/m /*⇒3,keyword,[]*/map[/*⇒4,type,[defaultLibrary]*/bool][/*⇒1,number,[]*/3]/*⇒1,operator,[]*/*/*⇒7,type,[defaultLibrary]*/float64 -) - -/*⇒5,keyword,[]*/const ( - /*⇒2,variable,[definition readonly]*/xx /*⇒1,type,[]*/F = /*⇒4,variable,[readonly]*/iota - /*⇒2,variable,[definition readonly]*/yy = /*⇒2,variable,[readonly]*/xx /*⇒1,operator,[]*/+ /*⇒1,number,[]*/3 - /*⇒2,variable,[definition readonly]*/zz = /*⇒2,string,[]*/"" - /*⇒2,variable,[definition readonly]*/ww = /*⇒6,string,[]*/"not " /*⇒1,operator,[]*/+ /*⇒2,variable,[readonly]*/zz -) - -/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct { - /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,comment,[]*/`foof` -} -/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface { - /*⇒1,type,[]*/A - /*⇒3,method,[definition]*/sad(/*⇒3,type,[defaultLibrary]*/int) /*⇒4,type,[defaultLibrary]*/bool -} - -/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/F /*⇒3,type,[defaultLibrary]*/int - -/*⇒4,keyword,[]*/func (/*⇒1,variable,[]*/a /*⇒1,operator,[]*/*/*⇒1,type,[]*/A) /*⇒1,method,[definition]*/f() /*⇒4,type,[defaultLibrary]*/bool { - /*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/z /*⇒6,type,[defaultLibrary]*/string - /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"foo" - /*⇒1,variable,[]*/a(/*⇒1,variable,[]*/x) - /*⇒1,variable,[definition]*/y /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"bar" /*⇒1,operator,[]*/+ /*⇒1,variable,[]*/x - /*⇒6,keyword,[]*/switch /*⇒1,variable,[]*/z { - /*⇒4,keyword,[]*/case /*⇒4,string,[]*/"xx": - /*⇒7,keyword,[]*/default: - } - /*⇒6,keyword,[]*/select { - /*⇒4,keyword,[]*/case /*⇒1,variable,[definition]*/z /*⇒2,operator,[]*/:= /*⇒2,operator,[]*/<-/*⇒2,variable,[]*/c3[/*⇒1,number,[]*/0]: - /*⇒7,keyword,[]*/default: - } - /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/k, /*⇒1,variable,[definition]*/v := /*⇒5,keyword,[]*/range /*⇒1,variable,[]*/m { - /*⇒6,keyword,[]*/return (/*⇒1,operator,[]*/!/*⇒1,variable,[]*/k) /*⇒2,operator,[]*/&& /*⇒1,variable,[]*/v[/*⇒1,number,[]*/0] /*⇒2,operator,[]*/== /*⇒3,variable,[readonly defaultLibrary]*/nil - } - /*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[]*/X - /*⇒1,variable,[definition]*/w /*⇒2,operator,[]*/:= /*⇒1,variable,[]*/b[/*⇒1,number,[]*/4:] - /*⇒1,variable,[definition]*/j /*⇒2,operator,[]*/:= /*⇒3,function,[defaultLibrary]*/len(/*⇒1,variable,[]*/x) - /*⇒1,variable,[]*/j/*⇒2,operator,[]*/-- - /*⇒1,variable,[definition]*/q /*⇒2,operator,[]*/:= []/*⇒9,keyword,[]*/interface{}{/*⇒1,variable,[]*/j, /*⇒3,number,[]*/23i, /*⇒1,operator,[]*/&/*⇒1,variable,[]*/y} - /*⇒1,function,[]*/g(/*⇒1,variable,[]*/q/*⇒3,operator,[]*/...) - /*⇒6,keyword,[]*/return /*⇒4,variable,[readonly]*/true -} - -/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/g(/*⇒2,parameter,[definition]*/vv /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) { - /*⇒2,variable,[definition]*/ff /*⇒2,operator,[]*/:= /*⇒4,keyword,[]*/func() {} - /*⇒5,keyword,[]*/defer /*⇒2,variable,[]*/ff() - /*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,function,[]*/RuneCount(/*⇒2,string,[]*/"") - /*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,variable,[]*/vv.(/*⇒6,type,[]*/string)) - /*⇒2,keyword,[]*/if /*⇒4,variable,[readonly]*/true { - } /*⇒4,keyword,[]*/else { - } -/*⇒5,parameter,[definition]*/Never: - /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/i /*⇒2,operator,[]*/:= /*⇒1,number,[]*/0; /*⇒1,variable,[]*/i /*⇒1,operator,[]*/< /*⇒2,number,[]*/10; { - /*⇒5,keyword,[]*/break Never - } - _, /*⇒2,variable,[definition]*/ok /*⇒2,operator,[]*/:= /*⇒2,variable,[]*/vv[/*⇒1,number,[]*/0].(/*⇒1,type,[]*/A) - /*⇒2,keyword,[]*/if /*⇒1,operator,[]*/!/*⇒2,variable,[]*/ok { - /*⇒6,keyword,[]*/switch /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒2,variable,[]*/vv[/*⇒1,number,[]*/0].(/*⇒4,keyword,[]*/type) { - } - /*⇒4,keyword,[]*/goto Never - } -} - diff --git a/internal/lsp/testdata/signature/signature_test.go.golden b/internal/lsp/testdata/signature/signature_test.go.golden deleted file mode 100644 index 3853dffc905..00000000000 --- a/internal/lsp/testdata/signature/signature_test.go.golden +++ /dev/null @@ -1,30 +0,0 @@ --- AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)-signature -- -AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias) - --- AliasMap(a map[*signature.Alias]signature.StringAlias) (b map[*signature.Alias]signature.StringAlias, c map[*signature.Alias]signature.StringAlias)-signature -- -AliasMap(a map[*signature.Alias]signature.StringAlias) (b map[*signature.Alias]signature.StringAlias, c map[*signature.Alias]signature.StringAlias) - --- AliasSlice(a []*sig.Alias) (b sig.Alias)-signature -- -AliasSlice(a []*sig.Alias) (b sig.Alias) - --- AliasSlice(a []*signature.Alias) (b signature.Alias)-signature -- -AliasSlice(a []*signature.Alias) (b signature.Alias) - --- GetAlias() signature.Alias-signature -- -GetAlias() signature.Alias - --- GetAliasPtr() *signature.Alias-signature -- -GetAliasPtr() *signature.Alias - --- OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias-signature -- -OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias - --- OtherAliasMap(a map[signature.Alias]signature.OtherAlias, b map[signature.Alias]signature.OtherAlias) map[signature.Alias]signature.OtherAlias-signature -- -OtherAliasMap(a map[signature.Alias]signature.OtherAlias, b map[signature.Alias]signature.OtherAlias) map[signature.Alias]signature.OtherAlias - --- SetAliasSlice(a []*signature.Alias)-signature -- -SetAliasSlice(a []*signature.Alias) - --- SetOtherAliasMap(a map[*signature.Alias]signature.OtherAlias)-signature -- -SetOtherAliasMap(a map[*signature.Alias]signature.OtherAlias) - diff --git a/internal/lsp/testdata/snippets/literal.go b/internal/lsp/testdata/snippets/literal.go deleted file mode 100644 index 43931d18ef7..00000000000 --- a/internal/lsp/testdata/snippets/literal.go +++ /dev/null @@ -1,22 +0,0 @@ -package snippets - -import ( - "golang.org/x/tools/internal/lsp/signature" - t "golang.org/x/tools/internal/lsp/types" -) - -type structy struct { - x signature.MyType -} - -func X(_ map[signature.Alias]t.CoolAlias) (map[signature.Alias]t.CoolAlias) { - return nil -} - -func _() { - X() //@signature(")", "X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias", 0) - _ = signature.MyType{} //@item(literalMyType, "signature.MyType{}", "", "var") - s := structy{ - x: //@snippet(" //", literalMyType, "signature.MyType{\\}", "signature.MyType{\\}") - } -} \ No newline at end of file diff --git a/internal/lsp/testdata/snippets/literal.go.golden b/internal/lsp/testdata/snippets/literal.go.golden deleted file mode 100644 index f9725f73305..00000000000 --- a/internal/lsp/testdata/snippets/literal.go.golden +++ /dev/null @@ -1,6 +0,0 @@ --- X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias-signature -- -X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias - --- X(_ map[signatures.Alias]types.CoolAlias) map[signatures.Alias]types.CoolAlias-signature -- -X(_ map[signatures.Alias]types.CoolAlias) map[signatures.Alias]types.CoolAlias - diff --git a/internal/lsp/testdata/stub/stub_assign.go b/internal/lsp/testdata/stub/stub_assign.go deleted file mode 100644 index 9336361d009..00000000000 --- a/internal/lsp/testdata/stub/stub_assign.go +++ /dev/null @@ -1,10 +0,0 @@ -package stub - -import "io" - -func main() { - var br io.ByteWriter - br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite") -} - -type byteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_call_expr.go b/internal/lsp/testdata/stub/stub_call_expr.go deleted file mode 100644 index 775b0e5545e..00000000000 --- a/internal/lsp/testdata/stub/stub_call_expr.go +++ /dev/null @@ -1,13 +0,0 @@ -package stub - -func main() { - check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite") -} - -func check(err error) { - if err != nil { - panic(err) - } -} - -type callExpr struct{} diff --git a/internal/lsp/testdata/stub/stub_function_return.go b/internal/lsp/testdata/stub/stub_function_return.go deleted file mode 100644 index bbf05885af2..00000000000 --- a/internal/lsp/testdata/stub/stub_function_return.go +++ /dev/null @@ -1,11 +0,0 @@ -package stub - -import ( - "io" -) - -func newCloser() io.Closer { - return closer{} //@suggestedfix("c", "refactor.rewrite") -} - -type closer struct{} diff --git a/internal/lsp/testdata/stub/stub_pointer.go b/internal/lsp/testdata/stub/stub_pointer.go deleted file mode 100644 index 2b3681b8357..00000000000 --- a/internal/lsp/testdata/stub/stub_pointer.go +++ /dev/null @@ -1,9 +0,0 @@ -package stub - -import "io" - -func getReaderFrom() io.ReaderFrom { - return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite") -} - -type pointerImpl struct{} diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go deleted file mode 100644 index e06dce0a846..00000000000 --- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go +++ /dev/null @@ -1,11 +0,0 @@ -package suggestedfix - -import ( - "log" -) - -func goodbye() { - s := "hiiiiiii" - s = s //@suggestedfix("s = s", "quickfix") - log.Print(s) -} diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden deleted file mode 100644 index 9e1d84d1d56..00000000000 --- a/internal/lsp/testdata/summary.txt.golden +++ /dev/null @@ -1,30 +0,0 @@ --- summary -- -CallHierarchyCount = 2 -CodeLensCount = 5 -CompletionsCount = 265 -CompletionSnippetCount = 106 -UnimportedCompletionsCount = 5 -DeepCompletionsCount = 5 -FuzzyCompletionsCount = 8 -RankedCompletionsCount = 163 -CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 37 -FoldingRangesCount = 2 -FormatCount = 6 -ImportCount = 8 -SemanticTokenCount = 3 -SuggestedFixCount = 63 -FunctionExtractionCount = 25 -MethodExtractionCount = 6 -DefinitionsCount = 95 -TypeDefinitionsCount = 18 -HighlightsCount = 69 -ReferencesCount = 27 -RenamesCount = 41 -PrepareRenamesCount = 7 -SymbolsCount = 5 -WorkspaceSymbolsCount = 20 -SignaturesCount = 33 -LinksCount = 7 -ImplementationsCount = 14 - diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden deleted file mode 100644 index 1c6ad922c36..00000000000 --- a/internal/lsp/testdata/summary_go1.18.txt.golden +++ /dev/null @@ -1,30 +0,0 @@ --- summary -- -CallHierarchyCount = 2 -CodeLensCount = 5 -CompletionsCount = 266 -CompletionSnippetCount = 116 -UnimportedCompletionsCount = 5 -DeepCompletionsCount = 5 -FuzzyCompletionsCount = 8 -RankedCompletionsCount = 173 -CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 37 -FoldingRangesCount = 2 -FormatCount = 6 -ImportCount = 8 -SemanticTokenCount = 3 -SuggestedFixCount = 64 -FunctionExtractionCount = 25 -MethodExtractionCount = 6 -DefinitionsCount = 108 -TypeDefinitionsCount = 18 -HighlightsCount = 69 -ReferencesCount = 27 -RenamesCount = 48 -PrepareRenamesCount = 7 -SymbolsCount = 5 -WorkspaceSymbolsCount = 20 -SignaturesCount = 33 -LinksCount = 7 -ImplementationsCount = 14 - diff --git a/internal/lsp/testdata/symbols/main.go b/internal/lsp/testdata/symbols/main.go deleted file mode 100644 index 8111250f349..00000000000 --- a/internal/lsp/testdata/symbols/main.go +++ /dev/null @@ -1,64 +0,0 @@ -package main - -import ( - "io" -) - -var _ = 1 - -var x = 42 //@mark(symbolsx, "x"), symbol("x", "x", "Variable", "", "main.x") - -const y = 43 //@symbol("y", "y", "Constant", "", "main.y") - -type Number int //@symbol("Number", "Number", "Number", "", "main.Number") - -type Alias = string //@symbol("Alias", "Alias", "String", "", "main.Alias") - -type NumberAlias = Number //@symbol("NumberAlias", "NumberAlias", "Number", "", "main.NumberAlias") - -type ( - Boolean bool //@symbol("Boolean", "Boolean", "Boolean", "", "main.Boolean") - BoolAlias = bool //@symbol("BoolAlias", "BoolAlias", "Boolean", "", "main.BoolAlias") -) - -type Foo struct { //@mark(symbolsFoo, "Foo"), symbol("Foo", "Foo", "Struct", "", "main.Foo") - Quux //@mark(fQuux, "Quux"), symbol("Quux", "Quux", "Field", "Foo", "main.Foo.Quux") - W io.Writer //@symbol("W" , "W", "Field", "Foo", "main.Foo.W") - Bar int //@mark(fBar, "Bar"), symbol("Bar", "Bar", "Field", "Foo", "main.Foo.Bar") - baz string //@symbol("baz", "baz", "Field", "Foo", "main.Foo.baz") -} - -type Quux struct { //@symbol("Quux", "Quux", "Struct", "", "main.Quux") - X, Y float64 //@mark(qX, "X"), symbol("X", "X", "Field", "Quux", "main.X"), symbol("Y", "Y", "Field", "Quux", "main.Y") -} - -func (f Foo) Baz() string { //@symbol("(Foo).Baz", "Baz", "Method", "", "main.Foo.Baz") - return f.baz -} - -func _() {} - -func (q *Quux) Do() {} //@mark(qDo, "Do"), symbol("(*Quux).Do", "Do", "Method", "", "main.Quux.Do") - -func main() { //@symbol("main", "main", "Function", "", "main.main") - -} - -type Stringer interface { //@symbol("Stringer", "Stringer", "Interface", "", "main.Stringer") - String() string //@symbol("String", "String", "Method", "Stringer", "main.Stringer.String") -} - -type ABer interface { //@mark(ABerInterface, "ABer"), symbol("ABer", "ABer", "Interface", "", "main.ABer") - B() //@symbol("B", "B", "Method", "ABer", "main.ABer.B") - A() string //@mark(ABerA, "A"), symbol("A", "A", "Method", "ABer", "main.ABer.A") -} - -type WithEmbeddeds interface { //@symbol("WithEmbeddeds", "WithEmbeddeds", "Interface", "", "main.WithEmbeddeds") - Do() //@symbol("Do", "Do", "Method", "WithEmbeddeds", "main.WithEmbeddeds.Do") - ABer //@symbol("ABer", "ABer", "Interface", "WithEmbeddeds", "main.WithEmbeddeds.ABer") - io.Writer //@mark(ioWriter, "io.Writer"), symbol("io.Writer", "io.Writer", "Interface", "WithEmbeddeds", "main.WithEmbeddeds.Writer") -} - -func Dunk() int { return 0 } //@symbol("Dunk", "Dunk", "Function", "", "main.Dunk") - -func dunk() {} //@symbol("dunk", "dunk", "Function", "", "main.dunk") diff --git a/internal/lsp/testdata/symbols/main.go.golden b/internal/lsp/testdata/symbols/main.go.golden deleted file mode 100644 index ebb6a8a5dd1..00000000000 --- a/internal/lsp/testdata/symbols/main.go.golden +++ /dev/null @@ -1,31 +0,0 @@ --- symbols -- -x Variable 9:5-9:6 -y Constant 11:7-11:8 -Number Number 13:6-13:12 -Alias String 15:6-15:11 -NumberAlias Number 17:6-17:17 -Boolean Boolean 20:2-20:9 -BoolAlias Boolean 21:2-21:11 -Foo Struct 24:6-24:9 - Bar Field 27:2-27:5 - Quux Field 25:2-25:6 - W Field 26:2-26:3 - baz Field 28:2-28:5 -Quux Struct 31:6-31:10 - X Field 32:2-32:3 - Y Field 32:5-32:6 -(Foo).Baz Method 35:14-35:17 -(*Quux).Do Method 41:16-41:18 -main Function 43:6-43:10 -Stringer Interface 47:6-47:14 - String Method 48:2-48:8 -ABer Interface 51:6-51:10 - A Method 53:2-53:3 - B Method 52:2-52:3 -WithEmbeddeds Interface 56:6-56:19 - ABer Interface 58:2-58:6 - Do Method 57:2-57:4 - io.Writer Interface 59:2-59:11 -Dunk Function 62:6-62:10 -dunk Function 64:6-64:10 - diff --git a/internal/lsp/testdata/testy/testy_test.go b/internal/lsp/testdata/testy/testy_test.go deleted file mode 100644 index 4939f86b50b..00000000000 --- a/internal/lsp/testdata/testy/testy_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package testy - -import ( - "testing" - - sig "golang.org/x/tools/internal/lsp/signature" - "golang.org/x/tools/internal/lsp/snippets" -) - -func TestSomething(t *testing.T) { //@item(TestSomething, "TestSomething(t *testing.T)", "", "func") - var x int //@mark(testyX, "x"),diag("x", "compiler", "x declared but not used", "error"),refs("x", testyX) - a() //@mark(testyA, "a") -} - -func _() { - _ = snippets.X(nil) //@signature("nil", "X(_ map[sig.Alias]types.CoolAlias) map[sig.Alias]types.CoolAlias", 0) - var _ sig.Alias -} diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go b/internal/lsp/testdata/typeerrors/noresultvalues.go deleted file mode 100644 index 84234c4b93a..00000000000 --- a/internal/lsp/testdata/typeerrors/noresultvalues.go +++ /dev/null @@ -1,5 +0,0 @@ -package typeerrors - -func x() { return nil } //@suggestedfix("nil", "quickfix") - -func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix") diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden deleted file mode 100644 index 07c54d44553..00000000000 --- a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden +++ /dev/null @@ -1,14 +0,0 @@ --- suggestedfix_noresultvalues_3_19 -- -package typeerrors - -func x() { return } //@suggestedfix("nil", "quickfix") - -func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix") - --- suggestedfix_noresultvalues_5_19 -- -package typeerrors - -func x() { return nil } //@suggestedfix("nil", "quickfix") - -func y() { return } //@suggestedfix("nil", "quickfix") - diff --git a/internal/lsp/testdata/typeparams/type_params.go b/internal/lsp/testdata/typeparams/type_params.go deleted file mode 100644 index 715726b1a41..00000000000 --- a/internal/lsp/testdata/typeparams/type_params.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package typeparams - -func one[a int | string]() {} -func two[a int | string, b float64 | int]() {} - -func _() { - one[]() //@rank("]", string, float64) - two[]() //@rank("]", int, float64) - two[int, f]() //@rank("]", float64, float32) -} - -func slices[a []int | []float64]() {} //@item(tpInts, "[]int", "[]int", "type"),item(tpFloats, "[]float64", "[]float64", "type") - -func _() { - slices[]() //@rank("]", tpInts),rank("]", tpFloats) -} - -type s[a int | string] struct{} - -func _() { - s[]{} //@rank("]", int, float64) -} - -func takesGeneric[a int | string](s[a]) { - "s[a]{}" //@item(tpInScopeLit, "s[a]{}", "", "var") - takesGeneric() //@rank(")", tpInScopeLit),snippet(")", tpInScopeLit, "s[a]{\\}", "s[a]{\\}") -} - -func _() { - s[int]{} //@item(tpInstLit, "s[int]{}", "", "var") - takesGeneric[int]() //@rank(")", tpInstLit),snippet(")", tpInstLit, "s[int]{\\}", "s[int]{\\}") - - "s[...]{}" //@item(tpUninstLit, "s[...]{}", "", "var") - takesGeneric() //@rank(")", tpUninstLit),snippet(")", tpUninstLit, "s[${1:}]{\\}", "s[${1:a}]{\\}") -} - -func returnTP[A int | float64](a A) A { //@item(returnTP, "returnTP", "something", "func") - return a -} - -func _() { - var _ int = returnTP //@snippet(" //", returnTP, "returnTP[${1:}](${2:})", "returnTP[${1:A int|float64}](${2:a A})") - - var aa int //@item(tpInt, "aa", "int", "var") - var ab float64 //@item(tpFloat, "ab", "float64", "var") - returnTP[int](a) //@rank(")", tpInt, tpFloat) -} - -func takesFunc[T any](func(T) T) { - var _ func(t T) T = f //@snippet(" //", tpLitFunc, "func(t T) T {$0\\}", "func(t T) T {$0\\}") -} - -func _() { - _ = "func(...) {}" //@item(tpLitFunc, "func(...) {}", "", "var") - takesFunc() //@snippet(")", tpLitFunc, "func(${1:}) ${2:} {$0\\}", "func(${1:t} ${2:T}) ${3:T} {$0\\}") - takesFunc[int]() //@snippet(")", tpLitFunc, "func(i int) int {$0\\}", "func(${1:i} int) int {$0\\}") -} diff --git a/internal/lsp/testdata/undeclared/var.go b/internal/lsp/testdata/undeclared/var.go deleted file mode 100644 index b5f9287d48d..00000000000 --- a/internal/lsp/testdata/undeclared/var.go +++ /dev/null @@ -1,14 +0,0 @@ -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") - } - r() //@diag("r", "compiler", "undeclared name: r", "error") - return z -} diff --git a/internal/lsp/testdata/undeclared/var.go.golden b/internal/lsp/testdata/undeclared/var.go.golden deleted file mode 100644 index 74adbe8ffde..00000000000 --- a/internal/lsp/testdata/undeclared/var.go.golden +++ /dev/null @@ -1,51 +0,0 @@ --- suggestedfix_var_10_6 -- -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") - z = 4 - } - i := - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") - } - r() //@diag("r", "compiler", "undeclared name: r", "error") - return z -} - --- suggestedfix_var_4_12 -- -package undeclared - -func m() int { - y := - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") - } - r() //@diag("r", "compiler", "undeclared name: r", "error") - return z -} - --- suggestedfix_var_7_18 -- -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") - n := - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") - } - r() //@diag("r", "compiler", "undeclared name: r", "error") - return z -} - diff --git a/internal/lsp/testdata/unimported/export_test.go b/internal/lsp/testdata/unimported/export_test.go deleted file mode 100644 index 4f85700fa79..00000000000 --- a/internal/lsp/testdata/unimported/export_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package unimported - -var TestExport int //@item(testexport, "TestExport", "(from \"golang.org/x/tools/internal/lsp/unimported\")", "var") diff --git a/internal/lsp/testdata/unimported/unimported_cand_type.go b/internal/lsp/testdata/unimported/unimported_cand_type.go deleted file mode 100644 index 531aa2d180a..00000000000 --- a/internal/lsp/testdata/unimported/unimported_cand_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package unimported - -import ( - _ "context" - - "golang.org/x/tools/internal/lsp/baz" - _ "golang.org/x/tools/internal/lsp/signature" // provide type information for unimported completions in the other file -) - -func _() { - foo.StructFoo{} //@item(litFooStructFoo, "foo.StructFoo{}", "struct{...}", "struct") - - // We get the literal completion for "foo.StructFoo{}" even though we haven't - // imported "foo" yet. - baz.FooStruct = f //@snippet(" //", litFooStructFoo, "foo.StructFoo{$0\\}", "foo.StructFoo{$0\\}") -} diff --git a/internal/lsp/testdata/workspacesymbol/a/a.go b/internal/lsp/testdata/workspacesymbol/a/a.go deleted file mode 100644 index 6e5a68b16fe..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a.go +++ /dev/null @@ -1,9 +0,0 @@ -package a - -var RandomGopherVariableA = "a" //@symbol("RandomGopherVariableA", "RandomGopherVariableA", "Variable", "", "a.RandomGopherVariableA") - -const RandomGopherConstantA = "a" //@symbol("RandomGopherConstantA", "RandomGopherConstantA", "Constant", "", "a.RandomGopherConstantA") - -const ( - randomgopherinvariable = iota //@symbol("randomgopherinvariable", "randomgopherinvariable", "Constant", "", "a.randomgopherinvariable") -) diff --git a/internal/lsp/testdata/workspacesymbol/a/a.go.golden b/internal/lsp/testdata/workspacesymbol/a/a.go.golden deleted file mode 100644 index c3f088577ba..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a.go.golden +++ /dev/null @@ -1,5 +0,0 @@ --- symbols -- -RandomGopherVariableA Variable 3:5-3:26 -RandomGopherConstantA Constant 5:7-5:28 -randomgopherinvariable Constant 8:2-8:24 - diff --git a/internal/lsp/testdata/workspacesymbol/a/a_test.go b/internal/lsp/testdata/workspacesymbol/a/a_test.go deleted file mode 100644 index 30d5340970a..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package a - -var RandomGopherTestVariableA = "a" //@symbol("RandomGopherTestVariableA", "RandomGopherTestVariableA", "Variable", "", "a.RandomGopherTestVariableA") diff --git a/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden b/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden deleted file mode 100644 index af74619439a..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden +++ /dev/null @@ -1,3 +0,0 @@ --- symbols -- -RandomGopherTestVariableA Variable 3:5-3:30 - diff --git a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go b/internal/lsp/testdata/workspacesymbol/a/a_x_test.go deleted file mode 100644 index 76eb8487d8e..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package a_test - -var RandomGopherXTestVariableA = "a" //@symbol("RandomGopherXTestVariableA", "RandomGopherXTestVariableA", "Variable", "", "a_test.RandomGopherXTestVariableA") diff --git a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden b/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden deleted file mode 100644 index dfd02a5c449..00000000000 --- a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden +++ /dev/null @@ -1,3 +0,0 @@ --- symbols -- -RandomGopherXTestVariableA Variable 3:5-3:31 - diff --git a/internal/lsp/testdata/workspacesymbol/b/b.go b/internal/lsp/testdata/workspacesymbol/b/b.go deleted file mode 100644 index 89ce0d92e06..00000000000 --- a/internal/lsp/testdata/workspacesymbol/b/b.go +++ /dev/null @@ -1,7 +0,0 @@ -package b - -var RandomGopherVariableB = "b" //@symbol("RandomGopherVariableB", "RandomGopherVariableB", "Variable", "", "b.RandomGopherVariableB") - -type RandomGopherStructB struct { //@symbol("RandomGopherStructB", "RandomGopherStructB", "Struct", "", "b.RandomGopherStructB") - Bar int //@mark(bBar, "Bar"), symbol("Bar", "Bar", "Field", "RandomGopherStructB", "b.RandomGopherStructB.Bar") -} diff --git a/internal/lsp/testdata/workspacesymbol/b/b.go.golden b/internal/lsp/testdata/workspacesymbol/b/b.go.golden deleted file mode 100644 index 4711c9d91ad..00000000000 --- a/internal/lsp/testdata/workspacesymbol/b/b.go.golden +++ /dev/null @@ -1,5 +0,0 @@ --- symbols -- -RandomGopherVariableB Variable 3:5-3:26 -RandomGopherStructB Struct 5:6-5:25 - Bar Field 6:2-6:5 - diff --git a/internal/lsp/tests/README.md b/internal/lsp/tests/README.md deleted file mode 100644 index 2c18675f7e5..00000000000 --- a/internal/lsp/tests/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Testing - -LSP has "marker tests" defined in `internal/lsp/testdata`, as well as -traditional tests. - -## Marker tests - -Marker tests have a standard input file, like -`internal/lsp/testdata/foo/bar.go`, and some may have a corresponding golden -file, like `internal/lsp/testdata/foo/bar.go.golden`. The former is the "input" -and the latter is the expected output. - -Each input file contains annotations like -`//@suggestedfix("}", "refactor.rewrite")`. These annotations are interpreted by -test runners to perform certain actions. The expected output after those actions -is encoded in the golden file. - -When tests are run, each annotation results in a new subtest, which is encoded -in the golden file with a heading like, - -```bash --- suggestedfix_bar_11_21 -- -// expected contents go here --- suggestedfix_bar_13_20 -- -// expected contents go here -``` - -The format of these headings vary: they are defined by the -[`Golden`](https://pkg.go.dev/golang.org/x/tools/internal/lsp/tests#Data.Golden) -function for each annotation. In the case above, the format is: annotation -name, file name, annotation line location, annotation character location. - -So, if `internal/lsp/testdata/foo/bar.go` has three `suggestedfix` annotations, -the golden file should have three headers with `suggestedfix_bar_xx_yy` -headings. - -To see a list of all available annotations, see the exported "expectations" in -[tests.go](https://github.com/golang/tools/blob/299f270db45902e93469b1152fafed034bb3f033/internal/lsp/tests/tests.go#L418-L447). - -To run marker tests, - -```bash -cd /path/to/tools - -# The marker tests are located in "internal/lsp", "internal/lsp/cmd, and -# "internal/lsp/source". -go test ./internal/lsp/... -``` - -There are quite a lot of marker tests, so to run one individually, pass the test -path and heading into a -run argument: - -```bash -cd /path/to/tools -go test ./internal/lsp/... -v -run TestLSP/Modules/SuggestedFix/bar_11_21 -``` - -## Resetting marker tests - -Sometimes, a change is made to lsp that requires a change to multiple golden -files. When this happens, you can run, - -```bash -cd /path/to/tools -./internal/lsp/reset_golden.sh -``` diff --git a/internal/lsp/tests/util.go b/internal/lsp/tests/util.go deleted file mode 100644 index 11dda1f8edd..00000000000 --- a/internal/lsp/tests/util.go +++ /dev/null @@ -1,580 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tests - -import ( - "bytes" - "context" - "fmt" - "go/token" - "path/filepath" - "sort" - "strconv" - "strings" - "testing" - - "golang.org/x/tools/internal/lsp/diff" - "golang.org/x/tools/internal/lsp/diff/myers" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/lsp/source/completion" - "golang.org/x/tools/internal/span" -) - -// DiffLinks takes the links we got and checks if they are located within the source or a Note. -// If the link is within a Note, the link is removed. -// Returns an diff comment if there are differences and empty string if no diffs. -func DiffLinks(mapper *protocol.ColumnMapper, wantLinks []Link, gotLinks []protocol.DocumentLink) string { - var notePositions []token.Position - links := make(map[span.Span]string, len(wantLinks)) - for _, link := range wantLinks { - links[link.Src] = link.Target - notePositions = append(notePositions, link.NotePosition) - } - for _, link := range gotLinks { - spn, err := mapper.RangeSpan(link.Range) - if err != nil { - return fmt.Sprintf("%v", err) - } - linkInNote := false - for _, notePosition := range notePositions { - // Drop the links found inside expectation notes arguments as this links are not collected by expect package. - if notePosition.Line == spn.Start().Line() && - notePosition.Column <= spn.Start().Column() { - delete(links, spn) - linkInNote = true - } - } - if linkInNote { - continue - } - if target, ok := links[spn]; ok { - delete(links, spn) - if target != link.Target { - return fmt.Sprintf("for %v want %v, got %v\n", spn, target, link.Target) - } - } else { - return fmt.Sprintf("unexpected link %v:%v\n", spn, link.Target) - } - } - for spn, target := range links { - return fmt.Sprintf("missing link %v:%v\n", spn, target) - } - return "" -} - -// DiffSymbols prints the diff between expected and actual symbols test results. -func DiffSymbols(t *testing.T, uri span.URI, want, got []protocol.DocumentSymbol) string { - sort.Slice(want, func(i, j int) bool { return want[i].Name < want[j].Name }) - sort.Slice(got, func(i, j int) bool { return got[i].Name < got[j].Name }) - if len(got) != len(want) { - return summarizeSymbols(-1, want, got, "different lengths got %v want %v", len(got), len(want)) - } - for i, w := range want { - g := got[i] - if w.Name != g.Name { - return summarizeSymbols(i, want, got, "incorrect name got %v want %v", g.Name, w.Name) - } - if w.Kind != g.Kind { - return summarizeSymbols(i, want, got, "incorrect kind got %v want %v", g.Kind, w.Kind) - } - if protocol.CompareRange(w.SelectionRange, g.SelectionRange) != 0 { - return summarizeSymbols(i, want, got, "incorrect span got %v want %v", g.SelectionRange, w.SelectionRange) - } - if msg := DiffSymbols(t, uri, w.Children, g.Children); msg != "" { - return fmt.Sprintf("children of %s: %s", w.Name, msg) - } - } - return "" -} - -func summarizeSymbols(i int, want, got []protocol.DocumentSymbol, reason string, args ...interface{}) string { - msg := &bytes.Buffer{} - fmt.Fprint(msg, "document symbols failed") - if i >= 0 { - fmt.Fprintf(msg, " at %d", i) - } - fmt.Fprint(msg, " because of ") - fmt.Fprintf(msg, reason, args...) - fmt.Fprint(msg, ":\nexpected:\n") - for _, s := range want { - fmt.Fprintf(msg, " %v %v %v\n", s.Name, s.Kind, s.SelectionRange) - } - fmt.Fprintf(msg, "got:\n") - for _, s := range got { - fmt.Fprintf(msg, " %v %v %v\n", s.Name, s.Kind, s.SelectionRange) - } - return msg.String() -} - -// DiffDiagnostics prints the diff between expected and actual diagnostics test -// results. -func DiffDiagnostics(uri span.URI, want, got []*source.Diagnostic) string { - source.SortDiagnostics(want) - source.SortDiagnostics(got) - - if len(got) != len(want) { - return summarizeDiagnostics(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want)) - } - for i, w := range want { - g := got[i] - if w.Message != g.Message { - return summarizeDiagnostics(i, uri, want, got, "incorrect Message got %v want %v", g.Message, w.Message) - } - if w.Severity != g.Severity { - return summarizeDiagnostics(i, uri, want, got, "incorrect Severity got %v want %v", g.Severity, w.Severity) - } - if w.Source != g.Source { - return summarizeDiagnostics(i, uri, want, got, "incorrect Source got %v want %v", g.Source, w.Source) - } - if !rangeOverlaps(g.Range, w.Range) { - return summarizeDiagnostics(i, uri, want, got, "range %v does not overlap %v", g.Range, w.Range) - } - } - return "" -} - -// rangeOverlaps reports whether r1 and r2 overlap. -func rangeOverlaps(r1, r2 protocol.Range) bool { - if inRange(r2.Start, r1) || inRange(r1.Start, r2) { - return true - } - return false -} - -// inRange reports whether p is contained within [r.Start, r.End), or if p == -// r.Start == r.End (special handling for the case where the range is a single -// point). -func inRange(p protocol.Position, r protocol.Range) bool { - if protocol.IsPoint(r) { - return protocol.ComparePosition(r.Start, p) == 0 - } - if protocol.ComparePosition(r.Start, p) <= 0 && protocol.ComparePosition(p, r.End) < 0 { - return true - } - return false -} - -func summarizeDiagnostics(i int, uri span.URI, want, got []*source.Diagnostic, reason string, args ...interface{}) string { - msg := &bytes.Buffer{} - fmt.Fprint(msg, "diagnostics failed") - if i >= 0 { - fmt.Fprintf(msg, " at %d", i) - } - fmt.Fprint(msg, " because of ") - fmt.Fprintf(msg, reason, args...) - fmt.Fprint(msg, ":\nexpected:\n") - for _, d := range want { - fmt.Fprintf(msg, " %s:%v: %s\n", uri, d.Range, d.Message) - } - fmt.Fprintf(msg, "got:\n") - for _, d := range got { - fmt.Fprintf(msg, " %s:%v: %s\n", uri, d.Range, d.Message) - } - return msg.String() -} - -func DiffCodeLens(uri span.URI, want, got []protocol.CodeLens) string { - sortCodeLens(want) - sortCodeLens(got) - - if len(got) != len(want) { - return summarizeCodeLens(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want)) - } - for i, w := range want { - g := got[i] - if w.Command.Command != g.Command.Command { - return summarizeCodeLens(i, uri, want, got, "incorrect Command Name got %v want %v", g.Command.Command, w.Command.Command) - } - if w.Command.Title != g.Command.Title { - return summarizeCodeLens(i, uri, want, got, "incorrect Command Title got %v want %v", g.Command.Title, w.Command.Title) - } - if protocol.ComparePosition(w.Range.Start, g.Range.Start) != 0 { - return summarizeCodeLens(i, uri, want, got, "incorrect Start got %v want %v", g.Range.Start, w.Range.Start) - } - if !protocol.IsPoint(g.Range) { // Accept any 'want' range if the codelens returns a zero-length range. - if protocol.ComparePosition(w.Range.End, g.Range.End) != 0 { - return summarizeCodeLens(i, uri, want, got, "incorrect End got %v want %v", g.Range.End, w.Range.End) - } - } - } - return "" -} - -func sortCodeLens(c []protocol.CodeLens) { - sort.Slice(c, func(i int, j int) bool { - if r := protocol.CompareRange(c[i].Range, c[j].Range); r != 0 { - return r < 0 - } - if c[i].Command.Command < c[j].Command.Command { - return true - } else if c[i].Command.Command == c[j].Command.Command { - return c[i].Command.Title < c[j].Command.Title - } else { - return false - } - }) -} - -func summarizeCodeLens(i int, uri span.URI, want, got []protocol.CodeLens, reason string, args ...interface{}) string { - msg := &bytes.Buffer{} - fmt.Fprint(msg, "codelens failed") - if i >= 0 { - fmt.Fprintf(msg, " at %d", i) - } - fmt.Fprint(msg, " because of ") - fmt.Fprintf(msg, reason, args...) - fmt.Fprint(msg, ":\nexpected:\n") - for _, d := range want { - fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title) - } - fmt.Fprintf(msg, "got:\n") - for _, d := range got { - fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title) - } - return msg.String() -} - -func DiffSignatures(spn span.Span, want, got *protocol.SignatureHelp) (string, error) { - decorate := func(f string, args ...interface{}) string { - return fmt.Sprintf("invalid signature at %s: %s", spn, fmt.Sprintf(f, args...)) - } - if len(got.Signatures) != 1 { - return decorate("wanted 1 signature, got %d", len(got.Signatures)), nil - } - if got.ActiveSignature != 0 { - return decorate("wanted active signature of 0, got %d", int(got.ActiveSignature)), nil - } - if want.ActiveParameter != got.ActiveParameter { - return decorate("wanted active parameter of %d, got %d", want.ActiveParameter, int(got.ActiveParameter)), nil - } - g := got.Signatures[0] - w := want.Signatures[0] - if NormalizeAny(w.Label) != NormalizeAny(g.Label) { - wLabel := w.Label + "\n" - d, err := myers.ComputeEdits("", wLabel, g.Label+"\n") - if err != nil { - return "", err - } - return decorate("mismatched labels:\n%q", diff.ToUnified("want", "got", wLabel, d)), err - } - var paramParts []string - for _, p := range g.Parameters { - paramParts = append(paramParts, p.Label) - } - paramsStr := strings.Join(paramParts, ", ") - if !strings.Contains(g.Label, paramsStr) { - return decorate("expected signature %q to contain params %q", g.Label, paramsStr), nil - } - return "", nil -} - -// NormalizeAny replaces occurrences of interface{} in input with any. -// -// In Go 1.18, standard library functions were changed to use the 'any' -// alias in place of interface{}, which affects their type string. -func NormalizeAny(input string) string { - return strings.ReplaceAll(input, "interface{}", "any") -} - -// DiffCallHierarchyItems returns the diff between expected and actual call locations for incoming/outgoing call hierarchies -func DiffCallHierarchyItems(gotCalls []protocol.CallHierarchyItem, expectedCalls []protocol.CallHierarchyItem) string { - expected := make(map[protocol.Location]bool) - for _, call := range expectedCalls { - expected[protocol.Location{URI: call.URI, Range: call.Range}] = true - } - - got := make(map[protocol.Location]bool) - for _, call := range gotCalls { - got[protocol.Location{URI: call.URI, Range: call.Range}] = true - } - if len(got) != len(expected) { - return fmt.Sprintf("expected %d calls but got %d", len(expected), len(got)) - } - for spn := range got { - if !expected[spn] { - return fmt.Sprintf("incorrect calls, expected locations %v but got locations %v", expected, got) - } - } - return "" -} - -func ToProtocolCompletionItems(items []completion.CompletionItem) []protocol.CompletionItem { - var result []protocol.CompletionItem - for _, item := range items { - result = append(result, ToProtocolCompletionItem(item)) - } - return result -} - -func ToProtocolCompletionItem(item completion.CompletionItem) protocol.CompletionItem { - pItem := protocol.CompletionItem{ - Label: item.Label, - Kind: item.Kind, - Detail: item.Detail, - Documentation: item.Documentation, - InsertText: item.InsertText, - TextEdit: &protocol.TextEdit{ - NewText: item.Snippet(), - }, - // Negate score so best score has lowest sort text like real API. - SortText: fmt.Sprint(-item.Score), - } - if pItem.InsertText == "" { - pItem.InsertText = pItem.Label - } - return pItem -} - -func FilterBuiltins(src span.Span, items []protocol.CompletionItem) []protocol.CompletionItem { - var ( - got []protocol.CompletionItem - wantBuiltins = strings.Contains(string(src.URI()), "builtins") - wantKeywords = strings.Contains(string(src.URI()), "keywords") - ) - for _, item := range items { - if !wantBuiltins && isBuiltin(item.Label, item.Detail, item.Kind) { - continue - } - - if !wantKeywords && token.Lookup(item.Label).IsKeyword() { - continue - } - - got = append(got, item) - } - return got -} - -func isBuiltin(label, detail string, kind protocol.CompletionItemKind) bool { - if detail == "" && kind == protocol.ClassCompletion { - return true - } - // Remaining builtin constants, variables, interfaces, and functions. - trimmed := label - if i := strings.Index(trimmed, "("); i >= 0 { - trimmed = trimmed[:i] - } - switch trimmed { - case "append", "cap", "close", "complex", "copy", "delete", - "error", "false", "imag", "iota", "len", "make", "new", - "nil", "panic", "print", "println", "real", "recover", "true": - return true - } - return false -} - -func CheckCompletionOrder(want, got []protocol.CompletionItem, strictScores bool) string { - var ( - matchedIdxs []int - lastGotIdx int - lastGotSort float64 - inOrder = true - errorMsg = "completions out of order" - ) - for _, w := range want { - var found bool - for i, g := range got { - if w.Label == g.Label && NormalizeAny(w.Detail) == NormalizeAny(g.Detail) && w.Kind == g.Kind { - matchedIdxs = append(matchedIdxs, i) - found = true - - if i < lastGotIdx { - inOrder = false - } - lastGotIdx = i - - sort, _ := strconv.ParseFloat(g.SortText, 64) - if strictScores && len(matchedIdxs) > 1 && sort <= lastGotSort { - inOrder = false - errorMsg = "candidate scores not strictly decreasing" - } - lastGotSort = sort - - break - } - } - if !found { - return summarizeCompletionItems(-1, []protocol.CompletionItem{w}, got, "didn't find expected completion") - } - } - - sort.Ints(matchedIdxs) - matched := make([]protocol.CompletionItem, 0, len(matchedIdxs)) - for _, idx := range matchedIdxs { - matched = append(matched, got[idx]) - } - - if !inOrder { - return summarizeCompletionItems(-1, want, matched, errorMsg) - } - - return "" -} - -func DiffSnippets(want string, got *protocol.CompletionItem) string { - if want == "" { - if got != nil { - x := got.TextEdit - return fmt.Sprintf("expected no snippet but got %s", x.NewText) - } - } else { - if got == nil { - return fmt.Sprintf("couldn't find completion matching %q", want) - } - x := got.TextEdit - if want != x.NewText { - return fmt.Sprintf("expected snippet %q, got %q", want, x.NewText) - } - } - return "" -} - -func FindItem(list []protocol.CompletionItem, want completion.CompletionItem) *protocol.CompletionItem { - for _, item := range list { - if item.Label == want.Label { - return &item - } - } - return nil -} - -// DiffCompletionItems prints the diff between expected and actual completion -// test results. -func DiffCompletionItems(want, got []protocol.CompletionItem) string { - if len(got) != len(want) { - return summarizeCompletionItems(-1, want, got, "different lengths got %v want %v", len(got), len(want)) - } - for i, w := range want { - g := got[i] - if w.Label != g.Label { - return summarizeCompletionItems(i, want, got, "incorrect Label got %v want %v", g.Label, w.Label) - } - if NormalizeAny(w.Detail) != NormalizeAny(g.Detail) { - return summarizeCompletionItems(i, want, got, "incorrect Detail got %v want %v", g.Detail, w.Detail) - } - if w.Documentation != "" && !strings.HasPrefix(w.Documentation, "@") { - if w.Documentation != g.Documentation { - return summarizeCompletionItems(i, want, got, "incorrect Documentation got %v want %v", g.Documentation, w.Documentation) - } - } - if w.Kind != g.Kind { - return summarizeCompletionItems(i, want, got, "incorrect Kind got %v want %v", g.Kind, w.Kind) - } - } - return "" -} - -func summarizeCompletionItems(i int, want, got []protocol.CompletionItem, reason string, args ...interface{}) string { - msg := &bytes.Buffer{} - fmt.Fprint(msg, "completion failed") - if i >= 0 { - fmt.Fprintf(msg, " at %d", i) - } - fmt.Fprint(msg, " because of ") - fmt.Fprintf(msg, reason, args...) - fmt.Fprint(msg, ":\nexpected:\n") - for _, d := range want { - fmt.Fprintf(msg, " %v\n", d) - } - fmt.Fprintf(msg, "got:\n") - for _, d := range got { - fmt.Fprintf(msg, " %v\n", d) - } - return msg.String() -} - -func EnableAllAnalyzers(view source.View, opts *source.Options) { - if opts.Analyses == nil { - opts.Analyses = make(map[string]bool) - } - for _, a := range opts.DefaultAnalyzers { - if !a.IsEnabled(view) { - opts.Analyses[a.Analyzer.Name] = true - } - } - for _, a := range opts.TypeErrorAnalyzers { - if !a.IsEnabled(view) { - opts.Analyses[a.Analyzer.Name] = true - } - } - for _, a := range opts.ConvenienceAnalyzers { - if !a.IsEnabled(view) { - opts.Analyses[a.Analyzer.Name] = true - } - } - for _, a := range opts.StaticcheckAnalyzers { - if !a.IsEnabled(view) { - opts.Analyses[a.Analyzer.Name] = true - } - } -} - -func WorkspaceSymbolsString(ctx context.Context, data *Data, queryURI span.URI, symbols []protocol.SymbolInformation) (string, error) { - queryDir := filepath.Dir(queryURI.Filename()) - var filtered []string - for _, s := range symbols { - uri := s.Location.URI.SpanURI() - dir := filepath.Dir(uri.Filename()) - if !source.InDir(queryDir, dir) { // assume queries always issue from higher directories - continue - } - m, err := data.Mapper(uri) - if err != nil { - return "", err - } - spn, err := m.Span(s.Location) - if err != nil { - return "", err - } - filtered = append(filtered, fmt.Sprintf("%s %s %s", spn, s.Name, s.Kind)) - } - sort.Strings(filtered) - return strings.Join(filtered, "\n") + "\n", nil -} - -func WorkspaceSymbolsTestTypeToMatcher(typ WorkspaceSymbolsTestType) source.SymbolMatcher { - switch typ { - case WorkspaceSymbolsFuzzy: - return source.SymbolFuzzy - case WorkspaceSymbolsCaseSensitive: - return source.SymbolCaseSensitive - default: - return source.SymbolCaseInsensitive - } -} - -func Diff(t *testing.T, want, got string) string { - if want == got { - return "" - } - // Add newlines to avoid newline messages in diff. - want += "\n" - got += "\n" - d, err := myers.ComputeEdits("", want, got) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("%q", diff.ToUnified("want", "got", want, d)) -} - -// StripSubscripts removes type parameter id subscripts. -// -// TODO(rfindley): remove this function once subscripts are removed from the -// type parameter type string. -func StripSubscripts(s string) string { - var runes []rune - for _, r := range s { - // For debugging/uniqueness purposes, TypeString on a type parameter adds a - // subscript corresponding to the type parameter's unique id. This is going - // to be removed, but in the meantime we skip the subscript runes to get a - // deterministic output. - if 'ā‚€' <= r && r < 'ā‚€'+10 { - continue // trim type parameter subscripts - } - runes = append(runes, r) - } - return string(runes) -} diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go deleted file mode 100644 index 3276a47bf99..00000000000 --- a/internal/lsp/text_synchronization.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "bytes" - "context" - "errors" - "fmt" - "path/filepath" - "time" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" - "golang.org/x/tools/internal/xcontext" -) - -// ModificationSource identifies the originating cause of a file modification. -type ModificationSource int - -const ( - // FromDidOpen is a file modification caused by opening a file. - FromDidOpen = ModificationSource(iota) - - // FromDidChange is a file modification caused by changing a file. - FromDidChange - - // FromDidChangeWatchedFiles is a file modification caused by a change to a - // watched file. - FromDidChangeWatchedFiles - - // FromDidSave is a file modification caused by a file save. - FromDidSave - - // FromDidClose is a file modification caused by closing a file. - FromDidClose - - // FromRegenerateCgo refers to file modifications caused by regenerating - // the cgo sources for the workspace. - FromRegenerateCgo - - // FromInitialWorkspaceLoad refers to the loading of all packages in the - // workspace when the view is first created. - FromInitialWorkspaceLoad -) - -func (m ModificationSource) String() string { - switch m { - case FromDidOpen: - return "opened files" - case FromDidChange: - return "changed files" - case FromDidChangeWatchedFiles: - return "files changed on disk" - case FromDidSave: - return "saved files" - case FromDidClose: - return "close files" - case FromRegenerateCgo: - return "regenerate cgo" - case FromInitialWorkspaceLoad: - return "initial workspace load" - default: - return "unknown file modification" - } -} - -func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error { - uri := params.TextDocument.URI.SpanURI() - if !uri.IsFile() { - return nil - } - // There may not be any matching view in the current session. If that's - // the case, try creating a new view based on the opened file path. - // - // TODO(rstambler): This seems like it would continuously add new - // views, but it won't because ViewOf only returns an error when there - // are no views in the session. I don't know if that logic should go - // here, or if we can continue to rely on that implementation detail. - if _, err := s.session.ViewOf(uri); err != nil { - dir := filepath.Dir(uri.Filename()) - if err := s.addFolders(ctx, []protocol.WorkspaceFolder{{ - URI: string(protocol.URIFromPath(dir)), - Name: filepath.Base(dir), - }}); err != nil { - return err - } - } - return s.didModifyFiles(ctx, []source.FileModification{{ - URI: uri, - Action: source.Open, - Version: params.TextDocument.Version, - Text: []byte(params.TextDocument.Text), - LanguageID: params.TextDocument.LanguageID, - }}, FromDidOpen) -} - -func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error { - uri := params.TextDocument.URI.SpanURI() - if !uri.IsFile() { - return nil - } - - text, err := s.changedText(ctx, uri, params.ContentChanges) - if err != nil { - return err - } - c := source.FileModification{ - URI: uri, - Action: source.Change, - Version: params.TextDocument.Version, - Text: text, - } - if err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange); err != nil { - return err - } - return s.warnAboutModifyingGeneratedFiles(ctx, uri) -} - -// warnAboutModifyingGeneratedFiles shows a warning if a user tries to edit a -// generated file for the first time. -func (s *Server) warnAboutModifyingGeneratedFiles(ctx context.Context, uri span.URI) error { - s.changedFilesMu.Lock() - _, ok := s.changedFiles[uri] - if !ok { - s.changedFiles[uri] = struct{}{} - } - s.changedFilesMu.Unlock() - - // This file has already been edited before. - if ok { - return nil - } - - // Ideally, we should be able to specify that a generated file should - // be opened as read-only. Tell the user that they should not be - // editing a generated file. - view, err := s.session.ViewOf(uri) - if err != nil { - return err - } - snapshot, release := view.Snapshot(ctx) - isGenerated := source.IsGenerated(ctx, snapshot, uri) - release() - - if !isGenerated { - return nil - } - return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ - Message: fmt.Sprintf("Do not edit this file! %s is a generated file.", uri.Filename()), - Type: protocol.Warning, - }) -} - -func (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error { - var modifications []source.FileModification - for _, change := range params.Changes { - uri := change.URI.SpanURI() - if !uri.IsFile() { - continue - } - action := changeTypeToFileAction(change.Type) - modifications = append(modifications, source.FileModification{ - URI: uri, - Action: action, - OnDisk: true, - }) - } - return s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles) -} - -func (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error { - uri := params.TextDocument.URI.SpanURI() - if !uri.IsFile() { - return nil - } - c := source.FileModification{ - URI: uri, - Action: source.Save, - } - if params.Text != nil { - c.Text = []byte(*params.Text) - } - return s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave) -} - -func (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error { - uri := params.TextDocument.URI.SpanURI() - if !uri.IsFile() { - return nil - } - return s.didModifyFiles(ctx, []source.FileModification{ - { - URI: uri, - Action: source.Close, - Version: -1, - Text: nil, - }, - }, FromDidClose) -} - -func (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) error { - diagnoseDone := make(chan struct{}) - if s.session.Options().VerboseWorkDoneProgress { - work := s.progress.Start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil) - defer func() { - go func() { - <-diagnoseDone - work.End(ctx, "Done.") - }() - }() - } - - onDisk := cause == FromDidChangeWatchedFiles - delay := s.session.Options().ExperimentalWatchedFileDelay - s.fileChangeMu.Lock() - defer s.fileChangeMu.Unlock() - if !onDisk || delay == 0 { - // No delay: process the modifications immediately. - return s.processModifications(ctx, modifications, onDisk, diagnoseDone) - } - // Debounce and batch up pending modifications from watched files. - pending := &pendingModificationSet{ - diagnoseDone: diagnoseDone, - changes: modifications, - } - // Invariant: changes appended to s.pendingOnDiskChanges are eventually - // handled in the order they arrive. This guarantee is only partially - // enforced here. Specifically: - // 1. s.fileChangesMu ensures that the append below happens in the order - // notifications were received, so that the changes within each batch are - // ordered properly. - // 2. The debounced func below holds s.fileChangesMu while processing all - // changes in s.pendingOnDiskChanges, ensuring that no batches are - // processed out of order. - // 3. Session.ExpandModificationsToDirectories and Session.DidModifyFiles - // process changes in order. - s.pendingOnDiskChanges = append(s.pendingOnDiskChanges, pending) - ctx = xcontext.Detach(ctx) - okc := s.watchedFileDebouncer.debounce("", 0, time.After(delay)) - go func() { - if ok := <-okc; !ok { - return - } - s.fileChangeMu.Lock() - var allChanges []source.FileModification - // For accurate progress notifications, we must notify all goroutines - // waiting for the diagnose pass following a didChangeWatchedFiles - // notification. This is necessary for regtest assertions. - var dones []chan struct{} - for _, pending := range s.pendingOnDiskChanges { - allChanges = append(allChanges, pending.changes...) - dones = append(dones, pending.diagnoseDone) - } - - allDone := make(chan struct{}) - if err := s.processModifications(ctx, allChanges, onDisk, allDone); err != nil { - event.Error(ctx, "processing delayed file changes", err) - } - s.pendingOnDiskChanges = nil - s.fileChangeMu.Unlock() - <-allDone - for _, done := range dones { - close(done) - } - }() - return nil -} - -// processModifications update server state to reflect file changes, and -// triggers diagnostics to run asynchronously. The diagnoseDone channel will be -// closed once diagnostics complete. -func (s *Server) processModifications(ctx context.Context, modifications []source.FileModification, onDisk bool, diagnoseDone chan struct{}) error { - s.stateMu.Lock() - if s.state >= serverShutDown { - // This state check does not prevent races below, and exists only to - // produce a better error message. The actual race to the cache should be - // guarded by Session.viewMu. - s.stateMu.Unlock() - close(diagnoseDone) - return errors.New("server is shut down") - } - s.stateMu.Unlock() - // If the set of changes included directories, expand those directories - // to their files. - modifications = s.session.ExpandModificationsToDirectories(ctx, modifications) - - snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications) - if err != nil { - close(diagnoseDone) - return err - } - - go func() { - s.diagnoseSnapshots(snapshots, onDisk) - for _, release := range releases { - release() - } - close(diagnoseDone) - }() - - // After any file modifications, we need to update our watched files, - // in case something changed. Compute the new set of directories to watch, - // and if it differs from the current set, send updated registrations. - return s.updateWatchedDirectories(ctx) -} - -// DiagnosticWorkTitle returns the title of the diagnostic work resulting from a -// file change originating from the given cause. -func DiagnosticWorkTitle(cause ModificationSource) string { - return fmt.Sprintf("diagnosing %v", cause) -} - -func (s *Server) changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) { - if len(changes) == 0 { - return nil, fmt.Errorf("%w: no content changes provided", jsonrpc2.ErrInternal) - } - - // Check if the client sent the full content of the file. - // We accept a full content change even if the server expected incremental changes. - if len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 { - return []byte(changes[0].Text), nil - } - return s.applyIncrementalChanges(ctx, uri, changes) -} - -func (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) { - fh, err := s.session.GetFile(ctx, uri) - if err != nil { - return nil, err - } - content, err := fh.Read() - if err != nil { - return nil, fmt.Errorf("%w: file not found (%v)", jsonrpc2.ErrInternal, err) - } - for _, change := range changes { - // Make sure to update column mapper along with the content. - m := protocol.NewColumnMapper(uri, content) - if change.Range == nil { - return nil, fmt.Errorf("%w: unexpected nil range for change", jsonrpc2.ErrInternal) - } - spn, err := m.RangeSpan(*change.Range) - if err != nil { - return nil, err - } - if !spn.HasOffset() { - return nil, fmt.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal) - } - start, end := spn.Start().Offset(), spn.End().Offset() - if end < start { - return nil, fmt.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal) - } - var buf bytes.Buffer - buf.Write(content[:start]) - buf.WriteString(change.Text) - buf.Write(content[end:]) - content = buf.Bytes() - } - return content, nil -} - -func changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction { - switch ct { - case protocol.Changed: - return source.Change - case protocol.Created: - return source.Create - case protocol.Deleted: - return source.Delete - } - return source.UnknownFileAction -} diff --git a/internal/lsp/work/completion.go b/internal/lsp/work/completion.go deleted file mode 100644 index c7227bc268b..00000000000 --- a/internal/lsp/work/completion.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package work - -import ( - "context" - "errors" - "fmt" - "go/token" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Completion(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle, position protocol.Position) (*protocol.CompletionList, error) { - ctx, done := event.Start(ctx, "work.Completion") - defer done() - - // Get the position of the cursor. - pw, err := snapshot.ParseWork(ctx, fh) - if err != nil { - return nil, fmt.Errorf("getting go.work file handle: %w", err) - } - pos, err := pw.Mapper.Pos(position) - if err != nil { - return nil, fmt.Errorf("computing cursor position: %w", err) - } - - // Find the use statement the user is in. - cursor := pos - 1 - use, pathStart, _ := usePath(pw, cursor) - if use == nil { - return &protocol.CompletionList{}, nil - } - completingFrom := use.Path[:cursor-token.Pos(pathStart)] - - // We're going to find the completions of the user input - // (completingFrom) by doing a walk on the innermost directory - // of the given path, and comparing the found paths to make sure - // that they match the component of the path after the - // innermost directory. - // - // We'll maintain two paths when doing this: pathPrefixSlash - // is essentially the path the user typed in, and pathPrefixAbs - // is the path made absolute from the go.work directory. - - pathPrefixSlash := completingFrom - pathPrefixAbs := filepath.FromSlash(pathPrefixSlash) - if !filepath.IsAbs(pathPrefixAbs) { - pathPrefixAbs = filepath.Join(filepath.Dir(pw.URI.Filename()), pathPrefixAbs) - } - - // pathPrefixDir is the directory that will be walked to find matches. - // If pathPrefixSlash is not explicitly a directory boundary (is either equivalent to "." or - // ends in a separator) we need to examine its parent directory to find sibling files that - // match. - depthBound := 5 - pathPrefixDir, pathPrefixBase := pathPrefixAbs, "" - pathPrefixSlashDir := pathPrefixSlash - if filepath.Clean(pathPrefixSlash) != "." && !strings.HasSuffix(pathPrefixSlash, "/") { - depthBound++ - pathPrefixDir, pathPrefixBase = filepath.Split(pathPrefixAbs) - pathPrefixSlashDir = dirNonClean(pathPrefixSlash) - } - - var completions []string - // Stop traversing deeper once we've hit 10k files to try to stay generally under 100ms. - const numSeenBound = 10000 - var numSeen int - stopWalking := errors.New("hit numSeenBound") - err = filepath.Walk(pathPrefixDir, func(wpath string, info os.FileInfo, err error) error { - if numSeen > numSeenBound { - // Stop traversing if we hit bound. - return stopWalking - } - numSeen++ - - // rel is the path relative to pathPrefixDir. - // Make sure that it has pathPrefixBase as a prefix - // otherwise it won't match the beginning of the - // base component of the path the user typed in. - rel := strings.TrimPrefix(wpath[len(pathPrefixDir):], string(filepath.Separator)) - if info.IsDir() && wpath != pathPrefixDir && !strings.HasPrefix(rel, pathPrefixBase) { - return filepath.SkipDir - } - - // Check for a match (a module directory). - if filepath.Base(rel) == "go.mod" { - relDir := strings.TrimSuffix(dirNonClean(rel), string(os.PathSeparator)) - completionPath := join(pathPrefixSlashDir, filepath.ToSlash(relDir)) - - if !strings.HasPrefix(completionPath, completingFrom) { - return nil - } - if strings.HasSuffix(completionPath, "/") { - // Don't suggest paths that end in "/". This happens - // when the input is a path that ends in "/" and - // the completion is empty. - return nil - } - completion := completionPath[len(completingFrom):] - if completingFrom == "" && !strings.HasPrefix(completion, "./") { - // Bias towards "./" prefixes. - completion = join(".", completion) - } - - completions = append(completions, completion) - } - - if depth := strings.Count(rel, string(filepath.Separator)); depth >= depthBound { - return filepath.SkipDir - } - return nil - }) - if err != nil && !errors.Is(err, stopWalking) { - return nil, fmt.Errorf("walking to find completions: %w", err) - } - - sort.Strings(completions) - - var items []protocol.CompletionItem - for _, c := range completions { - items = append(items, protocol.CompletionItem{ - Label: c, - InsertText: c, - }) - } - return &protocol.CompletionList{Items: items}, nil -} - -// dirNonClean is filepath.Dir, without the Clean at the end. -func dirNonClean(path string) string { - vol := filepath.VolumeName(path) - i := len(path) - 1 - for i >= len(vol) && !os.IsPathSeparator(path[i]) { - i-- - } - return path[len(vol) : i+1] -} - -func join(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return strings.TrimSuffix(a, "/") + "/" + b -} diff --git a/internal/lsp/work/diagnostics.go b/internal/lsp/work/diagnostics.go deleted file mode 100644 index e583e60fd75..00000000000 --- a/internal/lsp/work/diagnostics.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package work - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/debug/tag" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) { - ctx, done := event.Start(ctx, "work.Diagnostics", tag.Snapshot.Of(snapshot.ID())) - defer done() - - reports := map[source.VersionedFileIdentity][]*source.Diagnostic{} - uri := snapshot.WorkFile() - if uri == "" { - return nil, nil - } - fh, err := snapshot.GetVersionedFile(ctx, uri) - if err != nil { - return nil, err - } - reports[fh.VersionedFileIdentity()] = []*source.Diagnostic{} - diagnostics, err := DiagnosticsForWork(ctx, snapshot, fh) - if err != nil { - return nil, err - } - for _, d := range diagnostics { - fh, err := snapshot.GetVersionedFile(ctx, d.URI) - if err != nil { - return nil, err - } - reports[fh.VersionedFileIdentity()] = append(reports[fh.VersionedFileIdentity()], d) - } - - return reports, nil -} - -func DiagnosticsForWork(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) { - pw, err := snapshot.ParseWork(ctx, fh) - if err != nil { - if pw == nil || len(pw.ParseErrors) == 0 { - return nil, err - } - return pw.ParseErrors, nil - } - - // Add diagnostic if a directory does not contain a module. - var diagnostics []*source.Diagnostic - for _, use := range pw.File.Use { - rng, err := source.LineToRange(pw.Mapper, fh.URI(), use.Syntax.Start, use.Syntax.End) - if err != nil { - return nil, err - } - - modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use)) - if err != nil { - return nil, err - } - if _, err := modfh.Read(); err != nil && os.IsNotExist(err) { - diagnostics = append(diagnostics, &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.UnknownError, // Do we need a new source for this? - Message: fmt.Sprintf("directory %v does not contain a module", use.Path), - }) - } - } - return diagnostics, nil -} - -func modFileURI(pw *source.ParsedWorkFile, use *modfile.Use) span.URI { - workdir := filepath.Dir(pw.URI.Filename()) - - modroot := filepath.FromSlash(use.Path) - if !filepath.IsAbs(modroot) { - modroot = filepath.Join(workdir, modroot) - } - - return span.URIFromPath(filepath.Join(modroot, "go.mod")) -} diff --git a/internal/lsp/work/format.go b/internal/lsp/work/format.go deleted file mode 100644 index 35b804a73b5..00000000000 --- a/internal/lsp/work/format.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package work - -import ( - "context" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) { - ctx, done := event.Start(ctx, "work.Format") - defer done() - - pw, err := snapshot.ParseWork(ctx, fh) - if err != nil { - return nil, err - } - formatted := modfile.Format(pw.File.Syntax) - // Calculate the edits to be made due to the change. - diff, err := snapshot.View().Options().ComputeEdits(fh.URI(), string(pw.Mapper.Content), string(formatted)) - if err != nil { - return nil, err - } - return source.ToProtocolEdits(pw.Mapper, diff) -} diff --git a/internal/lsp/work/hover.go b/internal/lsp/work/hover.go deleted file mode 100644 index 8f7822d5b4b..00000000000 --- a/internal/lsp/work/hover.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package work - -import ( - "bytes" - "context" - "fmt" - "go/token" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) { - // We only provide hover information for the view's go.work file. - if fh.URI() != snapshot.WorkFile() { - return nil, nil - } - - ctx, done := event.Start(ctx, "work.Hover") - defer done() - - // Get the position of the cursor. - pw, err := snapshot.ParseWork(ctx, fh) - if err != nil { - return nil, fmt.Errorf("getting go.work file handle: %w", err) - } - pos, err := pw.Mapper.Pos(position) - if err != nil { - return nil, fmt.Errorf("computing cursor position: %w", err) - } - - // Confirm that the cursor is inside a use statement, and then find - // the position of the use statement's directory path. - use, pathStart, pathEnd := usePath(pw, pos) - - // The cursor position is not on a use statement. - if use == nil { - return nil, nil - } - - // Get the mod file denoted by the use. - modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use)) - if err != nil { - return nil, fmt.Errorf("getting modfile handle: %w", err) - } - pm, err := snapshot.ParseMod(ctx, modfh) - if err != nil { - return nil, fmt.Errorf("getting modfile handle: %w", err) - } - mod := pm.File.Module.Mod - - // Get the range to highlight for the hover. - rng, err := source.ByteOffsetsToRange(pw.Mapper, fh.URI(), pathStart, pathEnd) - if err != nil { - return nil, err - } - options := snapshot.View().Options() - return &protocol.Hover{ - Contents: protocol.MarkupContent{ - Kind: options.PreferredContentFormat, - Value: mod.Path, - }, - Range: rng, - }, nil -} - -func usePath(pw *source.ParsedWorkFile, pos token.Pos) (use *modfile.Use, pathStart, pathEnd int) { - for _, u := range pw.File.Use { - path := []byte(u.Path) - s, e := u.Syntax.Start.Byte, u.Syntax.End.Byte - i := bytes.Index(pw.Mapper.Content[s:e], path) - if i == -1 { - // This should not happen. - continue - } - // Shift the start position to the location of the - // module directory within the use statement. - pathStart, pathEnd = s+i, s+i+len(path) - if token.Pos(pathStart) <= pos && pos <= token.Pos(pathEnd) { - return u, pathStart, pathEnd - } - } - return nil, 0, 0 -} diff --git a/internal/lsp/workspace.go b/internal/lsp/workspace.go deleted file mode 100644 index a1f837e2309..00000000000 --- a/internal/lsp/workspace.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - "fmt" - - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/span" -) - -func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error { - event := params.Event - for _, folder := range event.Removed { - view := s.session.View(folder.Name) - if view != nil { - view.Shutdown(ctx) - } else { - return fmt.Errorf("view %s for %v not found", folder.Name, folder.URI) - } - } - return s.addFolders(ctx, event.Added) -} - -func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) { - s.stateMu.Lock() - state := s.state - s.stateMu.Unlock() - if state < serverInitialized { - return nil, func() {}, fmt.Errorf("addView called before server initialized") - } - options := s.session.Options().Clone() - if err := s.fetchConfig(ctx, name, uri, options); err != nil { - return nil, func() {}, err - } - _, snapshot, release, err := s.session.NewView(ctx, name, uri, options) - return snapshot, release, err -} - -func (s *Server) didChangeConfiguration(ctx context.Context, _ *protocol.DidChangeConfigurationParams) error { - // Apply any changes to the session-level settings. - options := s.session.Options().Clone() - semanticTokensRegistered := options.SemanticTokens - if err := s.fetchConfig(ctx, "", "", options); err != nil { - return err - } - s.session.SetOptions(options) - - // Go through each view, getting and updating its configuration. - for _, view := range s.session.Views() { - options := s.session.Options().Clone() - if err := s.fetchConfig(ctx, view.Name(), view.Folder(), options); err != nil { - return err - } - view, err := view.SetOptions(ctx, options) - if err != nil { - return err - } - go func() { - snapshot, release := view.Snapshot(ctx) - defer release() - s.diagnoseDetached(snapshot) - }() - } - - registration := semanticTokenRegistration(options.SemanticTypes, options.SemanticMods) - // Update any session-specific registrations or unregistrations. - if !semanticTokensRegistered && options.SemanticTokens { - if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ - Registrations: []protocol.Registration{registration}, - }); err != nil { - return err - } - } else if semanticTokensRegistered && !options.SemanticTokens { - if err := s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{ - Unregisterations: []protocol.Unregistration{ - { - ID: registration.ID, - Method: registration.Method, - }, - }, - }); err != nil { - return err - } - } - return nil -} - -func semanticTokenRegistration(tokenTypes, tokenModifiers []string) protocol.Registration { - return protocol.Registration{ - ID: "textDocument/semanticTokens", - Method: "textDocument/semanticTokens", - RegisterOptions: &protocol.SemanticTokensOptions{ - Legend: protocol.SemanticTokensLegend{ - // TODO(pjw): trim these to what we use (and an unused one - // at position 0 of TokTypes, to catch typos) - TokenTypes: tokenTypes, - TokenModifiers: tokenModifiers, - }, - Full: true, - Range: true, - }, - } -} diff --git a/internal/lsp/workspace_symbol.go b/internal/lsp/workspace_symbol.go deleted file mode 100644 index 20c5763ab73..00000000000 --- a/internal/lsp/workspace_symbol.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lsp - -import ( - "context" - - "golang.org/x/tools/internal/event" - "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" -) - -func (s *Server) symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) { - ctx, done := event.Start(ctx, "lsp.Server.symbol") - defer done() - - views := s.session.Views() - matcher := s.session.Options().SymbolMatcher - style := s.session.Options().SymbolStyle - return source.WorkspaceSymbols(ctx, matcher, style, views, params.Query) -} diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 89f79c68b7d..e56af3bb45b 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -2,150 +2,88 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package memoize supports memoizing the return values of functions with -// idempotent results that are expensive to compute. +// Package memoize defines a "promise" abstraction that enables +// memoization of the result of calling an expensive but idempotent +// function. // -// To use this package, build a store and use it to acquire handles with the -// Bind method. +// Call p = NewPromise(f) to obtain a promise for the future result of +// calling f(), and call p.Get() to obtain that result. All calls to +// p.Get return the result of a single call of f(). +// Get blocks if the function has not finished (or started). +// +// A Store is a map of arbitrary keys to promises. Use Store.Promise +// to create a promise in the store. All calls to Handle(k) return the +// same promise as long as it is in the store. These promises are +// reference-counted and must be explicitly released. Once the last +// reference is released, the promise is removed from the store. package memoize import ( "context" - "flag" "fmt" "reflect" + "runtime/trace" "sync" "sync/atomic" "golang.org/x/tools/internal/xcontext" ) -var ( - panicOnDestroyed = flag.Bool("memoize_panic_on_destroyed", false, - "Panic when a destroyed generation is read rather than returning an error. "+ - "Panicking may make it easier to debug lifetime errors, especially when "+ - "used with GOTRACEBACK=crash to see all running goroutines.") -) - -// Store binds keys to functions, returning handles that can be used to access -// the functions results. -type Store struct { - mu sync.Mutex - // handles is the set of values stored. - handles map[interface{}]*Handle - - // generations is the set of generations live in this store. - generations map[*Generation]struct{} -} - -// Generation creates a new Generation associated with s. Destroy must be -// called on the returned Generation once it is no longer in use. name is -// for debugging purposes only. -func (s *Store) Generation(name string) *Generation { - s.mu.Lock() - defer s.mu.Unlock() - if s.handles == nil { - s.handles = map[interface{}]*Handle{} - s.generations = map[*Generation]struct{}{} - } - g := &Generation{store: s, name: name} - s.generations[g] = struct{}{} - return g -} - -// A Generation is a logical point in time of the cache life-cycle. Cache -// entries associated with a Generation will not be removed until the -// Generation is destroyed. -type Generation struct { - // destroyed is 1 after the generation is destroyed. Atomic. - destroyed uint32 - store *Store - name string - // destroyedBy describes the caller that togged destroyed from 0 to 1. - destroyedBy string - // wg tracks the reference count of this generation. - wg sync.WaitGroup -} - -// Destroy waits for all operations referencing g to complete, then removes -// all references to g from cache entries. Cache entries that no longer -// reference any non-destroyed generation are removed. Destroy must be called -// exactly once for each generation, and destroyedBy describes the caller. -func (g *Generation) Destroy(destroyedBy string) { - g.wg.Wait() - - prevDestroyedBy := g.destroyedBy - g.destroyedBy = destroyedBy - if ok := atomic.CompareAndSwapUint32(&g.destroyed, 0, 1); !ok { - panic("Destroy on generation " + g.name + " already destroyed by " + prevDestroyedBy) - } - - g.store.mu.Lock() - defer g.store.mu.Unlock() - for k, e := range g.store.handles { - e.mu.Lock() - if _, ok := e.generations[g]; ok { - delete(e.generations, g) // delete even if it's dead, in case of dangling references to the entry. - if len(e.generations) == 0 { - delete(g.store.handles, k) - e.state = stateDestroyed - if e.cleanup != nil && e.value != nil { - e.cleanup(e.value) - } - } - } - e.mu.Unlock() - } - delete(g.store.generations, g) -} +// Function is the type of a function that can be memoized. +// +// If the arg is a RefCounted, its Acquire/Release operations are called. +// +// The argument must not materially affect the result of the function +// in ways that are not captured by the promise's key, since if +// Promise.Get is called twice concurrently, with the same (implicit) +// key but different arguments, the Function is called only once but +// its result must be suitable for both callers. +// +// The main purpose of the argument is to avoid the Function closure +// needing to retain large objects (in practice: the snapshot) in +// memory that can be supplied at call time by any caller. +type Function func(ctx context.Context, arg interface{}) interface{} -// Acquire creates a new reference to g, and returns a func to release that -// reference. -func (g *Generation) Acquire() func() { - destroyed := atomic.LoadUint32(&g.destroyed) - if destroyed != 0 { - panic("acquire on generation " + g.name + " destroyed by " + g.destroyedBy) - } - g.wg.Add(1) - return g.wg.Done +// A RefCounted is a value whose functional lifetime is determined by +// reference counting. +// +// Its Acquire method is called before the Function is invoked, and +// the corresponding release is called when the Function returns. +// Usually both events happen within a single call to Get, so Get +// would be fine with a "borrowed" reference, but if the context is +// cancelled, Get may return before the Function is complete, causing +// the argument to escape, and potential premature destruction of the +// value. For a reference-counted type, this requires a pair of +// increment/decrement operations to extend its life. +type RefCounted interface { + // Acquire prevents the value from being destroyed until the + // returned function is called. + Acquire() func() } -// Arg is a marker interface that can be embedded to indicate a type is -// intended for use as a Function argument. -type Arg interface{ memoizeArg() } - -// Function is the type for functions that can be memoized. -// The result must be a pointer. -type Function func(ctx context.Context, arg Arg) interface{} +// A Promise represents the future result of a call to a function. +type Promise struct { + debug string // for observability -type state int - -const ( - stateIdle = iota - stateRunning - stateCompleted - stateDestroyed -) + // refcount is the reference count in the containing Store, used by + // Store.Promise. It is guarded by Store.promisesMu on the containing Store. + refcount int32 -// Handle is returned from a store when a key is bound to a function. -// It is then used to access the results of that function. -// -// A Handle starts out in idle state, waiting for something to demand its -// evaluation. It then transitions into running state. While it's running, -// waiters tracks the number of Get calls waiting for a result, and the done -// channel is used to notify waiters of the next state transition. Once the -// evaluation finishes, value is set, state changes to completed, and done -// is closed, unblocking waiters. Alternatively, as Get calls are cancelled, -// they decrement waiters. If it drops to zero, the inner context is cancelled, -// computation is abandoned, and state resets to idle to start the process over -// again. -type Handle struct { - key interface{} - mu sync.Mutex - - // generations is the set of generations in which this handle is valid. - generations map[*Generation]struct{} + mu sync.Mutex + // A Promise starts out IDLE, waiting for something to demand + // its evaluation. It then transitions into RUNNING state. + // + // While RUNNING, waiters tracks the number of Get calls + // waiting for a result, and the done channel is used to + // notify waiters of the next state transition. Once + // evaluation finishes, value is set, state changes to + // COMPLETED, and done is closed, unblocking waiters. + // + // Alternatively, as Get calls are cancelled, they decrement + // waiters. If it drops to zero, the inner context is + // cancelled, computation is abandoned, and state resets to + // IDLE to start the process over again. state state // done is set in running state, and closed when exiting it. done chan struct{} @@ -157,230 +95,241 @@ type Handle struct { function Function // value is set in completed state. value interface{} - // cleanup, if non-nil, is used to perform any necessary clean-up on values - // produced by function. - cleanup func(interface{}) } -// Bind returns a handle for the given key and function. -// -// Each call to bind will return the same handle if it is already bound. Bind -// will always return a valid handle, creating one if needed. Each key can -// only have one handle at any given time. The value will be held at least -// until the associated generation is destroyed. Bind does not cause the value -// to be generated. +// NewPromise returns a promise for the future result of calling the +// specified function. // -// If cleanup is non-nil, it will be called on any non-nil values produced by -// function when they are no longer referenced. -func (g *Generation) Bind(key interface{}, function Function, cleanup func(interface{})) *Handle { - // panic early if the function is nil - // it would panic later anyway, but in a way that was much harder to debug +// The debug string is used to classify promises in logs and metrics. +// It should be drawn from a small set. +func NewPromise(debug string, function Function) *Promise { if function == nil { - panic("the function passed to bind must not be nil") - } - if atomic.LoadUint32(&g.destroyed) != 0 { - panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy) - } - g.store.mu.Lock() - defer g.store.mu.Unlock() - h, ok := g.store.handles[key] - if !ok { - h := &Handle{ - key: key, - function: function, - generations: map[*Generation]struct{}{g: {}}, - cleanup: cleanup, - } - g.store.handles[key] = h - return h - } - h.mu.Lock() - defer h.mu.Unlock() - if _, ok := h.generations[g]; !ok { - h.generations[g] = struct{}{} - } - return h -} - -// Stats returns the number of each type of value in the store. -func (s *Store) Stats() map[reflect.Type]int { - s.mu.Lock() - defer s.mu.Unlock() - - result := map[reflect.Type]int{} - for k := range s.handles { - result[reflect.TypeOf(k)]++ + panic("nil function") } - return result -} - -// DebugOnlyIterate iterates through all live cache entries and calls f on them. -// It should only be used for debugging purposes. -func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { - s.mu.Lock() - defer s.mu.Unlock() - - for k, e := range s.handles { - var v interface{} - e.mu.Lock() - if e.state == stateCompleted { - v = e.value - } - e.mu.Unlock() - if v == nil { - continue - } - f(k, v) + return &Promise{ + debug: debug, + function: function, } } -func (g *Generation) Inherit(hs ...*Handle) { - for _, h := range hs { - if atomic.LoadUint32(&g.destroyed) != 0 { - panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy) - } +type state int - h.mu.Lock() - if h.state == stateDestroyed { - panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name)) - } - h.generations[g] = struct{}{} - h.mu.Unlock() - } -} +const ( + stateIdle = iota // newly constructed, or last waiter was cancelled + stateRunning // start was called and not cancelled + stateCompleted // function call ran to completion +) -// Cached returns the value associated with a handle. +// Cached returns the value associated with a promise. // // It will never cause the value to be generated. // It will return the cached value, if present. -func (h *Handle) Cached(g *Generation) interface{} { - h.mu.Lock() - defer h.mu.Unlock() - if _, ok := h.generations[g]; !ok { - return nil - } - if h.state == stateCompleted { - return h.value +func (p *Promise) Cached() interface{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.state == stateCompleted { + return p.value } return nil } -// Get returns the value associated with a handle. +// Get returns the value associated with a promise. +// +// All calls to Promise.Get on a given promise return the +// same result but the function is called (to completion) at most once. // // If the value is not yet ready, the underlying function will be invoked. -// If ctx is cancelled, Get returns nil. -func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { - release := g.Acquire() - defer release() - +// +// If ctx is cancelled, Get returns (nil, Canceled). +// If all concurrent calls to Get are cancelled, the context provided +// to the function is cancelled. A later call to Get may attempt to +// call the function again. +func (p *Promise) Get(ctx context.Context, arg interface{}) (interface{}, error) { if ctx.Err() != nil { return nil, ctx.Err() } - h.mu.Lock() - if _, ok := h.generations[g]; !ok { - h.mu.Unlock() - - err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name) - if *panicOnDestroyed && ctx.Err() != nil { - panic(err) - } - return nil, err - } - switch h.state { + p.mu.Lock() + switch p.state { case stateIdle: - return h.run(ctx, g, arg) + return p.run(ctx, arg) case stateRunning: - return h.wait(ctx) + return p.wait(ctx) case stateCompleted: - defer h.mu.Unlock() - return h.value, nil - case stateDestroyed: - h.mu.Unlock() - err := fmt.Errorf("Get on destroyed entry %#v (type %T) in generation %v", h.key, h.key, g.name) - if *panicOnDestroyed { - panic(err) - } - return nil, err + defer p.mu.Unlock() + return p.value, nil default: panic("unknown state") } } -// run starts h.function and returns the result. h.mu must be locked. -func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { +// run starts p.function and returns the result. p.mu must be locked. +func (p *Promise) run(ctx context.Context, arg interface{}) (interface{}, error) { childCtx, cancel := context.WithCancel(xcontext.Detach(ctx)) - h.cancel = cancel - h.state = stateRunning - h.done = make(chan struct{}) - function := h.function // Read under the lock + p.cancel = cancel + p.state = stateRunning + p.done = make(chan struct{}) + function := p.function // Read under the lock + + // Make sure that the argument isn't destroyed while we're running in it. + release := func() {} + if rc, ok := arg.(RefCounted); ok { + release = rc.Acquire() + } - // Make sure that the generation isn't destroyed while we're running in it. - release := g.Acquire() go func() { - defer release() - // Just in case the function does something expensive without checking - // the context, double-check we're still alive. - if childCtx.Err() != nil { - return - } - v := function(childCtx, arg) - if childCtx.Err() != nil { - // It's possible that v was computed despite the context cancellation. In - // this case we should ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) + trace.WithRegion(childCtx, fmt.Sprintf("Promise.run %s", p.debug), func() { + defer release() + // Just in case the function does something expensive without checking + // the context, double-check we're still alive. + if childCtx.Err() != nil { + return + } + v := function(childCtx, arg) + if childCtx.Err() != nil { + return } - return - } - h.mu.Lock() - defer h.mu.Unlock() - // It's theoretically possible that the handle has been cancelled out - // of the run that started us, and then started running again since we - // checked childCtx above. Even so, that should be harmless, since each - // run should produce the same results. - if h.state != stateRunning { - // v will never be used, so ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) + p.mu.Lock() + defer p.mu.Unlock() + // It's theoretically possible that the promise has been cancelled out + // of the run that started us, and then started running again since we + // checked childCtx above. Even so, that should be harmless, since each + // run should produce the same results. + if p.state != stateRunning { + return } - return - } - // At this point v will be cleaned up whenever h is destroyed. - h.value = v - h.function = nil - h.state = stateCompleted - close(h.done) + + p.value = v + p.function = nil // aid GC + p.state = stateCompleted + close(p.done) + }) }() - return h.wait(ctx) + return p.wait(ctx) } -// wait waits for the value to be computed, or ctx to be cancelled. h.mu must be locked. -func (h *Handle) wait(ctx context.Context) (interface{}, error) { - h.waiters++ - done := h.done - h.mu.Unlock() +// wait waits for the value to be computed, or ctx to be cancelled. p.mu must be locked. +func (p *Promise) wait(ctx context.Context) (interface{}, error) { + p.waiters++ + done := p.done + p.mu.Unlock() select { case <-done: - h.mu.Lock() - defer h.mu.Unlock() - if h.state == stateCompleted { - return h.value, nil + p.mu.Lock() + defer p.mu.Unlock() + if p.state == stateCompleted { + return p.value, nil } return nil, nil case <-ctx.Done(): - h.mu.Lock() - defer h.mu.Unlock() - h.waiters-- - if h.waiters == 0 && h.state == stateRunning { - h.cancel() - close(h.done) - h.state = stateIdle - h.done = nil - h.cancel = nil + p.mu.Lock() + defer p.mu.Unlock() + p.waiters-- + if p.waiters == 0 && p.state == stateRunning { + p.cancel() + close(p.done) + p.state = stateIdle + p.done = nil + p.cancel = nil } return nil, ctx.Err() } } + +// An EvictionPolicy controls the eviction behavior of keys in a Store when +// they no longer have any references. +type EvictionPolicy int + +const ( + // ImmediatelyEvict evicts keys as soon as they no longer have references. + ImmediatelyEvict EvictionPolicy = iota + + // NeverEvict does not evict keys. + NeverEvict +) + +// A Store maps arbitrary keys to reference-counted promises. +// +// The zero value is a valid Store, though a store may also be created via +// NewStore if a custom EvictionPolicy is required. +type Store struct { + evictionPolicy EvictionPolicy + + promisesMu sync.Mutex + promises map[interface{}]*Promise +} + +// NewStore creates a new store with the given eviction policy. +func NewStore(policy EvictionPolicy) *Store { + return &Store{evictionPolicy: policy} +} + +// Promise returns a reference-counted promise for the future result of +// calling the specified function. +// +// Calls to Promise with the same key return the same promise, incrementing its +// reference count. The caller must call the returned function to decrement +// the promise's reference count when it is no longer needed. The returned +// function must not be called more than once. +// +// Once the last reference has been released, the promise is removed from the +// store. +func (store *Store) Promise(key interface{}, function Function) (*Promise, func()) { + store.promisesMu.Lock() + p, ok := store.promises[key] + if !ok { + p = NewPromise(reflect.TypeOf(key).String(), function) + if store.promises == nil { + store.promises = map[interface{}]*Promise{} + } + store.promises[key] = p + } + p.refcount++ + store.promisesMu.Unlock() + + var released int32 + release := func() { + if !atomic.CompareAndSwapInt32(&released, 0, 1) { + panic("release called more than once") + } + store.promisesMu.Lock() + + p.refcount-- + if p.refcount == 0 && store.evictionPolicy != NeverEvict { + // Inv: if p.refcount > 0, then store.promises[key] == p. + delete(store.promises, key) + } + store.promisesMu.Unlock() + } + + return p, release +} + +// Stats returns the number of each type of key in the store. +func (s *Store) Stats() map[reflect.Type]int { + result := map[reflect.Type]int{} + + s.promisesMu.Lock() + defer s.promisesMu.Unlock() + + for k := range s.promises { + result[reflect.TypeOf(k)]++ + } + return result +} + +// DebugOnlyIterate iterates through the store and, for each completed +// promise, calls f(k, v) for the map key k and function result v. It +// should only be used for debugging purposes. +func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { + s.promisesMu.Lock() + defer s.promisesMu.Unlock() + + for k, p := range s.promises { + if v := p.Cached(); v != nil { + f(k, v) + } + } +} diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index f05966b4614..c54572d59ca 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -6,102 +6,161 @@ package memoize_test import ( "context" - "strings" + "sync" "testing" + "time" "golang.org/x/tools/internal/memoize" ) func TestGet(t *testing.T) { - s := &memoize.Store{} - g := s.Generation("x") + var store memoize.Store evaled := 0 - h := g.Bind("key", func(context.Context, memoize.Arg) interface{} { + h, release := store.Promise("key", func(context.Context, interface{}) interface{} { evaled++ return "res" - }, nil) - expectGet(t, h, g, "res") - expectGet(t, h, g, "res") + }) + defer release() + expectGet(t, h, "res") + expectGet(t, h, "res") if evaled != 1 { t.Errorf("got %v calls to function, wanted 1", evaled) } } -func expectGet(t *testing.T, h *memoize.Handle, g *memoize.Generation, wantV interface{}) { +func expectGet(t *testing.T, h *memoize.Promise, wantV interface{}) { t.Helper() - gotV, gotErr := h.Get(context.Background(), g, nil) + gotV, gotErr := h.Get(context.Background(), nil) if gotV != wantV || gotErr != nil { t.Fatalf("Get() = %v, %v, wanted %v, nil", gotV, gotErr, wantV) } } -func expectGetError(t *testing.T, h *memoize.Handle, g *memoize.Generation, substr string) { - gotV, gotErr := h.Get(context.Background(), g, nil) - if gotErr == nil || !strings.Contains(gotErr.Error(), substr) { - t.Fatalf("Get() = %v, %v, wanted err %q", gotV, gotErr, substr) +func TestNewPromise(t *testing.T) { + calls := 0 + f := func(context.Context, interface{}) interface{} { + calls++ + return calls } -} -func TestGenerations(t *testing.T) { - s := &memoize.Store{} - // Evaluate key in g1. - g1 := s.Generation("g1") - h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }, nil) - expectGet(t, h1, g1, "res") - - // Get key in g2. It should inherit the value from g1. - g2 := s.Generation("g2") - h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} { - t.Fatal("h2 should not need evaluation") - return "error" - }, nil) - expectGet(t, h2, g2, "res") - - // With g1 destroyed, g2 should still work. - g1.Destroy("TestGenerations") - expectGet(t, h2, g2, "res") - - // With all generations destroyed, key should be re-evaluated. - g2.Destroy("TestGenerations") - g3 := s.Generation("g3") - h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }, nil) - expectGet(t, h3, g3, "new res") + // All calls to Get on the same promise return the same result. + p1 := memoize.NewPromise("debug", f) + expectGet(t, p1, 1) + expectGet(t, p1, 1) + + // A new promise calls the function again. + p2 := memoize.NewPromise("debug", f) + expectGet(t, p2, 2) + expectGet(t, p2, 2) + + // The original promise is unchanged. + expectGet(t, p1, 1) } -func TestCleanup(t *testing.T) { - s := &memoize.Store{} - g1 := s.Generation("g1") +func TestStoredPromiseRefCounting(t *testing.T) { + var store memoize.Store v1 := false v2 := false - cleanup := func(v interface{}) { - *(v.(*bool)) = true - } - h1 := g1.Bind("key1", func(context.Context, memoize.Arg) interface{} { + p1, release1 := store.Promise("key1", func(context.Context, interface{}) interface{} { return &v1 - }, nil) - h2 := g1.Bind("key2", func(context.Context, memoize.Arg) interface{} { + }) + p2, release2 := store.Promise("key2", func(context.Context, interface{}) interface{} { return &v2 - }, cleanup) - expectGet(t, h1, g1, &v1) - expectGet(t, h2, g1, &v2) - g2 := s.Generation("g2") - g2.Inherit(h1, h2) - - g1.Destroy("TestCleanup") - expectGet(t, h1, g2, &v1) - expectGet(t, h2, g2, &v2) - for k, v := range map[string]*bool{"key1": &v1, "key2": &v2} { - if got, want := *v, false; got != want { - t.Errorf("after destroying g1, bound value %q is cleaned up", k) - } + }) + expectGet(t, p1, &v1) + expectGet(t, p2, &v2) + + expectGet(t, p1, &v1) + expectGet(t, p2, &v2) + + p2Copy, release2Copy := store.Promise("key2", func(context.Context, interface{}) interface{} { + return &v1 + }) + if p2 != p2Copy { + t.Error("Promise returned a new value while old is not destroyed yet") } - g2.Destroy("TestCleanup") + expectGet(t, p2Copy, &v2) + + release2() + if got, want := v2, false; got != want { + t.Errorf("after destroying first v2 ref, got %v, want %v", got, want) + } + release2Copy() if got, want := v1, false; got != want { - t.Error("after destroying g2, v1 is cleaned up") + t.Errorf("after destroying v2, got %v, want %v", got, want) + } + release1() + + p2Copy, release2Copy = store.Promise("key2", func(context.Context, interface{}) interface{} { + return &v2 + }) + if p2 == p2Copy { + t.Error("Promise returned previously destroyed value") + } + release2Copy() +} + +func TestPromiseDestroyedWhileRunning(t *testing.T) { + // Test that calls to Promise.Get return even if the promise is destroyed while running. + + var store memoize.Store + c := make(chan int) + + var v int + h, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} { + <-c + <-c + if err := ctx.Err(); err != nil { + t.Errorf("ctx.Err() = %v, want nil", err) + } + return &v + }) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // arbitrary timeout; may be removed if it causes flakes + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + var got interface{} + var err error + go func() { + got, err = h.Get(ctx, nil) + wg.Done() + }() + + c <- 0 // send once to enter the promise function + release() // release before the promise function returns + c <- 0 // let the promise function proceed + + wg.Wait() + + if err != nil { + t.Errorf("Get() failed: %v", err) + } + if got != &v { + t.Errorf("Get() = %v, want %v", got, v) } - if got, want := v2, true; got != want { - t.Error("after destroying g2, v2 is not cleaned up") +} + +func TestDoubleReleasePanics(t *testing.T) { + var store memoize.Store + _, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} { return 0 }) + + panicked := false + + func() { + defer func() { + if recover() != nil { + panicked = true + } + }() + release() + release() + }() + + if !panicked { + t.Errorf("calling release() twice did not panic") } } diff --git a/internal/persistent/map.go b/internal/persistent/map.go new file mode 100644 index 00000000000..b29cfe41943 --- /dev/null +++ b/internal/persistent/map.go @@ -0,0 +1,311 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The persistent package defines various persistent data structures; +// that is, data structures that can be efficiently copied and modified +// in sublinear time. +package persistent + +import ( + "fmt" + "math/rand" + "strings" + "sync/atomic" +) + +// Implementation details: +// * Each value is reference counted by nodes which hold it. +// * Each node is reference counted by its parent nodes. +// * Each map is considered a top-level parent node from reference counting perspective. +// * Each change does always effectivelly produce a new top level node. +// +// Functions which operate directly with nodes do have a notation in form of +// `foo(arg1:+n1, arg2:+n2) (ret1:+n3)`. +// Each argument is followed by a delta change to its reference counter. +// In case if no change is expected, the delta will be `-0`. + +// Map is an associative mapping from keys to values, both represented as +// interface{}. Key comparison and iteration order is defined by a +// client-provided function that implements a strict weak order. +// +// Maps can be Cloned in constant time. +// Get, Store, and Delete operations are done on average in logarithmic time. +// Maps can be Updated in O(m log(n/m)) time for maps of size n and m, where m < n. +// +// Values are reference counted, and a client-supplied release function +// is called when a value is no longer referenced by a map or any clone. +// +// Internally the implementation is based on a randomized persistent treap: +// https://en.wikipedia.org/wiki/Treap. +type Map struct { + less func(a, b interface{}) bool + root *mapNode +} + +func (m *Map) String() string { + var buf strings.Builder + buf.WriteByte('{') + var sep string + m.Range(func(k, v interface{}) { + fmt.Fprintf(&buf, "%s%v: %v", sep, k, v) + sep = ", " + }) + buf.WriteByte('}') + return buf.String() +} + +type mapNode struct { + key interface{} + value *refValue + weight uint64 + refCount int32 + left, right *mapNode +} + +type refValue struct { + refCount int32 + value interface{} + release func(key, value interface{}) +} + +func newNodeWithRef(key, value interface{}, release func(key, value interface{})) *mapNode { + return &mapNode{ + key: key, + value: &refValue{ + value: value, + release: release, + refCount: 1, + }, + refCount: 1, + weight: rand.Uint64(), + } +} + +func (node *mapNode) shallowCloneWithRef() *mapNode { + atomic.AddInt32(&node.value.refCount, 1) + return &mapNode{ + key: node.key, + value: node.value, + weight: node.weight, + refCount: 1, + } +} + +func (node *mapNode) incref() *mapNode { + if node != nil { + atomic.AddInt32(&node.refCount, 1) + } + return node +} + +func (node *mapNode) decref() { + if node == nil { + return + } + if atomic.AddInt32(&node.refCount, -1) == 0 { + if atomic.AddInt32(&node.value.refCount, -1) == 0 { + if node.value.release != nil { + node.value.release(node.key, node.value.value) + } + node.value.value = nil + node.value.release = nil + } + node.left.decref() + node.right.decref() + } +} + +// NewMap returns a new map whose keys are ordered by the given comparison +// function (a strict weak order). It is the responsibility of the caller to +// Destroy it at later time. +func NewMap(less func(a, b interface{}) bool) *Map { + return &Map{ + less: less, + } +} + +// Clone returns a copy of the given map. It is a responsibility of the caller +// to Destroy it at later time. +func (pm *Map) Clone() *Map { + return &Map{ + less: pm.less, + root: pm.root.incref(), + } +} + +// Destroy destroys the map. +// +// After Destroy, the Map should not be used again. +func (pm *Map) Destroy() { + // The implementation of these two functions is the same, + // but their intent is different. + pm.Clear() +} + +// Clear removes all entries from the map. +func (pm *Map) Clear() { + pm.root.decref() + pm.root = nil +} + +// Range calls f sequentially in ascending key order for all entries in the map. +func (pm *Map) Range(f func(key, value interface{})) { + pm.root.forEach(f) +} + +func (node *mapNode) forEach(f func(key, value interface{})) { + if node == nil { + return + } + node.left.forEach(f) + f(node.key, node.value.value) + node.right.forEach(f) +} + +// Get returns the map value associated with the specified key, or nil if no entry +// is present. The ok result indicates whether an entry was found in the map. +func (pm *Map) Get(key interface{}) (interface{}, bool) { + node := pm.root + for node != nil { + if pm.less(key, node.key) { + node = node.left + } else if pm.less(node.key, key) { + node = node.right + } else { + return node.value.value, true + } + } + return nil, false +} + +// SetAll updates the map with key/value pairs from the other map, overwriting existing keys. +// It is equivalent to calling Set for each entry in the other map but is more efficient. +// Both maps must have the same comparison function, otherwise behavior is undefined. +func (pm *Map) SetAll(other *Map) { + root := pm.root + pm.root = union(root, other.root, pm.less, true) + root.decref() +} + +// Set updates the value associated with the specified key. +// If release is non-nil, it will be called with entry's key and value once the +// key is no longer contained in the map or any clone. +func (pm *Map) Set(key, value interface{}, release func(key, value interface{})) { + first := pm.root + second := newNodeWithRef(key, value, release) + pm.root = union(first, second, pm.less, true) + first.decref() + second.decref() +} + +// union returns a new tree which is a union of first and second one. +// If overwrite is set to true, second one would override a value for any duplicate keys. +// +// union(first:-0, second:-0) (result:+1) +// Union borrows both subtrees without affecting their refcount and returns a +// new reference that the caller is expected to call decref. +func union(first, second *mapNode, less func(a, b interface{}) bool, overwrite bool) *mapNode { + if first == nil { + return second.incref() + } + if second == nil { + return first.incref() + } + + if first.weight < second.weight { + second, first, overwrite = first, second, !overwrite + } + + left, mid, right := split(second, first.key, less, false) + var result *mapNode + if overwrite && mid != nil { + result = mid.shallowCloneWithRef() + } else { + result = first.shallowCloneWithRef() + } + result.weight = first.weight + result.left = union(first.left, left, less, overwrite) + result.right = union(first.right, right, less, overwrite) + left.decref() + mid.decref() + right.decref() + return result +} + +// split the tree midway by the key into three different ones. +// Return three new trees: left with all nodes with smaller than key, mid with +// the node matching the key, right with all nodes larger than key. +// If there are no nodes in one of trees, return nil instead of it. +// If requireMid is set (such as during deletion), then all return arguments +// are nil if mid is not found. +// +// split(n:-0) (left:+1, mid:+1, right:+1) +// Split borrows n without affecting its refcount, and returns three +// new references that that caller is expected to call decref. +func split(n *mapNode, key interface{}, less func(a, b interface{}) bool, requireMid bool) (left, mid, right *mapNode) { + if n == nil { + return nil, nil, nil + } + + if less(n.key, key) { + left, mid, right := split(n.right, key, less, requireMid) + if requireMid && mid == nil { + return nil, nil, nil + } + newN := n.shallowCloneWithRef() + newN.left = n.left.incref() + newN.right = left + return newN, mid, right + } else if less(key, n.key) { + left, mid, right := split(n.left, key, less, requireMid) + if requireMid && mid == nil { + return nil, nil, nil + } + newN := n.shallowCloneWithRef() + newN.left = right + newN.right = n.right.incref() + return left, mid, newN + } + mid = n.shallowCloneWithRef() + return n.left.incref(), mid, n.right.incref() +} + +// Delete deletes the value for a key. +func (pm *Map) Delete(key interface{}) { + root := pm.root + left, mid, right := split(root, key, pm.less, true) + if mid == nil { + return + } + pm.root = merge(left, right) + left.decref() + mid.decref() + right.decref() + root.decref() +} + +// merge two trees while preserving the weight invariant. +// All nodes in left must have smaller keys than any node in right. +// +// merge(left:-0, right:-0) (result:+1) +// Merge borrows its arguments without affecting their refcount +// and returns a new reference that the caller is expected to call decref. +func merge(left, right *mapNode) *mapNode { + switch { + case left == nil: + return right.incref() + case right == nil: + return left.incref() + case left.weight > right.weight: + root := left.shallowCloneWithRef() + root.left = left.left.incref() + root.right = merge(left.right, right) + return root + default: + root := right.shallowCloneWithRef() + root.left = merge(left, right.left) + root.right = right.right.incref() + return root + } +} diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go new file mode 100644 index 00000000000..9f89a1d300c --- /dev/null +++ b/internal/persistent/map_test.go @@ -0,0 +1,355 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package persistent + +import ( + "fmt" + "math/rand" + "reflect" + "sync/atomic" + "testing" +) + +type mapEntry struct { + key int + value int +} + +type validatedMap struct { + impl *Map + expected map[int]int // current key-value mapping. + deleted map[mapEntry]int // maps deleted entries to their clock time of last deletion + seen map[mapEntry]int // maps seen entries to their clock time of last insertion + clock int +} + +func TestSimpleMap(t *testing.T) { + deletedEntries := make(map[mapEntry]int) + seenEntries := make(map[mapEntry]int) + + m1 := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + + m3 := m1.clone() + validateRef(t, m1, m3) + m3.set(t, 8, 8) + validateRef(t, m1, m3) + m3.destroy() + + assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{ + {key: 8, value: 8}: {}, + }) + + validateRef(t, m1) + m1.set(t, 1, 1) + validateRef(t, m1) + m1.set(t, 2, 2) + validateRef(t, m1) + m1.set(t, 3, 3) + validateRef(t, m1) + m1.remove(t, 2) + validateRef(t, m1) + m1.set(t, 6, 6) + validateRef(t, m1) + + assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{ + {key: 2, value: 2}: {}, + {key: 8, value: 8}: {}, + }) + + m2 := m1.clone() + validateRef(t, m1, m2) + m1.set(t, 6, 60) + validateRef(t, m1, m2) + m1.remove(t, 1) + validateRef(t, m1, m2) + + gotAllocs := int(testing.AllocsPerRun(10, func() { + m1.impl.Delete(100) + m1.impl.Delete(1) + })) + wantAllocs := 0 + if gotAllocs != wantAllocs { + t.Errorf("wanted %d allocs, got %d", wantAllocs, gotAllocs) + } + + for i := 10; i < 14; i++ { + m1.set(t, i, i) + validateRef(t, m1, m2) + } + + m1.set(t, 10, 100) + validateRef(t, m1, m2) + + m1.remove(t, 12) + validateRef(t, m1, m2) + + m2.set(t, 4, 4) + validateRef(t, m1, m2) + m2.set(t, 5, 5) + validateRef(t, m1, m2) + + m1.destroy() + + assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{ + {key: 2, value: 2}: {}, + {key: 6, value: 60}: {}, + {key: 8, value: 8}: {}, + {key: 10, value: 10}: {}, + {key: 10, value: 100}: {}, + {key: 11, value: 11}: {}, + {key: 12, value: 12}: {}, + {key: 13, value: 13}: {}, + }) + + m2.set(t, 7, 7) + validateRef(t, m2) + + m2.destroy() + + assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries)) +} + +func TestRandomMap(t *testing.T) { + deletedEntries := make(map[mapEntry]int) + seenEntries := make(map[mapEntry]int) + + m := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + + keys := make([]int, 0, 1000) + for i := 0; i < 1000; i++ { + key := rand.Intn(10000) + m.set(t, key, key) + keys = append(keys, key) + + if i%10 == 1 { + index := rand.Intn(len(keys)) + last := len(keys) - 1 + key = keys[index] + keys[index], keys[last] = keys[last], keys[index] + keys = keys[:last] + + m.remove(t, key) + } + } + + m.destroy() + assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries)) +} + +func entrySet(m map[mapEntry]int) map[mapEntry]struct{} { + set := make(map[mapEntry]struct{}) + for k := range m { + set[k] = struct{}{} + } + return set +} + +func TestUpdate(t *testing.T) { + deletedEntries := make(map[mapEntry]int) + seenEntries := make(map[mapEntry]int) + + m1 := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + m2 := m1.clone() + + m1.set(t, 1, 1) + m1.set(t, 2, 2) + m2.set(t, 2, 20) + m2.set(t, 3, 3) + m1.setAll(t, m2) + + m1.destroy() + m2.destroy() + assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries)) +} + +func validateRef(t *testing.T, maps ...*validatedMap) { + t.Helper() + + actualCountByEntry := make(map[mapEntry]int32) + nodesByEntry := make(map[mapEntry]map[*mapNode]struct{}) + expectedCountByEntry := make(map[mapEntry]int32) + for i, m := range maps { + dfsRef(m.impl.root, actualCountByEntry, nodesByEntry) + dumpMap(t, fmt.Sprintf("%d:", i), m.impl.root) + } + for entry, nodes := range nodesByEntry { + expectedCountByEntry[entry] = int32(len(nodes)) + } + assertSameMap(t, expectedCountByEntry, actualCountByEntry) +} + +func dfsRef(node *mapNode, countByEntry map[mapEntry]int32, nodesByEntry map[mapEntry]map[*mapNode]struct{}) { + if node == nil { + return + } + + entry := mapEntry{key: node.key.(int), value: node.value.value.(int)} + countByEntry[entry] = atomic.LoadInt32(&node.value.refCount) + + nodes, ok := nodesByEntry[entry] + if !ok { + nodes = make(map[*mapNode]struct{}) + nodesByEntry[entry] = nodes + } + nodes[node] = struct{}{} + + dfsRef(node.left, countByEntry, nodesByEntry) + dfsRef(node.right, countByEntry, nodesByEntry) +} + +func dumpMap(t *testing.T, prefix string, n *mapNode) { + if n == nil { + t.Logf("%s nil", prefix) + return + } + t.Logf("%s {key: %v, value: %v (ref: %v), ref: %v, weight: %v}", prefix, n.key, n.value.value, n.value.refCount, n.refCount, n.weight) + dumpMap(t, prefix+"l", n.left) + dumpMap(t, prefix+"r", n.right) +} + +func (vm *validatedMap) validate(t *testing.T) { + t.Helper() + + validateNode(t, vm.impl.root, vm.impl.less) + + // Note: this validation may not make sense if maps were constructed using + // SetAll operations. If this proves to be problematic, remove the clock, + // deleted, and seen fields. + for key, value := range vm.expected { + entry := mapEntry{key: key, value: value} + if deleteAt := vm.deleted[entry]; deleteAt > vm.seen[entry] { + t.Fatalf("entry is deleted prematurely, key: %d, value: %d", key, value) + } + } + + actualMap := make(map[int]int, len(vm.expected)) + vm.impl.Range(func(key, value interface{}) { + if other, ok := actualMap[key.(int)]; ok { + t.Fatalf("key is present twice, key: %d, first value: %d, second value: %d", key, value, other) + } + actualMap[key.(int)] = value.(int) + }) + + assertSameMap(t, actualMap, vm.expected) +} + +func validateNode(t *testing.T, node *mapNode, less func(a, b interface{}) bool) { + if node == nil { + return + } + + if node.left != nil { + if less(node.key, node.left.key) { + t.Fatalf("left child has larger key: %v vs %v", node.left.key, node.key) + } + if node.left.weight > node.weight { + t.Fatalf("left child has larger weight: %v vs %v", node.left.weight, node.weight) + } + } + + if node.right != nil { + if less(node.right.key, node.key) { + t.Fatalf("right child has smaller key: %v vs %v", node.right.key, node.key) + } + if node.right.weight > node.weight { + t.Fatalf("right child has larger weight: %v vs %v", node.right.weight, node.weight) + } + } + + validateNode(t, node.left, less) + validateNode(t, node.right, less) +} + +func (vm *validatedMap) setAll(t *testing.T, other *validatedMap) { + vm.impl.SetAll(other.impl) + + // Note: this is buggy because we are not updating vm.clock, vm.deleted, or + // vm.seen. + for key, value := range other.expected { + vm.expected[key] = value + } + vm.validate(t) +} + +func (vm *validatedMap) set(t *testing.T, key, value int) { + entry := mapEntry{key: key, value: value} + + vm.clock++ + vm.seen[entry] = vm.clock + + vm.impl.Set(key, value, func(deletedKey, deletedValue interface{}) { + if deletedKey != key || deletedValue != value { + t.Fatalf("unexpected passed in deleted entry: %v/%v, expected: %v/%v", deletedKey, deletedValue, key, value) + } + // Not safe if closure shared between two validatedMaps. + vm.deleted[entry] = vm.clock + }) + vm.expected[key] = value + vm.validate(t) + + gotValue, ok := vm.impl.Get(key) + if !ok || gotValue != value { + t.Fatalf("unexpected get result after insertion, key: %v, expected: %v, got: %v (%v)", key, value, gotValue, ok) + } +} + +func (vm *validatedMap) remove(t *testing.T, key int) { + vm.clock++ + vm.impl.Delete(key) + delete(vm.expected, key) + vm.validate(t) + + gotValue, ok := vm.impl.Get(key) + if ok { + t.Fatalf("unexpected get result after removal, key: %v, got: %v", key, gotValue) + } +} + +func (vm *validatedMap) clone() *validatedMap { + expected := make(map[int]int, len(vm.expected)) + for key, value := range vm.expected { + expected[key] = value + } + + return &validatedMap{ + impl: vm.impl.Clone(), + expected: expected, + deleted: vm.deleted, + seen: vm.seen, + } +} + +func (vm *validatedMap) destroy() { + vm.impl.Destroy() +} + +func assertSameMap(t *testing.T, map1, map2 interface{}) { + t.Helper() + + if !reflect.DeepEqual(map1, map2) { + t.Fatalf("different maps:\n%v\nvs\n%v", map1, map2) + } +} diff --git a/internal/pkgbits/codes.go b/internal/pkgbits/codes.go new file mode 100644 index 00000000000..f0cabde96eb --- /dev/null +++ b/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/internal/pkgbits/decoder.go b/internal/pkgbits/decoder.go new file mode 100644 index 00000000000..b92e8e6eb32 --- /dev/null +++ b/internal/pkgbits/decoder.go @@ -0,0 +1,517 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + r.Data.Reset(pr.DataIdx(k, idx)) + r.Sync(SyncRelocs) + l := r.Len() + if cap(pr.scratchRelocEnt) >= l { + r.Relocs = pr.scratchRelocEnt[:l] + pr.scratchRelocEnt = nil + } else { + r.Relocs = make([]RelocEnt, l) + } + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := readUvarint(&r.Data) + r.checkErr(err) + return x +} + +// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. +// This avoids the interface conversion and thus has better escape properties, +// which flows up the stack. +func readUvarint(r *strings.Reader) (uint64, error) { + var x uint64 + var s uint + for i := 0; i < binary.MaxVarintLen64; i++ { + b, err := r.ReadByte() + if err != nil { + if i > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return x, err + } + if b < 0x80 { + if i == binary.MaxVarintLen64-1 && b > 1 { + return x, overflow + } + return x | uint64(b)<> 1) + if ux&1 != 0 { + x = ^x + } + return x +} + +func (r *Decoder) rawReloc(k RelocKind, idx int) Index { + e := r.Relocs[idx] + assert(e.Kind == k) + return e.Idx +} + +// Sync decodes a sync marker from the element bitstream and asserts +// that it matches the expected marker. +// +// If r.common.sync is false, then Sync is a no-op. +func (r *Decoder) Sync(mWant SyncMarker) { + if !r.common.sync { + return + } + + pos, _ := r.Data.Seek(0, io.SeekCurrent) + mHave := SyncMarker(r.rawUvarint()) + writerPCs := make([]int, r.rawUvarint()) + for i := range writerPCs { + writerPCs[i] = int(r.rawUvarint()) + } + + if mHave == mWant { + return + } + + // There's some tension here between printing: + // + // (1) full file paths that tools can recognize (e.g., so emacs + // hyperlinks the "file:line" text for easy navigation), or + // + // (2) short file paths that are easier for humans to read (e.g., by + // omitting redundant or irrelevant details, so it's easier to + // focus on the useful bits that remain). + // + // The current formatting favors the former, as it seems more + // helpful in practice. But perhaps the formatting could be improved + // to better address both concerns. For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. + + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/internal/pkgbits/doc.go b/internal/pkgbits/doc.go new file mode 100644 index 00000000000..c8a2796b5e4 --- /dev/null +++ b/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/internal/pkgbits/encoder.go b/internal/pkgbits/encoder.go new file mode 100644 index 00000000000..6482617a4fc --- /dev/null +++ b/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. + copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. + var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/internal/pkgbits/flags.go b/internal/pkgbits/flags.go new file mode 100644 index 00000000000..654222745fa --- /dev/null +++ b/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/internal/pkgbits/frames_go1.go b/internal/pkgbits/frames_go1.go new file mode 100644 index 00000000000..5294f6a63ed --- /dev/null +++ b/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/internal/pkgbits/frames_go17.go b/internal/pkgbits/frames_go17.go new file mode 100644 index 00000000000..2324ae7adfe --- /dev/null +++ b/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/internal/pkgbits/reloc.go b/internal/pkgbits/reloc.go new file mode 100644 index 00000000000..fcdfb97ca99 --- /dev/null +++ b/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/internal/pkgbits/support.go b/internal/pkgbits/support.go new file mode 100644 index 00000000000..ad26d3b28ca --- /dev/null +++ b/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/internal/pkgbits/sync.go b/internal/pkgbits/sync.go new file mode 100644 index 00000000000..5bd51ef7170 --- /dev/null +++ b/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/internal/pkgbits/syncmarker_string.go b/internal/pkgbits/syncmarker_string.go new file mode 100644 index 00000000000..4a5b0ca5f2f --- /dev/null +++ b/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/internal/robustio/copyfiles.go b/internal/robustio/copyfiles.go new file mode 100644 index 00000000000..6e9f4b3875f --- /dev/null +++ b/internal/robustio/copyfiles.go @@ -0,0 +1,117 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +// The copyfiles script copies the contents of the internal cmd/go robustio +// package to the current directory, with adjustments to make it build. +// +// NOTE: In retrospect this script got out of hand, as we have to perform +// various operations on the package to get it to build at old Go versions. If +// in the future it proves to be flaky, delete it and just copy code manually. +package main + +import ( + "bytes" + "go/build/constraint" + "go/scanner" + "go/token" + "log" + "os" + "path/filepath" + "runtime" + "strings" +) + +func main() { + dir := filepath.Join(runtime.GOROOT(), "src", "cmd", "go", "internal", "robustio") + + entries, err := os.ReadDir(dir) + if err != nil { + log.Fatalf("reading the robustio dir: %v", err) + } + + // Collect file content so that we can validate before copying. + fileContent := make(map[string][]byte) + windowsImport := []byte("\t\"internal/syscall/windows\"\n") + foundWindowsImport := false + for _, entry := range entries { + if strings.HasSuffix(entry.Name(), ".go") { + pth := filepath.Join(dir, entry.Name()) + content, err := os.ReadFile(pth) + if err != nil { + log.Fatalf("reading %q: %v", entry.Name(), err) + } + + // Replace the use of internal/syscall/windows.ERROR_SHARING_VIOLATION + // with a local constant. + if entry.Name() == "robustio_windows.go" && bytes.Contains(content, windowsImport) { + foundWindowsImport = true + content = bytes.Replace(content, windowsImport, nil, 1) + content = bytes.Replace(content, []byte("windows.ERROR_SHARING_VIOLATION"), []byte("ERROR_SHARING_VIOLATION"), -1) + } + + // Replace os.ReadFile with ioutil.ReadFile (for 1.15 and older). We + // attempt to match calls (via the '('), to avoid matching mentions of + // os.ReadFile in comments. + // + // TODO(rfindley): once we (shortly!) no longer support 1.15, remove + // this and break the build. + if bytes.Contains(content, []byte("os.ReadFile(")) { + content = bytes.Replace(content, []byte("\"os\""), []byte("\"io/ioutil\"\n\t\"os\""), 1) + content = bytes.Replace(content, []byte("os.ReadFile("), []byte("ioutil.ReadFile("), -1) + } + + // Add +build constraints, for 1.16. + content = addPlusBuildConstraints(content) + + fileContent[entry.Name()] = content + } + } + + if !foundWindowsImport { + log.Fatal("missing expected import of internal/syscall/windows in robustio_windows.go") + } + + for name, content := range fileContent { + if err := os.WriteFile(name, content, 0644); err != nil { + log.Fatalf("writing %q: %v", name, err) + } + } +} + +// addPlusBuildConstraints splices in +build constraints for go:build +// constraints encountered in the source. +// +// Gopls still builds at Go 1.16, which requires +build constraints. +func addPlusBuildConstraints(src []byte) []byte { + var s scanner.Scanner + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + s.Init(file, src, nil /* no error handler */, scanner.ScanComments) + + result := make([]byte, 0, len(src)) + lastInsertion := 0 + for { + pos, tok, lit := s.Scan() + if tok == token.EOF { + break + } + if tok == token.COMMENT { + if c, err := constraint.Parse(lit); err == nil { + plusBuild, err := constraint.PlusBuildLines(c) + if err != nil { + log.Fatalf("computing +build constraint for %q: %v", lit, err) + } + insertAt := file.Offset(pos) + len(lit) + result = append(result, src[lastInsertion:insertAt]...) + result = append(result, []byte("\n"+strings.Join(plusBuild, "\n"))...) + lastInsertion = insertAt + } + } + } + result = append(result, src[lastInsertion:]...) + return result +} diff --git a/internal/robustio/gopls_windows.go b/internal/robustio/gopls_windows.go new file mode 100644 index 00000000000..949f2781619 --- /dev/null +++ b/internal/robustio/gopls_windows.go @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import "syscall" + +// The robustio package is copied from cmd/go/internal/robustio, a package used +// by the go command to retry known flaky operations on certain operating systems. + +//go:generate go run copyfiles.go + +// Since the gopls module cannot access internal/syscall/windows, copy a +// necessary constant. +const ERROR_SHARING_VIOLATION syscall.Errno = 32 diff --git a/internal/robustio/robustio.go b/internal/robustio/robustio.go new file mode 100644 index 00000000000..0a559fc9b80 --- /dev/null +++ b/internal/robustio/robustio.go @@ -0,0 +1,69 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package robustio wraps I/O functions that are prone to failure on Windows, +// transparently retrying errors up to an arbitrary timeout. +// +// Errors are classified heuristically and retries are bounded, so the functions +// in this package do not completely eliminate spurious errors. However, they do +// significantly reduce the rate of failure in practice. +// +// If so, the error will likely wrap one of: +// The functions in this package do not completely eliminate spurious errors, +// but substantially reduce their rate of occurrence in practice. +package robustio + +import "time" + +// Rename is like os.Rename, but on Windows retries errors that may occur if the +// file is concurrently read or overwritten. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func Rename(oldpath, newpath string) error { + return rename(oldpath, newpath) +} + +// ReadFile is like os.ReadFile, but on Windows retries errors that may +// occur if the file is concurrently replaced. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. +func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} + +// A FileID uniquely identifies a file in the file system. +// +// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file +// names denote the same file. +// A FileID is comparable, and thus suitable for use as a map key. +type FileID struct { + device, inode uint64 +} + +// GetFileID returns the file system's identifier for the file, and its +// modification time. +// Like os.Stat, it reads through symbolic links. +func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) } diff --git a/internal/robustio/robustio_darwin.go b/internal/robustio/robustio_darwin.go new file mode 100644 index 00000000000..99fd8ebc2ff --- /dev/null +++ b/internal/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/internal/robustio/robustio_flaky.go b/internal/robustio/robustio_flaky.go new file mode 100644 index 00000000000..c6f99724468 --- /dev/null +++ b/internal/robustio/robustio_flaky.go @@ -0,0 +1,93 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin +// +build windows darwin + +package robustio + +import ( + "errors" + "io/ioutil" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = ioutil.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/internal/robustio/robustio_other.go b/internal/robustio/robustio_other.go new file mode 100644 index 00000000000..c11dbf9f14b --- /dev/null +++ b/internal/robustio/robustio_other.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin +// +build !windows,!darwin + +package robustio + +import ( + "io/ioutil" + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/internal/robustio/robustio_plan9.go b/internal/robustio/robustio_plan9.go new file mode 100644 index 00000000000..9fa4cacb5a3 --- /dev/null +++ b/internal/robustio/robustio_plan9.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + dir := fi.Sys().(*syscall.Dir) + return FileID{ + device: uint64(dir.Type)<<32 | uint64(dir.Dev), + inode: dir.Qid.Path, + }, fi.ModTime(), nil +} diff --git a/internal/robustio/robustio_posix.go b/internal/robustio/robustio_posix.go new file mode 100644 index 00000000000..8aa13d02786 --- /dev/null +++ b/internal/robustio/robustio_posix.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +// TODO(adonovan): use 'unix' tag when go1.19 can be assumed. + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + stat := fi.Sys().(*syscall.Stat_t) + return FileID{ + device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux) + inode: stat.Ino, + }, fi.ModTime(), nil +} diff --git a/internal/robustio/robustio_test.go b/internal/robustio/robustio_test.go new file mode 100644 index 00000000000..10244e21d69 --- /dev/null +++ b/internal/robustio/robustio_test.go @@ -0,0 +1,88 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio_test + +import ( + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "golang.org/x/tools/internal/robustio" +) + +func TestFileInfo(t *testing.T) { + // A nonexistent file has no ID. + nonexistent := filepath.Join(t.TempDir(), "nonexistent") + if _, _, err := robustio.GetFileID(nonexistent); err == nil { + t.Fatalf("GetFileID(nonexistent) succeeded unexpectedly") + } + + // A regular file has an ID. + real := filepath.Join(t.TempDir(), "real") + if err := os.WriteFile(real, nil, 0644); err != nil { + t.Fatalf("can't create regular file: %v", err) + } + realID, realMtime, err := robustio.GetFileID(real) + if err != nil { + t.Fatalf("can't get ID of regular file: %v", err) + } + + // Sleep so that we get a new mtime for subsequent writes. + time.Sleep(2 * time.Second) + + // A second regular file has a different ID. + real2 := filepath.Join(t.TempDir(), "real2") + if err := os.WriteFile(real2, nil, 0644); err != nil { + t.Fatalf("can't create second regular file: %v", err) + } + real2ID, real2Mtime, err := robustio.GetFileID(real2) + if err != nil { + t.Fatalf("can't get ID of second regular file: %v", err) + } + if realID == real2ID { + t.Errorf("realID %+v == real2ID %+v", realID, real2ID) + } + if realMtime.Equal(real2Mtime) { + t.Errorf("realMtime %v == real2Mtime %v", realMtime, real2Mtime) + } + + // A symbolic link has the same ID as its target. + if runtime.GOOS != "plan9" { + symlink := filepath.Join(t.TempDir(), "symlink") + if err := os.Symlink(real, symlink); err != nil { + t.Fatalf("can't create symbolic link: %v", err) + } + symlinkID, symlinkMtime, err := robustio.GetFileID(symlink) + if err != nil { + t.Fatalf("can't get ID of symbolic link: %v", err) + } + if realID != symlinkID { + t.Errorf("realID %+v != symlinkID %+v", realID, symlinkID) + } + if !realMtime.Equal(symlinkMtime) { + t.Errorf("realMtime %v != symlinkMtime %v", realMtime, symlinkMtime) + } + } + + // Two hard-linked files have the same ID. + if runtime.GOOS != "plan9" && runtime.GOOS != "android" { + hardlink := filepath.Join(t.TempDir(), "hardlink") + if err := os.Link(real, hardlink); err != nil { + t.Fatal(err) + } + hardlinkID, hardlinkMtime, err := robustio.GetFileID(hardlink) + if err != nil { + t.Fatalf("can't get ID of hard link: %v", err) + } + if realID != hardlinkID { + t.Errorf("realID %+v != hardlinkID %+v", realID, hardlinkID) + } + if !realMtime.Equal(hardlinkMtime) { + t.Errorf("realMtime %v != hardlinkMtime %v", realMtime, hardlinkMtime) + } + } +} diff --git a/internal/robustio/robustio_windows.go b/internal/robustio/robustio_windows.go new file mode 100644 index 00000000000..616c32883d6 --- /dev/null +++ b/internal/robustio/robustio_windows.go @@ -0,0 +1,51 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" + "time" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} + +// Note: it may be convenient to have this helper return fs.FileInfo, but +// implementing this is actually quite involved on Windows. Since we only +// currently use mtime, keep it simple. +func getFileID(filename string) (FileID, time.Time, error) { + filename16, err := syscall.UTF16PtrFromString(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0) + if err != nil { + return FileID{}, time.Time{}, err + } + defer syscall.CloseHandle(h) + var i syscall.ByHandleFileInformation + if err := syscall.GetFileInformationByHandle(h, &i); err != nil { + return FileID{}, time.Time{}, err + } + mtime := time.Unix(0, i.LastWriteTime.Nanoseconds()) + return FileID{ + device: uint64(i.VolumeSerialNumber), + inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow), + }, mtime, nil +} diff --git a/internal/span/parse.go b/internal/span/parse.go deleted file mode 100644 index c4cec16e90d..00000000000 --- a/internal/span/parse.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "path/filepath" - "strconv" - "strings" - "unicode/utf8" -) - -// Parse returns the location represented by the input. -// Only file paths are accepted, not URIs. -// The returned span will be normalized, and thus if printed may produce a -// different string. -func Parse(input string) Span { - return ParseInDir(input, ".") -} - -// ParseInDir is like Parse, but interprets paths relative to wd. -func ParseInDir(input, wd string) Span { - uri := func(path string) URI { - if !filepath.IsAbs(path) { - path = filepath.Join(wd, path) - } - return URIFromPath(path) - } - // :0:0#0-0:0#0 - valid := input - var hold, offset int - hadCol := false - suf := rstripSuffix(input) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep == ":" { - valid = suf.remains - hold = suf.num - hadCol = true - suf = rstripSuffix(suf.remains) - } - switch { - case suf.sep == ":": - return New(uri(suf.remains), NewPoint(suf.num, hold, offset), Point{}) - case suf.sep == "-": - // we have a span, fall out of the case to continue - default: - // separator not valid, rewind to either the : or the start - return New(uri(valid), NewPoint(hold, 0, offset), Point{}) - } - // only the span form can get here - // at this point we still don't know what the numbers we have mean - // if have not yet seen a : then we might have either a line or a column depending - // on whether start has a column or not - // we build an end point and will fix it later if needed - end := NewPoint(suf.num, hold, offset) - hold, offset = 0, 0 - suf = rstripSuffix(suf.remains) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep != ":" { - // turns out we don't have a span after all, rewind - return New(uri(valid), end, Point{}) - } - valid = suf.remains - hold = suf.num - suf = rstripSuffix(suf.remains) - if suf.sep != ":" { - // line#offset only - return New(uri(valid), NewPoint(hold, 0, offset), end) - } - // we have a column, so if end only had one number, it is also the column - if !hadCol { - end = NewPoint(suf.num, end.v.Line, end.v.Offset) - } - return New(uri(suf.remains), NewPoint(suf.num, hold, offset), end) -} - -type suffix struct { - remains string - sep string - num int -} - -func rstripSuffix(input string) suffix { - if len(input) == 0 { - return suffix{"", "", -1} - } - remains := input - num := -1 - // first see if we have a number at the end - last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) - if last >= 0 && last < len(remains)-1 { - number, err := strconv.ParseInt(remains[last+1:], 10, 64) - if err == nil { - num = int(number) - remains = remains[:last+1] - } - } - // now see if we have a trailing separator - r, w := utf8.DecodeLastRuneInString(remains) - if r != ':' && r != '#' && r == '#' { - return suffix{input, "", -1} - } - remains = remains[:len(remains)-w] - return suffix{remains, string(r), num} -} diff --git a/internal/span/span.go b/internal/span/span.go deleted file mode 100644 index 502145bbea7..00000000000 --- a/internal/span/span.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package span contains support for representing with positions and ranges in -// text files. -package span - -import ( - "encoding/json" - "fmt" - "go/token" - "path" -) - -// Span represents a source code range in standardized form. -type Span struct { - v span -} - -// Point represents a single point within a file. -// In general this should only be used as part of a Span, as on its own it -// does not carry enough information. -type Point struct { - v point -} - -type span struct { - URI URI `json:"uri"` - Start point `json:"start"` - End point `json:"end"` -} - -type point struct { - Line int `json:"line"` - Column int `json:"column"` - Offset int `json:"offset"` -} - -// Invalid is a span that reports false from IsValid -var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} - -var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} - -func New(uri URI, start Point, end Point) Span { - s := Span{v: span{URI: uri, Start: start.v, End: end.v}} - s.v.clean() - return s -} - -func NewPoint(line, col, offset int) Point { - p := Point{v: point{Line: line, Column: col, Offset: offset}} - p.v.clean() - return p -} - -func Compare(a, b Span) int { - if r := CompareURI(a.URI(), b.URI()); r != 0 { - return r - } - if r := comparePoint(a.v.Start, b.v.Start); r != 0 { - return r - } - return comparePoint(a.v.End, b.v.End) -} - -func ComparePoint(a, b Point) int { - return comparePoint(a.v, b.v) -} - -func comparePoint(a, b point) int { - if !a.hasPosition() { - if a.Offset < b.Offset { - return -1 - } - if a.Offset > b.Offset { - return 1 - } - return 0 - } - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Column < b.Column { - return -1 - } - if a.Column > b.Column { - return 1 - } - return 0 -} - -func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } -func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } -func (s Span) IsValid() bool { return s.v.Start.isValid() } -func (s Span) IsPoint() bool { return s.v.Start == s.v.End } -func (s Span) URI() URI { return s.v.URI } -func (s Span) Start() Point { return Point{s.v.Start} } -func (s Span) End() Point { return Point{s.v.End} } -func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } -func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } - -func (p Point) HasPosition() bool { return p.v.hasPosition() } -func (p Point) HasOffset() bool { return p.v.hasOffset() } -func (p Point) IsValid() bool { return p.v.isValid() } -func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } -func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } -func (p Point) Line() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Line -} -func (p Point) Column() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Column -} -func (p Point) Offset() int { - if !p.v.hasOffset() { - panic(fmt.Errorf("offset not set in %v", p.v)) - } - return p.v.Offset -} - -func (p point) hasPosition() bool { return p.Line > 0 } -func (p point) hasOffset() bool { return p.Offset >= 0 } -func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } -func (p point) isZero() bool { - return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) -} - -func (s *span) clean() { - //this presumes the points are already clean - if !s.End.isValid() || (s.End == point{}) { - s.End = s.Start - } -} - -func (p *point) clean() { - if p.Line < 0 { - p.Line = 0 - } - if p.Column <= 0 { - if p.Line > 0 { - p.Column = 1 - } else { - p.Column = 0 - } - } - if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { - p.Offset = -1 - } -} - -// Format implements fmt.Formatter to print the Location in a standard form. -// The format produced is one that can be read back in using Parse. -func (s Span) Format(f fmt.State, c rune) { - fullForm := f.Flag('+') - preferOffset := f.Flag('#') - // we should always have a uri, simplify if it is file format - //TODO: make sure the end of the uri is unambiguous - uri := string(s.v.URI) - if c == 'f' { - uri = path.Base(uri) - } else if !fullForm { - uri = s.v.URI.Filename() - } - fmt.Fprint(f, uri) - if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { - return - } - // see which bits of start to write - printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) - printLine := s.HasPosition() && (fullForm || !printOffset) - printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) - fmt.Fprint(f, ":") - if printLine { - fmt.Fprintf(f, "%d", s.v.Start.Line) - } - if printColumn { - fmt.Fprintf(f, ":%d", s.v.Start.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.Start.Offset) - } - // start is written, do we need end? - if s.IsPoint() { - return - } - // we don't print the line if it did not change - printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) - fmt.Fprint(f, "-") - if printLine { - fmt.Fprintf(f, "%d", s.v.End.Line) - } - if printColumn { - if printLine { - fmt.Fprint(f, ":") - } - fmt.Fprintf(f, "%d", s.v.End.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.End.Offset) - } -} - -func (s Span) WithPosition(tf *token.File) (Span, error) { - if err := s.update(tf, true, false); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithOffset(tf *token.File) (Span, error) { - if err := s.update(tf, false, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithAll(tf *token.File) (Span, error) { - if err := s.update(tf, true, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s *Span) update(tf *token.File, withPos, withOffset bool) error { - if !s.IsValid() { - return fmt.Errorf("cannot add information to an invalid span") - } - if withPos && !s.HasPosition() { - if err := s.v.Start.updatePosition(tf); err != nil { - return err - } - if s.v.End.Offset == s.v.Start.Offset { - s.v.End = s.v.Start - } else if err := s.v.End.updatePosition(tf); err != nil { - return err - } - } - if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { - if err := s.v.Start.updateOffset(tf); err != nil { - return err - } - if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { - s.v.End.Offset = s.v.Start.Offset - } else if err := s.v.End.updateOffset(tf); err != nil { - return err - } - } - return nil -} - -func (p *point) updatePosition(tf *token.File) error { - line, col, err := ToPosition(tf, p.Offset) - if err != nil { - return err - } - p.Line = line - p.Column = col - return nil -} - -func (p *point) updateOffset(tf *token.File) error { - offset, err := ToOffset(tf, p.Line, p.Column) - if err != nil { - return err - } - p.Offset = offset - return nil -} diff --git a/internal/span/span_test.go b/internal/span/span_test.go deleted file mode 100644 index cff59c3d116..00000000000 --- a/internal/span/span_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span_test - -import ( - "fmt" - "go/token" - "path/filepath" - "strings" - "testing" - - "golang.org/x/tools/internal/span" -) - -var ( - tests = [][]string{ - {"C:/file_a", "C:/file_a", "file:///C:/file_a:1:1#0"}, - {"C:/file_b:1:2", "C:/file_b:#1", "file:///C:/file_b:1:2#1"}, - {"C:/file_c:1000", "C:/file_c:#9990", "file:///C:/file_c:1000:1#9990"}, - {"C:/file_d:14:9", "C:/file_d:#138", "file:///C:/file_d:14:9#138"}, - {"C:/file_e:1:2-7", "C:/file_e:#1-#6", "file:///C:/file_e:1:2#1-1:7#6"}, - {"C:/file_f:500-502", "C:/file_f:#4990-#5010", "file:///C:/file_f:500:1#4990-502:1#5010"}, - {"C:/file_g:3:7-8", "C:/file_g:#26-#27", "file:///C:/file_g:3:7#26-3:8#27"}, - {"C:/file_h:3:7-4:8", "C:/file_h:#26-#37", "file:///C:/file_h:3:7#26-4:8#37"}, - } -) - -func TestFormat(t *testing.T) { - converter := lines(10) - for _, test := range tests { - for ti, text := range test[:2] { - spn := span.Parse(text) - if ti <= 1 { - // we can check %v produces the same as the input - expect := toPath(test[ti]) - if got := fmt.Sprintf("%v", spn); got != expect { - t.Errorf("printing %q got %q expected %q", text, got, expect) - } - } - complete, err := spn.WithAll(converter) - if err != nil { - t.Error(err) - } - for fi, format := range []string{"%v", "%#v", "%+v"} { - expect := toPath(test[fi]) - if got := fmt.Sprintf(format, complete); got != expect { - t.Errorf("printing completed %q as %q got %q expected %q [%+v]", text, format, got, expect, spn) - } - } - } - } -} - -func toPath(value string) string { - if strings.HasPrefix(value, "file://") { - return value - } - return filepath.FromSlash(value) -} - -// lines creates a new tokenConverter for a file with 1000 lines, each width -// bytes wide. -func lines(width int) *token.File { - fset := token.NewFileSet() - f := fset.AddFile("", -1, 1000*width) - var lines []int - for i := 0; i < 1000; i++ { - lines = append(lines, i*width) - } - f.SetLines(lines) - return f -} diff --git a/internal/span/token.go b/internal/span/token.go deleted file mode 100644 index af01d7b8348..00000000000 --- a/internal/span/token.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "go/token" - - "golang.org/x/tools/internal/lsp/bug" -) - -// Range represents a source code range in token.Pos form. -// It also carries the FileSet that produced the positions, so that it is -// self contained. -type Range struct { - Start token.Pos - End token.Pos - - // TokFile may be nil if Start or End is invalid. - // TODO: Eventually we should guarantee that it is non-nil. - TokFile *token.File -} - -// NewRange creates a new Range from a FileSet and two positions. -// To represent a point pass a 0 as the end pos. -func NewRange(fset *token.FileSet, start, end token.Pos) Range { - tf := fset.File(start) - if tf == nil { - bug.Reportf("nil file") - } - return Range{ - Start: start, - End: end, - TokFile: tf, - } -} - -// NewTokenFile returns a token.File for the given file content. -func NewTokenFile(filename string, content []byte) *token.File { - fset := token.NewFileSet() - f := fset.AddFile(filename, -1, len(content)) - f.SetLinesForContent(content) - return f -} - -// IsPoint returns true if the range represents a single point. -func (r Range) IsPoint() bool { - return r.Start == r.End -} - -// Span converts a Range to a Span that represents the Range. -// It will fill in all the members of the Span, calculating the line and column -// information. -func (r Range) Span() (Span, error) { - return FileSpan(r.TokFile, r.TokFile, r.Start, r.End) -} - -// FileSpan returns a span within the file referenced by start and end, using a -// token.File to translate between offsets and positions. -// -// The start and end position must be contained within posFile, though due to -// line directives they may reference positions in another file. If srcFile is -// provided, it is used to map the line:column positions referenced by start -// and end to offsets in the corresponding file. -func FileSpan(posFile, srcFile *token.File, start, end token.Pos) (Span, error) { - if !start.IsValid() { - return Span{}, fmt.Errorf("start pos is not valid") - } - if posFile == nil { - return Span{}, bug.Errorf("missing file association") // should never get here with a nil file - } - var s Span - var err error - var startFilename string - startFilename, s.v.Start.Line, s.v.Start.Column, err = position(posFile, start) - if err != nil { - return Span{}, err - } - s.v.URI = URIFromPath(startFilename) - if end.IsValid() { - var endFilename string - endFilename, s.v.End.Line, s.v.End.Column, err = position(posFile, end) - if err != nil { - return Span{}, err - } - // In the presence of line directives, a single File can have sections from - // multiple file names. - if endFilename != startFilename { - return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) - } - } - s.v.Start.clean() - s.v.End.clean() - s.v.clean() - tf := posFile - if srcFile != nil { - tf = srcFile - } - if startFilename != tf.Name() { - return Span{}, bug.Errorf("must supply Converter for file %q", startFilename) - } - return s.WithOffset(tf) -} - -func position(tf *token.File, pos token.Pos) (string, int, int, error) { - off, err := offset(tf, pos) - if err != nil { - return "", 0, 0, err - } - return positionFromOffset(tf, off) -} - -func positionFromOffset(tf *token.File, offset int) (string, int, int, error) { - if offset > tf.Size() { - return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, tf.Size()) - } - pos := tf.Pos(offset) - p := tf.Position(pos) - // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if - // the file's last character is not a newline. - if offset == tf.Size() { - return p.Filename, p.Line + 1, 1, nil - } - return p.Filename, p.Line, p.Column, nil -} - -// offset is a copy of the Offset function in go/token, but with the adjustment -// that it does not panic on invalid positions. -func offset(tf *token.File, pos token.Pos) (int, error) { - if int(pos) < tf.Base() || int(pos) > tf.Base()+tf.Size() { - return 0, fmt.Errorf("invalid pos: %d not in [%d, %d]", pos, tf.Base(), tf.Base()+tf.Size()) - } - return int(pos) - tf.Base(), nil -} - -// Range converts a Span to a Range that represents the Span for the supplied -// File. -func (s Span) Range(tf *token.File) (Range, error) { - s, err := s.WithOffset(tf) - if err != nil { - return Range{}, err - } - // go/token will panic if the offset is larger than the file's size, - // so check here to avoid panicking. - if s.Start().Offset() > tf.Size() { - return Range{}, bug.Errorf("start offset %v is past the end of the file %v", s.Start(), tf.Size()) - } - if s.End().Offset() > tf.Size() { - return Range{}, bug.Errorf("end offset %v is past the end of the file %v", s.End(), tf.Size()) - } - return Range{ - Start: tf.Pos(s.Start().Offset()), - End: tf.Pos(s.End().Offset()), - TokFile: tf, - }, nil -} - -// ToPosition converts a byte offset in the file corresponding to tf into -// 1-based line and utf-8 column indexes. -func ToPosition(tf *token.File, offset int) (int, int, error) { - _, line, col, err := positionFromOffset(tf, offset) - return line, col, err -} - -// ToOffset converts a 1-base line and utf-8 column index into a byte offset in -// the file corresponding to tf. -func ToOffset(tf *token.File, line, col int) (int, error) { - if line < 0 { - return -1, fmt.Errorf("line is not valid") - } - lineMax := tf.LineCount() + 1 - if line > lineMax { - return -1, fmt.Errorf("line is beyond end of file %v", lineMax) - } else if line == lineMax { - if col > 1 { - return -1, fmt.Errorf("column is beyond end of file") - } - // at the end of the file, allowing for a trailing eol - return tf.Size(), nil - } - pos := tf.LineStart(line) - if !pos.IsValid() { - return -1, fmt.Errorf("line is not in file") - } - // we assume that column is in bytes here, and that the first byte of a - // line is at column 1 - pos += token.Pos(col - 1) - return offset(tf, pos) -} diff --git a/internal/span/token_test.go b/internal/span/token_test.go deleted file mode 100644 index 1e0b53e1244..00000000000 --- a/internal/span/token_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span_test - -import ( - "fmt" - "go/token" - "path" - "testing" - - "golang.org/x/tools/internal/span" -) - -var testdata = []struct { - uri string - content []byte -}{ - {"/a.go", []byte(` -// file a.go -package test -`)}, - {"/b.go", []byte(` -// -// -// file b.go -package test`)}, - {"/c.go", []byte(` -// file c.go -package test`)}, -} - -var tokenTests = []span.Span{ - span.New(span.URIFromPath("/a.go"), span.NewPoint(1, 1, 0), span.Point{}), - span.New(span.URIFromPath("/a.go"), span.NewPoint(3, 7, 20), span.NewPoint(3, 7, 20)), - span.New(span.URIFromPath("/b.go"), span.NewPoint(4, 9, 15), span.NewPoint(4, 13, 19)), - span.New(span.URIFromPath("/c.go"), span.NewPoint(4, 1, 26), span.Point{}), -} - -func TestToken(t *testing.T) { - fset := token.NewFileSet() - files := map[span.URI]*token.File{} - for _, f := range testdata { - file := fset.AddFile(f.uri, -1, len(f.content)) - file.SetLinesForContent(f.content) - files[span.URIFromPath(f.uri)] = file - } - for _, test := range tokenTests { - f := files[test.URI()] - t.Run(path.Base(f.Name()), func(t *testing.T) { - checkToken(t, f, span.New( - test.URI(), - span.NewPoint(test.Start().Line(), test.Start().Column(), 0), - span.NewPoint(test.End().Line(), test.End().Column(), 0), - ), test) - checkToken(t, f, span.New( - test.URI(), - span.NewPoint(0, 0, test.Start().Offset()), - span.NewPoint(0, 0, test.End().Offset()), - ), test) - }) - } -} - -func checkToken(t *testing.T, f *token.File, in, expect span.Span) { - rng, err := in.Range(f) - if err != nil { - t.Error(err) - } - gotLoc, err := rng.Span() - if err != nil { - t.Error(err) - } - expected := fmt.Sprintf("%+v", expect) - got := fmt.Sprintf("%+v", gotLoc) - if expected != got { - t.Errorf("For %v expected %q got %q", in, expected, got) - } -} diff --git a/internal/span/utf16.go b/internal/span/utf16.go deleted file mode 100644 index f4c93a6ead3..00000000000 --- a/internal/span/utf16.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "unicode/utf8" -) - -// ToUTF16Column calculates the utf16 column expressed by the point given the -// supplied file contents. -// This is used to convert from the native (always in bytes) column -// representation and the utf16 counts used by some editors. -func ToUTF16Column(p Point, content []byte) (int, error) { - if !p.HasPosition() { - return -1, fmt.Errorf("ToUTF16Column: point is missing position") - } - if !p.HasOffset() { - return -1, fmt.Errorf("ToUTF16Column: point is missing offset") - } - offset := p.Offset() // 0-based - colZero := p.Column() - 1 // 0-based - if colZero == 0 { - // 0-based column 0, so it must be chr 1 - return 1, nil - } else if colZero < 0 { - return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) - } - // work out the offset at the start of the line using the column - lineOffset := offset - colZero - if lineOffset < 0 || offset > len(content) { - return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) - } - // Use the offset to pick out the line start. - // This cannot panic: offset > len(content) and lineOffset < offset. - start := content[lineOffset:] - - // Now, truncate down to the supplied column. - start = start[:colZero] - - cnt := 0 - for _, r := range string(start) { - cnt++ - if r > 0xffff { - cnt++ - } - } - return cnt + 1, nil // the +1 is for 1-based columns -} - -// FromUTF16Column advances the point by the utf16 character offset given the -// supplied line contents. -// This is used to convert from the utf16 counts used by some editors to the -// native (always in bytes) column representation. -// -// The resulting Point always has an offset. -// -// TODO: it looks like this may incorrectly confer a "position" to the -// resulting Point, when it shouldn't. If p.HasPosition() == false, the -// resulting Point will return p.HasPosition() == true, but have the wrong -// position. -func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { - if !p.HasOffset() { - return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") - } - // if chr is 1 then no adjustment needed - if chr <= 1 { - return p, nil - } - if p.Offset() >= len(content) { - return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) - } - remains := content[p.Offset():] - // scan forward the specified number of characters - for count := 1; count < chr; count++ { - if len(remains) <= 0 { - return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") - } - r, w := utf8.DecodeRune(remains) - if r == '\n' { - // Per the LSP spec: - // - // > If the character value is greater than the line length it - // > defaults back to the line length. - break - } - remains = remains[w:] - if r >= 0x10000 { - // a two point rune - count++ - // if we finished in a two point rune, do not advance past the first - if count >= chr { - break - } - } - p.v.Column += w - p.v.Offset += w - } - return p, nil -} diff --git a/internal/span/utf16_test.go b/internal/span/utf16_test.go deleted file mode 100644 index 1eae7975bb4..00000000000 --- a/internal/span/utf16_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span_test - -import ( - "strings" - "testing" - - "golang.org/x/tools/internal/span" -) - -// The funny character below is 4 bytes long in UTF-8; two UTF-16 code points -var funnyString = []byte("𐐀23\n𐐀45") - -var toUTF16Tests = []struct { - scenario string - input []byte - line int // 1-indexed count - col int // 1-indexed byte position in line - offset int // 0-indexed byte offset into input - resUTF16col int // 1-indexed UTF-16 col number - pre string // everything before the cursor on the line - post string // everything from the cursor onwards - err string // expected error string in call to ToUTF16Column - issue *bool -}{ - { - scenario: "cursor missing content", - input: nil, - err: "ToUTF16Column: point is missing position", - }, - { - scenario: "cursor missing position", - input: funnyString, - line: -1, - col: -1, - err: "ToUTF16Column: point is missing position", - }, - { - scenario: "cursor missing offset", - input: funnyString, - line: 1, - col: 1, - offset: -1, - err: "ToUTF16Column: point is missing offset", - }, - { - scenario: "zero length input; cursor at first col, first line", - input: []byte(""), - line: 1, - col: 1, - offset: 0, - resUTF16col: 1, - }, - { - scenario: "cursor before funny character; first line", - input: funnyString, - line: 1, - col: 1, - offset: 0, - resUTF16col: 1, - pre: "", - post: "𐐀23", - }, - { - scenario: "cursor after funny character; first line", - input: funnyString, - line: 1, - col: 5, // 4 + 1 (1-indexed) - offset: 4, - resUTF16col: 3, // 2 + 1 (1-indexed) - pre: "𐐀", - post: "23", - }, - { - scenario: "cursor after last character on first line", - input: funnyString, - line: 1, - col: 7, // 4 + 1 + 1 + 1 (1-indexed) - offset: 6, // 4 + 1 + 1 - resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed) - pre: "𐐀23", - post: "", - }, - { - scenario: "cursor before funny character; second line", - input: funnyString, - line: 2, - col: 1, - offset: 7, // length of first line - resUTF16col: 1, - pre: "", - post: "𐐀45", - }, - { - scenario: "cursor after funny character; second line", - input: funnyString, - line: 1, - col: 5, // 4 + 1 (1-indexed) - offset: 11, // 7 (length of first line) + 4 - resUTF16col: 3, // 2 + 1 (1-indexed) - pre: "𐐀", - post: "45", - }, - { - scenario: "cursor after last character on second line", - input: funnyString, - line: 2, - col: 7, // 4 + 1 + 1 + 1 (1-indexed) - offset: 13, // 7 (length of first line) + 4 + 1 + 1 - resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed) - pre: "𐐀45", - post: "", - }, - { - scenario: "cursor beyond end of file", - input: funnyString, - line: 2, - col: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed) - offset: 14, // 4 + 1 + 1 + 1 - err: "ToUTF16Column: offsets 7-14 outside file contents (13)", - }, -} - -var fromUTF16Tests = []struct { - scenario string - input []byte - line int // 1-indexed line number (isn't actually used) - offset int // 0-indexed byte offset to beginning of line - utf16col int // 1-indexed UTF-16 col number - resCol int // 1-indexed byte position in line - resOffset int // 0-indexed byte offset into input - pre string // everything before the cursor on the line - post string // everything from the cursor onwards - err string // expected error string in call to ToUTF16Column -}{ - { - scenario: "zero length input; cursor at first col, first line", - input: []byte(""), - line: 1, - offset: 0, - utf16col: 1, - resCol: 1, - resOffset: 0, - pre: "", - post: "", - }, - { - scenario: "missing offset", - input: funnyString, - line: 1, - offset: -1, - err: "FromUTF16Column: point is missing offset", - }, - { - scenario: "cursor before funny character", - input: funnyString, - line: 1, - utf16col: 1, - resCol: 1, - resOffset: 0, - pre: "", - post: "𐐀23", - }, - { - scenario: "cursor after funny character", - input: funnyString, - line: 1, - utf16col: 3, - resCol: 5, - resOffset: 4, - pre: "𐐀", - post: "23", - }, - { - scenario: "cursor after last character on line", - input: funnyString, - line: 1, - utf16col: 5, - resCol: 7, - resOffset: 6, - pre: "𐐀23", - post: "", - }, - { - scenario: "cursor beyond last character on line", - input: funnyString, - line: 1, - offset: 0, - utf16col: 6, - resCol: 7, - resOffset: 6, - pre: "𐐀23", - post: "", - }, - { - scenario: "cursor before funny character; second line", - input: funnyString, - line: 2, - offset: 7, // length of first line - utf16col: 1, - resCol: 1, - resOffset: 7, - pre: "", - post: "𐐀45", - }, - { - scenario: "cursor after funny character; second line", - input: funnyString, - line: 2, - offset: 7, // length of first line - utf16col: 3, // 2 + 1 (1-indexed) - resCol: 5, // 4 + 1 (1-indexed) - resOffset: 11, // 7 (length of first line) + 4 - pre: "𐐀", - post: "45", - }, - { - scenario: "cursor after last character on second line", - input: funnyString, - line: 2, - offset: 7, // length of first line - utf16col: 5, // 2 + 1 + 1 + 1 (1-indexed) - resCol: 7, // 4 + 1 + 1 + 1 (1-indexed) - resOffset: 13, // 7 (length of first line) + 4 + 1 + 1 - pre: "𐐀45", - post: "", - }, - { - scenario: "cursor beyond end of file", - input: funnyString, - line: 2, - offset: 7, - utf16col: 6, // 2 + 1 + 1 + 1 + 1(1-indexed) - resCol: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed) - resOffset: 14, // 7 (length of first line) + 4 + 1 + 1 + 1 - err: "FromUTF16Column: chr goes beyond the content", - }, - { - scenario: "offset beyond end of file", - input: funnyString, - line: 2, - offset: 14, - utf16col: 2, - err: "FromUTF16Column: offset (14) greater than length of content (13)", - }, -} - -func TestToUTF16(t *testing.T) { - for _, e := range toUTF16Tests { - t.Run(e.scenario, func(t *testing.T) { - if e.issue != nil && !*e.issue { - t.Skip("expected to fail") - } - p := span.NewPoint(e.line, e.col, e.offset) - got, err := span.ToUTF16Column(p, e.input) - if err != nil { - if err.Error() != e.err { - t.Fatalf("expected error %v; got %v", e.err, err) - } - return - } - if e.err != "" { - t.Fatalf("unexpected success; wanted %v", e.err) - } - if got != e.resUTF16col { - t.Fatalf("expected result %v; got %v", e.resUTF16col, got) - } - pre, post := getPrePost(e.input, p.Offset()) - if string(pre) != e.pre { - t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre) - } - if string(post) != e.post { - t.Fatalf("expected #%d, post %q; got %q", p.Offset(), e.post, post) - } - }) - } -} - -func TestFromUTF16(t *testing.T) { - for _, e := range fromUTF16Tests { - t.Run(e.scenario, func(t *testing.T) { - p := span.NewPoint(e.line, 1, e.offset) - p, err := span.FromUTF16Column(p, e.utf16col, []byte(e.input)) - if err != nil { - if err.Error() != e.err { - t.Fatalf("expected error %v; got %v", e.err, err) - } - return - } - if e.err != "" { - t.Fatalf("unexpected success; wanted %v", e.err) - } - if p.Column() != e.resCol { - t.Fatalf("expected resulting col %v; got %v", e.resCol, p.Column()) - } - if p.Offset() != e.resOffset { - t.Fatalf("expected resulting offset %v; got %v", e.resOffset, p.Offset()) - } - pre, post := getPrePost(e.input, p.Offset()) - if string(pre) != e.pre { - t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre) - } - if string(post) != e.post { - t.Fatalf("expected #%d post %q; got %q", p.Offset(), e.post, post) - } - }) - } -} - -func getPrePost(content []byte, offset int) (string, string) { - pre, post := string(content)[:offset], string(content)[offset:] - if i := strings.LastIndex(pre, "\n"); i >= 0 { - pre = pre[i+1:] - } - if i := strings.IndexRune(post, '\n'); i >= 0 { - post = post[:i] - } - return pre, post -} diff --git a/internal/testenv/exec.go b/internal/testenv/exec.go new file mode 100644 index 00000000000..f103ad9d82e --- /dev/null +++ b/internal/testenv/exec.go @@ -0,0 +1,149 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testenv + +import ( + "context" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "testing" + "time" +) + +// HasExec reports whether the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +func HasExec() bool { + switch runtime.GOOS { + case "js", "ios": + return false + } + return true +} + +// NeedsExec checks that the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +// If not, NeedsExec calls t.Skip with an explanation. +func NeedsExec(t testing.TB) { + if !HasExec() { + t.Skipf("skipping test: cannot exec subprocess on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +// CommandContext is like exec.CommandContext, but: +// - skips t if the platform does not support os/exec, +// - if supported, sends SIGQUIT instead of SIGKILL in its Cancel function +// - if the test has a deadline, adds a Context timeout and (if supported) WaitDelay +// for an arbitrary grace period before the test's deadline expires, +// - if Cmd has the Cancel field, fails the test if the command is canceled +// due to the test's deadline, and +// - if supported, sets a Cleanup function that verifies that the test did not +// leak a subprocess. +func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd { + t.Helper() + NeedsExec(t) + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if td, ok := Deadline(t); ok { + // Start with a minimum grace period, just long enough to consume the + // output of a reasonable program after it terminates. + gracePeriod = 100 * time.Millisecond + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + scale, err := strconv.Atoi(s) + if err != nil { + t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err) + } + gracePeriod *= time.Duration(scale) + } + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. + testTimeout := time.Until(td) + if gp := testTimeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up: one for the delay between the first + // termination signal being sent (via the Cancel callback when the Context + // expires) and the process being forcibly terminated (via the WaitDelay + // field), and a second one for the delay becween the process being + // terminated and and the test logging its output for debugging. + // + // (We want to ensure that the test process itself has enough time to + // log the output before it is also terminated.) + cmdTimeout := testTimeout - 2*gracePeriod + + if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout { + // Either ctx doesn't have a deadline, or its deadline would expire + // after (or too close before) the test has already timed out. + // Add a shorter timeout so that the test will produce useful output. + ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout) + } + } + + cmd := exec.CommandContext(ctx, name, args...) + + // Use reflection to set the Cancel and WaitDelay fields, if present. + // TODO(bcmills): When we no longer support Go versions below 1.20, + // remove the use of reflect and assume that the fields are always present. + rc := reflect.ValueOf(cmd).Elem() + + if rCancel := rc.FieldByName("Cancel"); rCancel.IsValid() { + rCancel.Set(reflect.ValueOf(func() error { + if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded { + // The command timed out due to running too close to the test's deadline + // (because we specifically set a shorter Context deadline for that + // above). There is no way the test did that intentionally — it's too + // close to the wire! — so mark it as a test failure. That way, if the + // test expects the command to fail for some other reason, it doesn't + // have to distinguish between that reason and a timeout. + t.Errorf("test timed out while running command: %v", cmd) + } else { + // The command is being terminated due to ctx being canceled, but + // apparently not due to an explicit test deadline that we added. + // Log that information in case it is useful for diagnosing a failure, + // but don't actually fail the test because of it. + t.Logf("%v: terminating command: %v", ctx.Err(), cmd) + } + return cmd.Process.Signal(Sigquit) + })) + } + + if rWaitDelay := rc.FieldByName("WaitDelay"); rWaitDelay.IsValid() { + rWaitDelay.Set(reflect.ValueOf(gracePeriod)) + } + + // t.Cleanup was added in Go 1.14; for earlier Go versions, + // we just let the Context leak. + type Cleanupper interface { + Cleanup(func()) + } + if ct, ok := t.(Cleanupper); ok { + ct.Cleanup(func() { + if cancelCtx != nil { + cancelCtx() + } + if cmd.Process != nil && cmd.ProcessState == nil { + t.Errorf("command was started, but test did not wait for it to complete: %v", cmd) + } + }) + } + + return cmd +} + +// Command is like exec.Command, but applies the same changes as +// testenv.CommandContext (with a default Context). +func Command(t testing.TB, name string, args ...string) *exec.Cmd { + t.Helper() + return CommandContext(t, context.Background(), name, args...) +} diff --git a/internal/testenv/testenv.go b/internal/testenv/testenv.go index bfadb44be65..8184db0ba40 100644 --- a/internal/testenv/testenv.go +++ b/internal/testenv/testenv.go @@ -16,21 +16,14 @@ import ( "runtime/debug" "strings" "sync" + "testing" "time" + "golang.org/x/tools/internal/goroot" + exec "golang.org/x/sys/execabs" ) -// Testing is an abstraction of a *testing.T. -type Testing interface { - Skipf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) -} - -type helperer interface { - Helper() -} - // packageMainIsDevel reports whether the module containing package main // is a development version (if module information is available). func packageMainIsDevel() bool { @@ -97,6 +90,23 @@ func hasTool(tool string) error { GOROOT := strings.TrimSpace(string(out)) if GOROOT != runtime.GOROOT() { checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT()) + return + } + + // Also ensure that that GOROOT includes a compiler: 'go' commands + // don't in general work without it, and some builders + // (such as android-amd64-emu) seem to lack it in the test environment. + cmd := exec.Command(tool, "tool", "-n", "compile") + stderr := new(bytes.Buffer) + stderr.Write([]byte("\n")) + cmd.Stderr = stderr + out, err = cmd.Output() + if err != nil { + checkGoGoroot.err = fmt.Errorf("%v: %v%s", cmd, err, stderr) + return + } + if _, err := os.Stat(string(bytes.TrimSpace(out))); err != nil { + checkGoGoroot.err = err } }) if checkGoGoroot.err != nil { @@ -172,14 +182,13 @@ func allowMissingTool(tool string) bool { // NeedsTool skips t if the named tool is not present in the path. // As a special case, "cgo" means "go" is present and can compile cgo programs. -func NeedsTool(t Testing, tool string) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsTool(t testing.TB, tool string) { err := hasTool(tool) if err == nil { return } + + t.Helper() if allowMissingTool(tool) { t.Skipf("skipping because %s tool not available: %v", tool, err) } else { @@ -189,10 +198,8 @@ func NeedsTool(t Testing, tool string) { // NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by // the current process environment is not present in the path. -func NeedsGoPackages(t Testing) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoPackages(t testing.TB) { + t.Helper() tool := os.Getenv("GOPACKAGESDRIVER") switch tool { @@ -212,10 +219,8 @@ func NeedsGoPackages(t Testing) { // NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied // by env is not present in the path. -func NeedsGoPackagesEnv(t Testing, env []string) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoPackagesEnv(t testing.TB, env []string) { + t.Helper() for _, v := range env { if strings.HasPrefix(v, "GOPACKAGESDRIVER=") { @@ -236,10 +241,8 @@ func NeedsGoPackagesEnv(t Testing, env []string) { // and then run them with os.StartProcess or exec.Command. // Android doesn't have the userspace go build needs to run, // and js/wasm doesn't support running subprocesses. -func NeedsGoBuild(t Testing) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoBuild(t testing.TB) { + t.Helper() // This logic was derived from internal/testing.HasGoBuild and // may need to be updated as that function evolves. @@ -298,29 +301,25 @@ func Go1Point() int { // NeedsGo1Point skips t if the Go version used to run the test is older than // 1.x. -func NeedsGo1Point(t Testing, x int) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGo1Point(t testing.TB, x int) { if Go1Point() < x { + t.Helper() t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x) } } // SkipAfterGo1Point skips t if the Go version used to run the test is newer than // 1.x. -func SkipAfterGo1Point(t Testing, x int) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func SkipAfterGo1Point(t testing.TB, x int) { if Go1Point() > x { + t.Helper() t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x) } } // Deadline returns the deadline of t, if known, // using the Deadline method added in Go 1.15. -func Deadline(t Testing) (time.Time, bool) { +func Deadline(t testing.TB) (time.Time, bool) { td, ok := t.(interface { Deadline() (time.Time, bool) }) @@ -329,3 +328,65 @@ func Deadline(t Testing) (time.Time, bool) { } return td.Deadline() } + +// WriteImportcfg writes an importcfg file used by the compiler or linker to +// dstPath containing entries for the packages in std and cmd in addition +// to the package to package file mappings in additionalPackageFiles. +func WriteImportcfg(t testing.TB, dstPath string, additionalPackageFiles map[string]string) { + importcfg, err := goroot.Importcfg() + for k, v := range additionalPackageFiles { + importcfg += fmt.Sprintf("\npackagefile %s=%s", k, v) + } + if err != nil { + t.Fatalf("preparing the importcfg failed: %s", err) + } + ioutil.WriteFile(dstPath, []byte(importcfg), 0655) + if err != nil { + t.Fatalf("writing the importcfg failed: %s", err) + } +} + +var ( + gorootOnce sync.Once + gorootPath string + gorootErr error +) + +func findGOROOT() (string, error) { + gorootOnce.Do(func() { + gorootPath = runtime.GOROOT() + if gorootPath != "" { + // If runtime.GOROOT() is non-empty, assume that it is valid. (It might + // not be: for example, the user may have explicitly set GOROOT + // to the wrong directory.) + return + } + + cmd := exec.Command("go", "env", "GOROOT") + out, err := cmd.Output() + if err != nil { + gorootErr = fmt.Errorf("%v: %v", cmd, err) + } + gorootPath = strings.TrimSpace(string(out)) + }) + + return gorootPath, gorootErr +} + +// GOROOT reports the path to the directory containing the root of the Go +// project source tree. This is normally equivalent to runtime.GOROOT, but +// works even if the test binary was built with -trimpath. +// +// If GOROOT cannot be found, GOROOT skips t if t is non-nil, +// or panics otherwise. +func GOROOT(t testing.TB) string { + path, err := findGOROOT() + if err != nil { + if t == nil { + panic(err) + } + t.Helper() + t.Skip(err) + } + return path +} diff --git a/internal/testenv/testenv_notunix.go b/internal/testenv/testenv_notunix.go new file mode 100644 index 00000000000..74de6f0a8e2 --- /dev/null +++ b/internal/testenv/testenv_notunix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || plan9 || (js && wasm) +// +build windows plan9 js,wasm + +package testenv + +import "os" + +// Sigquit is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var Sigquit = os.Kill diff --git a/internal/testenv/testenv_unix.go b/internal/testenv/testenv_unix.go new file mode 100644 index 00000000000..bc6af1ff81d --- /dev/null +++ b/internal/testenv/testenv_unix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package testenv + +import "syscall" + +// Sigquit is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var Sigquit = syscall.SIGQUIT diff --git a/internal/tokeninternal/tokeninternal.go b/internal/tokeninternal/tokeninternal.go new file mode 100644 index 00000000000..a3fb2d4f29d --- /dev/null +++ b/internal/tokeninternal/tokeninternal.go @@ -0,0 +1,59 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package tokeninternal provides access to some internal features of the token +// package. +package tokeninternal + +import ( + "go/token" + "sync" + "unsafe" +) + +// GetLines returns the table of line-start offsets from a token.File. +func GetLines(file *token.File) []int { + // token.File has a Lines method on Go 1.21 and later. + if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { + return file.Lines() + } + + // This declaration must match that of token.File. + // This creates a risk of dependency skew. + // For now we check that the size of the two + // declarations is the same, on the (fragile) assumption + // that future changes would add fields. + type tokenFile119 struct { + _ string + _ int + _ int + mu sync.Mutex // we're not complete monsters + lines []int + _ []struct{} + } + type tokenFile118 struct { + _ *token.FileSet // deleted in go1.19 + tokenFile119 + } + + type uP = unsafe.Pointer + switch unsafe.Sizeof(*file) { + case unsafe.Sizeof(tokenFile118{}): + var ptr *tokenFile118 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + case unsafe.Sizeof(tokenFile119{}): + var ptr *tokenFile119 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + default: + panic("unexpected token.File size") + } +} diff --git a/internal/typeparams/normalize_test.go b/internal/typeparams/normalize_test.go index 5969eee3de0..769433d701d 100644 --- a/internal/typeparams/normalize_test.go +++ b/internal/typeparams/normalize_test.go @@ -9,6 +9,7 @@ import ( "go/parser" "go/token" "go/types" + "regexp" "strings" "testing" @@ -38,7 +39,7 @@ func TestStructuralTerms(t *testing.T) { {"package emptyintersection; type T[P interface{ ~int; string }] int", "", "empty type set"}, {"package embedded0; type T[P interface{ I }] int; type I interface { int }", "int", ""}, - {"package embedded1; type T[P interface{ I | string }] int; type I interface{ int | ~string }", "int|~string", ""}, + {"package embedded1; type T[P interface{ I | string }] int; type I interface{ int | ~string }", "int ?\\| ?~string", ""}, {"package embedded2; type T[P interface{ I; string }] int; type I interface{ int | ~string }", "string", ""}, {"package named; type T[P C] int; type C interface{ ~int|int }", "~int", ""}, @@ -52,7 +53,7 @@ type B interface{ int|string } type C interface { ~string|~int } type T[P interface{ A|B; C }] int -`, "~string|int", ""}, +`, "~string ?\\| ?int", ""}, } for _, test := range tests { @@ -96,7 +97,8 @@ type T[P interface{ A|B; C }] int qf := types.RelativeTo(pkg) got = types.TypeString(NewUnion(terms), qf) } - if got != test.want { + want := regexp.MustCompile(test.want) + if !want.MatchString(got) { t.Errorf("StructuralTerms(%s) = %q, want %q", T, got, test.want) } }) diff --git a/internal/typesinternal/errorcode.go b/internal/typesinternal/errorcode.go index d38ee3c27cd..07484073a57 100644 --- a/internal/typesinternal/errorcode.go +++ b/internal/typesinternal/errorcode.go @@ -30,6 +30,12 @@ type ErrorCode int // convention that "bad" implies a problem with syntax, and "invalid" implies a // problem with types. +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + const ( _ ErrorCode = iota @@ -153,12 +159,12 @@ const ( /* decls > var (+ other variable assignment codes) */ - // UntypedNil occurs when the predeclared (untyped) value nil is used to + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to // initialize a variable declared without an explicit type. // // Example: // var x = nil - UntypedNil + UntypedNilUse // WrongAssignCount occurs when the number of values on the right-hand side // of an assignment or or initialization expression does not match the number @@ -1523,4 +1529,32 @@ const ( // Example: // type T[P any] struct{ *P } MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + ) diff --git a/internal/typesinternal/errorcode_string.go b/internal/typesinternal/errorcode_string.go index de90e9515ae..15ecf7c5ded 100644 --- a/internal/typesinternal/errorcode_string.go +++ b/internal/typesinternal/errorcode_string.go @@ -8,6 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] _ = x[Test-1] _ = x[BlankPkgName-2] _ = x[MismatchedPkgName-3] @@ -23,7 +24,7 @@ func _() { _ = x[InvalidConstInit-13] _ = x[InvalidConstVal-14] _ = x[InvalidConstType-15] - _ = x[UntypedNil-16] + _ = x[UntypedNilUse-16] _ = x[WrongAssignCount-17] _ = x[UnassignableOperand-18] _ = x[NoNewVar-19] @@ -152,16 +153,27 @@ func _() { _ = x[MisplacedConstraintIface-142] _ = x[InvalidMethodTypeParams-143] _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] } -const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam" +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) -var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136} +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) func (i ErrorCode) String() string { - i -= 1 - if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { - return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] } diff --git a/internal/typesinternal/errorcode_test.go b/internal/typesinternal/errorcode_test.go new file mode 100644 index 00000000000..63d13f19eae --- /dev/null +++ b/internal/typesinternal/errorcode_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal_test + +import ( + "fmt" + "go/ast" + "go/constant" + "go/parser" + "go/token" + "go/types" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" +) + +func TestErrorCodes(t *testing.T) { + t.Skip("unskip this test to verify the correctness of errorcode.go for the current Go version") + + // For older go versions, this file was src/go/types/errorcodes.go. + stdPath := filepath.Join(runtime.GOROOT(), "src", "internal", "types", "errors", "codes.go") + stdCodes, err := loadCodes(stdPath) + if err != nil { + t.Fatalf("loading std codes: %v", err) + } + + localPath := "errorcode.go" + localCodes, err := loadCodes(localPath) + if err != nil { + t.Fatalf("loading local codes: %v", err) + } + + // Verify that all std codes are present, with the correct value. + type codeVal struct { + Name string + Value int64 + } + var byValue []codeVal + for k, v := range stdCodes { + byValue = append(byValue, codeVal{k, v}) + } + sort.Slice(byValue, func(i, j int) bool { + return byValue[i].Value < byValue[j].Value + }) + + localLookup := make(map[int64]string) + for k, v := range localCodes { + if _, ok := localLookup[v]; ok { + t.Errorf("duplicate error code value %d", v) + } + localLookup[v] = k + } + + for _, std := range byValue { + local, ok := localCodes[std.Name] + if !ok { + if v, ok := localLookup[std.Value]; ok { + t.Errorf("Missing code for %s (code %d is %s)", std.Name, std.Value, v) + } else { + t.Errorf("Missing code for %s", std.Name) + } + } + if local != std.Value { + t.Errorf("Mismatching value for %s: got %d, but stdlib has %d", std.Name, local, std.Value) + } + } +} + +// loadCodes loads all constant values found in filepath. +// +// The given file must type-check cleanly as a standalone file. +func loadCodes(filepath string) (map[string]int64, error) { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, filepath, nil, 0) + if err != nil { + return nil, err + } + var config types.Config + pkg, err := config.Check("p", fset, []*ast.File{f}, nil) + if err != nil { + return nil, err + } + + codes := make(map[string]int64) + for _, name := range pkg.Scope().Names() { + obj := pkg.Scope().Lookup(name) + c, ok := obj.(*types.Const) + if !ok { + continue + } + name := strings.TrimPrefix(name, "_") // compatibility with earlier go versions + codes[name], ok = constant.Int64Val(c.Val()) + if !ok { + return nil, fmt.Errorf("non integral value %v for %s", c.Val(), name) + } + } + if len(codes) < 100 { + return nil, fmt.Errorf("sanity check: got %d codes but expected at least 100", len(codes)) + } + return codes, nil +} diff --git a/playground/socket/socket_test.go b/playground/socket/socket_test.go index b866e37afdb..d410afea875 100644 --- a/playground/socket/socket_test.go +++ b/playground/socket/socket_test.go @@ -69,9 +69,5 @@ func TestLimiter(t *testing.T) { if n != msgLimit+1 { t.Errorf("received %v messages, want %v", n, msgLimit+1) } - select { - case <-kr: - case <-time.After(100 * time.Millisecond): - t.Errorf("process wasn't killed after reaching limit") - } + <-kr } diff --git a/present/args.go b/present/args.go index d63196e028c..b4f7503b6da 100644 --- a/present/args.go +++ b/present/args.go @@ -18,7 +18,7 @@ import ( // regular expressions. That is the only change to the code from codewalk.go. // See http://9p.io/sys/doc/sam/sam.html Table II for details on the syntax. -// addrToByte evaluates the given address starting at offset start in data. +// addrToByteRange evaluates the given address starting at offset start in data. // It returns the lo and hi byte offset of the matched region within data. func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) { if addr == "" { diff --git a/refactor/rename/check.go b/refactor/rename/check.go index 7a0627b96c7..9f29b98a0a4 100644 --- a/refactor/rename/check.go +++ b/refactor/rename/check.go @@ -478,7 +478,7 @@ func (r *renamer) checkStructField(from *types.Var) { r.checkSelections(from) } -// checkSelection checks that all uses and selections that resolve to +// checkSelections checks that all uses and selections that resolve to // the specified object would continue to do so after the renaming. func (r *renamer) checkSelections(from types.Object) { for pkg, info := range r.packages { diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go index ff4212b7645..6b4d5284aec 100644 --- a/refactor/satisfy/find.go +++ b/refactor/satisfy/find.go @@ -10,10 +10,7 @@ // // THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME. // -// It is provided only for the gorename tool. Ideally this -// functionality will become part of the type-checker in due course, -// since it is computing it anyway, and it is robust for ill-typed -// inputs, which this package is not. +// It is provided only for the gopls tool. It requires well-typed inputs. package satisfy // import "golang.org/x/tools/refactor/satisfy" // NOTES: @@ -25,9 +22,6 @@ package satisfy // import "golang.org/x/tools/refactor/satisfy" // ... // }}) // -// TODO(adonovan): make this robust against ill-typed input. -// Or move it into the type-checker. -// // Assignability conversions are possible in the following places: // - in assignments y = x, y := x, var y = x. // - from call argument types to formal parameter types @@ -51,11 +45,15 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" ) // A Constraint records the fact that the RHS type does and must // satisfy the LHS type, which is an interface. // The names are suggestive of an assignment statement LHS = RHS. +// +// The constraint is implicitly universally quantified over any type +// parameters appearing within the two types. type Constraint struct { LHS, RHS types.Type } @@ -129,13 +127,13 @@ func (f *Finder) exprN(e ast.Expr) types.Type { case *ast.CallExpr: // x, err := f(args) - sig := f.expr(e.Fun).Underlying().(*types.Signature) + sig := coreType(f.expr(e.Fun)).(*types.Signature) f.call(sig, e.Args) case *ast.IndexExpr: // y, ok := x[i] x := f.expr(e.X) - f.assign(f.expr(e.Index), x.Underlying().(*types.Map).Key()) + f.assign(f.expr(e.Index), coreType(x).(*types.Map).Key()) case *ast.TypeAssertExpr: // y, ok := x.(T) @@ -200,7 +198,8 @@ func (f *Finder) call(sig *types.Signature, args []ast.Expr) { } } -func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr, T types.Type) types.Type { +// builtin visits the arguments of a builtin type with signature sig. +func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr) { switch obj.Name() { case "make", "new": // skip the type operand @@ -215,7 +214,7 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex f.expr(args[1]) } else { // append(x, y, z) - tElem := s.Underlying().(*types.Slice).Elem() + tElem := coreType(s).(*types.Slice).Elem() for _, arg := range args[1:] { f.assign(tElem, f.expr(arg)) } @@ -224,14 +223,12 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex case "delete": m := f.expr(args[0]) k := f.expr(args[1]) - f.assign(m.Underlying().(*types.Map).Key(), k) + f.assign(coreType(m).(*types.Map).Key(), k) default: // ordinary call f.call(sig, args) } - - return T } func (f *Finder) extract(tuple types.Type, i int) types.Type { @@ -358,6 +355,7 @@ func (f *Finder) expr(e ast.Expr) types.Type { f.sig = saved case *ast.CompositeLit: + // No need for coreType here: go1.18 disallows P{...} for type param P. switch T := deref(tv.Type).Underlying().(type) { case *types.Struct: for i, elem := range e.Elts { @@ -403,12 +401,20 @@ func (f *Finder) expr(e ast.Expr) types.Type { } case *ast.IndexExpr: - x := f.expr(e.X) - i := f.expr(e.Index) - if ux, ok := x.Underlying().(*types.Map); ok { - f.assign(ux.Key(), i) + if instance(f.info, e.X) { + // f[T] or C[T] -- generic instantiation + } else { + // x[i] or m[k] -- index or lookup operation + x := f.expr(e.X) + i := f.expr(e.Index) + if ux, ok := coreType(x).(*types.Map); ok { + f.assign(ux.Key(), i) + } } + case *typeparams.IndexListExpr: + // f[X, Y] -- generic instantiation + case *ast.SliceExpr: f.expr(e.X) if e.Low != nil { @@ -432,14 +438,29 @@ func (f *Finder) expr(e ast.Expr) types.Type { f.assign(tvFun.Type, arg0) } else { // function call + + // unsafe call. Treat calls to functions in unsafe like ordinary calls, + // except that their signature cannot be determined by their func obj. + // Without this special handling, f.expr(e.Fun) would fail below. + if s, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + if obj, ok := f.info.Uses[s.Sel].(*types.Builtin); ok && obj.Pkg().Path() == "unsafe" { + sig := f.info.Types[e.Fun].Type.(*types.Signature) + f.call(sig, e.Args) + return tv.Type + } + } + + // builtin call if id, ok := unparen(e.Fun).(*ast.Ident); ok { if obj, ok := f.info.Uses[id].(*types.Builtin); ok { sig := f.info.Types[id].Type.(*types.Signature) - return f.builtin(obj, sig, e.Args, tv.Type) + f.builtin(obj, sig, e.Args) + return tv.Type } } + // ordinary call - f.call(f.expr(e.Fun).Underlying().(*types.Signature), e.Args) + f.call(coreType(f.expr(e.Fun)).(*types.Signature), e.Args) } case *ast.StarExpr: @@ -499,7 +520,7 @@ func (f *Finder) stmt(s ast.Stmt) { case *ast.SendStmt: ch := f.expr(s.Chan) val := f.expr(s.Value) - f.assign(ch.Underlying().(*types.Chan).Elem(), val) + f.assign(coreType(ch).(*types.Chan).Elem(), val) case *ast.IncDecStmt: f.expr(s.X) @@ -647,35 +668,35 @@ func (f *Finder) stmt(s ast.Stmt) { if s.Key != nil { k := f.expr(s.Key) var xelem types.Type - // keys of array, *array, slice, string aren't interesting - switch ux := x.Underlying().(type) { + // Keys of array, *array, slice, string aren't interesting + // since the RHS key type is just an int. + switch ux := coreType(x).(type) { case *types.Chan: xelem = ux.Elem() case *types.Map: xelem = ux.Key() } if xelem != nil { - f.assign(xelem, k) + f.assign(k, xelem) } } if s.Value != nil { val := f.expr(s.Value) var xelem types.Type - // values of strings aren't interesting - switch ux := x.Underlying().(type) { + // Values of type strings aren't interesting because + // the RHS value type is just a rune. + switch ux := coreType(x).(type) { case *types.Array: xelem = ux.Elem() - case *types.Chan: - xelem = ux.Elem() case *types.Map: xelem = ux.Elem() case *types.Pointer: // *array - xelem = deref(ux).(*types.Array).Elem() + xelem = coreType(deref(ux)).(*types.Array).Elem() case *types.Slice: xelem = ux.Elem() } if xelem != nil { - f.assign(xelem, val) + f.assign(val, xelem) } } } @@ -690,7 +711,7 @@ func (f *Finder) stmt(s ast.Stmt) { // deref returns a pointer's element type; otherwise it returns typ. func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { + if p, ok := coreType(typ).(*types.Pointer); ok { return p.Elem() } return typ @@ -699,3 +720,19 @@ func deref(typ types.Type) types.Type { func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } func isInterface(T types.Type) bool { return types.IsInterface(T) } + +func coreType(T types.Type) types.Type { return typeparams.CoreType(T) } + +func instance(info *types.Info, expr ast.Expr) bool { + var id *ast.Ident + switch x := expr.(type) { + case *ast.Ident: + id = x + case *ast.SelectorExpr: + id = x.Sel + default: + return false + } + _, ok := typeparams.GetInstances(info)[id] + return ok +} diff --git a/refactor/satisfy/find_test.go b/refactor/satisfy/find_test.go new file mode 100644 index 00000000000..35a1e87caf4 --- /dev/null +++ b/refactor/satisfy/find_test.go @@ -0,0 +1,238 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package satisfy_test + +import ( + "fmt" + "go/ast" + "go/importer" + "go/parser" + "go/token" + "go/types" + "reflect" + "sort" + "testing" + + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/refactor/satisfy" +) + +// This test exercises various operations on core types of type parameters. +// (It also provides pretty decent coverage of the non-generic operations.) +func TestGenericCoreOperations(t *testing.T) { + if !typeparams.Enabled { + t.Skip("!typeparams.Enabled") + } + + const src = `package foo + +import "unsafe" + +type I interface { f() } + +type impl struct{} +func (impl) f() {} + +// A big pile of single-serving types that implement I. +type A struct{impl} +type B struct{impl} +type C struct{impl} +type D struct{impl} +type E struct{impl} +type F struct{impl} +type G struct{impl} +type H struct{impl} +type J struct{impl} +type K struct{impl} +type L struct{impl} +type M struct{impl} +type N struct{impl} +type O struct{impl} +type P struct{impl} +type Q struct{impl} +type R struct{impl} +type S struct{impl} +type T struct{impl} +type U struct{impl} +type V struct{impl} + +type Generic[T any] struct{impl} +func (Generic[T]) g(T) {} + +type GI[T any] interface{ + g(T) +} + +func _[Slice interface{ []I }](s Slice) Slice { + s[0] = L{} // I <- L + return append(s, A{}) // I <- A +} + +func _[Func interface{ func(I) B }](fn Func) { + b := fn(C{}) // I <- C + var _ I = b // I <- B +} + +func _[Chan interface{ chan D }](ch Chan) { + var i I + for i = range ch {} // I <- D + _ = i +} + +func _[Chan interface{ chan E }](ch Chan) { + var _ I = <-ch // I <- E +} + +func _[Chan interface{ chan I }](ch Chan) { + ch <- F{} // I <- F +} + +func _[Map interface{ map[G]H }](m Map) { + var k, v I + for k, v = range m {} // I <- G, I <- H + _, _ = k, v +} + +func _[Map interface{ map[I]K }](m Map) { + var _ I = m[J{}] // I <- J, I <- K + delete(m, R{}) // I <- R + _, _ = m[J{}] +} + +func _[Array interface{ [1]I }](a Array) { + a[0] = M{} // I <- M +} + +func _[Array interface{ [1]N }](a Array) { + var _ I = a[0] // I <- N +} + +func _[Array interface{ [1]O }](a Array) { + var v I + for _, v = range a {} // I <- O + _ = v +} + +func _[ArrayPtr interface{ *[1]P }](a ArrayPtr) { + var v I + for _, v = range a {} // I <- P + _ = v +} + +func _[Slice interface{ []Q }](s Slice) { + var v I + for _, v = range s {} // I <- Q + _ = v +} + +func _[Func interface{ func() (S, bool) }](fn Func) { + var i I + i, _ = fn() // I <- S + _ = i +} + +func _() I { + var _ I = T{} // I <- T + var _ I = Generic[T]{} // I <- Generic[T] + var _ I = Generic[string]{} // I <- Generic[string] + return U{} // I <- U +} + +var _ GI[string] = Generic[string]{} // GI[string] <- Generic[string] + +// universally quantified constraints: +// the type parameter may appear on the left, the right, or both sides. + +func _[T any](g Generic[T]) GI[T] { + return g // GI[T] <- Generic[T] +} + +func _[T any]() { + type GI2[T any] interface{ g(string) } + var _ GI2[T] = Generic[string]{} // GI2[T] <- Generic[string] +} + +type Gen2[T any] struct{} +func (f Gen2[T]) g(string) { global = f } // GI[string] <- Gen2[T] + +var global GI[string] + +func _() { + var x [3]V + // golang/go#56227: the finder should visit calls in the unsafe package. + _ = unsafe.Slice(&x[0], func() int { var _ I = x[0]; return 3 }()) // I <- V +} +` + got := constraints(t, src) + want := []string{ + "p.GI2[T] <- p.Generic[string]", // implicitly "forall T" quantified + "p.GI[T] <- p.Generic[T]", // implicitly "forall T" quantified + "p.GI[string] <- p.Gen2[T]", // implicitly "forall T" quantified + "p.GI[string] <- p.Generic[string]", + "p.I <- p.A", + "p.I <- p.B", + "p.I <- p.C", + "p.I <- p.D", + "p.I <- p.E", + "p.I <- p.F", + "p.I <- p.G", + "p.I <- p.Generic[p.T]", + "p.I <- p.Generic[string]", + "p.I <- p.H", + "p.I <- p.J", + "p.I <- p.K", + "p.I <- p.L", + "p.I <- p.M", + "p.I <- p.N", + "p.I <- p.O", + "p.I <- p.P", + "p.I <- p.Q", + "p.I <- p.R", + "p.I <- p.S", + "p.I <- p.T", + "p.I <- p.U", + "p.I <- p.V", + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("found unexpected constraints: got %s, want %s", got, want) + } +} + +func constraints(t *testing.T, src string) []string { + // parse + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "p.go", src, 0) + if err != nil { + t.Fatal(err) // parse error + } + files := []*ast.File{f} + + // type-check + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(info) + conf := types.Config{ + Importer: importer.Default(), + } + if _, err := conf.Check("p", fset, files, info); err != nil { + t.Fatal(err) // type error + } + + // gather constraints + var finder satisfy.Finder + finder.Find(info, files) + var constraints []string + for c := range finder.Result { + constraints = append(constraints, fmt.Sprintf("%v <- %v", c.LHS, c.RHS)) + } + sort.Strings(constraints) + return constraints +}