diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 15167cd746c..00000000000
--- a/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9680..00000000000
--- a/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/README.md b/README.md
index 5cd8f0ac6e9..d9d7edd7332 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,9 @@ Selected commands:
- `cmd/toolstash` is a utility to simplify working with multiple versions of the Go toolchain.
These commands may be fetched with a command such as
-`go install golang.org/x/tools/cmd/goimports@latest`.
+```
+go install golang.org/x/tools/cmd/goimports@latest
+```
Selected packages:
diff --git a/cmd/auth/cookieauth/cookieauth.go b/cmd/auth/cookieauth/cookieauth.go
index feefaff0b6e..8b0ff17664b 100644
--- a/cmd/auth/cookieauth/cookieauth.go
+++ b/cmd/auth/cookieauth/cookieauth.go
@@ -40,7 +40,6 @@ func main() {
f, err := os.Open(os.Args[1])
if err != nil {
log.Fatalf("failed to read cookie file: %v\n", os.Args[1])
- os.Exit(1)
}
defer f.Close()
diff --git a/cmd/bundle/main.go b/cmd/bundle/main.go
index 96cbce9a131..194797bd822 100644
--- a/cmd/bundle/main.go
+++ b/cmd/bundle/main.go
@@ -84,6 +84,7 @@ import (
"os"
"strconv"
"strings"
+ "unicode"
"golang.org/x/tools/go/packages"
)
@@ -233,7 +234,7 @@ func bundle(src, dst, dstpkg, prefix, buildTags string) ([]byte, error) {
fmt.Fprintf(&out, "// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.\n")
if *outputFile != "" && buildTags == "" {
- fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(os.Args[1:], " "))
+ fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(quoteArgs(os.Args[1:]), " "))
} else {
fmt.Fprintf(&out, "// $ bundle %s\n", strings.Join(os.Args[1:], " "))
}
@@ -447,6 +448,35 @@ func printSameLineComment(out *bytes.Buffer, comments []*ast.CommentGroup, fset
return pos
}
+func quoteArgs(ss []string) []string {
+ // From go help generate:
+ //
+ // > The arguments to the directive are space-separated tokens or
+ // > double-quoted strings passed to the generator as individual
+ // > arguments when it is run.
+ //
+ // > Quoted strings use Go syntax and are evaluated before execution; a
+ // > quoted string appears as a single argument to the generator.
+ //
+ var qs []string
+ for _, s := range ss {
+ if s == "" || containsSpace(s) {
+ s = strconv.Quote(s)
+ }
+ qs = append(qs, s)
+ }
+ return qs
+}
+
+func containsSpace(s string) bool {
+ for _, r := range s {
+ if unicode.IsSpace(r) {
+ return true
+ }
+ }
+ return false
+}
+
type flagFunc func(string)
func (f flagFunc) Set(s string) error {
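As context for the quoteArgs change above: a minimal standalone sketch (not part of the patch; the argument values are invented) of the //go:generate line it now writes when an argument contains a space.

```
package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
)

// quote mirrors the quoteArgs logic above: arguments that are empty or
// contain whitespace are written as Go-quoted strings, so that go generate
// re-tokenizes them as single arguments.
func quote(args []string) []string {
	var qs []string
	for _, s := range args {
		if s == "" || strings.IndexFunc(s, unicode.IsSpace) >= 0 {
			s = strconv.Quote(s)
		}
		qs = append(qs, s)
	}
	return qs
}

func main() {
	args := []string{"-o", "bundled.go", "-prefix", "http ", "example.com/pkg"}
	fmt.Println("//go:generate bundle " + strings.Join(quote(args), " "))
	// Output: //go:generate bundle -o bundled.go -prefix "http " example.com/pkg
}
```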
diff --git a/cmd/compilebench/main.go b/cmd/compilebench/main.go
index 28dc450e9e0..754acdca0e4 100644
--- a/cmd/compilebench/main.go
+++ b/cmd/compilebench/main.go
@@ -335,10 +335,10 @@ type compile struct{ dir string }
func (compile) long() bool { return false }
func (c compile) run(name string, count int) error {
- // Make sure dependencies needed by go tool compile are installed to GOROOT/pkg.
- out, err := exec.Command(*flagGoCmd, "build", "-i", c.dir).CombinedOutput()
+ // Make sure dependencies needed by go tool compile are built.
+ out, err := exec.Command(*flagGoCmd, "build", c.dir).CombinedOutput()
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", c.dir, err, out)
+ return fmt.Errorf("go build %s: %v\n%s", c.dir, err, out)
}
// Find dir and source file list.
@@ -347,6 +347,11 @@ func (c compile) run(name string, count int) error {
return err
}
+ importcfg, err := genImportcfgFile(c.dir, false)
+ if err != nil {
+ return err
+ }
+
// If this package has assembly files, we'll need to pass a symabis
// file to the compiler; call a helper to invoke the assembler
// to do that.
@@ -371,6 +376,10 @@ func (c compile) run(name string, count int) error {
if symAbisFile != "" {
args = append(args, "-symabis", symAbisFile)
}
+ if importcfg != "" {
+ args = append(args, "-importcfg", importcfg)
+ defer os.Remove(importcfg)
+ }
args = append(args, pkg.GoFiles...)
if err := runBuildCmd(name, count, pkg.Dir, compiler, args); err != nil {
return err
@@ -406,18 +415,28 @@ func (r link) run(name string, count int) error {
}
// Build dependencies.
- out, err := exec.Command(*flagGoCmd, "build", "-i", "-o", "/dev/null", r.dir).CombinedOutput()
+ out, err := exec.Command(*flagGoCmd, "build", "-o", "/dev/null", r.dir).CombinedOutput()
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", r.dir, err, out)
+		return fmt.Errorf("go build -o /dev/null %s: %v\n%s", r.dir, err, out)
}
+ importcfg, err := genImportcfgFile(r.dir, true)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(importcfg)
+
// Build the main package.
pkg, err := goList(r.dir)
if err != nil {
return err
}
- args := []string{"-o", "_compilebench_.o"}
+ args := []string{"-o", "_compilebench_.o", "-importcfg", importcfg}
args = append(args, pkg.GoFiles...)
+ if *flagTrace {
+ fmt.Fprintf(os.Stderr, "running: %s %+v\n",
+ compiler, args)
+ }
cmd := exec.Command(compiler, args...)
cmd.Dir = pkg.Dir
cmd.Stdout = os.Stderr
@@ -429,7 +448,7 @@ func (r link) run(name string, count int) error {
defer os.Remove(pkg.Dir + "/_compilebench_.o")
// Link the main package.
- args = []string{"-o", "_compilebench_.exe"}
+ args = []string{"-o", "_compilebench_.exe", "-importcfg", importcfg}
args = append(args, strings.Fields(*flagLinkerFlags)...)
args = append(args, strings.Fields(r.flags)...)
args = append(args, "_compilebench_.o")
@@ -578,3 +597,49 @@ func genSymAbisFile(pkg *Pkg, symAbisFile, incdir string) error {
}
return nil
}
+
+// genImportcfgFile generates an importcfg file for building package
+// dir. It returns the path of the generated importcfg file, or an
+// empty string if the package has no dependencies.
+func genImportcfgFile(dir string, full bool) (string, error) {
+ need := "{{.Imports}}"
+ if full {
+ // for linking, we need transitive dependencies
+ need = "{{.Deps}}"
+ }
+
+ // find imported/dependent packages
+ cmd := exec.Command(*flagGoCmd, "list", "-f", need, dir)
+ cmd.Stderr = os.Stderr
+ out, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("go list -f %s %s: %v", need, dir, err)
+ }
+ // trim [ ]\n
+ if len(out) < 3 || out[0] != '[' || out[len(out)-2] != ']' || out[len(out)-1] != '\n' {
+ return "", fmt.Errorf("unexpected output from go list -f %s %s: %s", need, dir, out)
+ }
+ out = out[1 : len(out)-2]
+ if len(out) == 0 {
+ return "", nil
+ }
+
+ // build importcfg for imported packages
+ cmd = exec.Command(*flagGoCmd, "list", "-export", "-f", "{{if .Export}}packagefile {{.ImportPath}}={{.Export}}{{end}}")
+ cmd.Args = append(cmd.Args, strings.Fields(string(out))...)
+ cmd.Stderr = os.Stderr
+ out, err = cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("generating importcfg for %s: %s: %v", dir, cmd, err)
+ }
+
+ f, err := os.CreateTemp("", "importcfg")
+ if err != nil {
+ return "", fmt.Errorf("creating tmp importcfg file failed: %v", err)
+ }
+ defer f.Close()
+ if _, err := f.Write(out); err != nil {
+ return "", fmt.Errorf("writing importcfg file %s failed: %v", f.Name(), err)
+ }
+ return f.Name(), nil
+}
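For context on genImportcfgFile: a minimal sketch (outside the patch; the export-data paths are invented) of the importcfg format it writes and how that file reaches the compiler via -importcfg.

```
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Each importcfg line maps an import path to the file holding its export
	// data; this is what `go list -export -f ...` reports for each dependency.
	// The paths below are invented for illustration.
	importcfg := "packagefile fmt=/tmp/gocache/aa/fmt.a\n" +
		"packagefile strings=/tmp/gocache/bb/strings.a\n"

	f, err := os.CreateTemp("", "importcfg")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(importcfg); err != nil {
		panic(err)
	}
	f.Close()

	// The compiler resolves imports from these entries instead of GOROOT/pkg.
	cmd := exec.Command("go", "tool", "compile", "-importcfg", f.Name(), "-o", "p.o", "p.go")
	fmt.Println(cmd) // prints the command line; running it would need a real p.go
}
```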
diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go
index 62cb08d23a8..69d84ad5012 100644
--- a/cmd/digraph/digraph.go
+++ b/cmd/digraph/digraph.go
@@ -34,7 +34,7 @@ The support commands are:
sccs
all strongly connected components (one per line)
scc
- the set of nodes nodes strongly connected to the specified one
+ the set of nodes strongly connected to the specified one
focus
the subgraph containing all directed paths that pass through the specified node
diff --git a/cmd/godoc/godoc_test.go b/cmd/godoc/godoc_test.go
index 4eb341af1e1..3e91ac6f94c 100644
--- a/cmd/godoc/godoc_test.go
+++ b/cmd/godoc/godoc_test.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package main_test
+package main
import (
"bytes"
+ "context"
"fmt"
"go/build"
"io/ioutil"
@@ -13,10 +14,10 @@ import (
"net/http"
"os"
"os/exec"
- "path/filepath"
"regexp"
"runtime"
"strings"
+ "sync"
"testing"
"time"
@@ -24,42 +25,39 @@ import (
"golang.org/x/tools/internal/testenv"
)
-// buildGodoc builds the godoc executable.
-// It returns its path, and a cleanup function.
-//
-// TODO(adonovan): opt: do this at most once, and do the cleanup
-// exactly once. How though? There's no atexit.
-func buildGodoc(t *testing.T) (bin string, cleanup func()) {
- t.Helper()
-
- if runtime.GOARCH == "arm" {
- t.Skip("skipping test on arm platforms; too slow")
- }
- if runtime.GOOS == "android" {
- t.Skipf("the dependencies are not available on android")
+func TestMain(m *testing.M) {
+ if os.Getenv("GODOC_TEST_IS_GODOC") != "" {
+ main()
+ os.Exit(0)
}
- testenv.NeedsTool(t, "go")
- tmp, err := ioutil.TempDir("", "godoc-regtest-")
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if cleanup == nil { // probably, go build failed.
- os.RemoveAll(tmp)
- }
- }()
+ // Inform subprocesses that they should run the cmd/godoc main instead of
+ // running tests. It's a close approximation to building and running the real
+ // command, and much less complicated and expensive to build and clean up.
+ os.Setenv("GODOC_TEST_IS_GODOC", "1")
- bin = filepath.Join(tmp, "godoc")
- if runtime.GOOS == "windows" {
- bin += ".exe"
- }
- cmd := exec.Command("go", "build", "-o", bin)
- if err := cmd.Run(); err != nil {
- t.Fatalf("Building godoc: %v", err)
+ os.Exit(m.Run())
+}
+
+var exe struct {
+ path string
+ err error
+ once sync.Once
+}
+
+func godocPath(t *testing.T) string {
+ switch runtime.GOOS {
+ case "js", "ios":
+ t.Skipf("skipping test that requires exec")
}
- return bin, func() { os.RemoveAll(tmp) }
+ exe.once.Do(func() {
+ exe.path, exe.err = os.Executable()
+ })
+ if exe.err != nil {
+ t.Fatal(exe.err)
+ }
+ return exe.path
}
func serverAddress(t *testing.T) string {
@@ -74,60 +72,42 @@ func serverAddress(t *testing.T) string {
return ln.Addr().String()
}
-func waitForServerReady(t *testing.T, cmd *exec.Cmd, addr string) {
- ch := make(chan error, 1)
- go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }()
- go waitForServer(t, ch,
+func waitForServerReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/", addr),
"Go Documentation Server",
- 15*time.Second,
false)
- if err := <-ch; err != nil {
- t.Skipf("skipping due to https://go.dev/issue/50014: %v", err)
- }
}
-func waitForSearchReady(t *testing.T, cmd *exec.Cmd, addr string) {
- ch := make(chan error, 1)
- go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }()
- go waitForServer(t, ch,
+func waitForSearchReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/search?q=FALLTHROUGH", addr),
"The list of tokens.",
- 2*time.Minute,
false)
- if err := <-ch; err != nil {
- t.Skipf("skipping due to https://go.dev/issue/50014: %v", err)
- }
}
-func waitUntilScanComplete(t *testing.T, addr string) {
- ch := make(chan error)
- go waitForServer(t, ch,
+func waitUntilScanComplete(t *testing.T, ctx context.Context, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/pkg", addr),
"Scan is not yet complete",
- 2*time.Minute,
// setting reverse as true, which means this waits
// until the string is not returned in the response anymore
- true,
- )
- if err := <-ch; err != nil {
- t.Skipf("skipping due to https://go.dev/issue/50014: %v", err)
- }
+ true)
}
-const pollInterval = 200 * time.Millisecond
+const pollInterval = 50 * time.Millisecond
-// waitForServer waits for server to meet the required condition.
-// It sends a single error value to ch, unless the test has failed.
-// The error value is nil if the required condition was met within
-// timeout, or non-nil otherwise.
-func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout time.Duration, reverse bool) {
- deadline := time.Now().Add(timeout)
- for time.Now().Before(deadline) {
- time.Sleep(pollInterval)
- if t.Failed() {
- return
+// waitForServer waits for server to meet the required condition,
+// failing the test if ctx is canceled before that occurs.
+func waitForServer(t *testing.T, ctx context.Context, url, match string, reverse bool) {
+ start := time.Now()
+ for {
+ if ctx.Err() != nil {
+ t.Helper()
+ t.Fatalf("server failed to respond in %v", time.Since(start))
}
+
+ time.Sleep(pollInterval)
res, err := http.Get(url)
if err != nil {
continue
@@ -140,11 +120,9 @@ func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout tim
switch {
case !reverse && bytes.Contains(body, []byte(match)),
reverse && !bytes.Contains(body, []byte(match)):
- ch <- nil
return
}
}
- ch <- fmt.Errorf("server failed to respond in %v", timeout)
}
// hasTag checks whether a given release tag is contained in the current version
@@ -158,24 +136,18 @@ func hasTag(t string) bool {
return false
}
-func killAndWait(cmd *exec.Cmd) {
- cmd.Process.Kill()
- cmd.Process.Wait()
-}
-
func TestURL(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping on plan9; fails to start up quickly enough")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
testcase := func(url string, contents string) func(t *testing.T) {
return func(t *testing.T) {
stdout, stderr := new(bytes.Buffer), new(bytes.Buffer)
args := []string{fmt.Sprintf("-url=%s", url)}
- cmd := exec.Command(bin, args...)
+ cmd := testenv.Command(t, bin, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Args[0] = "godoc"
@@ -205,8 +177,8 @@ func TestURL(t *testing.T) {
// Basic integration test for godoc HTTP interface.
func TestWeb(t *testing.T) {
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
+
for _, x := range packagestest.All {
t.Run(x.Name(), func(t *testing.T) {
testWeb(t, x, bin, false)
@@ -217,17 +189,19 @@ func TestWeb(t *testing.T) {
// Basic integration test for godoc HTTP interface.
func TestWebIndex(t *testing.T) {
if testing.Short() {
- t.Skip("skipping test in -short mode")
+ t.Skip("skipping slow test in -short mode")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
testWeb(t, packagestest.GOPATH, bin, true)
}
// Basic integration test for godoc HTTP interface.
func testWeb(t *testing.T, x packagestest.Exporter, bin string, withIndex bool) {
- if runtime.GOOS == "plan9" {
- t.Skip("skipping on plan9; fails to start up quickly enough")
+ switch runtime.GOOS {
+ case "plan9":
+ t.Skip("skipping on plan9: fails to start up quickly enough")
+ case "android", "ios":
+ t.Skip("skipping on mobile: lacks GOROOT/api in test environment")
}
// Write a fake GOROOT/GOPATH with some third party packages.
@@ -256,23 +230,39 @@ package a; import _ "godoc.test/repo2/a"; const Name = "repo1a"`,
if withIndex {
args = append(args, "-index", "-index_interval=-1s")
}
- cmd := exec.Command(bin, args...)
+ cmd := testenv.Command(t, bin, args...)
cmd.Dir = e.Config.Dir
cmd.Env = e.Config.Env
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
+ cmdOut := new(strings.Builder)
+ cmd.Stdout = cmdOut
+ cmd.Stderr = cmdOut
cmd.Args[0] = "godoc"
if err := cmd.Start(); err != nil {
t.Fatalf("failed to start godoc: %s", err)
}
- defer killAndWait(cmd)
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ err := cmd.Wait()
+ t.Logf("%v: %v", cmd, err)
+ cancel()
+ }()
+ defer func() {
+ // Shut down the server cleanly if possible.
+ if runtime.GOOS == "windows" {
+ cmd.Process.Kill() // Windows doesn't support os.Interrupt.
+ } else {
+ cmd.Process.Signal(os.Interrupt)
+ }
+ <-ctx.Done()
+ t.Logf("server output:\n%s", cmdOut)
+ }()
if withIndex {
- waitForSearchReady(t, cmd, addr)
+ waitForSearchReady(t, ctx, cmd, addr)
} else {
- waitForServerReady(t, cmd, addr)
- waitUntilScanComplete(t, addr)
+ waitForServerReady(t, ctx, cmd, addr)
+ waitUntilScanComplete(t, ctx, addr)
}
tests := []struct {
@@ -454,22 +444,17 @@ func TestNoMainModule(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping on plan9; for consistency with other tests that build godoc binary")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
- tempDir, err := ioutil.TempDir("", "godoc-test-")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tempDir)
+ bin := godocPath(t)
+ tempDir := t.TempDir()
// Run godoc in an empty directory with module mode explicitly on,
// so that 'go env GOMOD' reports os.DevNull.
- cmd := exec.Command(bin, "-url=/")
+ cmd := testenv.Command(t, bin, "-url=/")
cmd.Dir = tempDir
cmd.Env = append(os.Environ(), "GO111MODULE=on")
var stderr bytes.Buffer
cmd.Stderr = &stderr
- err = cmd.Run()
+ err := cmd.Run()
if err != nil {
t.Fatalf("godoc command failed: %v\nstderr=%q", err, stderr.String())
}
diff --git a/cmd/guru/guru.go b/cmd/guru/guru.go
index fdb13f92932..18bb0840a1e 100644
--- a/cmd/guru/guru.go
+++ b/cmd/guru/guru.go
@@ -207,7 +207,7 @@ func pkgContainsFile(bp *build.Package, filename string) byte {
return 0 // not found
}
-// ParseQueryPos parses the source query position pos and returns the
+// parseQueryPos parses the source query position pos and returns the
// AST node of the loaded program lprog that it identifies.
// If needExact, it must identify a single AST subtree;
// this is appropriate for queries that allow fairly arbitrary syntax,
diff --git a/cmd/splitdwarf/splitdwarf.go b/cmd/splitdwarf/splitdwarf.go
index 13888aa512e..9729b0b7a6a 100644
--- a/cmd/splitdwarf/splitdwarf.go
+++ b/cmd/splitdwarf/splitdwarf.go
@@ -182,7 +182,7 @@ for input_exe need to allow writing.
oldsym := symtab.Syms[ii]
newsymtab.Syms = append(newsymtab.Syms, oldsym)
- linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: uint32(linkeditstringcur),
+ linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: linkeditstringcur,
Type: oldsym.Type, Sect: oldsym.Sect, Desc: oldsym.Desc, Value: oldsym.Value})
linkeditstringcur += uint32(len(oldsym.Name)) + 1
linkeditstrings = append(linkeditstrings, oldsym.Name)
diff --git a/cmd/ssadump/main.go b/cmd/ssadump/main.go
index 138e7f69ff2..cfb9122b24d 100644
--- a/cmd/ssadump/main.go
+++ b/cmd/ssadump/main.go
@@ -157,12 +157,15 @@ func doMain() error {
// Build SSA for all packages.
prog.Build()
- // The interpreter needs the runtime package.
- // It is a limitation of go/packages that
- // we cannot add "runtime" to its initial set,
- // we can only check that it is present.
- if prog.ImportedPackage("runtime") == nil {
- return fmt.Errorf("-run: program does not depend on runtime")
+ // Earlier versions of the interpreter needed the runtime
+ // package; however, interp cannot handle unsafe constructs
+ // used during runtime's package initialization at the moment.
+ // The key construct blocking support is:
+ // *((*T)(unsafe.Pointer(p)))
+ // Unfortunately, this means only trivial programs can be
+ // interpreted by ssadump.
+ if prog.ImportedPackage("runtime") != nil {
+ return fmt.Errorf("-run: program depends on runtime package (interpreter can run only trivial programs)")
}
if runtime.GOARCH != build.Default.GOARCH {
diff --git a/cmd/stringer/endtoend_test.go b/cmd/stringer/endtoend_test.go
index 5b969a52e36..268c2f61be2 100644
--- a/cmd/stringer/endtoend_test.go
+++ b/cmd/stringer/endtoend_test.go
@@ -14,7 +14,6 @@ import (
"fmt"
"go/build"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -113,7 +112,7 @@ func TestTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- result, err := ioutil.ReadFile(output)
+ result, err := os.ReadFile(output)
if err != nil {
t.Fatal(err)
}
@@ -128,7 +127,7 @@ func TestTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- result, err = ioutil.ReadFile(output)
+ result, err = os.ReadFile(output)
if err != nil {
t.Fatal(err)
}
@@ -184,7 +183,7 @@ func buildStringer(t *testing.T) (dir string, stringer string) {
t.Helper()
testenv.NeedsTool(t, "go")
- dir, err := ioutil.TempDir("", "stringer")
+ dir, err := os.MkdirTemp("", "stringer")
if err != nil {
t.Fatal(err)
}
diff --git a/cmd/stringer/golden_test.go b/cmd/stringer/golden_test.go
index b29763174b3..2e2ec58de69 100644
--- a/cmd/stringer/golden_test.go
+++ b/cmd/stringer/golden_test.go
@@ -10,7 +10,6 @@
package main
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -452,7 +451,7 @@ func (i Token) String() string {
func TestGolden(t *testing.T) {
testenv.NeedsTool(t, "go")
- dir, err := ioutil.TempDir("", "stringer")
+ dir, err := os.MkdirTemp("", "stringer")
if err != nil {
t.Error(err)
}
@@ -466,7 +465,7 @@ func TestGolden(t *testing.T) {
input := "package test\n" + test.input
file := test.name + ".go"
absFile := filepath.Join(dir, file)
- err := ioutil.WriteFile(absFile, []byte(input), 0644)
+ err := os.WriteFile(absFile, []byte(input), 0644)
if err != nil {
t.Error(err)
}
diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go
index 9f9c85a0370..998d1a51bfd 100644
--- a/cmd/stringer/stringer.go
+++ b/cmd/stringer/stringer.go
@@ -76,7 +76,6 @@ import (
"go/format"
"go/token"
"go/types"
- "io/ioutil"
"log"
"os"
"path/filepath"
@@ -166,7 +165,7 @@ func main() {
baseName := fmt.Sprintf("%s_string.go", types[0])
outputName = filepath.Join(dir, strings.ToLower(baseName))
}
- err := ioutil.WriteFile(outputName, src, 0644)
+ err := os.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
@@ -217,7 +216,7 @@ type Package struct {
// parsePackage exits if there is an error.
func (g *Generator) parsePackage(patterns []string, tags []string) {
cfg := &packages.Config{
- Mode: packages.LoadSyntax,
+ Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
Tests: false,
diff --git a/cmd/toolstash/buildall b/cmd/toolstash/buildall
index 0c6492c9efa..4fc22f7f8fc 100755
--- a/cmd/toolstash/buildall
+++ b/cmd/toolstash/buildall
@@ -38,10 +38,10 @@ if [ "$pattern" = "" ]; then
fi
targets="$(go tool dist list; echo linux/386/softfloat)"
-targets="$(echo "$targets" | tr '/' '-' | sort | egrep "$pattern" | egrep -v 'android-arm|darwin-arm')"
+targets="$(echo "$targets" | tr '/' '-' | sort | grep -E "$pattern" | grep -E -v 'android-arm|darwin-arm')"
# put linux first in the target list to get all the architectures up front.
-targets="$(echo "$targets" | egrep 'linux') $(echo "$targets" | egrep -v 'linux')"
+targets="$(echo "$targets" | grep -E 'linux') $(echo "$targets" | grep -E -v 'linux')"
if [ "$sete" = true ]; then
set -e
diff --git a/container/intsets/sparse.go b/container/intsets/sparse.go
index c06aec80b0d..d5fe156ed36 100644
--- a/container/intsets/sparse.go
+++ b/container/intsets/sparse.go
@@ -190,7 +190,7 @@ func (b *block) min(take bool) int {
if take {
b.bits[i] = w &^ (1 << uint(tz))
}
- return b.offset + int(i*bitsPerWord) + tz
+ return b.offset + i*bitsPerWord + tz
}
}
panic("BUG: empty block")
diff --git a/copyright/copyright.go b/copyright/copyright.go
index eb56ef28b22..db63c59922e 100644
--- a/copyright/copyright.go
+++ b/copyright/copyright.go
@@ -94,7 +94,7 @@ func checkFile(toolsDir, filename string) (bool, error) {
return shouldAddCopyright, nil
}
-// Copied from golang.org/x/tools/internal/lsp/source/util.go.
+// Copied from golang.org/x/tools/gopls/internal/lsp/source/util.go.
// Matches cgo generated comment as well as the proposed standard:
//
// https://golang.org/s/generatedcode
diff --git a/go.mod b/go.mod
index 985b9cc120c..d90447639aa 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,12 @@
module golang.org/x/tools
-go 1.17
+go 1.18 // tagx:compat 1.16
require (
- github.com/yuin/goldmark v1.4.1
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
- golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
- golang.org/x/text v0.3.7
+ github.com/yuin/goldmark v1.4.13
+ golang.org/x/mod v0.7.0
+ golang.org/x/net v0.5.0
+ golang.org/x/sys v0.4.0
)
+
+require golang.org/x/sync v0.1.0
diff --git a/go.sum b/go.sum
index 85cf00cab79..fbb3d74e321 100644
--- a/go.sum
+++ b/go.sum
@@ -1,30 +1,34 @@
-github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/go/analysis/analysis.go b/go/analysis/analysis.go
index d11505a165c..44ada22a03a 100644
--- a/go/analysis/analysis.go
+++ b/go/analysis/analysis.go
@@ -11,8 +11,6 @@ import (
"go/token"
"go/types"
"reflect"
-
- "golang.org/x/tools/internal/analysisinternal"
)
// An Analyzer describes an analysis function and its options.
@@ -48,6 +46,7 @@ type Analyzer struct {
// RunDespiteErrors allows the driver to invoke
// the Run method of this analyzer even on a
// package that contains parse or type errors.
+ // The Pass.TypeErrors field may consequently be non-empty.
RunDespiteErrors bool
// Requires is a set of analyzers that must run successfully
@@ -75,17 +74,6 @@ type Analyzer struct {
func (a *Analyzer) String() string { return a.Name }
-func init() {
- // Set the analysisinternal functions to be able to pass type errors
- // to the Pass type without modifying the go/analysis API.
- analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
- p.(*Pass).typeErrors = errors
- }
- analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
- return p.(*Pass).typeErrors
- }
-}
-
// A Pass provides information to the Run function that
// applies a specific analyzer to a single Go package.
//
@@ -106,6 +94,7 @@ type Pass struct {
Pkg *types.Package // type information about the package
TypesInfo *types.Info // type information about the syntax trees
TypesSizes types.Sizes // function for computing sizes of types
+ TypeErrors []types.Error // type errors (only if Analyzer.RunDespiteErrors)
// Report reports a Diagnostic, a finding about a specific location
// in the analyzed source code such as a potential mistake.
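A minimal sketch (hypothetical analyzer, not part of this change) of how the new Pass.TypeErrors field is consumed: only analyzers that set RunDespiteErrors will ever see a non-empty slice.

```
package example

import "golang.org/x/tools/go/analysis"

// typeErrCount is an illustrative analyzer that reports how many type errors
// the driver attributed to the package. It sets RunDespiteErrors so that it
// still runs (and Pass.TypeErrors is populated) when the package is ill-typed.
var typeErrCount = &analysis.Analyzer{
	Name:             "typeerrcount",
	Doc:              "report the number of type errors in a package",
	RunDespiteErrors: true,
	Run: func(pass *analysis.Pass) (interface{}, error) {
		if len(pass.TypeErrors) > 0 && len(pass.Files) > 0 {
			pass.Reportf(pass.Files[0].Pos(), "package has %d type errors", len(pass.TypeErrors))
		}
		return nil, nil
	},
}
```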
diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go
index 6ef2e7984fa..a3a53ba9f20 100644
--- a/go/analysis/analysistest/analysistest.go
+++ b/go/analysis/analysistest/analysistest.go
@@ -19,14 +19,13 @@ import (
"sort"
"strconv"
"strings"
+ "testing"
"text/scanner"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/checker"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/testenv"
"golang.org/x/tools/txtar"
)
@@ -114,7 +113,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
// should match up.
for _, act := range r {
// file -> message -> edits
- fileEdits := make(map[*token.File]map[string][]diff.TextEdit)
+ fileEdits := make(map[*token.File]map[string][]diff.Edit)
fileContents := make(map[*token.File][]byte)
// Validate edits, prepare the fileEdits map and read the file contents.
@@ -142,17 +141,13 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
}
fileContents[file] = contents
}
- spn, err := span.NewRange(act.Pass.Fset, edit.Pos, edit.End).Span()
- if err != nil {
- t.Errorf("error converting edit to span %s: %v", file.Name(), err)
- }
-
if _, ok := fileEdits[file]; !ok {
- fileEdits[file] = make(map[string][]diff.TextEdit)
+ fileEdits[file] = make(map[string][]diff.Edit)
}
- fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.TextEdit{
- Span: spn,
- NewText: string(edit.NewText),
+ fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.Edit{
+ Start: file.Offset(edit.Pos),
+ End: file.Offset(edit.End),
+ New: string(edit.NewText),
})
}
}
@@ -189,7 +184,11 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
for _, vf := range ar.Files {
if vf.Name == sf {
found = true
- out := diff.ApplyEdits(string(orig), edits)
+ out, err := diff.Apply(string(orig), edits)
+ if err != nil {
+ t.Errorf("%s: error applying fixes: %v", file.Name(), err)
+ continue
+ }
// the file may contain multiple trailing
// newlines if the user places empty lines
// between files in the archive. normalize
@@ -200,12 +199,9 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
t.Errorf("%s: error formatting edited source: %v\n%s", file.Name(), err, out)
continue
}
- if want != string(formatted) {
- d, err := myers.ComputeEdits("", want, string(formatted))
- if err != nil {
- t.Errorf("failed to compute suggested fix diff: %v", err)
- }
- t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, d))
+ if got := string(formatted); got != want {
+ unified := diff.Unified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, got)
+ t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified)
}
break
}
@@ -217,12 +213,16 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
} else {
// all suggested fixes are represented by a single file
- var catchallEdits []diff.TextEdit
+ var catchallEdits []diff.Edit
for _, edits := range fixes {
catchallEdits = append(catchallEdits, edits...)
}
- out := diff.ApplyEdits(string(orig), catchallEdits)
+ out, err := diff.Apply(string(orig), catchallEdits)
+ if err != nil {
+ t.Errorf("%s: error applying fixes: %v", file.Name(), err)
+ continue
+ }
want := string(ar.Comment)
formatted, err := format.Source([]byte(out))
@@ -230,12 +230,9 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
t.Errorf("%s: error formatting resulting source: %v\n%s", file.Name(), err, out)
continue
}
- if want != string(formatted) {
- d, err := myers.ComputeEdits("", want, string(formatted))
- if err != nil {
- t.Errorf("%s: failed to compute suggested fix diff: %s", file.Name(), err)
- }
- t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(file.Name()+".golden", "actual", want, d))
+ if got := string(formatted); got != want {
+ unified := diff.Unified(file.Name()+".golden", "actual", want, got)
+ t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified)
}
}
}
@@ -282,7 +279,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
// attempted, even if unsuccessful. It is safe for a test to ignore all
// the results, but a test may use it to perform additional checks.
func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result {
- if t, ok := t.(testenv.Testing); ok {
+ if t, ok := t.(testing.TB); ok {
testenv.NeedsGoPackages(t)
}
diff --git a/go/analysis/diagnostic.go b/go/analysis/diagnostic.go
index cd462a0cb55..5cdcf46d2a1 100644
--- a/go/analysis/diagnostic.go
+++ b/go/analysis/diagnostic.go
@@ -37,7 +37,7 @@ type Diagnostic struct {
// declaration.
type RelatedInformation struct {
Pos token.Pos
- End token.Pos
+ End token.Pos // optional
Message string
}
diff --git a/go/analysis/doc.go b/go/analysis/doc.go
index 03c31525e36..c5429c9e239 100644
--- a/go/analysis/doc.go
+++ b/go/analysis/doc.go
@@ -177,14 +177,14 @@ Diagnostic is defined as:
The optional Category field is a short identifier that classifies the
kind of message when an analysis produces several kinds of diagnostic.
-Many analyses want to associate diagnostics with a severity level.
-Because Diagnostic does not have a severity level field, an Analyzer's
-diagnostics effectively all have the same severity level. To separate which
-diagnostics are high severity and which are low severity, expose multiple
-Analyzers instead. Analyzers should also be separated when their
-diagnostics belong in different groups, or could be tagged differently
-before being shown to the end user. Analyzers should document their severity
-level to help downstream tools surface diagnostics properly.
+The Diagnostic struct does not have a field to indicate its severity
+because opinions about the relative importance of Analyzers and their
+diagnostics vary widely among users. The design of this framework does
+not hold each Analyzer responsible for identifying the severity of its
+diagnostics. Instead, we expect that drivers will allow the user to
+customize the filtering and prioritization of diagnostics based on the
+producing Analyzer and optional Category, according to the user's
+preferences.
Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
and buildtag, inspect the raw text of Go source files or even non-Go
@@ -244,6 +244,9 @@ if the default encoding is unsuitable. Facts should be stateless.
Because serialized facts may appear within build outputs, the gob encoding
of a fact must be deterministic, to avoid spurious cache misses in
build systems that use content-addressable caches.
+The driver makes a single call to the gob encoder for all facts
+exported by a given analysis pass, so that the topology of
+shared data structures referenced by multiple facts is preserved.
The Pass type has functions to import and export facts,
associated either with an object or with a package:
@@ -297,7 +300,7 @@ singlechecker and multichecker subpackages.
The singlechecker package provides the main function for a command that
runs one analyzer. By convention, each analyzer such as
-go/passes/findcall should be accompanied by a singlechecker-based
+go/analysis/passes/findcall should be accompanied by a singlechecker-based
command such as go/analysis/passes/findcall/cmd/findcall, defined in its
entirety as:
diff --git a/go/analysis/internal/analysisflags/flags.go b/go/analysis/internal/analysisflags/flags.go
index 4b7be2d1f5f..2ea630608c4 100644
--- a/go/analysis/internal/analysisflags/flags.go
+++ b/go/analysis/internal/analysisflags/flags.go
@@ -339,9 +339,38 @@ func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
}
// A JSONTree is a mapping from package ID to analysis name to result.
-// Each result is either a jsonError or a list of jsonDiagnostic.
+// Each result is either a jsonError or a list of JSONDiagnostic.
type JSONTree map[string]map[string]interface{}
+// A JSONTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type JSONTextEdit struct {
+ Filename string `json:"filename"`
+ Start int `json:"start"`
+ End int `json:"end"`
+ New string `json:"new"`
+}
+
+// A JSONSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type JSONSuggestedFix struct {
+ Message string `json:"message"`
+ Edits []JSONTextEdit `json:"edits"`
+}
+
+// A JSONDiagnostic can be used to encode and decode analysis.Diagnostics to and
+// from JSON.
+// TODO(matloob): Should the JSON diagnostics contain ranges?
+// If so, how should they be formatted?
+type JSONDiagnostic struct {
+ Category string `json:"category,omitempty"`
+ Posn string `json:"posn"`
+ Message string `json:"message"`
+ SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"`
+}
+
// Add adds the result of analysis 'name' on package 'id'.
// The result is either a list of diagnostics or an error.
func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) {
@@ -352,20 +381,31 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.
}
v = jsonError{err.Error()}
} else if len(diags) > 0 {
- type jsonDiagnostic struct {
- Category string `json:"category,omitempty"`
- Posn string `json:"posn"`
- Message string `json:"message"`
- }
- var diagnostics []jsonDiagnostic
- // TODO(matloob): Should the JSON diagnostics contain ranges?
- // If so, how should they be formatted?
+ diagnostics := make([]JSONDiagnostic, 0, len(diags))
for _, f := range diags {
- diagnostics = append(diagnostics, jsonDiagnostic{
- Category: f.Category,
- Posn: fset.Position(f.Pos).String(),
- Message: f.Message,
- })
+ var fixes []JSONSuggestedFix
+ for _, fix := range f.SuggestedFixes {
+ var edits []JSONTextEdit
+ for _, edit := range fix.TextEdits {
+ edits = append(edits, JSONTextEdit{
+ Filename: fset.Position(edit.Pos).Filename,
+ Start: fset.Position(edit.Pos).Offset,
+ End: fset.Position(edit.End).Offset,
+ New: string(edit.NewText),
+ })
+ }
+ fixes = append(fixes, JSONSuggestedFix{
+ Message: fix.Message,
+ Edits: edits,
+ })
+ }
+ jdiag := JSONDiagnostic{
+ Category: f.Category,
+ Posn: fset.Position(f.Pos).String(),
+ Message: f.Message,
+ SuggestedFixes: fixes,
+ }
+ diagnostics = append(diagnostics, jdiag)
}
v = diagnostics
}
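To make the new wire format concrete, a minimal sketch using mirror types (the real types above live in an internal package and are copied here only to print an example; all values are invented):

```
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror types for illustration only; they copy the JSON tags of
// JSONTextEdit, JSONSuggestedFix, and JSONDiagnostic above.
type textEdit struct {
	Filename string `json:"filename"`
	Start    int    `json:"start"`
	End      int    `json:"end"`
	New      string `json:"new"`
}

type suggestedFix struct {
	Message string     `json:"message"`
	Edits   []textEdit `json:"edits"`
}

type diagnostic struct {
	Category       string         `json:"category,omitempty"`
	Posn           string         `json:"posn"`
	Message        string         `json:"message"`
	SuggestedFixes []suggestedFix `json:"suggested_fixes,omitempty"`
}

func main() {
	d := diagnostic{
		Posn:    "a.go:5:2",
		Message: "self-assignment of x to x",
		SuggestedFixes: []suggestedFix{{
			Message: "Remove self-assignment",
			Edits:   []textEdit{{Filename: "a.go", Start: 20, End: 30}},
		}},
	}
	out, _ := json.MarshalIndent(d, "", "  ")
	fmt.Println(string(out))
}
```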
diff --git a/go/analysis/internal/checker/checker.go b/go/analysis/internal/checker/checker.go
index 51cbf689ac0..cf76bdebe71 100644
--- a/go/analysis/internal/checker/checker.go
+++ b/go/analysis/internal/checker/checker.go
@@ -33,8 +33,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
)
var (
@@ -147,7 +145,11 @@ func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
roots := analyze(initial, analyzers)
if Fix {
- applyFixes(roots)
+ if err := applyFixes(roots); err != nil {
+ // Fail when applying fixes failed.
+ log.Print(err)
+ return 1
+ }
}
return printDiagnostics(roots)
}
@@ -305,7 +307,7 @@ func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action
return roots
}
-func applyFixes(roots []*action) {
+func applyFixes(roots []*action) error {
visited := make(map[*action]bool)
var apply func(*action) error
var visitAll func(actions []*action) error
@@ -313,7 +315,9 @@ func applyFixes(roots []*action) {
for _, act := range actions {
if !visited[act] {
visited[act] = true
- visitAll(act.deps)
+ if err := visitAll(act.deps); err != nil {
+ return err
+ }
if err := apply(act); err != nil {
return err
}
@@ -332,6 +336,10 @@ func applyFixes(roots []*action) {
edit offsetedit
left, right *node
}
+ // Edits x and y are equivalent.
+ equiv := func(x, y offsetedit) bool {
+ return x.start == y.start && x.end == y.end && bytes.Equal(x.newText, y.newText)
+ }
var insert func(tree **node, edit offsetedit) error
insert = func(treeptr **node, edit offsetedit) error {
@@ -345,6 +353,13 @@ func applyFixes(roots []*action) {
} else if edit.start >= tree.edit.end {
return insert(&tree.right, edit)
}
+ if equiv(edit, tree.edit) { // equivalent edits?
+ // We skip over equivalent edits without considering them
+ // an error. This handles identical edits coming from the
+ // multiple ways of loading a package into a
+ // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]".
+ return nil
+ }
// Overlapping text edit.
return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)",
@@ -384,14 +399,16 @@ func applyFixes(roots []*action) {
return nil
}
- visitAll(roots)
+ if err := visitAll(roots); err != nil {
+ return err
+ }
fset := token.NewFileSet() // Shared by parse calls below
// Now we've got a set of valid edits for each file. Get the new file contents.
for f, tree := range editsForFile {
contents, err := ioutil.ReadFile(f.Name())
if err != nil {
- log.Fatal(err)
+ return err
}
cur := 0 // current position in the file
@@ -408,6 +425,8 @@ func applyFixes(roots []*action) {
if edit.start > cur {
out.Write(contents[cur:edit.start])
out.Write(edit.newText)
+ } else if cur == 0 && edit.start == 0 { // edit starts at first character?
+ out.Write(edit.newText)
}
cur = edit.end
@@ -430,8 +449,11 @@ func applyFixes(roots []*action) {
}
}
- ioutil.WriteFile(f.Name(), out.Bytes(), 0644)
+ if err := ioutil.WriteFile(f.Name(), out.Bytes(), 0644); err != nil {
+ return err
+ }
}
+ return nil
}
// printDiagnostics prints the diagnostics for the root packages in either
@@ -578,7 +600,6 @@ type action struct {
deps []*action
objectFacts map[objectFactKey]analysis.Fact
packageFacts map[packageFactKey]analysis.Fact
- inputs map[*analysis.Analyzer]interface{}
result interface{}
diagnostics []analysis.Diagnostic
err error
@@ -676,14 +697,16 @@ func (act *action) execOnce() {
// Run the analysis.
pass := &analysis.Pass{
- Analyzer: act.a,
- Fset: act.pkg.Fset,
- Files: act.pkg.Syntax,
- OtherFiles: act.pkg.OtherFiles,
- IgnoredFiles: act.pkg.IgnoredFiles,
- Pkg: act.pkg.Types,
- TypesInfo: act.pkg.TypesInfo,
- TypesSizes: act.pkg.TypesSizes,
+ Analyzer: act.a,
+ Fset: act.pkg.Fset,
+ Files: act.pkg.Syntax,
+ OtherFiles: act.pkg.OtherFiles,
+ IgnoredFiles: act.pkg.IgnoredFiles,
+ Pkg: act.pkg.Types,
+ TypesInfo: act.pkg.TypesInfo,
+ TypesSizes: act.pkg.TypesSizes,
+ TypeErrors: act.pkg.TypeErrors,
+
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: act.importObjectFact,
@@ -695,36 +718,6 @@ func (act *action) execOnce() {
}
act.pass = pass
- var errors []types.Error
- // Get any type errors that are attributed to the pkg.
- // This is necessary to test analyzers that provide
- // suggested fixes for compiler/type errors.
- for _, err := range act.pkg.Errors {
- if err.Kind != packages.TypeError {
- continue
- }
- // err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-"
- spn := span.Parse(err.Pos)
- // Extract the token positions from the error string.
- line, col, offset := spn.Start().Line(), spn.Start().Column(), -1
- act.pkg.Fset.Iterate(func(f *token.File) bool {
- if f.Name() != spn.URI().Filename() {
- return true
- }
- offset = int(f.LineStart(line)) + col - 1
- return false
- })
- if offset == -1 {
- continue
- }
- errors = append(errors, types.Error{
- Fset: act.pkg.Fset,
- Msg: err.Msg,
- Pos: token.Pos(offset),
- })
- }
- analysisinternal.SetTypeErrors(pass, errors)
-
var err error
if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
err = fmt.Errorf("analysis skipped due to errors in package")
@@ -766,7 +759,7 @@ func inheritFacts(act, dep *action) {
if serialize {
encodedFact, err := codeFact(fact)
if err != nil {
- log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+ log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
}
fact = encodedFact
}
@@ -894,7 +887,7 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
func (act *action) allObjectFacts() []analysis.ObjectFact {
facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
for k := range act.objectFacts {
- facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]})
+ facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
}
return facts
}
@@ -940,7 +933,7 @@ func factType(fact analysis.Fact) reflect.Type {
func (act *action) allPackageFacts() []analysis.PackageFact {
facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
for k := range act.packageFacts {
- facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]})
+ facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
}
return facts
}
diff --git a/go/analysis/internal/checker/checker_test.go b/go/analysis/internal/checker/checker_test.go
index eee211c21a4..f07963fa008 100644
--- a/go/analysis/internal/checker/checker_test.go
+++ b/go/analysis/internal/checker/checker_test.go
@@ -19,14 +19,9 @@ import (
"golang.org/x/tools/internal/testenv"
)
-var from, to string
-
func TestApplyFixes(t *testing.T) {
testenv.NeedsGoPackages(t)
- from = "bar"
- to = "baz"
-
files := map[string]string{
"rename/test.go": `package rename
@@ -75,6 +70,10 @@ var analyzer = &analysis.Analyzer{
}
func run(pass *analysis.Pass) (interface{}, error) {
+ const (
+ from = "bar"
+ to = "baz"
+ )
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{(*ast.Ident)(nil)}
inspect.Preorder(nodeFilter, func(n ast.Node) {
@@ -129,6 +128,18 @@ func Foo(s string) int {
RunDespiteErrors: true,
}
+ // A no-op analyzer that should finish regardless of
+ // parse or type errors in the code.
+ noopWithFact := &analysis.Analyzer{
+ Name: "noopfact",
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: func(pass *analysis.Pass) (interface{}, error) {
+ return nil, nil
+ },
+ RunDespiteErrors: true,
+ FactTypes: []analysis.Fact{&EmptyFact{}},
+ }
+
for _, test := range []struct {
name string
pattern []string
@@ -137,7 +148,17 @@ func Foo(s string) int {
}{
// parse/type errors
{name: "skip-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{analyzer}, code: 1},
- {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 0},
+ // RunDespiteErrors allows a driver to run an Analyzer even after parse/type errors.
+ //
+ // The noop analyzer doesn't use facts, so the driver loads only the root
+ // package from source. For the rest, it asks 'go list' for export data,
+ // which fails because the compiler encounters the type error. Since the
+ // errors come from 'go list', the driver doesn't run the analyzer.
+ {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 1},
+ // The noopfact analyzer does use facts, so the driver loads source for
+ // all dependencies, does type checking itself, recognizes the error as a
+ // type error, and runs the analyzer.
+ {name: "despite-error-fact", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noopWithFact}, code: 0},
// combination of parse/type errors and no errors
{name: "despite-error-and-no-error", pattern: []string{"file=" + path, "sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 1},
// non-existing package error
@@ -151,6 +172,10 @@ func Foo(s string) int {
// no errors
{name: "no-errors", pattern: []string{"sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 0},
} {
+ if test.name == "despite-error" && testenv.Go1Point() < 20 {
+ // The behavior in the comment on the despite-error test only occurs for Go 1.20+.
+ continue
+ }
if got := checker.Run(test.pattern, test.analyzers); got != test.code {
t.Errorf("got incorrect exit code %d for test %s; want %d", got, test.name, test.code)
}
@@ -158,3 +183,7 @@ func Foo(s string) int {
defer cleanup()
}
+
+type EmptyFact struct{}
+
+func (f *EmptyFact) AFact() {}
diff --git a/go/analysis/internal/checker/fix_test.go b/go/analysis/internal/checker/fix_test.go
new file mode 100644
index 00000000000..a114c01b645
--- /dev/null
+++ b/go/analysis/internal/checker/fix_test.go
@@ -0,0 +1,143 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checker_test
+
+import (
+ "flag"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "runtime"
+ "testing"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/internal/checker"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func main() {
+ checker.Fix = true
+ patterns := flag.Args()
+
+ code := checker.Run(patterns, []*analysis.Analyzer{analyzer})
+ os.Exit(code)
+}
+
+// TestFixes ensures that checker.Run applies fixes correctly.
+// This test fork/execs the main function above.
+func TestFixes(t *testing.T) {
+ oses := map[string]bool{"darwin": true, "linux": true}
+ if !oses[runtime.GOOS] {
+ t.Skipf("skipping fork/exec test on this platform")
+ }
+
+ if os.Getenv("TESTFIXES_CHILD") == "1" {
+ // child process
+
+ // replace [progname -test.run=TestFixes -- ...]
+ // by [progname ...]
+ os.Args = os.Args[2:]
+ os.Args[0] = "vet"
+ main()
+ panic("unreachable")
+ }
+
+ testenv.NeedsTool(t, "go")
+
+ files := map[string]string{
+ "rename/foo.go": `package rename
+
+func Foo() {
+ bar := 12
+ _ = bar
+}
+
+// the end
+`,
+ "rename/intestfile_test.go": `package rename
+
+func InTestFile() {
+ bar := 13
+ _ = bar
+}
+
+// the end
+`,
+ "rename/foo_test.go": `package rename_test
+
+func Foo() {
+ bar := 14
+ _ = bar
+}
+
+// the end
+`,
+ }
+ fixed := map[string]string{
+ "rename/foo.go": `package rename
+
+func Foo() {
+ baz := 12
+ _ = baz
+}
+
+// the end
+`,
+ "rename/intestfile_test.go": `package rename
+
+func InTestFile() {
+ baz := 13
+ _ = baz
+}
+
+// the end
+`,
+ "rename/foo_test.go": `package rename_test
+
+func Foo() {
+ baz := 14
+ _ = baz
+}
+
+// the end
+`,
+ }
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatalf("Creating test files failed with %s", err)
+ }
+ defer cleanup()
+
+ args := []string{"-test.run=TestFixes", "--", "rename"}
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append(os.Environ(), "TESTFIXES_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off")
+
+ out, err := cmd.CombinedOutput()
+ if len(out) > 0 {
+ t.Logf("%s: out=<<%s>>", args, out)
+ }
+ var exitcode int
+ if err, ok := err.(*exec.ExitError); ok {
+ exitcode = err.ExitCode() // requires go1.12
+ }
+
+ const diagnosticsExitCode = 3
+ if exitcode != diagnosticsExitCode {
+ t.Errorf("%s: exited %d, want %d", args, exitcode, diagnosticsExitCode)
+ }
+
+ for name, want := range fixed {
+ path := path.Join(dir, "src", name)
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Errorf("error reading %s: %v", path, err)
+ }
+ if got := string(contents); got != want {
+ t.Errorf("contents of %s file did not match expectations. got=%s, want=%s", path, got, want)
+ }
+ }
+}
diff --git a/go/analysis/internal/checker/start_test.go b/go/analysis/internal/checker/start_test.go
new file mode 100644
index 00000000000..ede21159bc8
--- /dev/null
+++ b/go/analysis/internal/checker/start_test.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checker_test
+
+import (
+ "go/ast"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/internal/checker"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TestStartFixes makes sure that modifying the first character
+// of the file takes effect.
+func TestStartFixes(t *testing.T) {
+ testenv.NeedsGoPackages(t)
+
+ files := map[string]string{
+ "comment/doc.go": `/* Package comment */
+package comment
+`}
+
+ want := `// Package comment
+package comment
+`
+
+ testdata, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ path := filepath.Join(testdata, "src/comment/doc.go")
+ checker.Fix = true
+ checker.Run([]string{"file=" + path}, []*analysis.Analyzer{commentAnalyzer})
+
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got := string(contents)
+ if got != want {
+ t.Errorf("contents of rewritten file\ngot: %s\nwant: %s", got, want)
+ }
+
+ defer cleanup()
+}
+
+var commentAnalyzer = &analysis.Analyzer{
+ Name: "comment",
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: commentRun,
+}
+
+func commentRun(pass *analysis.Pass) (interface{}, error) {
+ const (
+ from = "/* Package comment */"
+ to = "// Package comment"
+ )
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ inspect.Preorder(nil, func(n ast.Node) {
+ if n, ok := n.(*ast.Comment); ok && n.Text == from {
+ pass.Report(analysis.Diagnostic{
+ Pos: n.Pos(),
+ End: n.End(),
+ SuggestedFixes: []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: n.Pos(),
+ End: n.End(),
+ NewText: []byte(to),
+ }},
+ }},
+ })
+ }
+ })
+
+ return nil, nil
+}
diff --git a/go/analysis/passes/asmdecl/asmdecl.go b/go/analysis/passes/asmdecl/asmdecl.go
index 6fbfe7e181c..7288559fc0e 100644
--- a/go/analysis/passes/asmdecl/asmdecl.go
+++ b/go/analysis/passes/asmdecl/asmdecl.go
@@ -92,7 +92,7 @@ var (
asmArchMips64LE = asmArch{name: "mips64le", bigEndian: false, stack: "R29", lr: true}
asmArchPpc64 = asmArch{name: "ppc64", bigEndian: true, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}}
asmArchPpc64LE = asmArch{name: "ppc64le", bigEndian: false, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}}
- asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true}
+ asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true, retRegs: []string{"X10", "F10"}}
asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true}
asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false}
diff --git a/go/analysis/passes/asmdecl/asmdecl_test.go b/go/analysis/passes/asmdecl/asmdecl_test.go
index f6b01a9c308..50938a07571 100644
--- a/go/analysis/passes/asmdecl/asmdecl_test.go
+++ b/go/analysis/passes/asmdecl/asmdecl_test.go
@@ -19,11 +19,12 @@ var goosarches = []string{
"linux/arm", // asm3.s
// TODO: skip test on loong64 until go toolchain supports loong64.
// "linux/loong64", // asm10.s
- "linux/mips64", // asm5.s
- "linux/s390x", // asm6.s
- "linux/ppc64", // asm7.s
- "linux/mips", // asm8.s,
- "js/wasm", // asm9.s
+ "linux/mips64", // asm5.s
+ "linux/s390x", // asm6.s
+ "linux/ppc64", // asm7.s
+ "linux/mips", // asm8.s,
+ "js/wasm", // asm9.s
+ "linux/riscv64", // asm11.s
}
func Test(t *testing.T) {
diff --git a/go/analysis/passes/asmdecl/testdata/src/a/asm11.s b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s
new file mode 100644
index 00000000000..e81e8ee179f
--- /dev/null
+++ b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64
+
+// writing to result in ABIInternal function
+TEXT ·returnABIInternal(SB), NOSPLIT, $8
+ MOV $123, X10
+ RET
+TEXT ·returnmissingABIInternal(SB), NOSPLIT, $8
+ MOV $123, X20
+ RET // want `RET without writing to result register`
diff --git a/go/analysis/passes/assign/assign.go b/go/analysis/passes/assign/assign.go
index 3586638efc0..89146b73346 100644
--- a/go/analysis/passes/assign/assign.go
+++ b/go/analysis/passes/assign/assign.go
@@ -12,6 +12,7 @@ import (
"fmt"
"go/ast"
"go/token"
+ "go/types"
"reflect"
"golang.org/x/tools/go/analysis"
@@ -51,7 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
for i, lhs := range stmt.Lhs {
rhs := stmt.Rhs[i]
if analysisutil.HasSideEffects(pass.TypesInfo, lhs) ||
- analysisutil.HasSideEffects(pass.TypesInfo, rhs) {
+ analysisutil.HasSideEffects(pass.TypesInfo, rhs) ||
+ isMapIndex(pass.TypesInfo, lhs) {
continue // expressions may not be equal
}
if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
@@ -74,3 +76,14 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
+
+// isMapIndex returns true if e is a map index expression.
+func isMapIndex(info *types.Info, e ast.Expr) bool {
+ if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok {
+ if typ := info.Types[idx.X].Type; typ != nil {
+ _, ok := typ.Underlying().(*types.Map)
+ return ok
+ }
+ }
+ return false
+}
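Note on the isMapIndex guard above: a map self-assignment is not necessarily a no-op, because the index expression on the right reads a possibly missing element while the assignment on the left inserts the key. A small standalone illustration (not part of the change itself):

```
package main

import "fmt"

func main() {
	m := map[string]int{}
	// Reading m["missing"] yields the zero value; writing it back inserts the key.
	m["missing"] = m["missing"]
	_, ok := m["missing"]
	fmt.Println(ok) // true: the "self-assignment" changed the map
}
```

This is why the checker bails out on map indices instead of reporting them as redundant.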
diff --git a/go/analysis/passes/assign/testdata/src/a/a.go b/go/analysis/passes/assign/testdata/src/a/a.go
index eaec634d181..f9663120b4a 100644
--- a/go/analysis/passes/assign/testdata/src/a/a.go
+++ b/go/analysis/passes/assign/testdata/src/a/a.go
@@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) {
}
func num() int { return 2 }
+
+func Index() {
+ s := []int{1}
+ s[0] = s[0] // want "self-assignment"
+
+ var a [5]int
+ a[0] = a[0] // want "self-assignment"
+
+ pa := &[2]int{1, 2}
+ pa[1] = pa[1] // want "self-assignment"
+
+ var pss *struct { // report self assignment despite nil dereference
+ s []int
+ }
+ pss.s[0] = pss.s[0] // want "self-assignment"
+
+ m := map[int]string{1: "a"}
+ m[0] = m[0] // bail on map self-assignments due to side effects
+ m[1] = m[1] // not modeling what elements must be in the map
+ (m[2]) = (m[2]) // even with parens
+ type Map map[string]bool
+ named := make(Map)
+ named["s"] = named["s"] // even on named maps.
+ var psm *struct {
+ m map[string]int
+ }
+ psm.m["key"] = psm.m["key"] // handles dereferences
+}
diff --git a/go/analysis/passes/assign/testdata/src/a/a.go.golden b/go/analysis/passes/assign/testdata/src/a/a.go.golden
index 6c91d3666cc..f45b7f208e2 100644
--- a/go/analysis/passes/assign/testdata/src/a/a.go.golden
+++ b/go/analysis/passes/assign/testdata/src/a/a.go.golden
@@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) {
}
func num() int { return 2 }
+
+func Index() {
+ s := []int{1}
+ // want "self-assignment"
+
+ var a [5]int
+ // want "self-assignment"
+
+ pa := &[2]int{1, 2}
+ // want "self-assignment"
+
+ var pss *struct { // report self assignment despite nil dereference
+ s []int
+ }
+ // want "self-assignment"
+
+ m := map[int]string{1: "a"}
+ m[0] = m[0] // bail on map self-assignments due to side effects
+ m[1] = m[1] // not modeling what elements must be in the map
+ (m[2]) = (m[2]) // even with parens
+ type Map map[string]bool
+ named := make(Map)
+ named["s"] = named["s"] // even on named maps.
+ var psm *struct {
+ m map[string]int
+ }
+ psm.m["key"] = psm.m["key"] // handles dereferences
+}
diff --git a/go/analysis/passes/buildssa/buildssa.go b/go/analysis/passes/buildssa/buildssa.go
index 4ec0e73ff2c..02b7b18b3f5 100644
--- a/go/analysis/passes/buildssa/buildssa.go
+++ b/go/analysis/passes/buildssa/buildssa.go
@@ -48,8 +48,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
// Some Analyzers may need GlobalDebug, in which case we'll have
// to set it globally, but let's wait till we need it.
- // Monomorphize at least until type parameters are available.
- mode := ssa.InstantiateGenerics
+ mode := ssa.BuilderMode(0)
prog := ssa.NewProgram(pass.Fset, mode)
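For context, ssa.BuilderMode is a bit set, so BuilderMode(0) selects the default build behavior with no extra options. A minimal sketch of constructing a program that way (the helper name is illustrative):

```
package main

import (
	"go/token"

	"golang.org/x/tools/go/ssa"
)

// newProgram mirrors the buildssa change: BuilderMode is a bit set and
// zero means "no extra options" (no debug info, no generic instantiation).
func newProgram(fset *token.FileSet) *ssa.Program {
	mode := ssa.BuilderMode(0)
	// Options such as ssa.GlobalDebug could be OR'ed in by analyzers that need them.
	return ssa.NewProgram(fset, mode)
}

func main() {
	_ = newProgram(token.NewFileSet())
}
```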
diff --git a/go/analysis/passes/composite/composite.go b/go/analysis/passes/composite/composite.go
index d3670aca97a..64e184d3439 100644
--- a/go/analysis/passes/composite/composite.go
+++ b/go/analysis/passes/composite/composite.go
@@ -7,6 +7,7 @@
package composite
import (
+ "fmt"
"go/ast"
"go/types"
"strings"
@@ -83,7 +84,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
}
for _, typ := range structuralTypes {
under := deref(typ.Underlying())
- if _, ok := under.(*types.Struct); !ok {
+ strct, ok := under.(*types.Struct)
+ if !ok {
// skip non-struct composite literals
continue
}
@@ -92,20 +94,47 @@ func run(pass *analysis.Pass) (interface{}, error) {
continue
}
- // check if the CompositeLit contains an unkeyed field
+ // check if the struct contains an unkeyed field
allKeyValue := true
- for _, e := range cl.Elts {
+ var suggestedFixAvailable = len(cl.Elts) == strct.NumFields()
+ var missingKeys []analysis.TextEdit
+ for i, e := range cl.Elts {
if _, ok := e.(*ast.KeyValueExpr); !ok {
allKeyValue = false
- break
+ if i >= strct.NumFields() {
+ break
+ }
+ field := strct.Field(i)
+ if !field.Exported() {
+ // Adding unexported field names for structs not defined
+ // locally will not work.
+ suggestedFixAvailable = false
+ break
+ }
+ missingKeys = append(missingKeys, analysis.TextEdit{
+ Pos: e.Pos(),
+ End: e.Pos(),
+ NewText: []byte(fmt.Sprintf("%s: ", field.Name())),
+ })
}
}
if allKeyValue {
- // all the composite literal fields are keyed
+ // all the struct fields are keyed
continue
}
- pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName)
+ diag := analysis.Diagnostic{
+ Pos: cl.Pos(),
+ End: cl.End(),
+ Message: fmt.Sprintf("%s struct literal uses unkeyed fields", typeName),
+ }
+ if suggestedFixAvailable {
+ diag.SuggestedFixes = []analysis.SuggestedFix{{
+ Message: "Add field names to struct literal",
+ TextEdits: missingKeys,
+ }}
+ }
+ pass.Report(diag)
return
}
})
diff --git a/go/analysis/passes/composite/composite_test.go b/go/analysis/passes/composite/composite_test.go
index 952de8bfdad..7afaaa7ffd4 100644
--- a/go/analysis/passes/composite/composite_test.go
+++ b/go/analysis/passes/composite/composite_test.go
@@ -18,5 +18,5 @@ func Test(t *testing.T) {
if typeparams.Enabled {
pkgs = append(pkgs, "typeparams")
}
- analysistest.Run(t, testdata, composite.Analyzer, pkgs...)
+ analysistest.RunWithSuggestedFixes(t, testdata, composite.Analyzer, pkgs...)
}
diff --git a/go/analysis/passes/composite/testdata/src/a/a.go b/go/analysis/passes/composite/testdata/src/a/a.go
index 3a5bc203b03..cd69d395173 100644
--- a/go/analysis/passes/composite/testdata/src/a/a.go
+++ b/go/analysis/passes/composite/testdata/src/a/a.go
@@ -11,6 +11,7 @@ import (
"go/scanner"
"go/token"
"image"
+ "sync"
"unicode"
)
@@ -79,6 +80,18 @@ var badStructLiteral = flag.Flag{ // want "unkeyed fields"
nil, // Value
"DefValue",
}
+var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+ "DefValue",
+ "Extra Field",
+}
+var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+}
var delta [3]rune
@@ -100,6 +113,10 @@ var badScannerErrorList = scanner.ErrorList{
&scanner.Error{token.Position{}, "foobar"}, // want "unkeyed fields"
}
+// sync.Mutex has unexported fields. We expect a diagnostic but no
+// suggested fix.
+var mu = sync.Mutex{0, 0} // want "unkeyed fields"
+
// Check whitelisted structs: if vet is run with --compositewhitelist=false,
// this line triggers an error.
var whitelistedPoint = image.Point{1, 2}
diff --git a/go/analysis/passes/composite/testdata/src/a/a.go.golden b/go/analysis/passes/composite/testdata/src/a/a.go.golden
new file mode 100644
index 00000000000..fe73a2e0a1d
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/a/a.go.golden
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the test for untagged struct literals.
+
+package a
+
+import (
+ "flag"
+ "go/scanner"
+ "go/token"
+ "image"
+ "sync"
+ "unicode"
+)
+
+var Okay1 = []string{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay2 = map[string]bool{
+ "Name": true,
+ "Usage": true,
+ "DefValue": true,
+}
+
+var Okay3 = struct {
+ X string
+ Y string
+ Z string
+}{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay4 = []struct {
+ A int
+ B int
+}{
+ {1, 2},
+ {3, 4},
+}
+
+type MyStruct struct {
+ X string
+ Y string
+ Z string
+}
+
+var Okay5 = &MyStruct{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay6 = []MyStruct{
+ {"foo", "bar", "baz"},
+ {"aa", "bb", "cc"},
+}
+
+var Okay7 = []*MyStruct{
+ {"foo", "bar", "baz"},
+ {"aa", "bb", "cc"},
+}
+
+// Testing is awkward because we need to reference things from a separate package
+// to trigger the warnings.
+
+var goodStructLiteral = flag.Flag{
+ Name: "Name",
+ Usage: "Usage",
+}
+var badStructLiteral = flag.Flag{ // want "unkeyed fields"
+ Name: "Name",
+ Usage: "Usage",
+ Value: nil, // Value
+ DefValue: "DefValue",
+}
+var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+ "DefValue",
+ "Extra Field",
+}
+var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+}
+
+var delta [3]rune
+
+// SpecialCase is a named slice of CaseRange to test issue 9171.
+var goodNamedSliceLiteral = unicode.SpecialCase{
+ {Lo: 1, Hi: 2, Delta: delta},
+ unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta},
+}
+var badNamedSliceLiteral = unicode.SpecialCase{
+ {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+ unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+}
+
+// ErrorList is a named slice, so no warnings should be emitted.
+var goodScannerErrorList = scanner.ErrorList{
+ &scanner.Error{Msg: "foobar"},
+}
+var badScannerErrorList = scanner.ErrorList{
+ &scanner.Error{Pos: token.Position{}, Msg: "foobar"}, // want "unkeyed fields"
+}
+
+// sync.Mutex has unexported fields. We expect a diagnostic but no
+// suggested fix.
+var mu = sync.Mutex{0, 0} // want "unkeyed fields"
+
+// Check whitelisted structs: if vet is run with --compositewhitelist=false,
+// this line triggers an error.
+var whitelistedPoint = image.Point{1, 2}
+
+// Do not check type from unknown package.
+// See issue 15408.
+var unknownPkgVar = unicode.NoSuchType{"foo", "bar"}
+
+// A named pointer slice of CaseRange to test issue 23539. In
+// particular, we're interested in how some slice elements omit their
+// type.
+var goodNamedPointerSliceLiteral = []*unicode.CaseRange{
+ {Lo: 1, Hi: 2},
+ &unicode.CaseRange{Lo: 1, Hi: 2},
+}
+var badNamedPointerSliceLiteral = []*unicode.CaseRange{
+ {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+ &unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+}
+
+// unicode.Range16 is whitelisted, so there'll be no vet error
+var range16 = unicode.Range16{0xfdd0, 0xfdef, 1}
+
+// unicode.Range32 is whitelisted, so there'll be no vet error
+var range32 = unicode.Range32{0x1fffe, 0x1ffff, 1}
diff --git a/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden
new file mode 100644
index 00000000000..20b652e88dd
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package a
+
+import "testing"
+
+var fuzzTargets = []testing.InternalFuzzTarget{
+ {"Fuzz", Fuzz},
+}
+
+func Fuzz(f *testing.F) {}
diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
index dd5d57efed4..f9a5e1fb105 100644
--- a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
+++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
@@ -6,7 +6,7 @@ package typeparams
import "typeparams/lib"
-type localStruct struct { F int }
+type localStruct struct{ F int }
func F[
T1 ~struct{ f int },
@@ -20,8 +20,8 @@ func F[
_ = T1{2}
_ = T2a{2}
_ = T2b{2} // want "unkeyed fields"
- _ = T3{1,2}
- _ = T4{1,2}
- _ = T5{1:2}
- _ = T6{1:2}
+ _ = T3{1, 2}
+ _ = T4{1, 2}
+ _ = T5{1: 2}
+ _ = T6{1: 2}
}
diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden
new file mode 100644
index 00000000000..66cd9158cb6
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import "typeparams/lib"
+
+type localStruct struct{ F int }
+
+func F[
+ T1 ~struct{ f int },
+ T2a localStruct,
+ T2b lib.Struct,
+ T3 ~[]int,
+ T4 lib.Slice,
+ T5 ~map[int]int,
+ T6 lib.Map,
+]() {
+ _ = T1{2}
+ _ = T2a{2}
+ _ = T2b{F: 2} // want "unkeyed fields"
+ _ = T3{1, 2}
+ _ = T4{1, 2}
+ _ = T5{1: 2}
+ _ = T6{1: 2}
+}
diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock.go b/go/analysis/passes/copylock/testdata/src/a/copylock.go
index 7704b3a42b2..4ab66dca1f6 100644
--- a/go/analysis/passes/copylock/testdata/src/a/copylock.go
+++ b/go/analysis/passes/copylock/testdata/src/a/copylock.go
@@ -50,27 +50,27 @@ func BadFunc() {
var t Tlock
var tp *Tlock
tp = &t
- *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync.Mutex`
- t = *tp // want "assignment copies lock value to t: a.Tlock contains sync.Once contains sync.Mutex"
+ *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync\b.*`
+ t = *tp // want `assignment copies lock value to t: a.Tlock contains sync.Once contains sync\b.*`
y := *x // want "assignment copies lock value to y: sync.Mutex"
- var z = t // want "variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync.Mutex"
+ var z = t // want `variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync\b.*`
w := struct{ L sync.Mutex }{
L: *x, // want `literal copies lock value from \*x: sync.Mutex`
}
var q = map[int]Tlock{
- 1: t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex"
- 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex`
+ 1: t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*`
+ 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*`
}
yy := []Tlock{
- t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex"
- *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex`
+ t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*`
+ *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*`
}
// override 'new' keyword
new := func(interface{}) {}
- new(t) // want "call of new copies lock value: a.Tlock contains sync.Once contains sync.Mutex"
+ new(t) // want `call of new copies lock value: a.Tlock contains sync.Once contains sync\b.*`
// copy of array of locks
var muA [5]sync.Mutex
@@ -193,9 +193,9 @@ func SyncTypesCheck() {
var onceX sync.Once
var onceXX = sync.Once{}
onceX1 := new(sync.Once)
- onceY := onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex"
- onceY = onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex"
- var onceYY = onceX // want "variable declaration copies lock value to onceYY: sync.Once contains sync.Mutex"
+ onceY := onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*`
+ onceY = onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*`
+ var onceYY = onceX // want `variable declaration copies lock value to onceYY: sync.Once contains sync\b.*`
onceP := &onceX
onceZ := &sync.Once{}
}
diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
index 801bc6f24f1..0d3168f1ef1 100644
--- a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
+++ b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
@@ -126,7 +126,7 @@ func AcceptedCases() {
// sync.Mutex gets called out, but without any reference to the sync.Once.
type LocalOnce sync.Once
-func (LocalOnce) Bad() {} // want "Bad passes lock by value: a.LocalOnce contains sync.Mutex"
+func (LocalOnce) Bad() {} // want `Bad passes lock by value: a.LocalOnce contains sync.\b.*`
// False negative:
// LocalMutex doesn't have a Lock method.
diff --git a/go/analysis/passes/fieldalignment/fieldalignment.go b/go/analysis/passes/fieldalignment/fieldalignment.go
index 78afe94ab30..aff663046a3 100644
--- a/go/analysis/passes/fieldalignment/fieldalignment.go
+++ b/go/analysis/passes/fieldalignment/fieldalignment.go
@@ -23,7 +23,7 @@ import (
const Doc = `find structs that would use less memory if their fields were sorted
This analyzer finds structs that can be rearranged to use less memory, and provides
-a suggested edit with the optimal order.
+a suggested edit with the most compact order.
Note that there are two different diagnostics reported. One checks struct size,
and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the
@@ -41,6 +41,11 @@ has 24 pointer bytes because it has to scan further through the *uint32.
struct { string; uint32 }
has 8 because it can stop immediately after the string pointer.
+
+Be aware that the most compact order is not always the most efficient.
+In rare cases it may cause two variables each updated by its own goroutine
+to occupy the same CPU cache line, inducing a form of memory contention
+known as "false sharing" that slows down both goroutines.
`
var Analyzer = &analysis.Analyzer{
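A rough sketch of the false-sharing caveat added to the doc above: two counters each updated by its own goroutine can be kept on separate cache lines with explicit padding. The 64-byte line size below is an assumption for illustration, not something the analyzer computes:

```
package main

import (
	"sync"
	"sync/atomic"
)

// padded keeps a and b on different cache lines so goroutines updating them
// independently do not contend. 64 bytes is a common line size, assumed here.
type padded struct {
	a uint64
	_ [56]byte
	b uint64
}

func main() {
	var c padded
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); atomic.AddUint64(&c.a, 1) }()
	go func() { defer wg.Done(); atomic.AddUint64(&c.b, 1) }()
	wg.Wait()
}
```

Note that fieldalignment's most compact ordering could move such padding, which is exactly the trade-off the new paragraph warns about.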
diff --git a/go/analysis/passes/inspect/inspect.go b/go/analysis/passes/inspect/inspect.go
index c1c1127d089..165c70cbd36 100644
--- a/go/analysis/passes/inspect/inspect.go
+++ b/go/analysis/passes/inspect/inspect.go
@@ -24,7 +24,7 @@
// inspect.Preorder(nil, func(n ast.Node) {
// ...
// })
-// return nil
+// return nil, nil
// }
package inspect
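The corrected example returns the (interface{}, error) pair expected of a Run function. A complete, minimal analyzer wired to the inspect pass in that style might look like the sketch below; the analyzer name and the check it performs are illustrative, not part of this change:

```
package main

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/analysis/singlechecker"
	"golang.org/x/tools/go/ast/inspector"
)

var analyzer = &analysis.Analyzer{
	Name:     "nodoc",
	Doc:      "report functions without doc comments (illustrative example)",
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
		ins.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
			fn := n.(*ast.FuncDecl)
			if fn.Doc == nil {
				pass.Reportf(fn.Pos(), "function %s has no doc comment", fn.Name.Name)
			}
		})
		return nil, nil // the corrected form from the doc fix above
	},
}

func main() { singlechecker.Main(analyzer) }
```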
diff --git a/go/analysis/passes/loopclosure/loopclosure.go b/go/analysis/passes/loopclosure/loopclosure.go
index 98de9a9bacd..5291d1b2cd0 100644
--- a/go/analysis/passes/loopclosure/loopclosure.go
+++ b/go/analysis/passes/loopclosure/loopclosure.go
@@ -18,19 +18,60 @@ import (
const Doc = `check references to loop variables from within nested functions
-This analyzer checks for references to loop variables from within a
-function literal inside the loop body. It checks only instances where
-the function literal is called in a defer or go statement that is the
-last statement in the loop body, as otherwise we would need whole
-program analysis.
+This analyzer reports places where a function literal references the
+iteration variable of an enclosing loop, and the loop calls the function
+in such a way (e.g. with go or defer) that it may outlive the loop
+iteration and possibly observe the wrong value of the variable.
-For example:
+In this example, all the deferred functions run after the loop has
+completed, so all observe the final value of v.
- for i, v := range s {
- go func() {
- println(i, v) // not what you might expect
- }()
- }
+ for _, v := range list {
+ defer func() {
+ use(v) // incorrect
+ }()
+ }
+
+One fix is to create a new variable for each iteration of the loop:
+
+ for _, v := range list {
+ v := v // new var per iteration
+ defer func() {
+ use(v) // ok
+ }()
+ }
+
+The next example uses a go statement and has a similar problem.
+In addition, it has a data race because the loop updates v
+concurrently with the goroutines accessing it.
+
+ for _, v := range elem {
+ go func() {
+ use(v) // incorrect, and a data race
+ }()
+ }
+
+A fix is the same as before. The checker also reports problems
+in goroutines started by golang.org/x/sync/errgroup.Group.
+A hard-to-spot variant of this form is common in parallel tests:
+
+ func Test(t *testing.T) {
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ use(test) // incorrect, and a data race
+ })
+ }
+ }
+
+The t.Parallel() call causes the rest of the function to execute
+concurrently with the loop.
+
+The analyzer reports references only in the last statement,
+as it is not deep enough to understand the effects of subsequent
+statements that might render the reference benign.
+("Last statement" is defined recursively in compound
+statements such as if, switch, and select.)
See: https://golang.org/doc/go_faq.html#closures_and_goroutines`
@@ -50,10 +91,12 @@ func run(pass *analysis.Pass) (interface{}, error) {
}
inspect.Preorder(nodeFilter, func(n ast.Node) {
// Find the variables updated by the loop statement.
- var vars []*ast.Ident
+ var vars []types.Object
addVar := func(expr ast.Expr) {
- if id, ok := expr.(*ast.Ident); ok {
- vars = append(vars, id)
+ if id, _ := expr.(*ast.Ident); id != nil {
+ if obj := pass.TypesInfo.ObjectOf(id); obj != nil {
+ vars = append(vars, obj)
+ }
}
}
var body *ast.BlockStmt
@@ -79,52 +122,141 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
- // Inspect a go or defer statement
- // if it's the last one in the loop body.
- // (We give up if there are following statements,
- // because it's hard to prove go isn't followed by wait,
- // or defer by return.)
- if len(body.List) == 0 {
- return
- }
- // The function invoked in the last return statement.
- var fun ast.Expr
- switch s := body.List[len(body.List)-1].(type) {
- case *ast.GoStmt:
- fun = s.Call.Fun
- case *ast.DeferStmt:
- fun = s.Call.Fun
- case *ast.ExprStmt: // check for errgroup.Group.Go()
- if call, ok := s.X.(*ast.CallExpr); ok {
- fun = goInvokes(pass.TypesInfo, call)
- }
- }
- lit, ok := fun.(*ast.FuncLit)
- if !ok {
- return
- }
- ast.Inspect(lit.Body, func(n ast.Node) bool {
- id, ok := n.(*ast.Ident)
- if !ok || id.Obj == nil {
- return true
+ // Inspect statements to find function literals that may be run outside of
+ // the current loop iteration.
+ //
+ // For go, defer, and errgroup.Group.Go, we ignore all but the last
+ // statement, because it's hard to prove go isn't followed by wait, or
+ // defer by return. "Last" is defined recursively.
+ //
+ // TODO: consider allowing the "last" go/defer/Go statement to be followed by
+ // N "trivial" statements, possibly under a recursive definition of "trivial"
+ // so that the checker could, for example, conclude that a go statement is
+ // followed by an if statement made of only trivial statements and trivial expressions,
+ // and hence the go statement could still be checked.
+ forEachLastStmt(body.List, func(last ast.Stmt) {
+ var stmts []ast.Stmt
+ switch s := last.(type) {
+ case *ast.GoStmt:
+ stmts = litStmts(s.Call.Fun)
+ case *ast.DeferStmt:
+ stmts = litStmts(s.Call.Fun)
+ case *ast.ExprStmt: // check for errgroup.Group.Go
+ if call, ok := s.X.(*ast.CallExpr); ok {
+ stmts = litStmts(goInvoke(pass.TypesInfo, call))
+ }
}
- if pass.TypesInfo.Types[id].Type == nil {
- // Not referring to a variable (e.g. struct field name)
- return true
+ for _, stmt := range stmts {
+ reportCaptured(pass, vars, stmt)
}
- for _, v := range vars {
- if v.Obj == id.Obj {
- pass.ReportRangef(id, "loop variable %s captured by func literal",
- id.Name)
+ })
+
+ // Also check for testing.T.Run (with T.Parallel).
+ // We consider every t.Run statement in the loop body, because there is
+ // no commonly used mechanism for synchronizing parallel subtests.
+ // It is of course theoretically possible to synchronize parallel subtests,
+ // though such a pattern is likely to be exceedingly rare as it would be
+ // fighting against the test runner.
+ for _, s := range body.List {
+ switch s := s.(type) {
+ case *ast.ExprStmt:
+ if call, ok := s.X.(*ast.CallExpr); ok {
+ for _, stmt := range parallelSubtest(pass.TypesInfo, call) {
+ reportCaptured(pass, vars, stmt)
+ }
+
}
}
- return true
- })
+ }
})
return nil, nil
}
-// goInvokes returns a function expression that would be called asynchronously
+// reportCaptured reports a diagnostic stating a loop variable
+// has been captured by a func literal if checkStmt has escaping
+// references to vars. vars is expected to be variables updated by a loop statement,
+// and checkStmt is expected to be a statement from the body of a func literal in the loop.
+func reportCaptured(pass *analysis.Pass, vars []types.Object, checkStmt ast.Stmt) {
+ ast.Inspect(checkStmt, func(n ast.Node) bool {
+ id, ok := n.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ obj := pass.TypesInfo.Uses[id]
+ if obj == nil {
+ return true
+ }
+ for _, v := range vars {
+ if v == obj {
+ pass.ReportRangef(id, "loop variable %s captured by func literal", id.Name)
+ }
+ }
+ return true
+ })
+}
+
+// forEachLastStmt calls onLast on each "last" statement in a list of statements.
+// "Last" is defined recursively so, for example, if the last statement is
+// a switch statement, then each switch case is also visited to examine
+// its last statements.
+func forEachLastStmt(stmts []ast.Stmt, onLast func(last ast.Stmt)) {
+ if len(stmts) == 0 {
+ return
+ }
+
+ s := stmts[len(stmts)-1]
+ switch s := s.(type) {
+ case *ast.IfStmt:
+ loop:
+ for {
+ forEachLastStmt(s.Body.List, onLast)
+ switch e := s.Else.(type) {
+ case *ast.BlockStmt:
+ forEachLastStmt(e.List, onLast)
+ break loop
+ case *ast.IfStmt:
+ s = e
+ case nil:
+ break loop
+ }
+ }
+ case *ast.ForStmt:
+ forEachLastStmt(s.Body.List, onLast)
+ case *ast.RangeStmt:
+ forEachLastStmt(s.Body.List, onLast)
+ case *ast.SwitchStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CaseClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ case *ast.TypeSwitchStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CaseClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ case *ast.SelectStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CommClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ default:
+ onLast(s)
+ }
+}
+
+// litStmts returns all statements from the function body of a function
+// literal.
+//
+// If fun is not a function literal, it returns nil.
+func litStmts(fun ast.Expr) []ast.Stmt {
+ lit, _ := fun.(*ast.FuncLit)
+ if lit == nil {
+ return nil
+ }
+ return lit.Body.List
+}
+
+// goInvoke returns a function expression that would be called asynchronously
// (but not awaited) in another goroutine as a consequence of the call.
// For example, given the g.Go call below, it returns the function literal expression.
//
@@ -133,33 +265,164 @@ func run(pass *analysis.Pass) (interface{}, error) {
// g.Go(func() error { ... })
//
// Currently only "golang.org/x/sync/errgroup.Group()" is considered.
-func goInvokes(info *types.Info, call *ast.CallExpr) ast.Expr {
- f := typeutil.StaticCallee(info, call)
- // Note: Currently only supports: golang.org/x/sync/errgroup.Go.
- if f == nil || f.Name() != "Go" {
+func goInvoke(info *types.Info, call *ast.CallExpr) ast.Expr {
+ if !isMethodCall(info, call, "golang.org/x/sync/errgroup", "Group", "Go") {
return nil
}
- recv := f.Type().(*types.Signature).Recv()
- if recv == nil {
+ return call.Args[0]
+}
+
+// parallelSubtest returns statements that can be easily proven to execute
+// concurrently via the go test runner, as t.Run has been invoked with a
+// function literal that calls t.Parallel.
+//
+// In practice, users rely on the fact that statements before the call to
+// t.Parallel are synchronous, for example by declaring test := test inside the
+// function literal, but before the call to t.Parallel.
+//
+// Therefore, we only flag references in statements that are obviously
+// dominated by a call to t.Parallel. As a simple heuristic, we only consider
+// statements following the final labeled statement in the function body, to
+// avoid scenarios where a jump would cause either the call to t.Parallel or
+// the problematic reference to be skipped.
+//
+// import "testing"
+//
+// func TestFoo(t *testing.T) {
+// tests := []int{0, 1, 2}
+// for i, test := range tests {
+// t.Run("subtest", func(t *testing.T) {
+// println(i, test) // OK
+// t.Parallel()
+// println(i, test) // Not OK
+// })
+// }
+// }
+func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt {
+ if !isMethodCall(info, call, "testing", "T", "Run") {
return nil
}
- rtype, ok := recv.Type().(*types.Pointer)
- if !ok {
+
+ lit, _ := call.Args[1].(*ast.FuncLit)
+ if lit == nil {
return nil
}
- named, ok := rtype.Elem().(*types.Named)
- if !ok {
+
+ // Capture the *testing.T object for the first argument to the function
+ // literal.
+ if len(lit.Type.Params.List[0].Names) == 0 {
return nil
}
- if named.Obj().Name() != "Group" {
+
+ tObj := info.Defs[lit.Type.Params.List[0].Names[0]]
+ if tObj == nil {
return nil
}
+
+ // Match statements that occur after a call to t.Parallel following the final
+ // labeled statement in the function body.
+ //
+ // We iterate over lit.Body.List to have a simple, fast and "frequent enough"
+ // dominance relationship for t.Parallel(): lit.Body.List[i] dominates
+ // lit.Body.List[j] for i < j unless there is a jump.
+ var stmts []ast.Stmt
+ afterParallel := false
+ for _, stmt := range lit.Body.List {
+ stmt, labeled := unlabel(stmt)
+ if labeled {
+ // Reset: naively we don't know if a jump could have caused the
+ // previously considered statements to be skipped.
+ stmts = nil
+ afterParallel = false
+ }
+
+ if afterParallel {
+ stmts = append(stmts, stmt)
+ continue
+ }
+
+ // Check if stmt is a call to t.Parallel(), for the correct t.
+ exprStmt, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ continue
+ }
+ expr := exprStmt.X
+ if isMethodCall(info, expr, "testing", "T", "Parallel") {
+ call, _ := expr.(*ast.CallExpr)
+ if call == nil {
+ continue
+ }
+ x, _ := call.Fun.(*ast.SelectorExpr)
+ if x == nil {
+ continue
+ }
+ id, _ := x.X.(*ast.Ident)
+ if id == nil {
+ continue
+ }
+ if info.Uses[id] == tObj {
+ afterParallel = true
+ }
+ }
+ }
+
+ return stmts
+}
+
+// unlabel returns the inner statement for the possibly labeled statement stmt,
+// stripping any (possibly nested) *ast.LabeledStmt wrapper.
+//
+// The second result reports whether stmt was an *ast.LabeledStmt.
+func unlabel(stmt ast.Stmt) (ast.Stmt, bool) {
+ labeled := false
+ for {
+ labelStmt, ok := stmt.(*ast.LabeledStmt)
+ if !ok {
+ return stmt, labeled
+ }
+ labeled = true
+ stmt = labelStmt.Stmt
+ }
+}
+
+// isMethodCall reports whether expr is a method call of
+// <pkgPath>.<typeName>.<method>.
+func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool {
+ call, ok := expr.(*ast.CallExpr)
+ if !ok {
+ return false
+ }
+
+ // Check that we are calling a method
+ f := typeutil.StaticCallee(info, call)
+ if f == nil || f.Name() != method {
+ return false
+ }
+ recv := f.Type().(*types.Signature).Recv()
+ if recv == nil {
+ return false
+ }
+
+ // Check that the receiver is a <pkgPath>.<typeName> or
+ // *<pkgPath>.<typeName>.
+ rtype := recv.Type()
+ if ptr, ok := recv.Type().(*types.Pointer); ok {
+ rtype = ptr.Elem()
+ }
+ named, ok := rtype.(*types.Named)
+ if !ok {
+ return false
+ }
+ if named.Obj().Name() != typeName {
+ return false
+ }
pkg := f.Pkg()
if pkg == nil {
- return nil
+ return false
}
- if pkg.Path() != "golang.org/x/sync/errgroup" {
- return nil
+ if pkg.Path() != pkgPath {
+ return false
}
- return call.Args[0]
+
+ return true
}
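For reference, the usual fix for the errgroup case that goInvoke detects is the same per-iteration copy shown in the doc comment. A self-contained sketch, assuming golang.org/x/sync/errgroup is available:

```
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	nodes := []string{"a", "b", "c"}
	var g errgroup.Group
	for _, node := range nodes {
		node := node // per-iteration copy; without it, loopclosure reports the capture
		g.Go(func() error {
			fmt.Println(node)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```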
diff --git a/go/analysis/passes/loopclosure/loopclosure_test.go b/go/analysis/passes/loopclosure/loopclosure_test.go
index 1498838d7ff..55fb2a4a3d6 100644
--- a/go/analysis/passes/loopclosure/loopclosure_test.go
+++ b/go/analysis/passes/loopclosure/loopclosure_test.go
@@ -5,16 +5,16 @@
package loopclosure_test
import (
- "golang.org/x/tools/internal/typeparams"
"testing"
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/loopclosure"
+ "golang.org/x/tools/internal/typeparams"
)
func Test(t *testing.T) {
testdata := analysistest.TestData()
- tests := []string{"a", "golang.org/..."}
+ tests := []string{"a", "golang.org/...", "subtests"}
if typeparams.Enabled {
tests = append(tests, "typeparams")
}
diff --git a/go/analysis/passes/loopclosure/testdata/src/a/a.go b/go/analysis/passes/loopclosure/testdata/src/a/a.go
index 2c8e2e6c411..7a7f05f663f 100644
--- a/go/analysis/passes/loopclosure/testdata/src/a/a.go
+++ b/go/analysis/passes/loopclosure/testdata/src/a/a.go
@@ -6,7 +6,13 @@
package testdata
-import "golang.org/x/sync/errgroup"
+import (
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+)
+
+var A int
func _() {
var s []int
@@ -49,6 +55,19 @@ func _() {
println(i, v)
}()
}
+
+ // iteration variable declared outside the loop
+ for A = range s {
+ go func() {
+ println(A) // want "loop variable A captured by func literal"
+ }()
+ }
+ // iteration variable declared in a different file
+ for B = range s {
+ go func() {
+ println(B) // want "loop variable B captured by func literal"
+ }()
+ }
// If the key of the range statement is not an identifier
// the code should not panic (it used to).
var x [2]int
@@ -91,9 +110,73 @@ func _() {
}
}
-// Group is used to test that loopclosure does not match on any type named "Group".
-// The checker only matches on methods "(*...errgroup.Group).Go".
-type Group struct{};
+// Cases that rely on recursively checking for last statements.
+func _() {
+
+ for i := range "outer" {
+ for j := range "inner" {
+ if j < 1 {
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ } else if j < 2 {
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ } else {
+ go func() {
+ print(i)
+ }()
+ println("we don't catch the error above because of this statement")
+ }
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if j < 1 {
+ switch j {
+ case 0:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ } else if j < 2 {
+ var a interface{} = j
+ switch a.(type) {
+ case int:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ } else {
+ ch := make(chan string)
+ select {
+ case <-ch:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ }
+ }
+ }
+}
+
+// Group is used to test that loopclosure only matches Group.Go when Group is
+// from the golang.org/x/sync/errgroup package.
+type Group struct{}
func (g *Group) Go(func() error) {}
@@ -108,6 +191,21 @@ func _() {
return nil
})
}
+
+ for i, v := range s {
+ if i > 0 {
+ g.Go(func() error {
+ print(i) // want "loop variable i captured by func literal"
+ return nil
+ })
+ } else {
+ g.Go(func() error {
+ print(v) // want "loop variable v captured by func literal"
+ return nil
+ })
+ }
+ }
+
// Do not match other Group.Go cases
g1 := new(Group)
for i, v := range s {
@@ -118,3 +216,28 @@ func _() {
})
}
}
+
+// Real-world example from #16520, slightly simplified
+func _() {
+ var nodes []interface{}
+
+ critical := new(errgroup.Group)
+ others := sync.WaitGroup{}
+
+ isCritical := func(node interface{}) bool { return false }
+ run := func(node interface{}) error { return nil }
+
+ for _, node := range nodes {
+ if isCritical(node) {
+ critical.Go(func() error {
+ return run(node) // want "loop variable node captured by func literal"
+ })
+ } else {
+ others.Add(1)
+ go func() {
+ _ = run(node) // want "loop variable node captured by func literal"
+ others.Done()
+ }()
+ }
+ }
+}
diff --git a/go/analysis/passes/loopclosure/testdata/src/a/b.go b/go/analysis/passes/loopclosure/testdata/src/a/b.go
new file mode 100644
index 00000000000..d4e5da418e5
--- /dev/null
+++ b/go/analysis/passes/loopclosure/testdata/src/a/b.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+// B is declared in a separate file to test that object resolution spans the
+// entire package.
+var B int
diff --git a/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go
new file mode 100644
index 00000000000..c95fa1f0b1e
--- /dev/null
+++ b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go
@@ -0,0 +1,198 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests that the loopclosure analyzer detects leaked
+// references via parallel subtests.
+
+package subtests
+
+import (
+ "testing"
+)
+
+// T is used to test that loopclosure only matches T.Run when T is from the
+// testing package.
+type T struct{}
+
+// Run should not match testing.T.Run. Note that the second argument is
+// intentionally a *testing.T, not a *T, so that we can check both
+// testing.T.Parallel inside a T.Run, and a T.Parallel inside a testing.T.Run.
+func (t *T) Run(string, func(*testing.T)) {
+}
+
+func (t *T) Parallel() {}
+
+func _(t *testing.T) {
+ for i, test := range []int{1, 2, 3} {
+ // Check that parallel subtests are identified.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that serial tests are OK.
+ t.Run("", func(t *testing.T) {
+ println(i)
+ println(test)
+ })
+
+ // Check that the location of t.Parallel matters.
+ t.Run("", func(t *testing.T) {
+ println(i)
+ println(test)
+ t.Parallel()
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that *testing.T value matters.
+ t.Run("", func(t *testing.T) {
+ var x testing.T
+ x.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that shadowing the loop variables within the test literal is OK if
+ // it occurs before t.Parallel().
+ t.Run("", func(t *testing.T) {
+ i := i
+ test := test
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that shadowing the loop variables within the test literal is Not
+ // OK if it occurs after t.Parallel().
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ i := i // want "loop variable i captured by func literal"
+ test := test // want "loop variable test captured by func literal"
+ println(i) // OK
+ println(test) // OK
+ })
+
+ // Check uses in nested blocks.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ {
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ }
+ })
+
+ // Check that we catch uses in nested subtests.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ t.Run("", func(t *testing.T) {
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+ })
+
+ // Check that there is no diagnostic if t is not a *testing.T.
+ t.Run("", func(_ *testing.T) {
+ t := &T{}
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that there is no diagnostic when a jump to a label may have caused
+ // the call to t.Parallel to have been skipped.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test
+ }
+ t.Parallel()
+ Test:
+ println(i)
+ println(test)
+ })
+
+ // Check that there is no diagnostic when a jump to a label may have caused
+ // the loop variable reference to be skipped, but there is a diagnostic
+ // when both the call to t.Parallel and the loop variable reference occur
+ // after the final label in the block.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test
+ }
+ t.Parallel()
+ println(i) // maybe OK
+ Test:
+ t.Parallel()
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that multiple labels are handled.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test1
+ } else {
+ goto Test2
+ }
+ Test1:
+ Test2:
+ t.Parallel()
+ println(test) // want "loop variable test captured by func literal"
+ })
+ }
+}
+
+// Check that there is no diagnostic when loop variables are shadowed within
+// the loop body.
+func _(t *testing.T) {
+ for i, test := range []int{1, 2, 3} {
+ i := i
+ test := test
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+ }
+}
+
+// Check that t.Run must be *testing.T.Run.
+func _(t *T) {
+ for i, test := range []int{1, 2, 3} {
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+ }
+}
+
+// Check that the top-level must be parallel in order to cause a diagnostic.
+//
+// From https://pkg.go.dev/testing:
+//
+// "Run does not return until parallel subtests have completed, providing a
+// way to clean up after a group of parallel tests"
+func _(t *testing.T) {
+ for _, test := range []int{1, 2, 3} {
+ // In this subtest, a/b must complete before the synchronous subtest "a"
+ // completes, so the reference to test does not escape the current loop
+ // iteration.
+ t.Run("a", func(s *testing.T) {
+ s.Run("b", func(u *testing.T) {
+ u.Parallel()
+ println(test)
+ })
+ })
+
+ // In this subtest, c executes concurrently, so the reference to test may
+ // escape the current loop iteration.
+ t.Run("c", func(s *testing.T) {
+ s.Parallel()
+ s.Run("d", func(u *testing.T) {
+ println(test) // want "loop variable test captured by func literal"
+ })
+ })
+ }
+}
diff --git a/go/analysis/passes/nilness/nilness.go b/go/analysis/passes/nilness/nilness.go
index 8db18c73ade..6849c33ccef 100644
--- a/go/analysis/passes/nilness/nilness.go
+++ b/go/analysis/passes/nilness/nilness.go
@@ -15,6 +15,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for redundant or impossible nil comparisons
@@ -62,7 +63,6 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (interface{}, error) {
ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
- // TODO(48525): ssainput.SrcFuncs is missing fn._Instances(). runFunc will be skipped.
for _, fn := range ssainput.SrcFuncs {
runFunc(pass, fn)
}
@@ -103,8 +103,11 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) {
for _, instr := range b.Instrs {
switch instr := instr.(type) {
case ssa.CallInstruction:
- notNil(stack, instr, instr.Common().Value,
- instr.Common().Description())
+ // A nil receiver may be okay for type params.
+ cc := instr.Common()
+ if !(cc.IsInvoke() && typeparams.IsTypeParam(cc.Value.Type())) {
+ notNil(stack, instr, cc.Value, cc.Description())
+ }
case *ssa.FieldAddr:
notNil(stack, instr, instr.X, "field selection")
case *ssa.IndexAddr:
@@ -307,9 +310,9 @@ func nilnessOf(stack []fact, v ssa.Value) nilness {
return isnonnil
case *ssa.Const:
if v.IsNil() {
- return isnil
+ return isnil // nil or zero value of a pointer-like type
} else {
- return isnonnil
+ return unknown // non-pointer
}
}
diff --git a/go/analysis/passes/nilness/nilness_test.go b/go/analysis/passes/nilness/nilness_test.go
index 86c4a769da8..99c4dfbac1d 100644
--- a/go/analysis/passes/nilness/nilness_test.go
+++ b/go/analysis/passes/nilness/nilness_test.go
@@ -24,3 +24,11 @@ func TestInstantiated(t *testing.T) {
testdata := analysistest.TestData()
analysistest.Run(t, testdata, nilness.Analyzer, "c")
}
+
+func TestTypeSet(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestTypeSet requires type parameters")
+ }
+ testdata := analysistest.TestData()
+ analysistest.Run(t, testdata, nilness.Analyzer, "d")
+}
diff --git a/go/analysis/passes/nilness/testdata/src/a/a.go b/go/analysis/passes/nilness/testdata/src/a/a.go
index aa7f9a8f859..0629e08d89e 100644
--- a/go/analysis/passes/nilness/testdata/src/a/a.go
+++ b/go/analysis/passes/nilness/testdata/src/a/a.go
@@ -209,3 +209,10 @@ func f13() {
var d *Y
print(d.value) // want "nil dereference in field selection"
}
+
+func f14() {
+ var x struct{ f string }
+ if x == struct{ f string }{} { // we don't catch this tautology as we restrict to reference types
+ print(x)
+ }
+}
diff --git a/go/analysis/passes/nilness/testdata/src/c/c.go b/go/analysis/passes/nilness/testdata/src/c/c.go
index 2b2036595a6..c9a05a714ff 100644
--- a/go/analysis/passes/nilness/testdata/src/c/c.go
+++ b/go/analysis/passes/nilness/testdata/src/c/c.go
@@ -2,7 +2,7 @@ package c
func instantiated[X any](x *X) int {
if x == nil {
- print(*x) // not reported until _Instances are added to SrcFuncs
+ print(*x) // want "nil dereference in load"
}
return 1
}
diff --git a/go/analysis/passes/nilness/testdata/src/d/d.go b/go/analysis/passes/nilness/testdata/src/d/d.go
new file mode 100644
index 00000000000..72bd1c87217
--- /dev/null
+++ b/go/analysis/passes/nilness/testdata/src/d/d.go
@@ -0,0 +1,55 @@
+package d
+
+type message interface{ PR() }
+
+func noparam() {
+ var messageT message
+ messageT.PR() // want "nil dereference in dynamic method call"
+}
+
+func paramNonnil[T message]() {
+ var messageT T
+ messageT.PR() // cannot conclude messageT is nil.
+}
+
+func instance() {
+ // buildssa.BuilderMode does not include InstantiateGenerics.
+ paramNonnil[message]() // no warning is expected as param[message] is not built.
+}
+
+func param[T interface {
+ message
+ ~*int | ~chan int
+}]() {
+ var messageT T // messageT is nil.
+ messageT.PR() // nil receiver may be okay. See param[nilMsg].
+}
+
+type nilMsg chan int
+
+func (m nilMsg) PR() {
+ if m == nil {
+ print("not an error")
+ }
+}
+
+var G func() = param[nilMsg] // no warning
+
+func allNillable[T ~*int | ~chan int]() {
+ var x, y T // both are nillable and are nil.
+ if x != y { // want "impossible condition: nil != nil"
+ print("unreachable")
+ }
+}
+
+func notAll[T ~*int | ~chan int | ~int]() {
+ var x, y T // neither is nillable due to ~int
+ if x != y { // no warning
+ print("unreachable")
+ }
+}
+
+func noninvoke[T ~func()]() {
+ var x T
+ x() // want "nil dereference in dynamic function call"
+}
diff --git a/go/analysis/passes/printf/printf.go b/go/analysis/passes/printf/printf.go
index c4ccc95b4fb..3ac4fcaa28e 100644
--- a/go/analysis/passes/printf/printf.go
+++ b/go/analysis/passes/printf/printf.go
@@ -583,7 +583,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
argNum := firstArg
maxArgNum := firstArg
anyIndex := false
- anyW := false
for i, w := 0, 0; i < len(format); i += w {
w = 1
if format[i] != '%' {
@@ -606,11 +605,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name)
return
}
- if anyW {
- pass.Reportf(call.Pos(), "%s call has more than one error-wrapping directive %%w", state.name)
- return
- }
- anyW = true
}
if len(state.argNums) > 0 {
// Continue with the next sequential argument.
@@ -672,12 +666,13 @@ func (s *formatState) parseIndex() bool {
s.scanNum()
ok := true
if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' {
- ok = false
- s.nbytes = strings.Index(s.format, "]")
+ ok = false // syntax error is either missing "]" or invalid index.
+ s.nbytes = strings.Index(s.format[start:], "]")
if s.nbytes < 0 {
s.pass.ReportRangef(s.call, "%s format %s is missing closing ]", s.name, s.format)
return false
}
+ s.nbytes = s.nbytes + start
}
arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32)
if err != nil || !ok || arg32 <= 0 || arg32 > int64(len(s.call.Args)-s.firstArg) {
@@ -915,7 +910,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o
if reason != "" {
details = " (" + reason + ")"
}
- pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s%s", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString, details)
+ pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s%s, see also https://pkg.go.dev/fmt#hdr-Printing", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString, details)
return false
}
if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) {
@@ -950,11 +945,16 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) {
return "", false
}
+ // inScope returns true if e is in the scope of f.
+ inScope := func(e ast.Expr, f *types.Func) bool {
+ return f.Scope() != nil && f.Scope().Contains(e.Pos())
+ }
+
// Is the expression e within the body of that String or Error method?
var method *types.Func
- if strOk && strMethod.Pkg() == pass.Pkg && strMethod.Scope().Contains(e.Pos()) {
+ if strOk && strMethod.Pkg() == pass.Pkg && inScope(e, strMethod) {
method = strMethod
- } else if errOk && errMethod.Pkg() == pass.Pkg && errMethod.Scope().Contains(e.Pos()) {
+ } else if errOk && errMethod.Pkg() == pass.Pkg && inScope(e, errMethod) {
method = errMethod
} else {
return "", false
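Dropping the one-%w-per-call check tracks fmt in Go 1.20, where Errorf may wrap multiple errors and errors.Is/As consider all of them. A small example of the pattern that is no longer flagged:

```
package main

import (
	"errors"
	"fmt"
)

var (
	errA = errors.New("a failed")
	errB = errors.New("b failed")
)

func main() {
	// Since Go 1.20 a single Errorf call may wrap several errors,
	// so vet no longer reports repeated %w directives.
	err := fmt.Errorf("both steps failed: %w; %w", errA, errB)
	fmt.Println(errors.Is(err, errA), errors.Is(err, errB)) // true true
}
```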
diff --git a/go/analysis/passes/printf/testdata/src/a/a.go b/go/analysis/passes/printf/testdata/src/a/a.go
index 5eca3172dec..0c4d11bf0c0 100644
--- a/go/analysis/passes/printf/testdata/src/a/a.go
+++ b/go/analysis/passes/printf/testdata/src/a/a.go
@@ -217,6 +217,7 @@ func PrintfTests() {
Printf("%[2]*.[1]*[3]d x", 2, "hi", 4) // want `a.Printf format %\[2]\*\.\[1\]\*\[3\]d uses non-int \x22hi\x22 as argument of \*`
Printf("%[0]s x", "arg1") // want `a.Printf format has invalid argument index \[0\]`
Printf("%[0]d x", 1) // want `a.Printf format has invalid argument index \[0\]`
+ Printf("%[3]*.[2*[1]f", 1, 2, 3) // want `a.Printf format has invalid argument index \[2\*\[1\]`
// Something that satisfies the error interface.
var e error
fmt.Println(e.Error()) // ok
@@ -341,7 +342,7 @@ func PrintfTests() {
_ = fmt.Errorf("%[2]w %[1]s", "x", err) // OK
_ = fmt.Errorf("%[2]w %[1]s", e, "x") // want `fmt.Errorf format %\[2\]w has arg "x" of wrong type string`
_ = fmt.Errorf("%w", "x") // want `fmt.Errorf format %w has arg "x" of wrong type string`
- _ = fmt.Errorf("%w %w", err, err) // want `fmt.Errorf call has more than one error-wrapping directive %w`
+ _ = fmt.Errorf("%w %w", err, err) // OK
_ = fmt.Errorf("%w", interface{}(nil)) // want `fmt.Errorf format %w has arg interface{}\(nil\) of wrong type interface{}`
_ = fmt.Errorf("%w", errorTestOK(0)) // concrete value implements error
_ = fmt.Errorf("%w", errSubset) // interface value implements error
diff --git a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
index 76a9a205a70..c4d7e530d93 100644
--- a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
+++ b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
@@ -121,3 +121,25 @@ func TestTermReduction[T1 interface{ ~int | string }, T2 interface {
fmt.Printf("%d", t2)
fmt.Printf("%s", t2) // want "wrong type.*contains typeparams.myInt"
}
+
+type U[T any] struct{}
+
+func (u U[T]) String() string {
+ fmt.Println(u) // want `fmt.Println arg u causes recursive call to \(typeparams.U\[T\]\).String method`
+ return ""
+}
+
+type S[T comparable] struct {
+ t T
+}
+
+func (s S[T]) String() T {
+ fmt.Println(s) // Not flagged. We currently do not consider String() T to implement fmt.Stringer (see #55928).
+ return s.t
+}
+
+func TestInstanceStringer() {
+ // Tests String method with nil Scope (#55350)
+ fmt.Println(&S[string]{})
+ fmt.Println(&U[string]{})
+}
diff --git a/go/analysis/passes/printf/types.go b/go/analysis/passes/printf/types.go
index 270e917c809..7cbb0bdbf5f 100644
--- a/go/analysis/passes/printf/types.go
+++ b/go/analysis/passes/printf/types.go
@@ -299,13 +299,3 @@ func isConvertibleToString(typ types.Type) bool {
return false
}
-
-// hasBasicType reports whether x's type is a types.Basic with the given kind.
-func hasBasicType(pass *analysis.Pass, x ast.Expr, kind types.BasicKind) bool {
- t := pass.TypesInfo.Types[x].Type
- if t != nil {
- t = t.Underlying()
- }
- b, ok := t.(*types.Basic)
- return ok && b.Kind() == kind
-}
diff --git a/go/analysis/passes/sortslice/analyzer.go b/go/analysis/passes/sortslice/analyzer.go
index 5eb957a1883..f85837d66bf 100644
--- a/go/analysis/passes/sortslice/analyzer.go
+++ b/go/analysis/passes/sortslice/analyzer.go
@@ -52,11 +52,20 @@ func run(pass *analysis.Pass) (interface{}, error) {
arg := call.Args[0]
typ := pass.TypesInfo.Types[arg].Type
+
+ if tuple, ok := typ.(*types.Tuple); ok {
+ typ = tuple.At(0).Type() // special case for Slice(f(...))
+ }
+
switch typ.Underlying().(type) {
case *types.Slice, *types.Interface:
return
}
+ // Restore typ to the original type; we may have unwrapped a tuple above,
+ // so typ might not be the type of arg.
+ typ = pass.TypesInfo.Types[arg].Type
+
var fixes []analysis.SuggestedFix
switch v := typ.Underlying().(type) {
case *types.Array:
diff --git a/go/analysis/passes/sortslice/testdata/src/a/a.go b/go/analysis/passes/sortslice/testdata/src/a/a.go
index bc6cc16e9f1..c6aca8df13b 100644
--- a/go/analysis/passes/sortslice/testdata/src/a/a.go
+++ b/go/analysis/passes/sortslice/testdata/src/a/a.go
@@ -6,8 +6,8 @@ import "sort"
func IncorrectSort() {
i := 5
sortFn := func(i, j int) bool { return false }
- sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int"
- sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int"
+ sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int"
+ sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int"
sort.SliceIsSorted(i, sortFn) // want "sort.SliceIsSorted's argument must be a slice; is called with int"
}
@@ -62,3 +62,23 @@ func UnderlyingSlice() {
sort.SliceStable(s, sortFn)
sort.SliceIsSorted(s, sortFn)
}
+
+// FunctionResultsAsArguments passes a function which returns two values
+// that satisfy the sort.Slice signature. It should not produce a diagnostic.
+func FunctionResultsAsArguments() {
+ s := []string{"a", "z", "ooo"}
+ sort.Slice(less(s))
+ sort.Slice(lessPtr(s)) // want `sort.Slice's argument must be a slice; is called with \(\*\[\]string,.*`
+}
+
+func less(s []string) ([]string, func(i, j int) bool) {
+ return s, func(i, j int) bool {
+ return s[i] < s[j]
+ }
+}
+
+func lessPtr(s []string) (*[]string, func(i, j int) bool) {
+ return &s, func(i, j int) bool {
+ return s[i] < s[j]
+ }
+}
diff --git a/go/analysis/passes/stdmethods/stdmethods.go b/go/analysis/passes/stdmethods/stdmethods.go
index cc9497179da..41f455d1003 100644
--- a/go/analysis/passes/stdmethods/stdmethods.go
+++ b/go/analysis/passes/stdmethods/stdmethods.go
@@ -134,6 +134,19 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) {
}
}
+ // Special case: Unwrap has two possible signatures.
+ // Check for Unwrap() []error here.
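+	// (Unwrap() []error is the form used by errors that wrap multiple
+	// errors, such as the error returned by errors.Join.)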
+ if id.Name == "Unwrap" {
+ if args.Len() == 0 && results.Len() == 1 {
+ t := typeString(results.At(0).Type())
+ if t == "error" || t == "[]error" {
+ return
+ }
+ }
+ pass.ReportRangef(id, "method Unwrap() should have signature Unwrap() error or Unwrap() []error")
+ return
+ }
+
// Do the =s (if any) all match?
if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") {
return
diff --git a/go/analysis/passes/stdmethods/testdata/src/a/a.go b/go/analysis/passes/stdmethods/testdata/src/a/a.go
index c95cf5d2b76..2b01f46932f 100644
--- a/go/analysis/passes/stdmethods/testdata/src/a/a.go
+++ b/go/analysis/passes/stdmethods/testdata/src/a/a.go
@@ -49,7 +49,7 @@ func (E) Error() string { return "" } // E implements error.
func (E) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (E) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type F int
@@ -57,8 +57,18 @@ func (F) Error() string { return "" } // Both F and *F implement error.
func (*F) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (*F) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type G int
func (G) As(interface{}) bool // ok
+
+type W int
+
+func (W) Error() string { return "" }
+func (W) Unwrap() error { return nil } // ok
+
+type M int
+
+func (M) Error() string { return "" }
+func (M) Unwrap() []error { return nil } // ok
diff --git a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
index 72df30d4960..3d4146e9b2c 100644
--- a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
+++ b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
@@ -30,7 +30,7 @@ func (E[_]) Error() string { return "" } // E implements error.
func (E[P]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (E[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type F[P any] int
@@ -38,4 +38,4 @@ func (F[_]) Error() string { return "" } // Both F and *F implement error.
func (*F[_]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (*F[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
diff --git a/go/analysis/passes/tests/testdata/src/a/go118_test.go b/go/analysis/passes/tests/testdata/src/a/go118_test.go
index dc898daca0b..e2bc3f3a0bd 100644
--- a/go/analysis/passes/tests/testdata/src/a/go118_test.go
+++ b/go/analysis/passes/tests/testdata/src/a/go118_test.go
@@ -94,3 +94,8 @@ func FuzzObjectMethod(f *testing.F) {
}
f.Fuzz(obj.myVar) // ok
}
+
+// Test for golang/go#56505: checking fuzz arguments should not panic on *error.
+func FuzzIssue56505(f *testing.F) {
+ f.Fuzz(func(e *error) {}) // want "the first parameter of a fuzz target must be \\*testing.T"
+}
diff --git a/go/analysis/passes/tests/tests.go b/go/analysis/passes/tests/tests.go
index 56b20ebd519..935aad00c98 100644
--- a/go/analysis/passes/tests/tests.go
+++ b/go/analysis/passes/tests/tests.go
@@ -269,7 +269,9 @@ func isTestingType(typ types.Type, testingType string) bool {
if !ok {
return false
}
- return named.Obj().Pkg().Path() == "testing" && named.Obj().Name() == testingType
+ obj := named.Obj()
+	// obj.Pkg() is nil for objects declared in the Universe scope, such as the error type.
+ return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == testingType
}
// Validate that fuzz target function's arguments are of accepted types.
@@ -475,10 +477,12 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) {
if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
// Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters.
// We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
+ // TODO(adonovan): use ReportRangef(tparams).
pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix)
}
if !isTestSuffix(fn.Name.Name[len(prefix):]) {
+ // TODO(adonovan): use ReportRangef(fn.Name).
pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix)
}
}
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go b/go/analysis/passes/timeformat/testdata/src/a/a.go
new file mode 100644
index 00000000000..98481446e55
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+ "time"
+
+ "b"
+)
+
+func hasError() {
+ a, _ := time.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+ a.Format(`2006-02-01`) // want `2006-02-01 should be 2006-01-02`
+ a.Format("2006-02-01 15:04:05") // want `2006-02-01 should be 2006-01-02`
+
+ const c = "2006-02-01"
+ a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+ a.Format("2006-01-02")
+
+ const c = "2006-01-02"
+ a.Format(c)
+
+ v := "2006-02-01"
+	a.Format(v) // Allowed through variables.
+
+ m := map[string]string{
+ "y": "2006-02-01",
+ }
+ a.Format(m["y"])
+
+ s := []string{"2006-02-01"}
+ a.Format(s[0])
+
+ a.Format(badFormat())
+
+ o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+ o.Format("2006-02-01")
+}
+
+func badFormat() string {
+ return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go.golden b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
new file mode 100644
index 00000000000..9eccded63b4
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+ "time"
+
+ "b"
+)
+
+func hasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+ a.Format(`2006-01-02`) // want `2006-02-01 should be 2006-01-02`
+ a.Format("2006-01-02 15:04:05") // want `2006-02-01 should be 2006-01-02`
+
+ const c = "2006-02-01"
+ a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+ a.Format("2006-01-02")
+
+ const c = "2006-01-02"
+ a.Format(c)
+
+ v := "2006-02-01"
+	a.Format(v) // Allowed through variables.
+
+ m := map[string]string{
+ "y": "2006-02-01",
+ }
+ a.Format(m["y"])
+
+ s := []string{"2006-02-01"}
+ a.Format(s[0])
+
+ a.Format(badFormat())
+
+ o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+ o.Format("2006-02-01")
+}
+
+func badFormat() string {
+ return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/b/b.go b/go/analysis/passes/timeformat/testdata/src/b/b.go
new file mode 100644
index 00000000000..de5690863c9
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/b/b.go
@@ -0,0 +1,11 @@
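+// Package b declares Parse and Format functions that are unrelated to the
+// time package; the timeformat checker should not report calls to them.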
+package b
+
+type B struct {
+}
+
+func Parse(string, string) B {
+ return B{}
+}
+
+func (b B) Format(string) {
+}
diff --git a/go/analysis/passes/timeformat/timeformat.go b/go/analysis/passes/timeformat/timeformat.go
new file mode 100644
index 00000000000..acb198f95c4
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat.go
@@ -0,0 +1,129 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeformat defines an Analyzer that checks for the use
+// of time.Format or time.Parse calls with a bad format.
+package timeformat
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+const badFormat = "2006-02-01"
+const goodFormat = "2006-01-02"
+
+const Doc = `check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "timeformat",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+}
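+// For example, a call such as t.Format("2006-02-01") is reported, and the
+// suggested fix replaces the layout with "2006-01-02".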
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+ nodeFilter := []ast.Node{
+ (*ast.CallExpr)(nil),
+ }
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ call := n.(*ast.CallExpr)
+ fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func)
+ if !ok {
+ return
+ }
+ if !isTimeDotFormat(fn) && !isTimeDotParse(fn) {
+ return
+ }
+ if len(call.Args) > 0 {
+ arg := call.Args[0]
+ badAt := badFormatAt(pass.TypesInfo, arg)
+
+ if badAt > -1 {
+				// Check whether it's a string literal; otherwise we can't suggest a fix.
+ if _, ok := arg.(*ast.BasicLit); ok {
+ pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or `
+ end := pos + len(badFormat)
+
+ pass.Report(analysis.Diagnostic{
+ Pos: token.Pos(pos),
+ End: token.Pos(end),
+ Message: badFormat + " should be " + goodFormat,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Replace " + badFormat + " with " + goodFormat,
+ TextEdits: []analysis.TextEdit{{
+ Pos: token.Pos(pos),
+ End: token.Pos(end),
+ NewText: []byte(goodFormat),
+ }},
+ }},
+ })
+ } else {
+ pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat)
+ }
+ }
+ }
+ })
+ return nil, nil
+}
+
+func isTimeDotFormat(f *types.Func) bool {
+ if f.Name() != "Format" || f.Pkg().Path() != "time" {
+ return false
+ }
+ sig, ok := f.Type().(*types.Signature)
+ if !ok {
+ return false
+ }
+ // Verify that the receiver is time.Time.
+ recv := sig.Recv()
+ if recv == nil {
+ return false
+ }
+ named, ok := recv.Type().(*types.Named)
+ return ok && named.Obj().Name() == "Time"
+}
+
+func isTimeDotParse(f *types.Func) bool {
+ if f.Name() != "Parse" || f.Pkg().Path() != "time" {
+ return false
+ }
+ // Verify that there is no receiver.
+ sig, ok := f.Type().(*types.Signature)
+ return ok && sig.Recv() == nil
+}
+
+// badFormatAt returns the start index of a bad format in e, or -1 if no bad format is found.
+func badFormatAt(info *types.Info, e ast.Expr) int {
+ tv, ok := info.Types[e]
+ if !ok { // no type info, assume good
+ return -1
+ }
+
+ t, ok := tv.Type.(*types.Basic)
+ if !ok || t.Info()&types.IsString == 0 {
+ return -1
+ }
+
+ if tv.Value == nil {
+ return -1
+ }
+
+ return strings.Index(constant.StringVal(tv.Value), badFormat)
+}
diff --git a/go/analysis/passes/timeformat/timeformat_test.go b/go/analysis/passes/timeformat/timeformat_test.go
new file mode 100644
index 00000000000..86bbe1bb3fb
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat_test.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package timeformat_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/passes/timeformat"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, timeformat.Analyzer, "a")
+}
diff --git a/go/analysis/unitchecker/unitchecker.go b/go/analysis/unitchecker/unitchecker.go
index 9827b57f529..37693564e5b 100644
--- a/go/analysis/unitchecker/unitchecker.go
+++ b/go/analysis/unitchecker/unitchecker.go
@@ -50,7 +50,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
- "golang.org/x/tools/go/analysis/internal/facts"
+ "golang.org/x/tools/internal/facts"
"golang.org/x/tools/internal/typeparams"
)
@@ -249,6 +249,10 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
// In VetxOnly mode, analyzers are only for their facts,
// so we can skip any analysis that neither produces facts
// nor depends on any analysis that produces facts.
+ //
+	// TODO(adonovan): fix: the comment (and logic!) here are backwards.
+ // It should say "...nor is required by any...". (Issue 443099)
+ //
// Also build a map to hold working state and result.
type action struct {
once sync.Once
@@ -287,13 +291,13 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
analyzers = filtered
// Read facts from imported packages.
- read := func(path string) ([]byte, error) {
- if vetx, ok := cfg.PackageVetx[path]; ok {
+ read := func(imp *types.Package) ([]byte, error) {
+ if vetx, ok := cfg.PackageVetx[imp.Path()]; ok {
return ioutil.ReadFile(vetx)
}
return nil, nil // no .vetx file, no facts
}
- facts, err := facts.Decode(pkg, read)
+ facts, err := facts.NewDecoder(pkg).Decode(read)
if err != nil {
return nil, err
}
@@ -340,6 +344,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
Pkg: pkg,
TypesInfo: info,
TypesSizes: tc.Sizes,
+ TypeErrors: nil, // unitchecker doesn't RunDespiteErrors
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: facts.ImportObjectFact,
diff --git a/go/analysis/unitchecker/unitchecker_test.go b/go/analysis/unitchecker/unitchecker_test.go
index 7e5b848de86..197abd9a168 100644
--- a/go/analysis/unitchecker/unitchecker_test.go
+++ b/go/analysis/unitchecker/unitchecker_test.go
@@ -20,6 +20,7 @@ import (
"strings"
"testing"
+ "golang.org/x/tools/go/analysis/passes/assign"
"golang.org/x/tools/go/analysis/passes/findcall"
"golang.org/x/tools/go/analysis/passes/printf"
"golang.org/x/tools/go/analysis/unitchecker"
@@ -41,6 +42,7 @@ func main() {
unitchecker.Main(
findcall.Analyzer,
printf.Analyzer,
+ assign.Analyzer,
)
}
@@ -74,6 +76,13 @@ func _() {
}
func MyFunc123() {}
+`,
+ "c/c.go": `package c
+
+func _() {
+ i := 5
+ i = i
+}
`,
}}})
defer exported.Cleanup()
@@ -84,6 +93,9 @@ func MyFunc123() {}
const wantB = `# golang.org/fake/b
([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:6:13: call of MyFunc123\(...\)
([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:7:11: call of MyFunc123\(...\)
+`
+ const wantC = `# golang.org/fake/c
+([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5: self-assignment of i to i
`
const wantAJSON = `# golang.org/fake/a
\{
@@ -91,23 +103,62 @@ func MyFunc123() {}
"findcall": \[
\{
"posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go:4:11",
- "message": "call of MyFunc123\(...\)"
+ "message": "call of MyFunc123\(...\)",
+ "suggested_fixes": \[
+ \{
+ "message": "Add '_TEST_'",
+ "edits": \[
+ \{
+ "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go",
+ "start": 32,
+ "end": 32,
+ "new": "_TEST_"
+ \}
+ \]
+ \}
+ \]
+ \}
+ \]
+ \}
+\}
+`
+ const wantCJSON = `# golang.org/fake/c
+\{
+ "golang.org/fake/c": \{
+ "assign": \[
+ \{
+ "posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5",
+ "message": "self-assignment of i to i",
+ "suggested_fixes": \[
+ \{
+ "message": "Remove",
+ "edits": \[
+ \{
+ "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go",
+ "start": 37,
+ "end": 42,
+ "new": ""
+ \}
+ \]
+ \}
+ \]
\}
\]
\}
\}
`
-
for _, test := range []struct {
- args string
- wantOut string
- wantExit int
+ args string
+ wantOut string
+ wantExitError bool
}{
- {args: "golang.org/fake/a", wantOut: wantA, wantExit: 2},
- {args: "golang.org/fake/b", wantOut: wantB, wantExit: 2},
- {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExit: 2},
- {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExit: 0},
- {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExit: 2},
+ {args: "golang.org/fake/a", wantOut: wantA, wantExitError: true},
+ {args: "golang.org/fake/b", wantOut: wantB, wantExitError: true},
+ {args: "golang.org/fake/c", wantOut: wantC, wantExitError: true},
+ {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExitError: true},
+ {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExitError: false},
+ {args: "-json golang.org/fake/c", wantOut: wantCJSON, wantExitError: false},
+ {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExitError: true},
} {
cmd := exec.Command("go", "vet", "-vettool="+os.Args[0], "-findcall.name=MyFunc123")
cmd.Args = append(cmd.Args, strings.Fields(test.args)...)
@@ -119,13 +170,17 @@ func MyFunc123() {}
if exitErr, ok := err.(*exec.ExitError); ok {
exitcode = exitErr.ExitCode()
}
- if exitcode != test.wantExit {
- t.Errorf("%s: got exit code %d, want %d", test.args, exitcode, test.wantExit)
+ if (exitcode != 0) != test.wantExitError {
+ want := "zero"
+ if test.wantExitError {
+ want = "nonzero"
+ }
+ t.Errorf("%s: got exit code %d, want %s", test.args, exitcode, want)
}
matched, err := regexp.Match(test.wantOut, out)
if err != nil {
- t.Fatal(err)
+ t.Fatalf("regexp.Match(<<%s>>): %v", test.wantOut, err)
}
if !matched {
t.Errorf("%s: got <<%s>>, want match of regexp <<%s>>", test.args, out, test.wantOut)
diff --git a/go/ast/inspector/typeof.go b/go/ast/inspector/typeof.go
index 11ab2bc85aa..703c8139544 100644
--- a/go/ast/inspector/typeof.go
+++ b/go/ast/inspector/typeof.go
@@ -11,6 +11,7 @@ package inspector
import (
"go/ast"
+ "math"
"golang.org/x/tools/internal/typeparams"
)
@@ -218,7 +219,7 @@ func typeOf(n ast.Node) uint64 {
func maskOf(nodes []ast.Node) uint64 {
if nodes == nil {
- return 1<<64 - 1 // match all node types
+ return math.MaxUint64 // match all node types
}
var mask uint64
for _, n := range nodes {
diff --git a/go/buildutil/util.go b/go/buildutil/util.go
index d771b18e32d..bee6390de4c 100644
--- a/go/buildutil/util.go
+++ b/go/buildutil/util.go
@@ -80,7 +80,7 @@ func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Packag
// (go/build.Context defines these as methods, but does not export them.)
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
// the local file system to answer the question.
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
if f := ctxt.HasSubdir; f != nil {
diff --git a/go/callgraph/callgraph.go b/go/callgraph/callgraph.go
index 352ce0c76ed..905623753d6 100644
--- a/go/callgraph/callgraph.go
+++ b/go/callgraph/callgraph.go
@@ -37,6 +37,8 @@ package callgraph // import "golang.org/x/tools/go/callgraph"
// More generally, we could eliminate "uninteresting" nodes such as
// nodes from packages we don't care about.
+// TODO(zpavlinovic): decide how callgraphs handle calls to and from generic function bodies.
+
import (
"fmt"
"go/token"
diff --git a/go/callgraph/cha/cha.go b/go/callgraph/cha/cha.go
index 170040426b8..7075a73cbe8 100644
--- a/go/callgraph/cha/cha.go
+++ b/go/callgraph/cha/cha.go
@@ -22,6 +22,8 @@
// partial programs, such as libraries without a main or test function.
package cha // import "golang.org/x/tools/go/callgraph/cha"
+// TODO(zpavlinovic): update CHA for how it handles generic function bodies.
+
import (
"go/types"
diff --git a/go/callgraph/cha/testdata/generics.go b/go/callgraph/cha/testdata/generics.go
index 79250a56ca1..0323c7582b6 100644
--- a/go/callgraph/cha/testdata/generics.go
+++ b/go/callgraph/cha/testdata/generics.go
@@ -41,5 +41,9 @@ func f(h func(), g func(I), k func(A), a A, b B) {
// f --> instantiated[main.A]
// f --> instantiated[main.A]
// f --> instantiated[main.B]
+// instantiated --> (*A).Foo
+// instantiated --> (*B).Foo
+// instantiated --> (A).Foo
+// instantiated --> (B).Foo
// instantiated[main.A] --> (A).Foo
// instantiated[main.B] --> (B).Foo
diff --git a/go/callgraph/static/static.go b/go/callgraph/static/static.go
index c7fae75bbde..62d2364bf2c 100644
--- a/go/callgraph/static/static.go
+++ b/go/callgraph/static/static.go
@@ -6,6 +6,8 @@
// only static call edges.
package static // import "golang.org/x/tools/go/callgraph/static"
+// TODO(zpavlinovic): update static for how it handles generic function bodies.
+
import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go
index 365d7a5b0f7..2ad0f89fd83 100644
--- a/go/callgraph/vta/graph.go
+++ b/go/callgraph/vta/graph.go
@@ -568,7 +568,9 @@ func (b *builder) panic(p *ssa.Panic) {
func (b *builder) call(c ssa.CallInstruction) {
// When c is r := recover() call register instruction, we add Recover -> r.
if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" {
- b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(c.(*ssa.Call)))
+ if v, ok := c.(ssa.Value); ok {
+ b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(v))
+ }
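+		// (When c is a defer or go instruction, e.g. defer recover(),
+		// there is no register to propagate the recovered value to.)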
return
}
@@ -586,14 +588,14 @@ func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) {
return
}
cc := c.Common()
- // When c is an unresolved method call (cc.Method != nil), cc.Value contains
- // the receiver object rather than cc.Args[0].
- if cc.Method != nil {
- b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value))
- }
offset := 0
if cc.Method != nil {
+		// We don't add interprocedural flows for receiver objects.
+		// At a call site, the receiver object is an interface while the
+		// callee object is concrete. The flow from interface to
+		// concrete type does not make sense. The flow in the other
+		// direction would bake in information from the initial call graph.
offset = 1
}
for i, v := range cc.Args {
@@ -654,7 +656,7 @@ func (b *builder) addInFlowEdge(s, d node) {
// Creates const, pointer, global, func, and local nodes based on register instructions.
func (b *builder) nodeFromVal(val ssa.Value) node {
- if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) && !isFunction(p.Elem()) {
+ if p, ok := val.Type().(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) {
// Nested pointer to interfaces are modeled as a special
// nestedPtrInterface node.
if i := interfaceUnderPtr(p.Elem()); i != nil {
@@ -687,7 +689,9 @@ func (b *builder) nodeFromVal(val ssa.Value) node {
// semantically equivalent types can have different implementations,
// this method guarantees the same implementation is always used.
func (b *builder) representative(n node) node {
- if !hasInitialTypes(n) {
+ if n.Type() == nil {
+ // panicArg and recoverReturn do not have
+ // types and are unique by definition.
return n
}
t := canonicalize(n.Type(), &b.canon)
diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go
index 5934ebc2167..6127780ac4e 100644
--- a/go/callgraph/vta/propagation.go
+++ b/go/callgraph/vta/propagation.go
@@ -175,6 +175,18 @@ func nodeTypes(nodes []node, builder *trie.Builder, propTypeId func(p propType)
return &typeSet
}
+// hasInitialTypes checks if a node can have initial types.
+// Returns true iff `n` is not a panic, recover, nestedPtr*
+// node, nor a node whose type is an interface.
+func hasInitialTypes(n node) bool {
+ switch n.(type) {
+ case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface:
+ return false
+ default:
+ return !types.IsInterface(n.Type())
+ }
+}
+
// getPropType creates a propType for `node` based on its type.
// propType.typ is always node.Type(). If node is function, then
// propType.val is the underlying function; nil otherwise.
diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go
index 00b21277f22..f4a754f9663 100644
--- a/go/callgraph/vta/propagation_test.go
+++ b/go/callgraph/vta/propagation_test.go
@@ -58,7 +58,7 @@ func newLocal(name string, t types.Type) local {
// newNamedType creates a bogus type named `name`.
func newNamedType(name string) *types.Named {
- return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil)
+ return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), types.Universe.Lookup("int").Type(), nil)
}
// sccString is a utility for stringifying `nodeToScc`. Every
diff --git a/go/callgraph/vta/testdata/src/function_alias.go b/go/callgraph/vta/testdata/src/function_alias.go
index b38e0e00d69..0a8dffe79d4 100644
--- a/go/callgraph/vta/testdata/src/function_alias.go
+++ b/go/callgraph/vta/testdata/src/function_alias.go
@@ -33,42 +33,42 @@ func Baz(f func()) {
// t2 = *t1
// *t2 = Baz$1
// t3 = local A (a)
-// t4 = &t3.foo [#0]
-// t5 = *t1
-// t6 = *t5
-// *t4 = t6
+// t4 = *t1
+// t5 = *t4
+// t6 = &t3.foo [#0]
+// *t6 = t5
// t7 = &t3.foo [#0]
// t8 = *t7
// t9 = t8()
-// t10 = &t3.do [#1] *Doer
-// t11 = &t3.foo [#0] *func()
-// t12 = *t11 func()
-// t13 = changetype Doer <- func() (t12) Doer
-// *t10 = t13
+// t10 = &t3.foo [#0] *func()
+// t11 = *t10 func()
+// t12 = &t3.do [#1] *Doer
+// t13 = changetype Doer <- func() (t11) Doer
+// *t12 = t13
// t14 = &t3.do [#1] *Doer
// t15 = *t14 Doer
// t16 = t15() ()
// Flow chain showing that Baz$1 reaches t8():
-// Baz$1 -> t2 <-> PtrFunction(func()) <-> t5 -> t6 -> t4 <-> Field(testdata.A:foo) <-> t7 -> t8
+// Baz$1 -> t2 <-> PtrFunction(func()) <-> t4 -> t5 -> t6 <-> Field(testdata.A:foo) <-> t7 -> t8
// Flow chain showing that Baz$1 reaches t15():
-// Field(testdata.A:foo) <-> t11 -> t12 -> t13 -> t10 <-> Field(testdata.A:do) <-> t14 -> t15
+// Field(testdata.A:foo) <-> t10 -> t11 -> t13 -> t12 <-> Field(testdata.A:do) <-> t14 -> t15
// WANT:
// Local(f) -> Local(t0)
// Local(t0) -> PtrFunction(func())
// Function(Baz$1) -> Local(t2)
-// PtrFunction(func()) -> Local(t0), Local(t2), Local(t5)
+// PtrFunction(func()) -> Local(t0), Local(t2), Local(t4)
// Local(t2) -> PtrFunction(func())
-// Local(t4) -> Field(testdata.A:foo)
-// Local(t5) -> Local(t6), PtrFunction(func())
-// Local(t6) -> Local(t4)
+// Local(t6) -> Field(testdata.A:foo)
+// Local(t4) -> Local(t5), PtrFunction(func())
+// Local(t5) -> Local(t6)
// Local(t7) -> Field(testdata.A:foo), Local(t8)
-// Field(testdata.A:foo) -> Local(t11), Local(t4), Local(t7)
-// Local(t4) -> Field(testdata.A:foo)
-// Field(testdata.A:do) -> Local(t10), Local(t14)
-// Local(t10) -> Field(testdata.A:do)
-// Local(t11) -> Field(testdata.A:foo), Local(t12)
-// Local(t12) -> Local(t13)
-// Local(t13) -> Local(t10)
+// Field(testdata.A:foo) -> Local(t10), Local(t6), Local(t7)
+// Local(t6) -> Field(testdata.A:foo)
+// Field(testdata.A:do) -> Local(t12), Local(t14)
+// Local(t12) -> Field(testdata.A:do)
+// Local(t10) -> Field(testdata.A:foo), Local(t11)
+// Local(t11) -> Local(t13)
+// Local(t13) -> Local(t12)
// Local(t14) -> Field(testdata.A:do), Local(t15)
diff --git a/go/callgraph/vta/testdata/src/panic.go b/go/callgraph/vta/testdata/src/panic.go
index 2d39c70ea89..5ef3548577b 100644
--- a/go/callgraph/vta/testdata/src/panic.go
+++ b/go/callgraph/vta/testdata/src/panic.go
@@ -27,12 +27,12 @@ func recover2() {
func Baz(a A) {
defer recover1()
+ defer recover()
panic(a)
}
// Relevant SSA:
// func recover1():
-// 0:
// t0 = print("only this recover...":string)
// t1 = recover()
// t2 = typeassert,ok t1.(I)
@@ -53,6 +53,7 @@ func Baz(a A) {
// t0 = local A (a)
// *t0 = a
// defer recover1()
+// defer recover()
// t1 = *t0
// t2 = make interface{} <- A (t1)
// panic t2
diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go
index 0531a227f6c..d1831983ad6 100644
--- a/go/callgraph/vta/utils.go
+++ b/go/callgraph/vta/utils.go
@@ -9,6 +9,7 @@ import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
func canAlias(n1, n2 node) bool {
@@ -56,24 +57,7 @@ func hasInFlow(n node) bool {
return true
}
- return isInterface(t) || isFunction(t)
-}
-
-// hasInitialTypes check if a node can have initial types.
-// Returns true iff `n` is not a panic or recover node as
-// those are artificial.
-func hasInitialTypes(n node) bool {
- switch n.(type) {
- case panicArg, recoverReturn:
- return false
- default:
- return true
- }
-}
-
-func isInterface(t types.Type) bool {
- _, ok := t.Underlying().(*types.Interface)
- return ok
+ return types.IsInterface(t) || isFunction(t)
}
func isFunction(t types.Type) bool {
@@ -98,7 +82,7 @@ func interfaceUnderPtr(t types.Type) types.Type {
return nil
}
- if isInterface(p.Elem()) {
+ if types.IsInterface(p.Elem()) {
return p.Elem()
}
@@ -134,19 +118,27 @@ func functionUnderPtr(t types.Type) types.Type {
}
// sliceArrayElem returns the element type of type `t` that is
-// expected to be a (pointer to) array or slice, consistent with
+// expected to be a (pointer to) array, slice or string, consistent with
// the ssa.Index and ssa.IndexAddr instructions. Panics otherwise.
func sliceArrayElem(t types.Type) types.Type {
- u := t.Underlying()
-
- if p, ok := u.(*types.Pointer); ok {
- u = p.Elem().Underlying()
- }
-
- if a, ok := u.(*types.Array); ok {
- return a.Elem()
+ switch u := t.Underlying().(type) {
+ case *types.Pointer:
+ return u.Elem().Underlying().(*types.Array).Elem()
+ case *types.Array:
+ return u.Elem()
+ case *types.Slice:
+ return u.Elem()
+ case *types.Basic:
+ return types.Typ[types.Byte]
+ case *types.Interface: // type param.
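+		// For example, a type parameter constrained by
+		// interface{ []byte | string } has element type byte in
+		// every term.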
+ terms, err := typeparams.InterfaceTermSet(u)
+ if err != nil || len(terms) == 0 {
+ panic(t)
+ }
+ return sliceArrayElem(terms[0].Type()) // Element types must match.
+ default:
+ panic(t)
}
- return u.(*types.Slice).Elem()
}
// siteCallees computes a set of callees for call site `c` given program `callgraph`.
diff --git a/go/callgraph/vta/vta.go b/go/callgraph/vta/vta.go
index 9839bd3f3cd..58393600337 100644
--- a/go/callgraph/vta/vta.go
+++ b/go/callgraph/vta/vta.go
@@ -54,6 +54,8 @@
// reaching the node representing the call site to create a set of callees.
package vta
+// TODO(zpavlinovic): update VTA for how it handles generic function bodies and instantiation wrappers.
+
import (
"go/types"
diff --git a/go/gcexportdata/example_test.go b/go/gcexportdata/example_test.go
index e81e705b1c4..7371d31d430 100644
--- a/go/gcexportdata/example_test.go
+++ b/go/gcexportdata/example_test.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.7 && gc
-// +build go1.7,gc
+//go:build go1.7 && gc && !android && !ios && !js
+// +build go1.7,gc,!android,!ios,!js
package gcexportdata_test
@@ -30,7 +30,6 @@ func ExampleRead() {
log.Fatalf("can't find export data for fmt")
}
fmt.Printf("Package path: %s\n", path)
- fmt.Printf("Export data: %s\n", filepath.Base(filename))
// Open and read the file.
f, err := os.Open(filename)
@@ -80,7 +79,6 @@ func ExampleRead() {
// Output:
//
// Package path: fmt
- // Export data: fmt.a
// Package members: Println found
// Println type: func(a ...any) (n int, err error)
// Println location: $GOROOT/src/fmt/print.go:123:1
diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go
index d50826dbf7e..165ede0f8f3 100644
--- a/go/gcexportdata/gcexportdata.go
+++ b/go/gcexportdata/gcexportdata.go
@@ -22,26 +22,41 @@ package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
+ "encoding/json"
"fmt"
"go/token"
"go/types"
"io"
- "io/ioutil"
+ "os/exec"
- "golang.org/x/tools/go/internal/gcimporter"
+ "golang.org/x/tools/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
-// using the workspace layout conventions of go/build.
+// using the go command.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
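+//
+// Find runs "go list -json -export" in srcDir and returns the Export
+// and ImportPath fields from its output.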
func Find(importPath, srcDir string) (filename, path string) {
- return gcimporter.FindPkg(importPath, srcDir)
+ cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
+ cmd.Dir = srcDir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", ""
+ }
+ var data struct {
+ ImportPath string
+ Export string
+ }
+ json.Unmarshal(out, &data)
+ return data.Export, data.ImportPath
}
// NewReader returns a reader for the export data section of an object
@@ -69,9 +84,26 @@ func NewReader(r io.Reader) (io.Reader, error) {
}
}
+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+ if lr, ok := r.(*io.LimitedReader); ok {
+ data := make([]byte, lr.N)
+ _, err := io.ReadFull(lr, data)
+ return data, err
+ }
+ return io.ReadAll(r)
+}
+
// Read reads export data from in, decodes it, and returns type
// information for the package.
-// The package name is specified by path.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
@@ -82,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) {
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
- data, err := ioutil.ReadAll(in)
+ data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
@@ -91,22 +123,32 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
- // The App Engine Go runtime v1.6 uses the old export data format.
- // TODO(adonovan): delete once v1.7 has been around for a while.
- if bytes.HasPrefix(data, []byte("package ")) {
- return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
- }
-
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
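+	// More recent toolchains may instead emit the unified export
+	// format, which starts with a 'u'.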
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
- return pkg, err
- }
+ if len(data) > 0 {
+ switch data[0] {
+ case 'i':
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
- _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
- return pkg, err
+ case 'v', 'c', 'd':
+ _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+ return pkg, err
+
+ case 'u':
+ _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+ }
+ }
+ return nil, fmt.Errorf("empty export data for %s", path)
}
// Write writes encoded type information for the specified package to out.
@@ -129,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
- data, err := ioutil.ReadAll(in)
+ data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}
diff --git a/go/gcexportdata/gcexportdata_test.go b/go/gcexportdata/gcexportdata_test.go
deleted file mode 100644
index a0006c02d5a..00000000000
--- a/go/gcexportdata/gcexportdata_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcexportdata_test
-
-import (
- "go/token"
- "go/types"
- "log"
- "os"
- "testing"
-
- "golang.org/x/tools/go/gcexportdata"
-)
-
-// Test to ensure that gcexportdata can read files produced by App
-// Engine Go runtime v1.6.
-func TestAppEngine16(t *testing.T) {
- // Open and read the file.
- f, err := os.Open("testdata/errors-ae16.a")
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- r, err := gcexportdata.NewReader(f)
- if err != nil {
- log.Fatalf("reading export data: %v", err)
- }
-
- // Decode the export data.
- fset := token.NewFileSet()
- imports := make(map[string]*types.Package)
- pkg, err := gcexportdata.Read(r, fset, imports, "errors")
- if err != nil {
- log.Fatal(err)
- }
-
- // Print package information.
- got := pkg.Scope().Lookup("New").Type().String()
- want := "func(text string) error"
- if got != want {
- t.Errorf("New.Type = %s, want %s", got, want)
- }
-}
diff --git a/go/gcexportdata/importer.go b/go/gcexportdata/importer.go
index fe6ed93215c..37a7247e268 100644
--- a/go/gcexportdata/importer.go
+++ b/go/gcexportdata/importer.go
@@ -22,6 +22,9 @@ import (
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
diff --git a/go/gcexportdata/testdata/errors-ae16.a b/go/gcexportdata/testdata/errors-ae16.a
deleted file mode 100644
index 3f1dad54f07..00000000000
Binary files a/go/gcexportdata/testdata/errors-ae16.a and /dev/null differ
diff --git a/go/internal/gcimporter/gcimporter.go b/go/internal/gcimporter/gcimporter.go
deleted file mode 100644
index 493bfa03b0f..00000000000
--- a/go/internal/gcimporter/gcimporter.go
+++ /dev/null
@@ -1,1107 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
-// but it also contains the original source-based importer code for Go1.6.
-// Once we stop supporting 1.6, we can remove that code.
-
-// Package gcimporter provides various functions for reading
-// gc-generated object files that can be used to implement the
-// Importer interface defined by the Go 1.5 standard library package.
-package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
-
-import (
- "bufio"
- "errors"
- "fmt"
- "go/build"
- "go/constant"
- "go/token"
- "go/types"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "text/scanner"
-)
-
-const (
- // Enable debug during development: it adds some additional checks, and
- // prevents errors from being recovered.
- debug = false
-
- // If trace is set, debugging output is printed to std out.
- trace = false
-)
-
-var pkgExts = [...]string{".a", ".o"}
-
-// FindPkg returns the filename and unique package id for an import
-// path based on package information provided by build.Import (using
-// the build.Default build.Context). A relative srcDir is interpreted
-// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
- if path == "" {
- return
- }
-
- var noext string
- switch {
- default:
- // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
- // Don't require the source files to be present.
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
- srcDir = abs
- }
- bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
- if bp.PkgObj == "" {
- id = path // make sure we have an id to print in error message
- return
- }
- noext = strings.TrimSuffix(bp.PkgObj, ".a")
- id = bp.ImportPath
-
- case build.IsLocalImport(path):
- // "./x" -> "/this/directory/x.ext", "/this/directory/x"
- noext = filepath.Join(srcDir, path)
- id = noext
-
- case filepath.IsAbs(path):
- // for completeness only - go/build.Import
- // does not support absolute imports
- // "/x" -> "/x.ext", "/x"
- noext = path
- id = path
- }
-
- if false { // for debugging
- if path != id {
- fmt.Printf("%s -> %s\n", path, id)
- }
- }
-
- // try extensions
- for _, ext := range pkgExts {
- filename = noext + ext
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- filename = "" // not found
- return
-}
-
-// ImportData imports a package by reading the gc-generated export data,
-// adds the corresponding package object to the packages map indexed by id,
-// and returns the object.
-//
-// The packages map must contains all packages already imported. The data
-// reader position must be the beginning of the export data section. The
-// filename is only used in error messages.
-//
-// If packages[id] contains the completely imported package, that package
-// can be used directly, and there is no need to call this function (but
-// there is also no harm but for extra time used).
-func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
- // support for parser error handling
- defer func() {
- switch r := recover().(type) {
- case nil:
- // nothing to do
- case importError:
- err = r
- default:
- panic(r) // internal error
- }
- }()
-
- var p parser
- p.init(filename, id, data, packages)
- pkg = p.parseExport()
-
- return
-}
-
-// Import imports a gc-generated package given its import path and srcDir, adds
-// the corresponding package object to the packages map, and returns the object.
-// The packages map must contain all packages already imported.
-func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
- var rc io.ReadCloser
- var filename, id string
- if lookup != nil {
- // With custom lookup specified, assume that caller has
- // converted path to a canonical import path for use in the map.
- if path == "unsafe" {
- return types.Unsafe, nil
- }
- id = path
-
- // No need to re-import if the package was imported completely before.
- if pkg = packages[id]; pkg != nil && pkg.Complete() {
- return
- }
- f, err := lookup(path)
- if err != nil {
- return nil, err
- }
- rc = f
- } else {
- filename, id = FindPkg(path, srcDir)
- if filename == "" {
- if path == "unsafe" {
- return types.Unsafe, nil
- }
- return nil, fmt.Errorf("can't find import: %q", id)
- }
-
- // no need to re-import if the package was imported completely before
- if pkg = packages[id]; pkg != nil && pkg.Complete() {
- return
- }
-
- // open file
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- // add file name to error
- err = fmt.Errorf("%s: %v", filename, err)
- }
- }()
- rc = f
- }
- defer rc.Close()
-
- var hdr string
- buf := bufio.NewReader(rc)
- if hdr, _, err = FindExportData(buf); err != nil {
- return
- }
-
- switch hdr {
- case "$$\n":
- // Work-around if we don't have a filename; happens only if lookup != nil.
- // Either way, the filename is only needed for importer error messages, so
- // this is fine.
- if filename == "" {
- filename = path
- }
- return ImportData(packages, filename, id, buf)
-
- case "$$B\n":
- var data []byte
- data, err = ioutil.ReadAll(buf)
- if err != nil {
- break
- }
-
- // TODO(gri): allow clients of go/importer to provide a FileSet.
- // Or, define a new standard go/types/gcexportdata package.
- fset := token.NewFileSet()
-
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = IImportData(fset, packages, data[1:], id)
- } else {
- _, pkg, err = BImportData(fset, packages, data, id)
- }
-
- default:
- err = fmt.Errorf("unknown export data header: %q", hdr)
- }
-
- return
-}
-
-// ----------------------------------------------------------------------------
-// Parser
-
-// TODO(gri) Imported objects don't have position information.
-// Ideally use the debug table line info; alternatively
-// create some fake position (or the position of the
-// import). That way error messages referring to imported
-// objects can print meaningful information.
-
-// parser parses the exports inside a gc compiler-produced
-// object/archive file and populates its scope with the results.
-type parser struct {
- scanner scanner.Scanner
- tok rune // current token
- lit string // literal string; only valid for Ident, Int, String tokens
- id string // package id of imported package
- sharedPkgs map[string]*types.Package // package id -> package object (across importer)
- localPkgs map[string]*types.Package // package id -> package object (just this package)
-}
-
-func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
- p.scanner.Init(src)
- p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
- p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
- p.scanner.Whitespace = 1<<'\t' | 1<<' '
- p.scanner.Filename = filename // for good error messages
- p.next()
- p.id = id
- p.sharedPkgs = packages
- if debug {
- // check consistency of packages map
- for _, pkg := range packages {
- if pkg.Name() == "" {
- fmt.Printf("no package name for %s\n", pkg.Path())
- }
- }
- }
-}
-
-func (p *parser) next() {
- p.tok = p.scanner.Scan()
- switch p.tok {
- case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
- p.lit = p.scanner.TokenText()
- default:
- p.lit = ""
- }
- if debug {
- fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
- }
-}
-
-func declTypeName(pkg *types.Package, name string) *types.TypeName {
- scope := pkg.Scope()
- if obj := scope.Lookup(name); obj != nil {
- return obj.(*types.TypeName)
- }
- obj := types.NewTypeName(token.NoPos, pkg, name, nil)
- // a named type may be referred to before the underlying type
- // is known - set it up
- types.NewNamed(obj, nil, nil)
- scope.Insert(obj)
- return obj
-}
-
-// ----------------------------------------------------------------------------
-// Error handling
-
-// Internal errors are boxed as importErrors.
-type importError struct {
- pos scanner.Position
- err error
-}
-
-func (e importError) Error() string {
- return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
-}
-
-func (p *parser) error(err interface{}) {
- if s, ok := err.(string); ok {
- err = errors.New(s)
- }
- // panic with a runtime.Error if err is not an error
- panic(importError{p.scanner.Pos(), err.(error)})
-}
-
-func (p *parser) errorf(format string, args ...interface{}) {
- p.error(fmt.Sprintf(format, args...))
-}
-
-func (p *parser) expect(tok rune) string {
- lit := p.lit
- if p.tok != tok {
- p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
- }
- p.next()
- return lit
-}
-
-func (p *parser) expectSpecial(tok string) {
- sep := 'x' // not white space
- i := 0
- for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- i++
- }
- if i < len(tok) {
- p.errorf("expected %q, got %q", tok, tok[0:i])
- }
-}
-
-func (p *parser) expectKeyword(keyword string) {
- lit := p.expect(scanner.Ident)
- if lit != keyword {
- p.errorf("expected keyword %s, got %q", keyword, lit)
- }
-}
-
-// ----------------------------------------------------------------------------
-// Qualified and unqualified names
-
-// parsePackageID parses a PackageId:
-//
-// PackageId = string_lit .
-func (p *parser) parsePackageID() string {
- id, err := strconv.Unquote(p.expect(scanner.String))
- if err != nil {
- p.error(err)
- }
- // id == "" stands for the imported package id
- // (only known at time of package installation)
- if id == "" {
- id = p.id
- }
- return id
-}
-
-// parsePackageName parse a PackageName:
-//
-// PackageName = ident .
-func (p *parser) parsePackageName() string {
- return p.expect(scanner.Ident)
-}
-
-// parseDotIdent parses a dotIdentifier:
-//
-// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
-func (p *parser) parseDotIdent() string {
- ident := ""
- if p.tok != scanner.Int {
- sep := 'x' // not white space
- for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
- ident += p.lit
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- }
- }
- if ident == "" {
- p.expect(scanner.Ident) // use expect() for error handling
- }
- return ident
-}
-
-// parseQualifiedName parses a QualifiedName:
-//
-// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
-func (p *parser) parseQualifiedName() (id, name string) {
- p.expect('@')
- id = p.parsePackageID()
- p.expect('.')
- // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
- if p.tok == '?' {
- p.next()
- } else {
- name = p.parseDotIdent()
- }
- return
-}
-
-// getPkg returns the package for a given id. If the package is
-// not found, create the package and add it to the p.localPkgs
-// and p.sharedPkgs maps. name is the (expected) name of the
-// package. If name == "", the package name is expected to be
-// set later via an import clause in the export data.
-//
-// id identifies a package, usually by a canonical package path like
-// "encoding/json" but possibly by a non-canonical import path like
-// "./json".
-func (p *parser) getPkg(id, name string) *types.Package {
- // package unsafe is not in the packages maps - handle explicitly
- if id == "unsafe" {
- return types.Unsafe
- }
-
- pkg := p.localPkgs[id]
- if pkg == nil {
- // first import of id from this package
- pkg = p.sharedPkgs[id]
- if pkg == nil {
- // first import of id by this importer;
- // add (possibly unnamed) pkg to shared packages
- pkg = types.NewPackage(id, name)
- p.sharedPkgs[id] = pkg
- }
- // add (possibly unnamed) pkg to local packages
- if p.localPkgs == nil {
- p.localPkgs = make(map[string]*types.Package)
- }
- p.localPkgs[id] = pkg
- } else if name != "" {
- // package exists already and we have an expected package name;
- // make sure names match or set package name if necessary
- if pname := pkg.Name(); pname == "" {
- pkg.SetName(name)
- } else if pname != name {
- p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
- }
- }
- return pkg
-}
-
-// parseExportedName is like parseQualifiedName, but
-// the package id is resolved to an imported *types.Package.
-func (p *parser) parseExportedName() (pkg *types.Package, name string) {
- id, name := p.parseQualifiedName()
- pkg = p.getPkg(id, "")
- return
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-// parseBasicType parses a BasicType:
-//
-// BasicType = identifier .
-func (p *parser) parseBasicType() types.Type {
- id := p.expect(scanner.Ident)
- obj := types.Universe.Lookup(id)
- if obj, ok := obj.(*types.TypeName); ok {
- return obj.Type()
- }
- p.errorf("not a basic type: %s", id)
- return nil
-}
-
-// parseArrayType parses an ArrayType:
-//
-// ArrayType = "[" int_lit "]" Type .
-func (p *parser) parseArrayType(parent *types.Package) types.Type {
- // "[" already consumed and lookahead known not to be "]"
- lit := p.expect(scanner.Int)
- p.expect(']')
- elem := p.parseType(parent)
- n, err := strconv.ParseInt(lit, 10, 64)
- if err != nil {
- p.error(err)
- }
- return types.NewArray(elem, n)
-}
-
-// parseMapType parses a MapType:
-//
-// MapType = "map" "[" Type "]" Type .
-func (p *parser) parseMapType(parent *types.Package) types.Type {
- p.expectKeyword("map")
- p.expect('[')
- key := p.parseType(parent)
- p.expect(']')
- elem := p.parseType(parent)
- return types.NewMap(key, elem)
-}
-
-// parseName parses a Name:
-//
-// Name = identifier | "?" | QualifiedName .
-//
-// For unqualified and anonymous names, the returned package is the parent
-// package unless parent == nil, in which case the returned package is the
-// package being imported. (The parent package is not nil if the name
-// is an unqualified struct field or interface method name belonging to a
-// type declared in another package.)
-//
-// For qualified names, the returned package is nil (and not created if
-// it doesn't exist yet) unless materializePkg is set (which creates an
-// unnamed package with valid package path). In the latter case, a
-// subsequent import clause is expected to provide a name for the package.
-func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
- pkg = parent
- if pkg == nil {
- pkg = p.sharedPkgs[p.id]
- }
- switch p.tok {
- case scanner.Ident:
- name = p.lit
- p.next()
- case '?':
- // anonymous
- p.next()
- case '@':
- // exported name prefixed with package path
- pkg = nil
- var id string
- id, name = p.parseQualifiedName()
- if materializePkg {
- pkg = p.getPkg(id, "")
- }
- default:
- p.error("name expected")
- }
- return
-}
-
-func deref(typ types.Type) types.Type {
- if p, _ := typ.(*types.Pointer); p != nil {
- return p.Elem()
- }
- return typ
-}
-
-// parseField parses a Field:
-//
-// Field = Name Type [ string_lit ] .
-func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
- pkg, name := p.parseName(parent, true)
-
- if name == "_" {
- // Blank fields should be package-qualified because they
- // are unexported identifiers, but gc does not qualify them.
- // Assuming that the ident belongs to the current package
- // causes types to change during re-exporting, leading
- // to spurious "can't assign A to B" errors from go/types.
- // As a workaround, pretend all blank fields belong
- // to the same unique dummy package.
- const blankpkg = "<_>"
- pkg = p.getPkg(blankpkg, blankpkg)
- }
-
- typ := p.parseType(parent)
- anonymous := false
- if name == "" {
- // anonymous field - typ must be T or *T and T must be a type name
- switch typ := deref(typ).(type) {
- case *types.Basic: // basic types are named types
- pkg = nil // objects defined in Universe scope have no package
- name = typ.Name()
- case *types.Named:
- name = typ.Obj().Name()
- default:
- p.errorf("anonymous field expected")
- }
- anonymous = true
- }
- tag := ""
- if p.tok == scanner.String {
- s := p.expect(scanner.String)
- var err error
- tag, err = strconv.Unquote(s)
- if err != nil {
- p.errorf("invalid struct tag %s: %s", s, err)
- }
- }
- return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
-}
-
-// parseStructType parses a StructType:
-//
-// StructType = "struct" "{" [ FieldList ] "}" .
-// FieldList = Field { ";" Field } .
-func (p *parser) parseStructType(parent *types.Package) types.Type {
- var fields []*types.Var
- var tags []string
-
- p.expectKeyword("struct")
- p.expect('{')
- for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
- if i > 0 {
- p.expect(';')
- }
- fld, tag := p.parseField(parent)
- if tag != "" && tags == nil {
- tags = make([]string, i)
- }
- if tags != nil {
- tags = append(tags, tag)
- }
- fields = append(fields, fld)
- }
- p.expect('}')
-
- return types.NewStruct(fields, tags)
-}
-
-// parseParameter parses a Parameter:
-//
-// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
-func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
- _, name := p.parseName(nil, false)
- // remove gc-specific parameter numbering
- if i := strings.Index(name, "·"); i >= 0 {
- name = name[:i]
- }
- if p.tok == '.' {
- p.expectSpecial("...")
- isVariadic = true
- }
- typ := p.parseType(nil)
- if isVariadic {
- typ = types.NewSlice(typ)
- }
- // ignore argument tag (e.g. "noescape")
- if p.tok == scanner.String {
- p.next()
- }
- // TODO(gri) should we provide a package?
- par = types.NewVar(token.NoPos, nil, name, typ)
- return
-}
-
-// parseParameters parses a Parameters:
-//
-// Parameters = "(" [ ParameterList ] ")" .
-// ParameterList = { Parameter "," } Parameter .
-func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
- p.expect('(')
- for p.tok != ')' && p.tok != scanner.EOF {
- if len(list) > 0 {
- p.expect(',')
- }
- par, variadic := p.parseParameter()
- list = append(list, par)
- if variadic {
- if isVariadic {
- p.error("... not on final argument")
- }
- isVariadic = true
- }
- }
- p.expect(')')
-
- return
-}
-
-// parseSignature parses a Signature:
-//
-// Signature = Parameters [ Result ] .
-// Result = Type | Parameters .
-func (p *parser) parseSignature(recv *types.Var) *types.Signature {
- params, isVariadic := p.parseParameters()
-
- // optional result type
- var results []*types.Var
- if p.tok == '(' {
- var variadic bool
- results, variadic = p.parseParameters()
- if variadic {
- p.error("... not permitted on result type")
- }
- }
-
- return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
-}
-
-// parseInterfaceType parses an InterfaceType:
-//
-// InterfaceType = "interface" "{" [ MethodList ] "}" .
-// MethodList = Method { ";" Method } .
-// Method = Name Signature .
-//
-// The methods of embedded interfaces are always "inlined"
-// by the compiler and thus embedded interfaces are never
-// visible in the export data.
-func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
- var methods []*types.Func
-
- p.expectKeyword("interface")
- p.expect('{')
- for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
- if i > 0 {
- p.expect(';')
- }
- pkg, name := p.parseName(parent, true)
- sig := p.parseSignature(nil)
- methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
- }
- p.expect('}')
-
- // Complete requires the type's embedded interfaces to be fully defined,
- // but we do not define any
- return newInterface(methods, nil).Complete()
-}
-
-// parseChanType parses a ChanType:
-//
-// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
-func (p *parser) parseChanType(parent *types.Package) types.Type {
- dir := types.SendRecv
- if p.tok == scanner.Ident {
- p.expectKeyword("chan")
- if p.tok == '<' {
- p.expectSpecial("<-")
- dir = types.SendOnly
- }
- } else {
- p.expectSpecial("<-")
- p.expectKeyword("chan")
- dir = types.RecvOnly
- }
- elem := p.parseType(parent)
- return types.NewChan(dir, elem)
-}
-
-// parseType parses a Type:
-//
-// Type =
-// BasicType | TypeName | ArrayType | SliceType | StructType |
-// PointerType | FuncType | InterfaceType | MapType | ChanType |
-// "(" Type ")" .
-//
-// BasicType = ident .
-// TypeName = ExportedName .
-// SliceType = "[" "]" Type .
-// PointerType = "*" Type .
-// FuncType = "func" Signature .
-func (p *parser) parseType(parent *types.Package) types.Type {
- switch p.tok {
- case scanner.Ident:
- switch p.lit {
- default:
- return p.parseBasicType()
- case "struct":
- return p.parseStructType(parent)
- case "func":
- // FuncType
- p.next()
- return p.parseSignature(nil)
- case "interface":
- return p.parseInterfaceType(parent)
- case "map":
- return p.parseMapType(parent)
- case "chan":
- return p.parseChanType(parent)
- }
- case '@':
- // TypeName
- pkg, name := p.parseExportedName()
- return declTypeName(pkg, name).Type()
- case '[':
- p.next() // look ahead
- if p.tok == ']' {
- // SliceType
- p.next()
- return types.NewSlice(p.parseType(parent))
- }
- return p.parseArrayType(parent)
- case '*':
- // PointerType
- p.next()
- return types.NewPointer(p.parseType(parent))
- case '<':
- return p.parseChanType(parent)
- case '(':
- // "(" Type ")"
- p.next()
- typ := p.parseType(parent)
- p.expect(')')
- return typ
- }
- p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
- return nil
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// parseImportDecl parses an ImportDecl:
-//
-// ImportDecl = "import" PackageName PackageId .
-func (p *parser) parseImportDecl() {
- p.expectKeyword("import")
- name := p.parsePackageName()
- p.getPkg(p.parsePackageID(), name)
-}
-
-// parseInt parses an int_lit:
-//
-// int_lit = [ "+" | "-" ] { "0" ... "9" } .
-func (p *parser) parseInt() string {
- s := ""
- switch p.tok {
- case '-':
- s = "-"
- p.next()
- case '+':
- p.next()
- }
- return s + p.expect(scanner.Int)
-}
-
-// parseNumber parses a number:
-//
-// number = int_lit [ "p" int_lit ] .
-func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
- // mantissa
- mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
- if mant == nil {
- panic("invalid mantissa")
- }
-
- if p.lit == "p" {
- // exponent (base 2)
- p.next()
- exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
- if err != nil {
- p.error(err)
- }
- if exp < 0 {
- denom := constant.MakeInt64(1)
- denom = constant.Shift(denom, token.SHL, uint(-exp))
- typ = types.Typ[types.UntypedFloat]
- val = constant.BinaryOp(mant, token.QUO, denom)
- return
- }
- if exp > 0 {
- mant = constant.Shift(mant, token.SHL, uint(exp))
- }
- typ = types.Typ[types.UntypedFloat]
- val = mant
- return
- }
-
- typ = types.Typ[types.UntypedInt]
- val = mant
- return
-}
-
-// parseConstDecl parses a ConstDecl:
-//
-// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
-// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
-// bool_lit = "true" | "false" .
-// complex_lit = "(" float_lit "+" float_lit "i" ")" .
-// rune_lit = "(" int_lit "+" int_lit ")" .
-// string_lit = `"` { unicode_char } `"` .
-func (p *parser) parseConstDecl() {
- p.expectKeyword("const")
- pkg, name := p.parseExportedName()
-
- var typ0 types.Type
- if p.tok != '=' {
- // constant types are never structured - no need for parent type
- typ0 = p.parseType(nil)
- }
-
- p.expect('=')
- var typ types.Type
- var val constant.Value
- switch p.tok {
- case scanner.Ident:
- // bool_lit
- if p.lit != "true" && p.lit != "false" {
- p.error("expected true or false")
- }
- typ = types.Typ[types.UntypedBool]
- val = constant.MakeBool(p.lit == "true")
- p.next()
-
- case '-', scanner.Int:
- // int_lit
- typ, val = p.parseNumber()
-
- case '(':
- // complex_lit or rune_lit
- p.next()
- if p.tok == scanner.Char {
- p.next()
- p.expect('+')
- typ = types.Typ[types.UntypedRune]
- _, val = p.parseNumber()
- p.expect(')')
- break
- }
- _, re := p.parseNumber()
- p.expect('+')
- _, im := p.parseNumber()
- p.expectKeyword("i")
- p.expect(')')
- typ = types.Typ[types.UntypedComplex]
- val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
-
- case scanner.Char:
- // rune_lit
- typ = types.Typ[types.UntypedRune]
- val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
- p.next()
-
- case scanner.String:
- // string_lit
- typ = types.Typ[types.UntypedString]
- val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
- p.next()
-
- default:
- p.errorf("expected literal got %s", scanner.TokenString(p.tok))
- }
-
- if typ0 == nil {
- typ0 = typ
- }
-
- pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
-}
-
-// parseTypeDecl parses a TypeDecl:
-//
-// TypeDecl = "type" ExportedName Type .
-func (p *parser) parseTypeDecl() {
- p.expectKeyword("type")
- pkg, name := p.parseExportedName()
- obj := declTypeName(pkg, name)
-
- // The type object may have been imported before and thus already
- // have a type associated with it. We still need to parse the type
- // structure, but throw it away if the object already has a type.
- // This ensures that all imports refer to the same type object for
- // a given type declaration.
- typ := p.parseType(pkg)
-
- if name := obj.Type().(*types.Named); name.Underlying() == nil {
- name.SetUnderlying(typ)
- }
-}
-
-// parseVarDecl parses a VarDecl:
-//
-// VarDecl = "var" ExportedName Type .
-func (p *parser) parseVarDecl() {
- p.expectKeyword("var")
- pkg, name := p.parseExportedName()
- typ := p.parseType(pkg)
- pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
-}
-
-// parseFunc parses a Func:
-//
-// Func = Signature [ Body ] .
-// Body = "{" ... "}" .
-func (p *parser) parseFunc(recv *types.Var) *types.Signature {
- sig := p.parseSignature(recv)
- if p.tok == '{' {
- p.next()
- for i := 1; i > 0; p.next() {
- switch p.tok {
- case '{':
- i++
- case '}':
- i--
- }
- }
- }
- return sig
-}
-
-// parseMethodDecl parses a MethodDecl:
-//
-// MethodDecl = "func" Receiver Name Func .
-// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
-func (p *parser) parseMethodDecl() {
- // "func" already consumed
- p.expect('(')
- recv, _ := p.parseParameter() // receiver
- p.expect(')')
-
- // determine receiver base type object
- base := deref(recv.Type()).(*types.Named)
-
- // parse method name, signature, and possibly inlined body
- _, name := p.parseName(nil, false)
- sig := p.parseFunc(recv)
-
- // methods always belong to the same package as the base type object
- pkg := base.Obj().Pkg()
-
- // add method to type unless type was imported before
- // and method exists already
- // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
- base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
-}
-
-// parseFuncDecl parses a FuncDecl:
-//
-// FuncDecl = "func" ExportedName Func .
-func (p *parser) parseFuncDecl() {
- // "func" already consumed
- pkg, name := p.parseExportedName()
- typ := p.parseFunc(nil)
- pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
-}
-
-// parseDecl parses a Decl:
-//
-// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
-func (p *parser) parseDecl() {
- if p.tok == scanner.Ident {
- switch p.lit {
- case "import":
- p.parseImportDecl()
- case "const":
- p.parseConstDecl()
- case "type":
- p.parseTypeDecl()
- case "var":
- p.parseVarDecl()
- case "func":
- p.next() // look ahead
- if p.tok == '(' {
- p.parseMethodDecl()
- } else {
- p.parseFuncDecl()
- }
- }
- }
- p.expect('\n')
-}
-
-// ----------------------------------------------------------------------------
-// Export
-
-// parseExport parses an Export:
-//
-// Export = "PackageClause { Decl } "$$" .
-// PackageClause = "package" PackageName [ "safe" ] "\n" .
-func (p *parser) parseExport() *types.Package {
- p.expectKeyword("package")
- name := p.parsePackageName()
- if p.tok == scanner.Ident && p.lit == "safe" {
- // package was compiled with -u option - ignore
- p.next()
- }
- p.expect('\n')
-
- pkg := p.getPkg(p.id, name)
-
- for p.tok != '$' && p.tok != scanner.EOF {
- p.parseDecl()
- }
-
- if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
- // don't call next()/expect() since reading past the
- // export data may cause scanner errors (e.g. NUL chars)
- p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
- }
-
- if n := p.scanner.ErrorCount; n != 0 {
- p.errorf("expected no scanner errors, got %d", n)
- }
-
- // Record all locally referenced packages as imports.
- var imports []*types.Package
- for id, pkg2 := range p.localPkgs {
- if pkg2.Name() == "" {
- p.errorf("%s package has no name", id)
- }
- if id == p.id {
- continue // avoid self-edge
- }
- imports = append(imports, pkg2)
- }
- sort.Sort(byPath(imports))
- pkg.SetImports(imports)
-
- // package was imported completely and without errors
- pkg.MarkComplete()
-
- return pkg
-}
-
-type byPath []*types.Package
-
-func (a byPath) Len() int { return len(a) }
-func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
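
For reference, the exponent arithmetic used by the deleted parseNumber above can be reproduced with go/constant alone; this standalone sketch is illustrative and not part of the patch:

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

// pValue mirrors parseNumber's handling of "<mant>p<exp>" literals:
// the result is mant * 2**exp, kept exact via go/constant.
func pValue(mant string, exp int) constant.Value {
	m := constant.MakeFromLiteral(mant, token.INT, 0)
	if exp < 0 {
		// mant / 2**(-exp): an untyped float in the importer.
		denom := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
		return constant.BinaryOp(m, token.QUO, denom)
	}
	// mant * 2**exp
	return constant.Shift(m, token.SHL, uint(exp))
}

func main() {
	fmt.Println(pValue("3", -2).ExactString()) // 3/4
	fmt.Println(pValue("5", 3).ExactString())  // 40
}
```
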
diff --git a/go/packages/golist.go b/go/packages/golist.go
index 50533995a65..6bb7168d2e3 100644
--- a/go/packages/golist.go
+++ b/go/packages/golist.go
@@ -60,6 +60,7 @@ func (r *responseDeduper) addAll(dr *driverResponse) {
for _, root := range dr.Roots {
r.addRoot(root)
}
+ r.dr.GoVersion = dr.GoVersion
}
func (r *responseDeduper) addPackage(p *Package) {
@@ -302,11 +303,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries
}
dirResponse, err := state.createDriverResponse(pattern)
- // If there was an error loading the package, or the package is returned
- // with errors, try to load the file as an ad-hoc package.
+ // If there was an error loading the package, or no packages are returned,
+ // or the package is returned with errors, try to load the file as an
+ // ad-hoc package.
// Usually the error will appear in a returned package, but may not if we're
// in module mode and the ad-hoc is located outside a module.
- if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+ if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
len(dirResponse.Packages[0].Errors) == 1 {
var queryErr error
if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
@@ -453,11 +455,14 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
if err != nil {
return nil, err
}
+
seen := make(map[string]*jsonPackage)
pkgs := make(map[string]*Package)
additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form.
- var response driverResponse
+ response := &driverResponse{
+ GoVersion: goVersion,
+ }
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
@@ -599,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
// Work around https://golang.org/issue/28749:
// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
- // Filter out any elements of CompiledGoFiles that are also in OtherFiles.
- // We have to keep this workaround in place until go1.12 is a distant memory.
- if len(pkg.OtherFiles) > 0 {
- other := make(map[string]bool, len(pkg.OtherFiles))
- for _, f := range pkg.OtherFiles {
- other[f] = true
- }
-
+ // Remove files from CompiledGoFiles that are non-go files
+ // (or are not files that look like they are from the cache).
+ if len(pkg.CompiledGoFiles) > 0 {
out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles {
- if other[f] {
+ if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
continue
}
out = append(out, f)
@@ -729,7 +729,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
}
sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
- return &response, nil
+ return response, nil
}
func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
@@ -755,6 +755,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
}
+// getGoVersion returns the effective minor version of the go command.
func (state *golistState) getGoVersion() (int, error) {
state.goVersionOnce.Do(func() {
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
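
The new CompiledGoFiles workaround above keys on file extension rather than cross-referencing OtherFiles. A standalone sketch of that policy (the helper name is illustrative, not from the patch):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// keepCompiledGoFiles drops non-Go entries from a CompiledGoFiles-style
// list, keeping ".go" sources and extension-less paths, which come from
// the build cache and are probably cgo-processed files.
func keepCompiledGoFiles(files []string) []string {
	out := files[:0] // filter in place, as in the hunk above
	for _, f := range files {
		if ext := filepath.Ext(f); ext != ".go" && ext != "" {
			continue // e.g. assembly or C files reported by cmd/go
		}
		out = append(out, f)
	}
	return out
}

func main() {
	files := []string{"a.go", "a.s", "a.c", "/cache/ab/abc123"} // hypothetical inputs
	fmt.Println(keepCompiledGoFiles(files))                     // [a.go /cache/ab/abc123]
}
```
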
diff --git a/go/packages/overlay_test.go b/go/packages/overlay_test.go
index f2164c274e2..4318739eb79 100644
--- a/go/packages/overlay_test.go
+++ b/go/packages/overlay_test.go
@@ -109,8 +109,6 @@ func TestOverlayChangesTestPackageName(t *testing.T) {
testAllOrModulesParallel(t, testOverlayChangesTestPackageName)
}
func testOverlayChangesTestPackageName(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 16)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{{
Name: "fake",
Files: map[string]interface{}{
@@ -717,8 +715,6 @@ func TestInvalidFilesBeforeOverlay(t *testing.T) {
}
func testInvalidFilesBeforeOverlay(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -756,8 +752,6 @@ func TestInvalidFilesBeforeOverlayContains(t *testing.T) {
testAllOrModulesParallel(t, testInvalidFilesBeforeOverlayContains)
}
func testInvalidFilesBeforeOverlayContains(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -1046,6 +1040,7 @@ func Hi() {
// This does not use go/packagestest because it needs to write a replace
// directive with an absolute path in one of the module's go.mod files.
func TestOverlaysInReplace(t *testing.T) {
+ testenv.NeedsGoPackages(t)
t.Parallel()
// Create module b.com in a temporary directory. Do not add any Go files
diff --git a/go/packages/packages.go b/go/packages/packages.go
index a93dc6add4d..1d5f0e45b2b 100644
--- a/go/packages/packages.go
+++ b/go/packages/packages.go
@@ -15,10 +15,12 @@ import (
"go/scanner"
"go/token"
"go/types"
+ "io"
"io/ioutil"
"log"
"os"
"path/filepath"
+ "runtime"
"strings"
"sync"
"time"
@@ -233,6 +235,11 @@ type driverResponse struct {
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package
+
+ // GoVersion is the minor version number used by the driver
+ // (e.g. the go command on the PATH) when selecting .go files.
+ // Zero means unknown.
+ GoVersion int
}
// Load loads and returns the Go packages named by the given patterns.
@@ -256,7 +263,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
return nil, err
}
l.sizes = response.Sizes
- return l.refine(response.Roots, response.Packages...)
+ return l.refine(response)
}
// defaultDriver is a driver that implements go/packages' fallback behavior.
@@ -297,6 +304,9 @@ type Package struct {
// of the package, or while parsing or type-checking its files.
Errors []Error
+ // TypeErrors contains the subset of errors produced during type checking.
+ TypeErrors []types.Error
+
// GoFiles lists the absolute file paths of the package's Go source files.
GoFiles []string
@@ -532,6 +542,7 @@ type loaderPackage struct {
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
+ goVersion int // minor version number of go command on PATH
}
// loader holds the working state of a single call to load.
@@ -618,7 +629,8 @@ func newLoader(cfg *Config) *loader {
// refine connects the supplied packages into a graph and then adds type and
// syntax information as requested by the LoadMode.
-func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+ roots := response.Roots
rootMap := make(map[string]int, len(roots))
for i, root := range roots {
rootMap[root] = i
@@ -626,7 +638,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
ld.pkgs = make(map[string]*loaderPackage)
// first pass, fixup and build the map and roots
var initial = make([]*loaderPackage, len(roots))
- for _, pkg := range list {
+ for _, pkg := range response.Packages {
rootIndex := -1
if i, found := rootMap[pkg.ID]; found {
rootIndex = i
@@ -648,6 +660,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
Package: pkg,
needtypes: needtypes,
needsrc: needsrc,
+ goVersion: response.GoVersion,
}
ld.pkgs[lpkg.ID] = lpkg
if rootIndex >= 0 {
@@ -902,6 +915,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
case types.Error:
// from type checker
+ lpkg.TypeErrors = append(lpkg.TypeErrors, err)
errs = append(errs, Error{
Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg,
@@ -923,6 +937,35 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
lpkg.Errors = append(lpkg.Errors, errs...)
}
+ // If the go command on the PATH is newer than the runtime,
+ // then the go/{scanner,ast,parser,types} packages from the
+ // standard library may be unable to process the files
+ // selected by go list.
+ //
+ // There is currently no way to downgrade the effective
+ // version of the go command (see issue 52078), so we proceed
+ // with the newer go command but, in case of parse or type
+ // errors, we emit an additional diagnostic.
+ //
+ // See:
+ // - golang.org/issue/52078 (flag to set release tags)
+ // - golang.org/issue/50825 (gopls legacy version support)
+ // - golang.org/issue/55883 (go/packages confusing error)
+ //
+ // Should we assert a hard minimum of (currently) go1.16 here?
+ var runtimeVersion int
+ if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
+ defer func() {
+ if len(lpkg.Errors) > 0 {
+ appendError(Error{
+ Pos: "-",
+ Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
+ Kind: UnknownError,
+ })
+ }
+ }()
+ }
+
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
// The config requested loading sources and types, but sources are missing.
// Add an error to the package and fall back to loading from export data.
@@ -981,7 +1024,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
tc := &types.Config{
Importer: importer,
- // Type-check bodies of functions only in non-initial packages.
+ // Type-check bodies of functions only in initial packages.
// Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
@@ -1271,3 +1314,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}
+
+var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
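
The new diagnostic hinges on comparing the toolchain that compiled the application against the go command's minor version reported by the driver. A minimal sketch of that comparison; the driver value here is an assumed placeholder for driverResponse.GoVersion:

```go
package main

import (
	"fmt"
	"runtime"
)

// minor extracts N from a "go1.N..." version string; it returns 0 for
// devel builds that do not match the pattern, following the
// "zero means unknown" convention of driverResponse.GoVersion.
func minor(version string) int {
	var n int
	if _, err := fmt.Sscanf(version, "go1.%d", &n); err != nil {
		return 0
	}
	return n
}

func main() {
	toolchain := minor(runtime.Version()) // go/* packages compiled into this binary
	driver := 21                          // stand-in for driverResponse.GoVersion
	if toolchain != 0 && toolchain < driver {
		fmt.Printf("built with go1.%d but 'go list' is go1.%d; newer language features may fail to parse\n",
			toolchain, driver)
	}
}
```
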
diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go
index 796edb6b7b4..0da72851c76 100644
--- a/go/packages/packages_test.go
+++ b/go/packages/packages_test.go
@@ -2471,10 +2471,55 @@ func testIssue37098(t *testing.T, exporter packagestest.Exporter) {
}
}
+// TestIssue56632 checks that CompiledGoFiles does not contain non-go files regardless of
+// whether the NeedFiles mode bit is set.
+func TestIssue56632(t *testing.T) {
+ t.Parallel()
+ testenv.NeedsGoBuild(t)
+ testenv.NeedsTool(t, "cgo")
+
+ exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{
+ Name: "golang.org/issue56632",
+ Files: map[string]interface{}{
+ "a/a.go": `package a`,
+ "a/a_cgo.go": `package a
+
+import "C"`,
+ "a/a.s": ``,
+ "a/a.c": ``,
+ }}})
+ defer exported.Cleanup()
+
+ modes := []packages.LoadMode{packages.NeedCompiledGoFiles, packages.NeedCompiledGoFiles | packages.NeedFiles, packages.NeedImports | packages.NeedCompiledGoFiles, packages.NeedImports | packages.NeedFiles | packages.NeedCompiledGoFiles}
+ for _, mode := range modes {
+ exported.Config.Mode = mode
+
+ initial, err := packages.Load(exported.Config, "golang.org/issue56632/a")
+ if err != nil {
+ t.Fatalf("failed to load package: %v", err)
+ }
+
+ if len(initial) != 1 {
+ t.Errorf("expected 3 packages, got %d", len(initial))
+ }
+
+ p := initial[0]
+
+ if len(p.Errors) != 0 {
+ t.Errorf("expected no errors, got %v", p.Errors)
+ }
+
+ for _, f := range p.CompiledGoFiles {
+ if strings.HasSuffix(f, ".s") || strings.HasSuffix(f, ".c") {
+ t.Errorf("expected no non-Go CompiledGoFiles, got file %q in CompiledGoFiles", f)
+ }
+ }
+ }
+}
+
// TestInvalidFilesInXTest checks the fix for golang/go#37971 in Go 1.15.
func TestInvalidFilesInXTest(t *testing.T) { testAllOrModulesParallel(t, testInvalidFilesInXTest) }
func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -2501,7 +2546,6 @@ func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) {
func TestTypecheckCgo(t *testing.T) { testAllOrModulesParallel(t, testTypecheckCgo) }
func testTypecheckCgo(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
testenv.NeedsTool(t, "cgo")
const cgo = `package cgo
@@ -2673,8 +2717,6 @@ func TestInvalidPackageName(t *testing.T) {
}
func testInvalidPackageName(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{{
Name: "golang.org/fake",
Files: map[string]interface{}{
@@ -2709,6 +2751,31 @@ func TestEmptyEnvironment(t *testing.T) {
}
}
+func TestPackageLoadSingleFile(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "a")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ filename := filepath.Join(tmp, "a.go")
+
+ if err := ioutil.WriteFile(filename, []byte(`package main; func main() { println("hello world") }`), 0775); err != nil {
+ t.Fatal(err)
+ }
+
+ pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax, Dir: tmp}, "file="+filename)
+ if err != nil {
+ t.Fatalf("could not load package: %v", err)
+ }
+ if len(pkgs) != 1 {
+ t.Fatalf("expected one package to be loaded, got %d", len(pkgs))
+ }
+ if len(pkgs[0].CompiledGoFiles) != 1 || pkgs[0].CompiledGoFiles[0] != filename {
+ t.Fatalf("expected one compiled go file (%q), got %v", filename, pkgs[0].CompiledGoFiles)
+ }
+}
+
func errorMessages(errors []packages.Error) []string {
var msgs []string
for _, err := range errors {
diff --git a/go/packages/packagestest/expect.go b/go/packages/packagestest/expect.go
index 430258681f5..92c20a64a8d 100644
--- a/go/packages/packagestest/expect.go
+++ b/go/packages/packagestest/expect.go
@@ -16,7 +16,6 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/span"
)
const (
@@ -124,14 +123,31 @@ func (e *Exported) Expect(methods map[string]interface{}) error {
return nil
}
-// Range is a type alias for span.Range for backwards compatibility, prefer
-// using span.Range directly.
-type Range = span.Range
+// A Range represents an interval within a source file in go/token notation.
+type Range struct {
+ TokFile *token.File // non-nil
+ Start, End token.Pos // both valid and within range of TokFile
+}
+
+// A rangeSetter abstracts a variable that can be set from a Range value.
+//
+// The parameter conversion machinery will automatically construct a
+// variable of type T and call the SetRange method on its address if
+// *T implements rangeSetter. This allows alternative notations of
+// source ranges to interoperate transparently with this package.
+//
+// This type intentionally does not mention Range itself, to avoid a
+// dependency from the application's range type upon this package.
+//
+// Currently this is a secret back door for use only by gopls.
+type rangeSetter interface {
+ SetRange(file *token.File, start, end token.Pos)
+}
// Mark adds a new marker to the known set.
func (e *Exported) Mark(name string, r Range) {
if e.markers == nil {
- e.markers = make(map[string]span.Range)
+ e.markers = make(map[string]Range)
}
e.markers[name] = r
}
@@ -221,22 +237,22 @@ func (e *Exported) getMarkers() error {
return nil
}
// set markers early so that we don't call getMarkers again from Expect
- e.markers = make(map[string]span.Range)
+ e.markers = make(map[string]Range)
return e.Expect(map[string]interface{}{
markMethod: e.Mark,
})
}
var (
- noteType = reflect.TypeOf((*expect.Note)(nil))
- identifierType = reflect.TypeOf(expect.Identifier(""))
- posType = reflect.TypeOf(token.Pos(0))
- positionType = reflect.TypeOf(token.Position{})
- rangeType = reflect.TypeOf(span.Range{})
- spanType = reflect.TypeOf(span.Span{})
- fsetType = reflect.TypeOf((*token.FileSet)(nil))
- regexType = reflect.TypeOf((*regexp.Regexp)(nil))
- exportedType = reflect.TypeOf((*Exported)(nil))
+ noteType = reflect.TypeOf((*expect.Note)(nil))
+ identifierType = reflect.TypeOf(expect.Identifier(""))
+ posType = reflect.TypeOf(token.Pos(0))
+ positionType = reflect.TypeOf(token.Position{})
+ rangeType = reflect.TypeOf(Range{})
+ rangeSetterType = reflect.TypeOf((*rangeSetter)(nil)).Elem()
+ fsetType = reflect.TypeOf((*token.FileSet)(nil))
+ regexType = reflect.TypeOf((*regexp.Regexp)(nil))
+ exportedType = reflect.TypeOf((*Exported)(nil))
)
// converter converts from a marker's argument parsed from the comment to
@@ -295,17 +311,16 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) {
}
return reflect.ValueOf(r), remains, nil
}, nil
- case pt == spanType:
+ case reflect.PtrTo(pt).AssignableTo(rangeSetterType):
+ // (*pt).SetRange method exists: call it.
return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) {
r, remains, err := e.rangeConverter(n, args)
if err != nil {
return reflect.Value{}, nil, err
}
- spn, err := r.Span()
- if err != nil {
- return reflect.Value{}, nil, err
- }
- return reflect.ValueOf(spn), remains, nil
+ v := reflect.New(pt)
+ v.Interface().(rangeSetter).SetRange(r.TokFile, r.Start, r.End)
+ return v.Elem(), remains, nil
}, nil
case pt == identifierType:
return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) {
@@ -408,9 +423,10 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) {
}
}
-func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) {
+func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (Range, []interface{}, error) {
+ tokFile := e.ExpectFileSet.File(n.Pos)
if len(args) < 1 {
- return span.Range{}, nil, fmt.Errorf("missing argument")
+ return Range{}, nil, fmt.Errorf("missing argument")
}
arg := args[0]
args = args[1:]
@@ -419,37 +435,62 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang
// handle the special identifiers
switch arg {
case eofIdentifier:
- // end of file identifier, look up the current file
- f := e.ExpectFileSet.File(n.Pos)
- eof := f.Pos(f.Size())
- return span.NewRange(e.ExpectFileSet, eof, token.NoPos), args, nil
+ // end of file identifier
+ eof := tokFile.Pos(tokFile.Size())
+ return newRange(tokFile, eof, eof), args, nil
default:
// look up a marker by name
mark, ok := e.markers[string(arg)]
if !ok {
- return span.Range{}, nil, fmt.Errorf("cannot find marker %v", arg)
+ return Range{}, nil, fmt.Errorf("cannot find marker %v", arg)
}
return mark, args, nil
}
case string:
start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg)
if err != nil {
- return span.Range{}, nil, err
+ return Range{}, nil, err
}
- if start == token.NoPos {
- return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
+ if !start.IsValid() {
+ return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
}
- return span.NewRange(e.ExpectFileSet, start, end), args, nil
+ return newRange(tokFile, start, end), args, nil
case *regexp.Regexp:
start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg)
if err != nil {
- return span.Range{}, nil, err
+ return Range{}, nil, err
}
- if start == token.NoPos {
- return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
+ if !start.IsValid() {
+ return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
}
- return span.NewRange(e.ExpectFileSet, start, end), args, nil
+ return newRange(tokFile, start, end), args, nil
default:
- return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg)
+ return Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg)
+ }
+}
+
+// newRange creates a new Range from a token.File and two valid positions within it.
+func newRange(file *token.File, start, end token.Pos) Range {
+ fileBase := file.Base()
+ fileEnd := fileBase + file.Size()
+ if !start.IsValid() {
+ panic("invalid start token.Pos")
+ }
+ if !end.IsValid() {
+ panic("invalid end token.Pos")
+ }
+ if int(start) < fileBase || int(start) > fileEnd {
+ panic(fmt.Sprintf("invalid start: %d not in [%d, %d]", start, fileBase, fileEnd))
+ }
+ if int(end) < fileBase || int(end) > fileEnd {
+ panic(fmt.Sprintf("invalid end: %d not in [%d, %d]", end, fileBase, fileEnd))
+ }
+ if start > end {
+ panic("invalid start: greater than end")
+ }
+ return Range{
+ TokFile: file,
+ Start: start,
+ End: end,
}
}
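
To illustrate the rangeSetter back door introduced above: any parameter type whose pointer implements SetRange can receive marker ranges, without the application's range type depending on this package. The Span type below is hypothetical:

```go
package example

import "go/token"

// Span is a hypothetical application-side range representation.
// Because *Span implements SetRange, packagestest's converter can
// construct a Span from a marker range and pass it to an expectation
// function such as func(s Span) { ... } registered via Expect.
type Span struct {
	Filename   string
	Start, End token.Position
}

func (s *Span) SetRange(file *token.File, start, end token.Pos) {
	s.Filename = file.Name()
	s.Start = file.Position(start)
	s.End = file.Position(end)
}
```
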
diff --git a/go/packages/packagestest/expect_test.go b/go/packages/packagestest/expect_test.go
index 2587f580b06..46d96d61fb9 100644
--- a/go/packages/packagestest/expect_test.go
+++ b/go/packages/packagestest/expect_test.go
@@ -10,7 +10,6 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages/packagestest"
- "golang.org/x/tools/internal/span"
)
func TestExpect(t *testing.T) {
@@ -43,7 +42,7 @@ func TestExpect(t *testing.T) {
}
},
"directNote": func(n *expect.Note) {},
- "range": func(r span.Range) {
+ "range": func(r packagestest.Range) {
if r.Start == token.NoPos || r.Start == 0 {
t.Errorf("Range had no valid starting position")
}
diff --git a/go/packages/packagestest/export.go b/go/packages/packagestest/export.go
index 894dcdd445d..b687a44fb4f 100644
--- a/go/packages/packagestest/export.go
+++ b/go/packages/packagestest/export.go
@@ -79,7 +79,6 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/testenv"
)
@@ -129,7 +128,7 @@ type Exported struct {
primary string // the first non GOROOT module that was exported
written map[string]map[string]string // the full set of exported files
notes []*expect.Note // The list of expectations extracted from go source files
- markers map[string]span.Range // The set of markers extracted from go source files
+ markers map[string]Range // The set of markers extracted from go source files
}
// Exporter implementations are responsible for converting from the generic description of some
diff --git a/go/packages/packagestest/modules_test.go b/go/packages/packagestest/modules_test.go
index 6f627b1e5bd..de290ead94a 100644
--- a/go/packages/packagestest/modules_test.go
+++ b/go/packages/packagestest/modules_test.go
@@ -9,11 +9,9 @@ import (
"testing"
"golang.org/x/tools/go/packages/packagestest"
- "golang.org/x/tools/internal/testenv"
)
func TestModulesExport(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
exported := packagestest.Export(t, packagestest.Modules, testdata)
defer exported.Cleanup()
// Check that the cfg contains all the right bits
diff --git a/go/pointer/analysis.go b/go/pointer/analysis.go
index 35ad8abdb12..e3c85ede4f7 100644
--- a/go/pointer/analysis.go
+++ b/go/pointer/analysis.go
@@ -16,6 +16,7 @@ import (
"runtime"
"runtime/debug"
"sort"
+ "strings"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
@@ -377,12 +378,27 @@ func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee)
}
- // Warn about calls to non-intrinsic external functions.
+ // Warn about calls to functions that are handled unsoundly.
// TODO(adonovan): de-dup these messages.
- if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil {
+ fn := callee.fn
+
+ // Warn about calls to non-intrinsic external functions.
+ if fn.Blocks == nil && a.findIntrinsic(fn) == nil {
a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn)
a.warnf(fn.Pos(), " (declared here)")
}
+
+ // Warn about calls to generic function bodies.
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ a.warnf(site.pos(), "unsound call to generic function body: %s (build with ssa.InstantiateGenerics)", fn)
+ a.warnf(fn.Pos(), " (declared here)")
+ }
+
+ // Warn about calls to instantiation wrappers of generics functions.
+ if fn.Origin() != nil && strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") {
+ a.warnf(site.pos(), "unsound call to instantiation wrapper of generic: %s (build with ssa.InstantiateGenerics)", fn)
+ a.warnf(fn.Pos(), " (declared here)")
+ }
}
// dumpSolution writes the PTS solution to the specified file.
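
Per the warnings added above, generic function bodies and instantiation wrappers are handled unsoundly unless SSA is built with ssa.InstantiateGenerics. A sketch of that setup using existing x/tools APIs; the package path is a placeholder:

```go
package main

import (
	"log"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/pointer"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, err := packages.Load(cfg, "example.com/cmd/foo") // hypothetical main package
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(initial) > 0 {
		log.Fatal("packages contain errors")
	}

	// InstantiateGenerics builds instantiated bodies, so the pointer
	// analysis sees concrete code instead of skipped generic bodies.
	prog, _ := ssautil.AllPackages(initial, ssa.InstantiateGenerics)
	prog.Build()

	mains := ssautil.MainPackages(prog.AllPackages())
	result, err := pointer.Analyze(&pointer.Config{Mains: mains, BuildCallGraph: true})
	if err != nil {
		log.Fatal(err)
	}
	_ = result.CallGraph // use the call graph / points-to results
}
```
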
diff --git a/go/pointer/api.go b/go/pointer/api.go
index 9a4cc0af4a2..64de1100351 100644
--- a/go/pointer/api.go
+++ b/go/pointer/api.go
@@ -28,7 +28,11 @@ type Config struct {
// dependencies of any main package may still affect the
// analysis result, because they contribute runtime types and
// thus methods.
+ //
// TODO(adonovan): investigate whether this is desirable.
+ //
+ // Calls to generic functions will be unsound unless packages
+ // are built using the ssa.InstantiateGenerics builder mode.
Mains []*ssa.Package
// Reflection determines whether to handle reflection
diff --git a/go/pointer/doc.go b/go/pointer/doc.go
index d41346e699f..aca343b88e3 100644
--- a/go/pointer/doc.go
+++ b/go/pointer/doc.go
@@ -358,6 +358,14 @@ A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
type-unsafe combination (T1,V2). Treating the value and its concrete
type as inseparable makes the analysis type-safe.)
+Type parameters:
+
+Type parameters are not directly supported by the analysis.
+Calls to generic functions will be left as if they had empty bodies.
+Users of the package are expected to use the ssa.InstantiateGenerics
+builder mode when building code that uses or depends on code
+containing generics.
+
reflect.Value:
A reflect.Value is modelled very similar to an interface{}, i.e. as
diff --git a/go/pointer/gen.go b/go/pointer/gen.go
index 09705948d9c..bee656b6237 100644
--- a/go/pointer/gen.go
+++ b/go/pointer/gen.go
@@ -14,9 +14,11 @@ import (
"fmt"
"go/token"
"go/types"
+ "strings"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
var (
@@ -978,7 +980,10 @@ func (a *analysis) genInstr(cgn *cgnode, instr ssa.Instruction) {
a.sizeof(instr.Type()))
case *ssa.Index:
- a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type()))
+ _, isstring := typeparams.CoreType(instr.X.Type()).(*types.Basic)
+ if !isstring {
+ a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type()))
+ }
case *ssa.Select:
recv := a.valueOffsetNode(instr, 2) // instr : (index, recvOk, recv0, ... recv_n-1)
@@ -1202,6 +1207,19 @@ func (a *analysis) genFunc(cgn *cgnode) {
return
}
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ // Body of generic function.
+ // We'll warn about calls to such functions at the end.
+ return
+ }
+
+ if strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") {
+ // instantiation wrapper of a generic function.
+ // These may contain type coercions which are not currently supported.
+ // We'll warn about calls to such functions at the end.
+ return
+ }
+
if a.log != nil {
fmt.Fprintln(a.log, "; Creating nodes for local values")
}
diff --git a/go/pointer/pointer_race_test.go b/go/pointer/pointer_race_test.go
new file mode 100644
index 00000000000..d3c9b475e25
--- /dev/null
+++ b/go/pointer/pointer_race_test.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+// +build race
+
+package pointer_test
+
+func init() {
+ raceEnabled = true
+}
diff --git a/go/pointer/pointer_test.go b/go/pointer/pointer_test.go
index 47074f620f2..1fa54f6e8f7 100644
--- a/go/pointer/pointer_test.go
+++ b/go/pointer/pointer_test.go
@@ -66,6 +66,8 @@ var inputs = []string{
// "testdata/timer.go", // TODO(adonovan): fix broken assumptions about runtime timers
}
+var raceEnabled = false
+
// Expectation grammar:
//
// @calls f -> g
@@ -238,9 +240,14 @@ func doOneInput(t *testing.T, input, fpath string) bool {
// Find all calls to the built-in print(x). Analytically,
// print is a no-op, but it's a convenient hook for testing
// the PTS of an expression, so our tests use it.
+ // Exclude generic bodies as these should be dead code for pointer.
+ // Instances of generics are included.
probes := make(map[*ssa.CallCommon]bool)
for fn := range ssautil.AllFunctions(prog) {
- // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if _Origin is exported.
+ if isGenericBody(fn) {
+ continue // skip generic bodies
+ }
+ // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if Origin is exported.
if fn.Pkg == mainpkg || (fn.Pkg == nil && mainFiles[prog.Fset.File(fn.Pos())]) {
for _, b := range fn.Blocks {
for _, instr := range b.Instrs {
@@ -609,10 +616,6 @@ func TestInput(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113")
}
- if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 {
- t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113")
- }
- ok := true
wd, err := os.Getwd()
if err != nil {
@@ -627,24 +630,44 @@ func TestInput(t *testing.T) {
fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd)
for _, filename := range inputs {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Errorf("couldn't read file '%s': %s", filename, err)
- continue
- }
+ filename := filename
+ t.Run(filename, func(t *testing.T) {
+ if filename == "testdata/a_test.go" {
+ // For some reason this particular file is way more expensive than the others.
+ if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 {
+ t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113")
+ }
+ if raceEnabled {
+ t.Skip("skipping memory-intensive test under race detector; https://golang.org/issue/14113")
+ }
+ } else {
+ t.Parallel()
+ }
- fpath, err := filepath.Abs(filename)
- if err != nil {
- t.Errorf("couldn't get absolute path for '%s': %s", filename, err)
- }
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("couldn't read file '%s': %s", filename, err)
+ }
- if !doOneInput(t, string(content), fpath) {
- ok = false
- }
+ fpath, err := filepath.Abs(filename)
+ if err != nil {
+ t.Fatalf("couldn't get absolute path for '%s': %s", filename, err)
+ }
+
+ if !doOneInput(t, string(content), fpath) {
+ t.Fail()
+ }
+ })
}
- if !ok {
- t.Fail()
+}
+
+// isGenericBody returns true if fn is the body of a generic function.
+func isGenericBody(fn *ssa.Function) bool {
+ sig := fn.Signature
+ if typeparams.ForSignature(sig).Len() > 0 || typeparams.RecvTypeParams(sig).Len() > 0 {
+ return fn.Synthetic == ""
}
+ return false
}
// join joins the elements of multiset with " | "s.
diff --git a/go/pointer/reflect.go b/go/pointer/reflect.go
index efb11b00096..3762dd8d401 100644
--- a/go/pointer/reflect.go
+++ b/go/pointer/reflect.go
@@ -1024,7 +1024,7 @@ func ext۰reflect۰ChanOf(a *analysis, cgn *cgnode) {
var dir reflect.ChanDir // unknown
if site := cgn.callersite; site != nil {
if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
- v, _ := constant.Int64Val(c.Value)
+ v := c.Int64()
if 0 <= v && v <= int64(reflect.BothDir) {
dir = reflect.ChanDir(v)
}
@@ -1751,8 +1751,7 @@ func ext۰reflect۰rtype۰InOut(a *analysis, cgn *cgnode, out bool) {
index := -1
if site := cgn.callersite; site != nil {
if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
- v, _ := constant.Int64Val(c.Value)
- index = int(v)
+ index = int(c.Int64())
}
}
a.addConstraint(&rtypeInOutConstraint{
diff --git a/go/pointer/util.go b/go/pointer/util.go
index 5fec1fc4ed5..17728aa06ac 100644
--- a/go/pointer/util.go
+++ b/go/pointer/util.go
@@ -8,12 +8,13 @@ import (
"bytes"
"fmt"
"go/types"
- exec "golang.org/x/sys/execabs"
"log"
"os"
"runtime"
"time"
+ exec "golang.org/x/sys/execabs"
+
"golang.org/x/tools/container/intsets"
)
@@ -125,7 +126,7 @@ func (a *analysis) flatten(t types.Type) []*fieldInfo {
// Debuggability hack: don't remove
// the named type from interfaces as
// they're very verbose.
- fl = append(fl, &fieldInfo{typ: t})
+ fl = append(fl, &fieldInfo{typ: t}) // t may be a type param
} else {
fl = a.flatten(u)
}
diff --git a/go/ssa/TODO b/go/ssa/TODO
new file mode 100644
index 00000000000..6c35253c73c
--- /dev/null
+++ b/go/ssa/TODO
@@ -0,0 +1,16 @@
+-*- text -*-
+
+SSA Generics to-do list
+===========================
+
+DOCUMENTATION:
+- Read me for internals
+
+TYPE PARAMETERIZED GENERIC FUNCTIONS:
+- sanity.go updates.
+- Check source functions going to generics.
+- Tests, tests, tests...
+
+USAGE:
+- Back fill users for handling ssa.InstantiateGenerics being off.
+
diff --git a/go/ssa/builder.go b/go/ssa/builder.go
index b36775a4e34..be8d36a6eeb 100644
--- a/go/ssa/builder.go
+++ b/go/ssa/builder.go
@@ -101,6 +101,9 @@ package ssa
//
// This is a low level operation for creating functions that do not exist in
// the source. Use with caution.
+//
+// TODO(taking): Use consistent terminology for "concrete".
+// TODO(taking): Use consistent terminology for "monomorphization"/"instantiate"/"expand".
import (
"fmt"
@@ -272,7 +275,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value {
return fn.emit(&c)
case *ast.IndexExpr:
- mapt := fn.typeOf(e.X).Underlying().(*types.Map)
+ mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map.
lookup := &Lookup{
X: b.expr(fn, e.X),
Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
@@ -309,7 +312,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
typ = fn.typ(typ)
switch obj.Name() {
case "make":
- switch typ.Underlying().(type) {
+ switch ct := typeparams.CoreType(typ).(type) {
case *types.Slice:
n := b.expr(fn, args[1])
m := n
@@ -319,7 +322,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
if m, ok := m.(*Const); ok {
// treat make([]T, n, m) as new([m]T)[:n]
cap := m.Int64()
- at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap)
+ at := types.NewArray(ct.Elem(), cap)
alloc := emitNew(fn, at, pos)
alloc.Comment = "makeslice"
v := &Slice{
@@ -370,6 +373,8 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
// We must still evaluate the value, though. (If it
// was side-effect free, the whole call would have
// been constant-folded.)
+ //
+ // Type parameters are always non-constant so use Underlying.
t := deref(fn.typeOf(args[0])).Underlying()
if at, ok := t.(*types.Array); ok {
b.expr(fn, args[0]) // for effects only
@@ -453,47 +458,57 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
}
wantAddr := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel)
- last := len(sel.index) - 1
- return &address{
- addr: emitFieldSelection(fn, v, sel.index[last], true, e.Sel),
- pos: e.Sel.Pos(),
- expr: e.Sel,
+ index := sel.index[len(sel.index)-1]
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
+
+ // Due to the two phases of resolving AssignStmt, a panic from x.f = p()
+ // when x is nil is required to come after the side-effects of
+ // evaluating x and p().
+ emit := func(fn *Function) Value {
+ return emitFieldSelection(fn, v, index, true, e.Sel)
}
+ return &lazyAddress{addr: emit, t: fld.Type(), pos: e.Sel.Pos(), expr: e.Sel}
case *ast.IndexExpr:
+ xt := fn.typeOf(e.X)
+ elem, mode := indexType(xt)
var x Value
var et types.Type
- switch t := fn.typeOf(e.X).Underlying().(type) {
- case *types.Array:
+ switch mode {
+ case ixArrVar: // array, array|slice, array|*array, or array|*array|slice.
x = b.addr(fn, e.X, escaping).address(fn)
- et = types.NewPointer(t.Elem())
- case *types.Pointer: // *array
- x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())
- case *types.Slice:
+ et = types.NewPointer(elem)
+ case ixVar: // *array, slice, *array|slice
x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem())
- case *types.Map:
+ et = types.NewPointer(elem)
+ case ixMap:
+ mt := typeparams.CoreType(xt).(*types.Map)
return &element{
m: b.expr(fn, e.X),
- k: emitConv(fn, b.expr(fn, e.Index), t.Key()),
- t: t.Elem(),
+ k: emitConv(fn, b.expr(fn, e.Index), mt.Key()),
+ t: mt.Elem(),
pos: e.Lbrack,
}
default:
- panic("unexpected container type in IndexExpr: " + t.String())
+ panic("unexpected container type in IndexExpr: " + xt.String())
}
index := b.expr(fn, e.Index)
if isUntyped(index.Type()) {
index = emitConv(fn, index, tInt)
}
- v := &IndexAddr{
- X: x,
- Index: index,
+ // Due to the two phases of resolving AssignStmt, a panic from x[i] = p()
+ // when x is nil or i is out-of-bounds is required to come after the
+ // side-effects of evaluating x, i and p().
+ emit := func(fn *Function) Value {
+ v := &IndexAddr{
+ X: x,
+ Index: index,
+ }
+ v.setPos(e.Lbrack)
+ v.setType(et)
+ return fn.emit(v)
}
- v.setPos(e.Lbrack)
- v.setType(et)
- return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e}
+ return &lazyAddress{addr: emit, t: deref(et), pos: e.Lbrack, expr: e}
case *ast.StarExpr:
return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e}
@@ -552,7 +567,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *
}
if _, ok := loc.(*address); ok {
- if isInterface(loc.typ()) {
+ if isNonTypeParamInterface(loc.typ()) {
// e.g. var x interface{} = T{...}
// Can't in-place initialize an interface value.
// Fall back to copying.
@@ -622,18 +637,19 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *ast.FuncLit:
fn2 := &Function{
- name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
- Signature: fn.typeOf(e.Type).Underlying().(*types.Signature),
- pos: e.Type.Func,
- parent: fn,
- Pkg: fn.Pkg,
- Prog: fn.Prog,
- syntax: e,
- _Origin: nil, // anon funcs do not have an origin.
- _TypeParams: fn._TypeParams, // share the parent's type parameters.
- _TypeArgs: fn._TypeArgs, // share the parent's type arguments.
- info: fn.info,
- subst: fn.subst, // share the parent's type substitutions.
+ name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
+ Signature: fn.typeOf(e.Type).(*types.Signature),
+ pos: e.Type.Func,
+ parent: fn,
+ anonIdx: int32(len(fn.AnonFuncs)),
+ Pkg: fn.Pkg,
+ Prog: fn.Prog,
+ syntax: e,
+ topLevelOrigin: nil, // use anonIdx to lookup an anon instance's origin.
+ typeparams: fn.typeparams, // share the parent's type parameters.
+ typeargs: fn.typeargs, // share the parent's type arguments.
+ info: fn.info,
+ subst: fn.subst, // share the parent's type substitutions.
}
fn.AnonFuncs = append(fn.AnonFuncs, fn2)
b.created.Add(fn2)
@@ -669,6 +685,8 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
y.pos = e.Lparen
case *SliceToArrayPointer:
y.pos = e.Lparen
+ case *UnOp: // conversion from slice to array.
+ y.pos = e.Lparen
}
}
return y
@@ -733,14 +751,20 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *ast.SliceExpr:
var low, high, max Value
var x Value
- switch fn.typeOf(e.X).Underlying().(type) {
+ xtyp := fn.typeOf(e.X)
+ switch typeparams.CoreType(xtyp).(type) {
case *types.Array:
// Potentially escaping.
x = b.addr(fn, e.X, true).address(fn)
case *types.Basic, *types.Slice, *types.Pointer: // *array
x = b.expr(fn, e.X)
default:
- panic("unreachable")
+ // core type exception?
+ if isBytestring(xtyp) {
+ x = b.expr(fn, e.X) // bytestring is handled as string and []byte.
+ } else {
+ panic("unexpected sequence type in SliceExpr")
+ }
}
if e.Low != nil {
low = b.expr(fn, e.Low)
@@ -768,7 +792,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *types.Builtin:
return &Builtin{name: obj.Name(), sig: fn.instanceType(e).(*types.Signature)}
case *types.Nil:
- return nilConst(fn.instanceType(e))
+ return zeroConst(fn.instanceType(e))
}
// Package-level func or var?
if v := fn.Prog.packageLevelMember(obj); v != nil {
@@ -776,7 +800,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
return emitLoad(fn, g) // var (address)
}
callee := v.(*Function) // (func)
- if len(callee._TypeParams) > 0 {
+ if callee.typeparams.Len() > 0 {
targs := fn.subst.types(instanceArgs(fn.info, e))
callee = fn.Prog.needsInstance(callee, targs, b.created)
}
@@ -810,11 +834,32 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
wantAddr := isPointer(rt)
escaping := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel)
- if isInterface(rt) {
- // If v has interface type I,
+
+ if types.IsInterface(rt) {
+ // If v may be an interface type I (after instantiating),
// we must emit a check that v is non-nil.
- // We use: typeassert v.(I).
- emitTypeAssert(fn, v, rt, token.NoPos)
+ if recv, ok := sel.recv.(*typeparams.TypeParam); ok {
+ // Emit a nil check if any possible instantiation of the
+ // type parameter is an interface type.
+ if typeSetOf(recv).Len() > 0 {
+ // recv has a concrete term in its typeset.
+ // So it cannot be instantiated as an interface.
+ //
+ // Example:
+ // func _[T interface{~int; Foo()}] () {
+ // var v T
+ // _ = v.Foo // <-- MethodVal
+ // }
+ } else {
+ // rt may be instantiated as an interface.
+ // Emit nil check: typeassert (any(v)).(any).
+ emitTypeAssert(fn, emitConv(fn, v, tEface), tEface, token.NoPos)
+ }
+ } else {
+ // non-type param interface
+ // Emit nil check: typeassert v.(I).
+ emitTypeAssert(fn, v, rt, token.NoPos)
+ }
}
if targs := receiverTypeArgs(obj); len(targs) > 0 {
// obj is generic.
@@ -851,9 +896,17 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases.
}
// not a generic instantiation.
- switch t := fn.typeOf(e.X).Underlying().(type) {
- case *types.Array:
- // Non-addressable array (in a register).
+ xt := fn.typeOf(e.X)
+ switch et, mode := indexType(xt); mode {
+ case ixVar:
+ // Addressable slice/array; use IndexAddr and Load.
+ return b.addr(fn, e, false).load(fn)
+
+ case ixArrVar, ixValue:
+ // An array in a register, a string or a combined type that contains
+ // either an [_]array (ixArrVar) or string (ixValue).
+
+ // Note: an ixArrVar index with CoreType(xt)==nil could also be handled with IndexAddr and Load.
index := b.expr(fn, e.Index)
if isUntyped(index.Type()) {
index = emitConv(fn, index, tInt)
@@ -863,38 +916,20 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
Index: index,
}
v.setPos(e.Lbrack)
- v.setType(t.Elem())
+ v.setType(et)
return fn.emit(v)
- case *types.Map:
+ case ixMap:
+ ct := typeparams.CoreType(xt).(*types.Map)
v := &Lookup{
X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), t.Key()),
+ Index: emitConv(fn, b.expr(fn, e.Index), ct.Key()),
}
v.setPos(e.Lbrack)
- v.setType(t.Elem())
+ v.setType(ct.Elem())
return fn.emit(v)
-
- case *types.Basic: // => string
- // Strings are not addressable.
- index := b.expr(fn, e.Index)
- if isUntyped(index.Type()) {
- index = emitConv(fn, index, tInt)
- }
- v := &Lookup{
- X: b.expr(fn, e.X),
- Index: index,
- }
- v.setPos(e.Lbrack)
- v.setType(tByte)
- return fn.emit(v)
-
- case *types.Slice, *types.Pointer: // *array
- // Addressable slice/array; use IndexAddr and Load.
- return b.addr(fn, e, false).load(fn)
-
default:
- panic("unexpected container type in IndexExpr: " + t.String())
+ panic("unexpected container type in IndexExpr: " + xt.String())
}
case *ast.CompositeLit, *ast.StarExpr:
@@ -955,14 +990,14 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
wantAddr := isPointer(recv)
escaping := true
v := b.receiver(fn, selector.X, wantAddr, escaping, sel)
- if isInterface(recv) {
+ if types.IsInterface(recv) {
// Invoke-mode call.
- c.Value = v
+ c.Value = v // possibly type param
c.Method = obj
} else {
// "Call"-mode call.
callee := fn.Prog.originFunc(obj)
- if len(callee._TypeParams) > 0 {
+ if callee.typeparams.Len() > 0 {
callee = fn.Prog.needsInstance(callee, receiverTypeArgs(obj), b.created)
}
c.Value = callee
@@ -1053,7 +1088,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx
st := sig.Params().At(np).Type().(*types.Slice)
vt := st.Elem()
if len(varargs) == 0 {
- args = append(args, nilConst(st))
+ args = append(args, zeroConst(st))
} else {
// Replace a suffix of args with a slice containing it.
at := types.NewArray(vt, int64(len(varargs)))
@@ -1085,7 +1120,7 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
b.setCallFunc(fn, e, c)
// Then append the other actual parameters.
- sig, _ := fn.typeOf(e.Fun).Underlying().(*types.Signature)
+ sig, _ := typeparams.CoreType(fn.typeOf(e.Fun)).(*types.Signature)
if sig == nil {
panic(fmt.Sprintf("no signature for call of %s", e.Fun))
}
@@ -1218,8 +1253,32 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
// literal has type *T behaves like &T{}.
// In that case, addr must hold a T, not a *T.
func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) {
- typ := deref(fn.typeOf(e))
- switch t := typ.Underlying().(type) {
+ typ := deref(fn.typeOf(e)) // type with name [may be type param]
+ t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case
+ // Computing typ and t is subtle as these handle pointer types.
+ // For example, &T{...} is valid even for maps and slices.
+ // Also typ should refer to T (not *T) while t should be the core type of T.
+ //
+ // To see why the order of these operations matters, consider the composite literal
+ // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here:
+ // type N struct{f int}
+ // func _[T N, S *N]() {
+ // _ = &T{f: 1}
+ // _ = []S{{f: 1}}
+ // }
+ // For `&T{f: 1}`, we compute `typ` and `t` as:
+ // typeOf(&T{f: 1}) == *T
+ // deref(*T) == T (typ)
+ // CoreType(T) == N
+ // deref(N) == N
+ // N.Underlying() == struct{f int} (t)
+ // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as:
+ // typeOf({f: 1}) == S
+ // deref(S) == S (typ)
+ // CoreType(S) == *N
+ // deref(*N) == N
+ // N.Underlying() == struct{f int} (t)
+ switch t := t.(type) {
case *types.Struct:
if !isZero && len(e.Elts) != t.NumFields() {
// memclear
@@ -1247,6 +1306,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
X: addr,
Field: fieldIndex,
}
+ faddr.setPos(pos)
faddr.setType(types.NewPointer(sf.Type()))
fn.emit(faddr)
b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb)
@@ -1517,7 +1577,7 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl
casetype = fn.typeOf(cond)
var condv Value
if casetype == tUntypedNil {
- condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), cond.Pos())
+ condv = emitCompare(fn, token.EQL, x, zeroConst(x.Type()), cond.Pos())
ti = x
} else {
yok := emitTypeTest(fn, x, casetype, cc.Case)
@@ -1600,7 +1660,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
case *ast.SendStmt: // ch<- i
ch := b.expr(fn, comm.Chan)
- chtyp := fn.typ(ch.Type()).Underlying().(*types.Chan)
+ chtyp := typeparams.CoreType(fn.typ(ch.Type())).(*types.Chan)
st = &SelectState{
Dir: types.SendOnly,
Chan: ch,
@@ -1657,9 +1717,8 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
vars = append(vars, varIndex, varOk)
for _, st := range states {
if st.Dir == types.RecvOnly {
- chtyp := fn.typ(st.Chan.Type()).Underlying().(*types.Chan)
- tElem := chtyp.Elem()
- vars = append(vars, anonVar(tElem))
+ chtyp := typeparams.CoreType(fn.typ(st.Chan.Type())).(*types.Chan)
+ vars = append(vars, anonVar(chtyp.Elem()))
}
}
sel.setType(types.NewTuple(vars...))
@@ -1823,6 +1882,8 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P
// elimination if x is pure, static unrolling, etc.
// Ranging over a nil *array may have >0 iterations.
// We still generate code for x, in case it has effects.
+ //
+ // TypeParams do not have constant length. Use underlying instead of core type.
length = intConst(arr.Len())
} else {
// length = len(x).
@@ -1855,7 +1916,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P
k = emitLoad(fn, index)
if tv != nil {
- switch t := x.Type().Underlying().(type) {
+ switch t := typeparams.CoreType(x.Type()).(type) {
case *types.Array:
instr := &Index{
X: x,
@@ -1925,11 +1986,9 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.
emitJump(fn, loop)
fn.currentBlock = loop
- _, isString := x.Type().Underlying().(*types.Basic)
-
okv := &Next{
Iter: it,
- IsString: isString,
+ IsString: isBasic(typeparams.CoreType(x.Type())),
}
okv.setType(types.NewTuple(
varOk,
@@ -1979,7 +2038,7 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos)
}
recv.setPos(pos)
recv.setType(types.NewTuple(
- newVar("k", x.Type().Underlying().(*types.Chan).Elem()),
+ newVar("k", typeparams.CoreType(x.Type()).(*types.Chan).Elem()),
varOk,
))
ko := fn.emit(recv)
@@ -2023,7 +2082,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
var k, v Value
var loop, done *BasicBlock
- switch rt := x.Type().Underlying().(type) {
+ switch rt := typeparams.CoreType(x.Type()).(type) {
case *types.Slice, *types.Array, *types.Pointer: // *array
k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For)
@@ -2101,11 +2160,11 @@ start:
b.expr(fn, s.X)
case *ast.SendStmt:
+ chtyp := typeparams.CoreType(fn.typeOf(s.Chan)).(*types.Chan)
fn.emit(&Send{
Chan: b.expr(fn, s.Chan),
- X: emitConv(fn, b.expr(fn, s.Value),
- fn.typeOf(s.Chan).Underlying().(*types.Chan).Elem()),
- pos: s.Arrow,
+ X: emitConv(fn, b.expr(fn, s.Value), chtyp.Elem()),
+ pos: s.Arrow,
})
case *ast.IncDecStmt:
@@ -2283,11 +2342,9 @@ func (b *builder) buildFunctionBody(fn *Function) {
var functype *ast.FuncType
switch n := fn.syntax.(type) {
case nil:
- // TODO(taking): Temporarily this can be the body of a generic function.
if fn.Params != nil {
return // not a Go source function. (Synthetic, or from object file.)
}
- // fn.Params == nil is handled within body == nil case.
case *ast.FuncDecl:
functype = n.Type
recvField = n.Recv
@@ -2319,6 +2376,13 @@ func (b *builder) buildFunctionBody(fn *Function) {
}
return
}
+
+ // Build instantiation wrapper around generic body?
+ if fn.topLevelOrigin != nil && fn.subst == nil {
+ buildInstantiationWrapper(fn)
+ return
+ }
+
if fn.Prog.mode&LogSource != 0 {
defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))()
}
@@ -2423,7 +2487,17 @@ func (p *Package) build() {
// TODO(adonovan): ideally belongs in memberFromObject, but
// that would require package creation in topological order.
for name, mem := range p.Members {
- if ast.IsExported(name) && !isGeneric(mem) {
+ isGround := func(m Member) bool {
+ switch m := m.(type) {
+ case *Type:
+ named, _ := m.Type().(*types.Named)
+ return named == nil || typeparams.ForNamed(named) == nil
+ case *Function:
+ return m.typeparams.Len() == 0
+ }
+ return true // *NamedConst, *Global
+ }
+ if ast.IsExported(name) && isGround(mem) {
p.Prog.needMethodsOf(mem.Type(), &p.created)
}
}
@@ -2461,6 +2535,9 @@ func (p *Package) build() {
}
// Initialize package-level vars in correct order.
+ if len(p.info.InitOrder) > 0 && len(p.files) == 0 {
+ panic("no source files provided for package. cannot initialize globals")
+ }
for _, varinit := range p.info.InitOrder {
if init.Prog.mode&LogSource != 0 {
fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n",
diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go
new file mode 100644
index 00000000000..2588f74c5f9
--- /dev/null
+++ b/go/ssa/builder_generic_test.go
@@ -0,0 +1,679 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "fmt"
+ "go/parser"
+ "go/token"
+ "reflect"
+ "sort"
+ "testing"
+
+ "golang.org/x/tools/go/expect"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// TestGenericBodies tests that bodies of generic functions and methods containing
+// different constructs can be built in BuilderMode(0).
+//
+// Each test specifies the contents of a package containing a single Go file.
+// Each call print(arg0, arg1, ...) to the builtin print function
+// in ssa is correlated with a comment at the end of the line of the form:
+//
+// //@ types(a, b, c)
+//
+// where a, b and c are the types of the arguments to the print call
+// serialized using go/types.Type.String().
+// See x/tools/go/expect for details on the syntax.
+func TestGenericBodies(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericBodies requires type parameters")
+ }
+ for _, test := range []struct {
+ pkg string // name of the package.
+ contents string // contents of the Go package.
+ }{
+ {
+ pkg: "p",
+ contents: `
+ package p
+
+ func f(x int) {
+ var i interface{}
+ print(i, 0) //@ types("interface{}", int)
+ print() //@ types()
+ print(x) //@ types(int)
+ }
+ `,
+ },
+ {
+ pkg: "q",
+ contents: `
+ package q
+
+ func f[T any](x T) {
+ print(x) //@ types(T)
+ }
+ `,
+ },
+ {
+ pkg: "r",
+ contents: `
+ package r
+
+ func f[T ~int]() {
+ var x T
+ print(x) //@ types(T)
+ }
+ `,
+ },
+ {
+ pkg: "s",
+ contents: `
+ package s
+
+ func a[T ~[4]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func b[T ~*[4]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func c[T ~[]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func d[T ~string](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, rune)
+ }
+ }
+ func e[T ~map[int]string](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, string)
+ }
+ }
+ func f[T ~chan string](x T) {
+ for v := range x {
+ print(x, v) //@ types(T, string)
+ }
+ }
+
+ func From() {
+ type A [4]byte
+ print(a[A]) //@ types("func(x s.A)")
+
+ type B *[4]byte
+ print(b[B]) //@ types("func(x s.B)")
+
+ type C []byte
+ print(c[C]) //@ types("func(x s.C)")
+
+ type D string
+ print(d[D]) //@ types("func(x s.D)")
+
+ type E map[int]string
+ print(e[E]) //@ types("func(x s.E)")
+
+ type F chan string
+ print(f[F]) //@ types("func(x s.F)")
+ }
+ `,
+ },
+ {
+ pkg: "t",
+ contents: `
+ package t
+
+ func f[S any, T ~chan S](x T) {
+ for v := range x {
+ print(x, v) //@ types(T, S)
+ }
+ }
+
+ func From() {
+ type F chan string
+ print(f[string, F]) //@ types("func(x t.F)")
+ }
+ `,
+ },
+ {
+ pkg: "u",
+ contents: `
+ package u
+
+ func fibonacci[T ~chan int](c, quit T) {
+ x, y := 0, 1
+ for {
+ select {
+ case c <- x:
+ x, y = y, x+y
+ case <-quit:
+ print(c, quit, x, y) //@ types(T, T, int, int)
+ return
+ }
+ }
+ }
+ func start[T ~chan int](c, quit T) {
+ go func() {
+ for i := 0; i < 10; i++ {
+ print(<-c) //@ types(int)
+ }
+ quit <- 0
+ }()
+ }
+ func From() {
+ type F chan int
+ c := make(F)
+ quit := make(F)
+ print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F")
+ print(fibonacci[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F")
+ }
+ `,
+ },
+ {
+ pkg: "v",
+ contents: `
+ package v
+
+ func f[T ~struct{ x int; y string }](i int) T {
+ u := []T{ T{0, "lorem"}, T{1, "ipsum"}}
+ return u[i]
+ }
+ func From() {
+ type S struct{ x int; y string }
+ print(f[S]) //@ types("func(i int) v.S")
+ }
+ `,
+ },
+ {
+ pkg: "w",
+ contents: `
+ package w
+
+ func f[T ~[4]int8](x T, l, h int) []int8 {
+ return x[l:h]
+ }
+ func g[T ~*[4]int16](x T, l, h int) []int16 {
+ return x[l:h]
+ }
+ func h[T ~[]int32](x T, l, h int) T {
+ return x[l:h]
+ }
+ func From() {
+ type F [4]int8
+ type G *[4]int16
+ type H []int32
+ print(f[F](F{}, 0, 0)) //@ types("[]int8")
+ print(g[G](nil, 0, 0)) //@ types("[]int16")
+ print(h[H](nil, 0, 0)) //@ types("w.H")
+ }
+ `,
+ },
+ {
+ pkg: "x",
+ contents: `
+ package x
+
+ func h[E any, T ~[]E](x T, l, h int) []E {
+ s := x[l:h]
+ print(s) //@ types("T")
+ return s
+ }
+ func From() {
+ type H []int32
+ print(h[int32, H](nil, 0, 0)) //@ types("[]int32")
+ }
+ `,
+ },
+ {
+ pkg: "y",
+ contents: `
+ package y
+
+ // Test "make" builtin with different forms on core types and
+ // when capacities are constants or variable.
+ func h[E any, T ~[]E](m, n int) {
+ print(make(T, 3)) //@ types(T)
+ print(make(T, 3, 5)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ print(make(T, m, n)) //@ types(T)
+ }
+ func i[K comparable, E any, T ~map[K]E](m int) {
+ print(make(T)) //@ types(T)
+ print(make(T, 5)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ }
+ func j[E any, T ~chan E](m int) {
+ print(make(T)) //@ types(T)
+ print(make(T, 6)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ }
+ func From() {
+ type H []int32
+ h[int32, H](3, 4)
+ type I map[int8]H
+ i[int8, H, I](5)
+ type J chan I
+ j[I, J](6)
+ }
+ `,
+ },
+ {
+ pkg: "z",
+ contents: `
+ package z
+
+ func h[T ~[4]int](x T) {
+ print(len(x), cap(x)) //@ types(int, int)
+ }
+ func i[T ~[4]byte | []int | ~chan uint8](x T) {
+ print(len(x), cap(x)) //@ types(int, int)
+ }
+ func j[T ~[4]int | any | map[string]int]() {
+ print(new(T)) //@ types("*T")
+ }
+ func k[T ~[4]int | any | map[string]int](x T) {
+ print(x) //@ types(T)
+ panic(x)
+ }
+ `,
+ },
+ {
+ pkg: "a",
+ contents: `
+ package a
+
+ func f[E any, F ~func() E](x F) {
+ print(x, x()) //@ types(F, E)
+ }
+ func From() {
+ type T func() int
+ f[int, T](func() int { return 0 })
+ f[int, func() int](func() int { return 1 })
+ }
+ `,
+ },
+ {
+ pkg: "b",
+ contents: `
+ package b
+
+ func f[E any, M ~map[string]E](m M) {
+ y, ok := m["lorem"]
+ print(m, y, ok) //@ types(M, E, bool)
+ }
+ func From() {
+ type O map[string][]int
+ f(O{"lorem": []int{0, 1, 2, 3}})
+ }
+ `,
+ },
+ {
+ pkg: "c",
+ contents: `
+ package c
+
+ func a[T interface{ []int64 | [5]int64 }](x T) int64 {
+ print(x, x[2], x[3]) //@ types(T, int64, int64)
+ x[2] = 5
+ return x[3]
+ }
+ func b[T interface{ []byte | string }](x T) byte {
+ print(x, x[3]) //@ types(T, byte)
+ return x[3]
+ }
+ func c[T interface{ []byte }](x T) byte {
+ print(x, x[2], x[3]) //@ types(T, byte, byte)
+ x[2] = 'b'
+ return x[3]
+ }
+ func d[T interface{ map[int]int64 }](x T) int64 {
+ print(x, x[2], x[3]) //@ types(T, int64, int64)
+ x[2] = 43
+ return x[3]
+ }
+ func e[T ~string](t T) {
+ print(t, t[0]) //@ types(T, uint8)
+ }
+ func f[T ~string|[]byte](t T) {
+ print(t, t[0]) //@ types(T, uint8)
+ }
+ func g[T []byte](t T) {
+ print(t, t[0]) //@ types(T, byte)
+ }
+ func h[T ~[4]int|[]int](t T) {
+ print(t, t[0]) //@ types(T, int)
+ }
+ func i[T ~[4]int|*[4]int|[]int](t T) {
+ print(t, t[0]) //@ types(T, int)
+ }
+ func j[T ~[4]int|*[4]int|[]int](t T) {
+ print(t, &t[0]) //@ types(T, "*int")
+ }
+ `,
+ },
+ {
+ pkg: "d",
+ contents: `
+ package d
+
+ type MyInt int
+ type Other int
+ type MyInterface interface{ foo() }
+
+ // ChangeType tests
+ func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ }
+ func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ }
+
+ // Convert tests
+ func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/}
+ func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ }
+ func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ }
+
+ // MakeInterface tests
+ func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ }
+
+ // NewConst tests
+ func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/}
+
+ // SliceToArrayPointer
+ func sl0[T *[4]int | *[2]int](x []int) { v := T(x); print(x, v) /*@ types("[]int", T)*/ }
+ func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ `,
+ },
+ {
+ pkg: "e",
+ contents: `
+ package e
+
+ func c[T interface{ foo() string }](x T) {
+ print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/
+ }
+ `,
+ },
+ {
+ pkg: "f",
+ contents: `package f
+
+ func eq[T comparable](t T, i interface{}) bool {
+ return t == i
+ }
+ `,
+ },
+ {
+ pkg: "g",
+ contents: `package g
+ type S struct{ f int }
+ func c[P *S]() []P { return []P{{f: 1}} }
+ `,
+ },
+ {
+ pkg: "h",
+ contents: `package h
+ func sign[bytes []byte | string](s bytes) (bool, bool) {
+ neg := false
+ if len(s) > 0 && (s[0] == '-' || s[0] == '+') {
+ neg = s[0] == '-'
+ s = s[1:]
+ }
+ return !neg, len(s) > 0
+ }`,
+ },
+ {
+ pkg: "i",
+ contents: `package i
+ func digits[bytes []byte | string](s bytes) bool {
+ for _, c := range []byte(s) {
+ if c < '0' || '9' < c {
+ return false
+ }
+ }
+ return true
+ }`,
+ },
+ {
+ pkg: "j",
+ contents: `
+ package j
+
+ type E interface{}
+
+ func Foo[T E, PT interface{ *T }]() T {
+ pt := PT(new(T))
+ x := *pt
+ print(x) /*@ types(T)*/
+ return x
+ }
+ `,
+ },
+ } {
+ test := test
+ t.Run(test.pkg, func(t *testing.T) {
+ // Parse
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ fname := test.pkg + ".go"
+ f, err := conf.ParseFile(fname, test.contents)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles(test.pkg, f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions)
+ for _, info := range lprog.AllPackages {
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+ }
+ p := prog.Package(lprog.Package(test.pkg).Pkg)
+ p.Build()
+
+ // Collect calls to the builtin print function.
+ probes := make(map[*ssa.CallCommon]bool)
+ for _, mem := range p.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ for _, bb := range fn.Blocks {
+ for _, i := range bb.Instrs {
+ if i, ok := i.(ssa.CallInstruction); ok {
+ call := i.Common()
+ if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" {
+ probes[i.Common()] = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Collect all notes in f, i.e. comments starting with "//@ types".
+ notes, err := expect.ExtractGo(prog.Fset, f)
+ if err != nil {
+ t.Errorf("expect.ExtractGo: %v", err)
+ }
+
+ // Match each probe with a note on the same line.
+ sameLine := func(x, y token.Pos) bool {
+ xp := prog.Fset.Position(x)
+ yp := prog.Fset.Position(y)
+ return xp.Filename == yp.Filename && xp.Line == yp.Line
+ }
+ expectations := make(map[*ssa.CallCommon]*expect.Note)
+ for call := range probes {
+ var match *expect.Note
+ for _, note := range notes {
+ if note.Name == "types" && sameLine(call.Pos(), note.Pos) {
+ match = note // first match is good enough.
+ break
+ }
+ }
+ if match != nil {
+ expectations[call] = match
+ } else {
+ t.Errorf("Unmatched probe: %v", call)
+ }
+ }
+
+ // Check each expectation.
+ for call, note := range expectations {
+ var args []string
+ for _, a := range call.Args {
+ args = append(args, a.Type().String())
+ }
+ if got, want := fmt.Sprint(args), fmt.Sprint(note.Args); got != want {
+ t.Errorf("Arguments to print() were expected to be %q. got %q", want, got)
+ }
+ }
+ })
+ }
+}
+
+// TestInstructionString tests serializing instructions via Instruction.String().
+func TestInstructionString(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestInstructionString requires type parameters")
+ }
+ // Tests (ssa.Instruction).String(). Instructions are from a single go file.
+ // The Instructions tested are those that match a comment of the form:
+ //
+ // //@ instrs(f, kind, strs...)
+ //
+ // where f is the name of the function, kind is the dynamic type of the
+ // instructions matched within the function, and strs are the expected
+ // String() values of those instructions (in some order).
+ // See x/tools/go/expect for details on the syntax.
+
+ const contents = `
+ package p
+
+ //@ instrs("f", "*ssa.TypeAssert")
+ //@ instrs("f", "*ssa.Call", "print(nil:interface{}, 0:int)")
+ func f(x int) { // non-generic smoke test.
+ var i interface{}
+ print(i, 0)
+ }
+
+ //@ instrs("h", "*ssa.Alloc", "local T (u)")
+ //@ instrs("h", "*ssa.FieldAddr", "&t0.x [#0]")
+ func h[T ~struct{ x string }]() T {
+ u := T{"lorem"}
+ return u
+ }
+
+ //@ instrs("c", "*ssa.TypeAssert", "typeassert t0.(interface{})")
+ //@ instrs("c", "*ssa.Call", "invoke x.foo()")
+ func c[T interface{ foo() string }](x T) {
+ _ = x.foo
+ _ = x.foo()
+ }
+
+ //@ instrs("d", "*ssa.TypeAssert", "typeassert t0.(interface{})")
+ //@ instrs("d", "*ssa.Call", "invoke x.foo()")
+ func d[T interface{ foo() string; comparable }](x T) {
+ _ = x.foo
+ _ = x.foo()
+ }
+ `
+
+ // Parse
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ const fname = "p.go"
+ f, err := conf.ParseFile(fname, contents)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions)
+ for _, info := range lprog.AllPackages {
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+ }
+ p := prog.Package(lprog.Package("p").Pkg)
+ p.Build()
+
+ // Collect all notes in f, i.e. comments starting with "//@ instr".
+ notes, err := expect.ExtractGo(prog.Fset, f)
+ if err != nil {
+ t.Errorf("expect.ExtractGo: %v", err)
+ }
+
+ // Expectations map a {function, kind} key to {wants, matches},
+ // where matches collects Instruction.String() for every instruction matching the key.
+ // Each expectation is that matches is some permutation of wants.
+ type expKey struct {
+ function string
+ kind string
+ }
+ type expValue struct {
+ wants []string
+ matches []string
+ }
+ expectations := make(map[expKey]*expValue)
+ for _, note := range notes {
+ if note.Name == "instrs" {
+ if len(note.Args) < 2 {
+ t.Error("Had @instrs annotation without at least 2 arguments")
+ continue
+ }
+ fn, kind := fmt.Sprint(note.Args[0]), fmt.Sprint(note.Args[1])
+ var wants []string
+ for _, arg := range note.Args[2:] {
+ wants = append(wants, fmt.Sprint(arg))
+ }
+ expectations[expKey{fn, kind}] = &expValue{wants, nil}
+ }
+ }
+
+ // Collect all Instructions that match the expectations.
+ for _, mem := range p.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ for _, bb := range fn.Blocks {
+ for _, i := range bb.Instrs {
+ kind := fmt.Sprintf("%T", i)
+ if e := expectations[expKey{fn.Name(), kind}]; e != nil {
+ e.matches = append(e.matches, i.String())
+ }
+ }
+ }
+ }
+ }
+
+ // Check each expectation.
+ for key, value := range expectations {
+ if _, ok := p.Members[key.function]; !ok {
+ t.Errorf("Expectation on %s does not match a member in %s", key.function, p.Pkg.Name())
+ }
+ got, want := value.matches, value.wants
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(want, got) {
+ t.Errorf("Within %s wanted instructions of kind %s: %q. got %q", key.function, key.kind, want, got)
+ }
+ }
+}
diff --git a/go/ssa/builder_go117_test.go b/go/ssa/builder_go117_test.go
index f6545e5e2cf..69985970596 100644
--- a/go/ssa/builder_go117_test.go
+++ b/go/ssa/builder_go117_test.go
@@ -57,7 +57,6 @@ func TestBuildPackageFailuresGo117(t *testing.T) {
importer types.Importer
}{
{"slice to array pointer - source is not a slice", "package p; var s [4]byte; var _ = (*[4]byte)(s)", nil},
- {"slice to array pointer - dest is not a pointer", "package p; var s []byte; var _ = ([4]byte)(s)", nil},
{"slice to array pointer - dest pointer elem is not an array", "package p; var s []byte; var _ = (*byte)(s)", nil},
}
diff --git a/go/ssa/builder_go120_test.go b/go/ssa/builder_go120_test.go
new file mode 100644
index 00000000000..04fb11a2d22
--- /dev/null
+++ b/go/ssa/builder_go120_test.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+)
+
+func TestBuildPackageGo120(t *testing.T) {
+ tests := []struct {
+ name string
+ src string
+ importer types.Importer
+ }{
+ {"slice to array", "package p; var s []byte; var _ = ([4]byte)(s)", nil},
+ {"slice to zero length array", "package p; var s []byte; var _ = ([0]byte)(s)", nil},
+ {"slice to zero length array type parameter", "package p; var s []byte; func f[T ~[0]byte]() { tmp := (T)(s); var z T; _ = tmp == z}", nil},
+ {"slice to non-zero length array type parameter", "package p; var s []byte; func h[T ~[1]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil},
+ {"slice to maybe-zero length array type parameter", "package p; var s []byte; func g[T ~[0]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil},
+ }
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", tc.src, parser.ParseComments)
+ if err != nil {
+ t.Error(err)
+ }
+ files := []*ast.File{f}
+
+ pkg := types.NewPackage("p", "")
+ conf := &types.Config{Importer: tc.importer}
+ if _, _, err := ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions); err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ })
+ }
+}
diff --git a/go/ssa/builder_test.go b/go/ssa/builder_test.go
index 1975e26e45c..a80d8d5ab73 100644
--- a/go/ssa/builder_test.go
+++ b/go/ssa/builder_test.go
@@ -24,6 +24,7 @@ import (
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/testenv"
"golang.org/x/tools/internal/typeparams"
)
@@ -32,6 +33,8 @@ func isEmpty(f *ssa.Function) bool { return f.Blocks == nil }
// Tests that programs partially loaded from gc object files contain
// functions with no code for the external portions, but are otherwise ok.
func TestBuildPackage(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
input := `
package main
@@ -164,6 +167,8 @@ func main() {
// TestRuntimeTypes tests that (*Program).RuntimeTypes() includes all necessary types.
func TestRuntimeTypes(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
tests := []struct {
input string
want []string
@@ -221,6 +226,18 @@ func TestRuntimeTypes(t *testing.T) {
nil,
},
}
+
+ if typeparams.Enabled {
+ tests = append(tests, []struct {
+ input string
+ want []string
+ }{
+ // MakeInterface does not create a runtime type for parameterized types.
+ {`package N; var g interface{}; func f[S any]() { var v []S; g = v }; `,
+ nil,
+ },
+ }...)
+ }
for _, test := range tests {
// Parse the file.
fset := token.NewFileSet()
diff --git a/go/ssa/const.go b/go/ssa/const.go
index dc182d9616c..4a51a2cb4bb 100644
--- a/go/ssa/const.go
+++ b/go/ssa/const.go
@@ -12,65 +12,73 @@ import (
"go/token"
"go/types"
"strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/typeparams"
)
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
func NewConst(val constant.Value, typ types.Type) *Const {
+ if val == nil {
+ switch soleTypeKind(typ) {
+ case types.IsBoolean:
+ val = constant.MakeBool(false)
+ case types.IsInteger:
+ val = constant.MakeInt64(0)
+ case types.IsString:
+ val = constant.MakeString("")
+ }
+ }
return &Const{typ, val}
}
+// soleTypeKind returns a BasicInfo for which constant.Value can
+// represent all zero values for the types in the type set.
+//
+// types.IsBoolean for false is a representative.
+// types.IsInteger for 0
+// types.IsString for ""
+// 0 otherwise.
+func soleTypeKind(typ types.Type) types.BasicInfo {
+ // State records the set of possible zero values (false, 0, "").
+ // Candidates (perhaps all) are eliminated during the type-set
+ // iteration, which executes at least once.
+ state := types.IsBoolean | types.IsInteger | types.IsString
+ underIs(typeSetOf(typ), func(t types.Type) bool {
+ var c types.BasicInfo
+ if t, ok := t.(*types.Basic); ok {
+ c = t.Info()
+ }
+ if c&types.IsNumeric != 0 { // int/float/complex
+ c = types.IsInteger
+ }
+ state = state & c
+ return state != 0
+ })
+ return state
+}
+
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
return NewConst(constant.MakeInt64(i), tInt)
}
-// nilConst returns a nil constant of the specified type, which may
-// be any reference type, including interfaces.
-func nilConst(typ types.Type) *Const {
- return NewConst(nil, typ)
-}
-
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
return NewConst(constant.MakeString(s), tString)
}
-// zeroConst returns a new "zero" constant of the specified type,
-// which must not be an array or struct type: the zero values of
-// aggregates are well-defined but cannot be represented by Const.
+// zeroConst returns a new "zero" constant of the specified type.
func zeroConst(t types.Type) *Const {
- switch t := t.(type) {
- case *types.Basic:
- switch {
- case t.Info()&types.IsBoolean != 0:
- return NewConst(constant.MakeBool(false), t)
- case t.Info()&types.IsNumeric != 0:
- return NewConst(constant.MakeInt64(0), t)
- case t.Info()&types.IsString != 0:
- return NewConst(constant.MakeString(""), t)
- case t.Kind() == types.UnsafePointer:
- fallthrough
- case t.Kind() == types.UntypedNil:
- return nilConst(t)
- default:
- panic(fmt.Sprint("zeroConst for unexpected type:", t))
- }
- case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
- return nilConst(t)
- case *types.Named:
- return NewConst(zeroConst(t.Underlying()).Value, t)
- case *types.Array, *types.Struct, *types.Tuple:
- panic(fmt.Sprint("zeroConst applied to aggregate:", t))
- }
- panic(fmt.Sprint("zeroConst: unexpected ", t))
+ return NewConst(nil, t)
}
func (c *Const) RelString(from *types.Package) string {
var s string
if c.Value == nil {
- s = "nil"
+ s = zeroString(c.typ, from)
} else if c.Value.Kind() == constant.String {
s = constant.StringVal(c.Value)
const max = 20
@@ -85,6 +93,44 @@ func (c *Const) RelString(from *types.Package) string {
return s + ":" + relType(c.Type(), from)
}
+// zeroString returns the string representation of the "zero" value of the type t.
+func zeroString(t types.Type, from *types.Package) string {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false"
+ case t.Info()&types.IsNumeric != 0:
+ return "0"
+ case t.Info()&types.IsString != 0:
+ return `""`
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil"
+ default:
+ panic(fmt.Sprint("zeroString for unexpected type:", t))
+ }
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return "nil"
+ case *types.Named:
+ return zeroString(t.Underlying(), from)
+ case *types.Array, *types.Struct:
+ return relType(t, from) + "{}"
+ case *types.Tuple:
+ // Tuples are not normal values.
+ // We currently format them as "(t[0], ..., t[n])". Could be something else.
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ components[i] = zeroString(t.At(i).Type(), from)
+ }
+ return "(" + strings.Join(components, ", ") + ")"
+ case *typeparams.TypeParam:
+ return "*new(" + relType(t, from) + ")"
+ }
+ panic(fmt.Sprint("zeroString: unexpected ", t))
+}
+
func (c *Const) Name() string {
return c.RelString(nil)
}
@@ -107,9 +153,30 @@ func (c *Const) Pos() token.Pos {
return token.NoPos
}
-// IsNil returns true if this constant represents a typed or untyped nil value.
+// IsNil returns true if this constant is a nil value of
+// a nillable reference type (pointer, slice, channel, map, or function),
+// a basic interface type, or
+// a type parameter all of whose possible instantiations are themselves nillable.
func (c *Const) IsNil() bool {
- return c.Value == nil
+ return c.Value == nil && nillable(c.typ)
+}
+
+// nillable reports whether *new(T) == nil is legal for type T.
+func nillable(t types.Type) bool {
+ if typeparams.IsTypeParam(t) {
+ return underIs(typeSetOf(t), func(u types.Type) bool {
+ // empty type set (u==nil) => any underlying types => not nillable
+ return u != nil && nillable(u)
+ })
+ }
+ switch t.Underlying().(type) {
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return true
+ case *types.Interface:
+ return true // basic interface.
+ default:
+ return false
+ }
}
// TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp.
@@ -149,14 +216,16 @@ func (c *Const) Uint64() uint64 {
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
func (c *Const) Float64() float64 {
- f, _ := constant.Float64Val(c.Value)
+ x := constant.ToFloat(c.Value) // (c.Value == nil) => x.Kind() == Unknown
+ f, _ := constant.Float64Val(x)
return f
}
// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
func (c *Const) Complex128() complex128 {
- re, _ := constant.Float64Val(constant.Real(c.Value))
- im, _ := constant.Float64Val(constant.Imag(c.Value))
+ x := constant.ToComplex(c.Value) // (c.Value == nil) => x.Kind() == Unknown
+ re, _ := constant.Float64Val(constant.Real(x))
+ im, _ := constant.Float64Val(constant.Imag(x))
return complex(re, im)
}
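// [Editorial sketch, not part of this patch.] With the changes above, the zero
// value of a type parameter is representable as a *Const with a nil Value; its
// IsNil and printed form depend on the constraint's type set (go1.18 rules,
// illustrative names):

package sketch

// Every term is nillable (pointer, channel, map), so the zero constant of T
// would report IsNil() == true.
func zeroNillable[T ~*int | ~chan int | ~map[string]int]() T {
	var z T
	return z
}

// The type set mixes a nillable term (*int) with a non-nillable one (int),
// so the zero constant of T would not be considered nil.
func zeroMixed[T ~int | ~*int]() T {
	var z T
	return z
}

// All terms are integer kinds, so soleTypeKind finds a common representative
// and the zero constant would print as "0:T".
func zeroInt[T ~int | ~int64]() T {
	var z T
	return z
}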
diff --git a/go/ssa/const_test.go b/go/ssa/const_test.go
new file mode 100644
index 00000000000..131fe1aced2
--- /dev/null
+++ b/go/ssa/const_test.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "math/big"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func TestConstString(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestConstString requires type parameters.")
+ }
+
+ const source = `
+ package P
+
+ type Named string
+
+ func fn() (int, bool, string)
+ func gen[T int]() {}
+ `
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ expr string // type expression
+ constant interface{} // constant value
+ want string // expected String() value
+ }{
+ {"int", int64(0), "0:int"},
+ {"int64", int64(0), "0:int64"},
+ {"float32", int64(0), "0:float32"},
+ {"float32", big.NewFloat(1.5), "1.5:float32"},
+ {"bool", false, "false:bool"},
+ {"string", "", `"":string`},
+ {"Named", "", `"":P.Named`},
+ {"struct{x string}", nil, "struct{x string}{}:struct{x string}"},
+ {"[]int", nil, "nil:[]int"},
+ {"[3]int", nil, "[3]int{}:[3]int"},
+ {"*int", nil, "nil:*int"},
+ {"interface{}", nil, "nil:interface{}"},
+ {"interface{string}", nil, `"":interface{string}`},
+ {"interface{int|int64}", nil, "0:interface{int|int64}"},
+ {"interface{bool}", nil, "false:interface{bool}"},
+ {"interface{bool|int}", nil, "nil:interface{bool|int}"},
+ {"interface{int|string}", nil, "nil:interface{int|string}"},
+ {"interface{bool|string}", nil, "nil:interface{bool|string}"},
+ {"interface{struct{x string}}", nil, "nil:interface{struct{x string}}"},
+ {"interface{int|int64}", int64(1), "1:interface{int|int64}"},
+ {"interface{~bool}", true, "true:interface{~bool}"},
+ {"interface{Named}", "lorem ipsum", `"lorem ipsum":interface{P.Named}`},
+ {"func() (int, bool, string)", nil, "nil:func() (int, bool, string)"},
+ } {
+ // Eval() expr for its type.
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Fatalf("Eval(%s) failed: %v", test.expr, err)
+ }
+ var val constant.Value
+ if test.constant != nil {
+ val = constant.Make(test.constant)
+ }
+ c := ssa.NewConst(val, tv.Type)
+ got := strings.ReplaceAll(c.String(), " | ", "|") // Accept both interface{a | b} and interface{a|b}.
+ if got != test.want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", val, tv.Type, got, test.want)
+ }
+ }
+
+ // Test tuples
+ fn := pkg.Scope().Lookup("fn")
+ tup := fn.Type().(*types.Signature).Results()
+ if got, want := ssa.NewConst(nil, tup).String(), `(0, false, ""):(int, bool, string)`; got != want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want)
+ }
+
+ // Test type-param
+ gen := pkg.Scope().Lookup("gen")
+ tp := typeparams.ForSignature(gen.Type().(*types.Signature)).At(0)
+ if got, want := ssa.NewConst(nil, tp).String(), "0:T"; got != want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want)
+ }
+}
diff --git a/go/ssa/coretype.go b/go/ssa/coretype.go
new file mode 100644
index 00000000000..128d61e4267
--- /dev/null
+++ b/go/ssa/coretype.go
@@ -0,0 +1,159 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Utilities for dealing with core types.
+
+// isBytestring returns true if T has the same terms as interface{[]byte | string}.
+// These act like a core type for some operations: slice expressions, append and copy.
+//
+// See https://go.dev/ref/spec#Core_types for the details on bytestring.
+func isBytestring(T types.Type) bool {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return false
+ }
+
+ tset := typeSetOf(U)
+ if tset.Len() != 2 {
+ return false
+ }
+ hasBytes, hasString := false, false
+ underIs(tset, func(t types.Type) bool {
+ switch {
+ case isString(t):
+ hasString = true
+ case isByteSlice(t):
+ hasBytes = true
+ }
+ return hasBytes || hasString
+ })
+ return hasBytes && hasString
+}
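// [Editorial note, not part of this patch.] A constraint whose terms are exactly
// []byte and string matches the bytestring shape detected above; that is what
// permits, for example, indexing and slice expressions on such a type parameter
// even though it has no single core type:
//
//	func trimSign[T []byte | string](x T) T {
//		if len(x) > 0 && (x[0] == '-' || x[0] == '+') {
//			return x[1:]
//		}
//		return x
//	}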
+
+// termList is a list of types.
+type termList []*typeparams.Term // type terms of the type set
+func (s termList) Len() int { return len(s) }
+func (s termList) At(i int) types.Type { return s[i].Type() }
+
+// typeSetOf returns the type set of typ. Returns an empty typeset on an error.
+func typeSetOf(typ types.Type) termList {
+ // This is an adaptation of x/exp/typeparams.NormalTerms, which x/tools cannot depend on.
+ var terms []*typeparams.Term
+ var err error
+ switch typ := typ.(type) {
+ case *typeparams.TypeParam:
+ terms, err = typeparams.StructuralTerms(typ)
+ case *typeparams.Union:
+ terms, err = typeparams.UnionTermSet(typ)
+ case *types.Interface:
+ terms, err = typeparams.InterfaceTermSet(typ)
+ default:
+ // Common case.
+ // Specializing the len=1 case to avoid a slice
+ // had no measurable space/time benefit.
+ terms = []*typeparams.Term{typeparams.NewTerm(false, typ)}
+ }
+
+ if err != nil {
+ return termList(nil)
+ }
+ return termList(terms)
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of s and reports whether all calls to f returned true. If there are
+// no specific terms, underIs returns the result of f(nil).
+func underIs(s termList, f func(types.Type) bool) bool {
+ if s.Len() == 0 {
+ return f(nil)
+ }
+ for i := 0; i < s.Len(); i++ {
+ u := s.At(i).Underlying()
+ if !f(u) {
+ return false
+ }
+ }
+ return true
+}
+
+// indexType returns the element type and index mode of an IndexExpr over a type.
+// It returns (nil, ixInvalid) if the type is not indexable; this should never occur in a well-typed program.
+func indexType(typ types.Type) (types.Type, indexMode) {
+ switch U := typ.Underlying().(type) {
+ case *types.Array:
+ return U.Elem(), ixArrVar
+ case *types.Pointer:
+ if arr, ok := U.Elem().Underlying().(*types.Array); ok {
+ return arr.Elem(), ixVar
+ }
+ case *types.Slice:
+ return U.Elem(), ixVar
+ case *types.Map:
+ return U.Elem(), ixMap
+ case *types.Basic:
+ return tByte, ixValue // must be a string
+ case *types.Interface:
+ tset := typeSetOf(U)
+ if tset.Len() == 0 {
+ return nil, ixInvalid // the type set is empty (no underlying terms, or an error occurred).
+ }
+
+ elem, mode := indexType(tset.At(0))
+ for i := 1; i < tset.Len() && mode != ixInvalid; i++ {
+ e, m := indexType(tset.At(i))
+ if !types.Identical(elem, e) { // sanity check; cannot fail in a type-checked program
+ return nil, ixInvalid
+ }
+ // Update the mode to the most constrained address type.
+ mode = mode.meet(m)
+ }
+ if mode != ixInvalid {
+ return elem, mode
+ }
+ }
+ return nil, ixInvalid
+}
+
+// An indexMode specifies the (addressing) mode of an index operand.
+//
+// Addressing mode of an index operation is based on the set of
+// underlying types.
+// Hasse diagram of the indexMode meet semi-lattice:
+//
+// ixVar ixMap
+// | |
+// ixArrVar |
+// | |
+// ixValue |
+// \ /
+// ixInvalid
+type indexMode byte
+
+const (
+ ixInvalid indexMode = iota // index is invalid
+ ixValue // index is a computed value (not addressable)
+ ixArrVar // like ixVar, but index operand contains an array
+ ixVar // index is an addressable variable
+ ixMap // index is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment)
+)
+
+// meet is the address type that is constrained by both x and y.
+func (x indexMode) meet(y indexMode) indexMode {
+ if (x == ixMap || y == ixMap) && x != y {
+ return ixInvalid
+ }
+ // Use int representation and return min.
+ if x < y {
+ return x
+ }
+ return y
+}
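// [Editorial sketch, not part of this patch.] A standalone re-implementation of
// the meet semi-lattice above, with a few worked examples (illustrative only):

package sketch

type indexMode byte

const (
	ixInvalid indexMode = iota
	ixValue
	ixArrVar
	ixVar
	ixMap
)

// meet mirrors coretype.go: ixMap only meets itself, and otherwise the smaller
// (more constrained) mode on the chain ixValue < ixArrVar < ixVar wins.
func meet(x, y indexMode) indexMode {
	if (x == ixMap || y == ixMap) && x != y {
		return ixInvalid
	}
	if x < y {
		return x
	}
	return y
}

var examples = [3]indexMode{
	meet(ixVar, ixArrVar), // ixArrVar: e.g. terms []int and [4]int
	meet(ixVar, ixMap),    // ixInvalid: a map term mixed with a slice term
	meet(ixMap, ixMap),    // ixMap: all terms are maps
}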
diff --git a/go/ssa/coretype_test.go b/go/ssa/coretype_test.go
new file mode 100644
index 00000000000..74fe4db1667
--- /dev/null
+++ b/go/ssa/coretype_test.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func TestCoreType(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestCoreType requires type parameters.")
+ }
+
+ const source = `
+ package P
+
+ type Named int
+
+ type A any
+ type B interface{~int}
+ type C interface{int}
+ type D interface{Named}
+ type E interface{~int|interface{Named}}
+ type F interface{~int|~float32}
+ type G interface{chan int|interface{chan int}}
+ type H interface{chan int|chan float32}
+ type I interface{chan<- int|chan int}
+ type J interface{chan int|chan<- int}
+ type K interface{<-chan int|chan int}
+ type L interface{chan int|<-chan int}
+ type M interface{chan int|chan Named}
+ type N interface{<-chan int|chan<- int}
+ type O interface{chan int|bool}
+ type P struct{ Named }
+ type Q interface{ Foo() }
+ type R interface{ Foo() ; Named }
+ type S interface{ Foo() ; ~int }
+
+ type T interface{chan int|interface{chan int}|<-chan int}
+`
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "hello.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ expr string // type expression of Named type
+ want string // expected core type (or "" if none)
+ }{
+ {"Named", "int"}, // Underlying type is not interface.
+ {"A", ""}, // Interface has no terms.
+ {"B", "int"}, // Tilde term.
+ {"C", "int"}, // Non-tilde term.
+ {"D", "int"}, // Named term.
+ {"E", "int"}, // Identical underlying types.
+ {"F", ""}, // Differing underlying types.
+ {"G", "chan int"}, // Identical Element types.
+ {"H", ""}, // Element type int has differing underlying type to float32.
+ {"I", "chan<- int"}, // SendRecv followed by SendOnly
+ {"J", "chan<- int"}, // SendOnly followed by SendRecv
+ {"K", "<-chan int"}, // RecvOnly followed by SendRecv
+ {"L", "<-chan int"}, // SendRecv followed by RecvOnly
+ {"M", ""}, // Element type int is not *identical* to Named.
+ {"N", ""}, // Differing channel directions
+ {"O", ""}, // A channel followed by a non-channel.
+ {"P", "struct{P.Named}"}, // Embedded type.
+ {"Q", ""}, // interface type with no terms and functions
+ {"R", "int"}, // interface type with both terms and functions.
+ {"S", "int"}, // interface type with a tilde term
+ {"T", "<-chan int"}, // Prefix of 2 terms that are identical before switching to channel.
+ } {
+ // Eval() expr for its type.
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Fatalf("Eval(%s) failed: %v", test.expr, err)
+ }
+
+ ct := typeparams.CoreType(tv.Type)
+ var got string
+ if ct == nil {
+ got = ""
+ } else {
+ got = ct.String()
+ }
+ if got != test.want {
+ t.Errorf("CoreType(%s) = %v, want %v", test.expr, got, test.want)
+ }
+ }
+}
diff --git a/go/ssa/create.go b/go/ssa/create.go
index 345d9acfbbd..ccb20e79683 100644
--- a/go/ssa/create.go
+++ b/go/ssa/create.go
@@ -91,37 +91,31 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
}
// Collect type parameters if this is a generic function/method.
- var tparams []*typeparams.TypeParam
- for i, rtparams := 0, typeparams.RecvTypeParams(sig); i < rtparams.Len(); i++ {
- tparams = append(tparams, rtparams.At(i))
- }
- for i, sigparams := 0, typeparams.ForSignature(sig); i < sigparams.Len(); i++ {
- tparams = append(tparams, sigparams.At(i))
+ var tparams *typeparams.TypeParamList
+ if rtparams := typeparams.RecvTypeParams(sig); rtparams.Len() > 0 {
+ tparams = rtparams
+ } else if sigparams := typeparams.ForSignature(sig); sigparams.Len() > 0 {
+ tparams = sigparams
}
fn := &Function{
- name: name,
- object: obj,
- Signature: sig,
- syntax: syntax,
- pos: obj.Pos(),
- Pkg: pkg,
- Prog: pkg.Prog,
- _TypeParams: tparams,
- info: pkg.info,
+ name: name,
+ object: obj,
+ Signature: sig,
+ syntax: syntax,
+ pos: obj.Pos(),
+ Pkg: pkg,
+ Prog: pkg.Prog,
+ typeparams: tparams,
+ info: pkg.info,
}
pkg.created.Add(fn)
if syntax == nil {
fn.Synthetic = "loaded from gc object file"
}
- if len(tparams) > 0 {
+ if tparams.Len() > 0 {
fn.Prog.createInstanceSet(fn)
}
- if len(tparams) > 0 && syntax != nil {
- fn.Synthetic = "generic function"
- // TODO(taking): Allow for the function to be built once type params are supported.
- fn.syntax = nil // Treating as an external function temporarily.
- }
pkg.objects[obj] = fn
if sig.Recv() == nil {
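// [Editorial sketch, not part of this patch.] RecvTypeParams and ForSignature
// are mutually exclusive in go1.18: a method on a generic type carries the type
// parameters on its receiver, while a top-level generic function carries them on
// the signature itself, which is why the code above picks whichever list is
// non-empty. For example (illustrative names):

package sketch

type Box[T any] struct{ v T }

// Get's signature has RecvTypeParams = [T] and an empty ForSignature list.
func (b Box[T]) Get() T { return b.v }

// Id's signature has ForSignature = [T] and no receiver type parameters.
func Id[T any](x T) T { return x }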
diff --git a/go/ssa/dom.go b/go/ssa/dom.go
index ce2473cafce..66a2f5e6ed3 100644
--- a/go/ssa/dom.go
+++ b/go/ssa/dom.go
@@ -303,7 +303,7 @@ func sanityCheckDomTree(f *Function) {
// Printing functions ----------------------------------------
-// printDomTree prints the dominator tree as text, using indentation.
+// printDomTreeText prints the dominator tree as text, using indentation.
func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
for _, child := range v.dom.children {
diff --git a/go/ssa/emit.go b/go/ssa/emit.go
index fb11c3558d3..e7cd6261dcd 100644
--- a/go/ssa/emit.go
+++ b/go/ssa/emit.go
@@ -11,6 +11,8 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/typeparams"
)
// emitNew emits to f a new (heap Alloc) instruction allocating an
@@ -27,7 +29,7 @@ func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
// new temporary, and returns the value so defined.
func emitLoad(f *Function, addr Value) *UnOp {
v := &UnOp{Op: token.MUL, X: addr}
- v.setType(deref(addr.Type()))
+ v.setType(deref(typeparams.CoreType(addr.Type())))
f.emit(v)
return v
}
@@ -121,9 +123,9 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
if types.Identical(xt, yt) {
// no conversion necessary
- } else if _, ok := xt.(*types.Interface); ok {
+ } else if isNonTypeParamInterface(x.Type()) {
y = emitConv(f, y, x.Type())
- } else if _, ok := yt.(*types.Interface); ok {
+ } else if isNonTypeParamInterface(y.Type()) {
x = emitConv(f, x, y.Type())
} else if _, ok := x.(*Const); ok {
x = emitConv(f, x, y.Type())
@@ -166,6 +168,32 @@ func isValuePreserving(ut_src, ut_dst types.Type) bool {
return false
}
+// isSliceToArrayPointer reports whether ut_src is a slice type
+// that can be converted to a pointer to an array type ut_dst.
+// Precondition: neither argument is a named type.
+func isSliceToArrayPointer(ut_src, ut_dst types.Type) bool {
+ if slice, ok := ut_src.(*types.Slice); ok {
+ if ptr, ok := ut_dst.(*types.Pointer); ok {
+ if arr, ok := ptr.Elem().Underlying().(*types.Array); ok {
+ return types.Identical(slice.Elem(), arr.Elem())
+ }
+ }
+ }
+ return false
+}
+
+// isSliceToArray reports whether ut_src is a slice type
+// that can be converted to an array type ut_dst.
+// Precondition: neither argument is a named type.
+func isSliceToArray(ut_src, ut_dst types.Type) bool {
+ if slice, ok := ut_src.(*types.Slice); ok {
+ if arr, ok := ut_dst.(*types.Array); ok {
+ return types.Identical(slice.Elem(), arr.Elem())
+ }
+ }
+ return false
+}
+
// emitConv emits to f code to convert Value val to exactly type typ,
// and returns the converted value. Implicit conversions are required
// by language assignability rules in assignments, parameter passing,
@@ -177,21 +205,28 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
if types.Identical(t_src, typ) {
return val
}
-
ut_dst := typ.Underlying()
ut_src := t_src.Underlying()
+ dst_types := typeSetOf(ut_dst)
+ src_types := typeSetOf(ut_src)
+
// Just a change of type, but not value or representation?
- if isValuePreserving(ut_src, ut_dst) {
+ preserving := underIs(src_types, func(s types.Type) bool {
+ return underIs(dst_types, func(d types.Type) bool {
+ return s != nil && d != nil && isValuePreserving(s, d) // all (s -> d) are value preserving.
+ })
+ })
+ if preserving {
c := &ChangeType{X: val}
c.setType(typ)
return f.emit(c)
}
// Conversion to, or construction of a value of, an interface type?
- if _, ok := ut_dst.(*types.Interface); ok {
+ if isNonTypeParamInterface(typ) {
// Assignment from one interface type to another?
- if _, ok := ut_src.(*types.Interface); ok {
+ if isNonTypeParamInterface(t_src) {
c := &ChangeInterface{X: val}
c.setType(typ)
return f.emit(c)
@@ -199,7 +234,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
// Untyped nil constant? Return interface-typed nil constant.
if ut_src == tUntypedNil {
- return nilConst(typ)
+ return zeroConst(typ)
}
// Convert (non-nil) "untyped" literals to their default type.
@@ -214,7 +249,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
// Conversion of a compile-time constant value?
if c, ok := val.(*Const); ok {
- if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
+ if isBasic(ut_dst) || c.Value == nil {
// Conversion of a compile-time constant to
// another constant type results in a new
// constant of the destination type and
@@ -228,21 +263,30 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
}
// Conversion from slice to array pointer?
- if slice, ok := ut_src.(*types.Slice); ok {
- if ptr, ok := ut_dst.(*types.Pointer); ok {
- if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) {
- c := &SliceToArrayPointer{X: val}
- c.setType(ut_dst)
- return f.emit(c)
- }
- }
+ slice2ptr := underIs(src_types, func(s types.Type) bool {
+ return underIs(dst_types, func(d types.Type) bool {
+ return s != nil && d != nil && isSliceToArrayPointer(s, d) // all (s->d) are slice to array pointer conversion.
+ })
+ })
+ if slice2ptr {
+ c := &SliceToArrayPointer{X: val}
+ c.setType(typ)
+ return f.emit(c)
}
+
+ // Conversion from slice to array?
+ slice2array := underIs(src_types, func(s types.Type) bool {
+ return underIs(dst_types, func(d types.Type) bool {
+ return s != nil && d != nil && isSliceToArray(s, d) // all (s->d) are slice to array conversion.
+ })
+ })
+ if slice2array {
+ return emitSliceToArray(f, val, typ)
+ }
+
// A representation-changing conversion?
- // At least one of {ut_src,ut_dst} must be *Basic.
- // (The other may be []byte or []rune.)
- _, ok1 := ut_src.(*types.Basic)
- _, ok2 := ut_dst.(*types.Basic)
- if ok1 || ok2 {
+ // All of ut_src or ut_dst is basic, byte slice, or rune slice?
+ if isBasicConvTypes(src_types) || isBasicConvTypes(dst_types) {
c := &Convert{X: val}
c.setType(typ)
return f.emit(c)
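// [Editorial sketch, not part of this patch.] The all-pairs checks above mean a
// conversion between type parameters is emitted only when it is valid for every
// (source term, destination term) combination; for example (go1.18 rules,
// illustrative names):

package sketch

// Every pair in the two type sets is a valid slice-to-array-pointer conversion,
// so emitConv would emit a single SliceToArrayPointer instruction.
func toArrayPtr[S ~[]byte, D ~*[4]byte](s S) D { return D(s) }

// Both type sets consist of basic types, so the conversion would lower to an
// ordinary Convert instruction.
func widen[S ~int32 | ~int16, D ~int64](x S) D { return D(x) }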
@@ -251,6 +295,33 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
}
+// emitTypeCoercion emits to f code to coerce the type of a
+// Value v to exactly type typ, and returns the coerced value.
+//
+// Requires that coercing v.Type() to typ is a value-preserving change.
+//
+// Currently used only when v.Type() is a type instance of typ or vice versa.
+// A type v is a type instance of a type t if there exists a
+// type parameter substitution σ s.t. σ(v) == t. Example:
+//
+// σ(func(T) T) == func(int) int for σ == [T ↦ int]
+//
+// This happens in instantiation wrappers for conversion
+// from an instantiation to a parameterized type (and vice versa)
+// with σ substituting f.typeparams by f.typeargs.
+func emitTypeCoercion(f *Function, v Value, typ types.Type) Value {
+ if types.Identical(v.Type(), typ) {
+ return v // no coercion needed
+ }
+ // TODO(taking): for instances should we record which side is the instance?
+ c := &ChangeType{
+ X: v,
+ }
+ c.setType(typ)
+ f.emit(c)
+ return c
+}
+
// emitStore emits to f an instruction to store value val at location
// addr, applying implicit conversions as required by assignability rules.
func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
@@ -359,7 +430,7 @@ func emitTailCall(f *Function, call *Call) {
// value of a field.
func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value {
for _, index := range indices {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
if isPointer(v.Type()) {
instr := &FieldAddr{
@@ -393,7 +464,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos)
// field's value.
// Ident id is used for position and debug info.
func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
if isPointer(v.Type()) {
instr := &FieldAddr{
X: v,
@@ -419,6 +490,48 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.
return v
}
+// emitSliceToArray emits to f code to convert a slice value to an array value.
+//
+// Precondition: all types in the type set of typ are arrays and convertible to all
+// types in the type set of val.Type().
+func emitSliceToArray(f *Function, val Value, typ types.Type) Value {
+ // Emit the following:
+ // if val == nil && len(typ) == 0 {
+ // ptr = &[0]T{}
+ // } else {
+ // ptr = SliceToArrayPointer(val)
+ // }
+ // v = *ptr
+
+ ptype := types.NewPointer(typ)
+ p := &SliceToArrayPointer{X: val}
+ p.setType(ptype)
+ ptr := f.emit(p)
+
+ nilb := f.newBasicBlock("slicetoarray.nil")
+ nonnilb := f.newBasicBlock("slicetoarray.nonnil")
+ done := f.newBasicBlock("slicetoarray.done")
+
+ cond := emitCompare(f, token.EQL, ptr, zeroConst(ptype), token.NoPos)
+ emitIf(f, cond, nilb, nonnilb)
+ f.currentBlock = nilb
+
+ zero := f.addLocal(typ, token.NoPos)
+ emitJump(f, done)
+ f.currentBlock = nonnilb
+
+ emitJump(f, done)
+ f.currentBlock = done
+
+ phi := &Phi{Edges: []Value{zero, ptr}, Comment: "slicetoarray"}
+ phi.pos = val.Pos()
+ phi.setType(typ)
+ x := f.emit(phi)
+ unOp := &UnOp{Op: token.MUL, X: x}
+ unOp.setType(typ)
+ return f.emit(unOp)
+}
+
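// [Editorial note, not part of this patch.] At the source level, the lowering
// above corresponds to go1.20 slice-to-array conversions, where a nil slice
// converts successfully only when the array length is zero:
//
//	func headFour(s []byte) [4]byte { return [4]byte(s) } // panics if len(s) < 4
//	func emptyOK(s []byte) [0]byte  { return [0]byte(s) } // ok even when s == nil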
// zeroValue emits to f code to produce a zero value of type t,
// and returns it.
func zeroValue(f *Function, t types.Type) Value {
diff --git a/go/ssa/example_test.go b/go/ssa/example_test.go
index 492a02f766e..9a5fd436928 100644
--- a/go/ssa/example_test.go
+++ b/go/ssa/example_test.go
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !android && !ios && !js
+// +build !android,!ios,!js
+
package ssa_test
import (
diff --git a/go/ssa/func.go b/go/ssa/func.go
index c598ff836d3..57f5f718f73 100644
--- a/go/ssa/func.go
+++ b/go/ssa/func.go
@@ -251,7 +251,10 @@ func buildReferrers(f *Function) {
}
// mayNeedRuntimeTypes returns all of the types in the body of fn that might need runtime types.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
func mayNeedRuntimeTypes(fn *Function) []types.Type {
+ // Collect all types that may need rtypes, i.e. those that flow into an interface.
var ts []types.Type
for _, bb := range fn.Blocks {
for _, instr := range bb.Instrs {
@@ -260,7 +263,21 @@ func mayNeedRuntimeTypes(fn *Function) []types.Type {
}
}
}
- return ts
+
+ // Types that contain a parameterized type are considered not to be runtime types.
+ if fn.typeparams.Len() == 0 {
+ return ts // No potentially parameterized types.
+ }
+ // Filter parameterized types, in place.
+ fn.Prog.methodsMu.Lock()
+ defer fn.Prog.methodsMu.Unlock()
+ filtered := ts[:0]
+ for _, t := range ts {
+ if !fn.Prog.parameterized.isParameterized(t) {
+ filtered = append(filtered, t)
+ }
+ }
+ return filtered
}
// finishBody() finalizes the contents of the function after SSA code generation of its body.
@@ -518,8 +535,8 @@ func (fn *Function) declaredPackage() *Package {
switch {
case fn.Pkg != nil:
return fn.Pkg // non-generic function
- case fn._Origin != nil:
- return fn._Origin.Pkg // instance of a named generic function
+ case fn.topLevelOrigin != nil:
+ return fn.topLevelOrigin.Pkg // instance of a named generic function
case fn.parent != nil:
return fn.parent.declaredPackage() // instance of an anonymous [generic] function
default:
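A hypothetical illustration (the function F below is made up) of the type flow that the mayNeedRuntimeTypes filter above is concerned with: inside a generic body, a composite type that mentions a type parameter flows into an interface, but it cannot become a runtime type until the function is instantiated with ground type arguments.

```go
package p

// The slice type []T mentions the type parameter T, so it is filtered out of
// the runtime-type candidates collected for the generic F; only instances
// such as F[int] contribute a concrete []int.
func F[T any](x T) interface{} {
	return []T{x}
}
```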
diff --git a/go/ssa/instantiate.go b/go/ssa/instantiate.go
index 049b53487d5..f73594bb41b 100644
--- a/go/ssa/instantiate.go
+++ b/go/ssa/instantiate.go
@@ -18,7 +18,7 @@ import (
//
// This is an experimental interface! It may change without warning.
func (prog *Program) _Instances(fn *Function) []*Function {
- if len(fn._TypeParams) == 0 {
+ if fn.typeparams.Len() == 0 || len(fn.typeargs) > 0 {
return nil
}
@@ -29,7 +29,7 @@ func (prog *Program) _Instances(fn *Function) []*Function {
// A set of instantiations of a generic function fn.
type instanceSet struct {
- fn *Function // len(fn._TypeParams) > 0 and len(fn._TypeArgs) == 0.
+ fn *Function // fn.typeparams.Len() > 0 and len(fn.typeargs) == 0.
instances map[*typeList]*Function // canonical type arguments to an instance.
syntax *ast.FuncDecl // fn.syntax copy for instantiating after fn is done. nil on synthetic packages.
info *types.Info // fn.pkg.info copy for building after fn is done.. nil on synthetic packages.
@@ -56,7 +56,7 @@ func (insts *instanceSet) list() []*Function {
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu)
func (prog *Program) createInstanceSet(fn *Function) {
- assert(len(fn._TypeParams) > 0 && len(fn._TypeArgs) == 0, "Can only create instance sets for generic functions")
+ assert(fn.typeparams.Len() > 0 && len(fn.typeargs) == 0, "Can only create instance sets for generic functions")
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
@@ -73,7 +73,7 @@ func (prog *Program) createInstanceSet(fn *Function) {
}
}
-// needsInstance returns an Function that that is the instantiation of fn with the type arguments targs.
+// needsInstance returns a Function that is the instantiation of fn with the type arguments targs.
//
// Any CREATEd instance is added to cr.
//
@@ -82,41 +82,45 @@ func (prog *Program) needsInstance(fn *Function, targs []types.Type, cr *creator
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
- return prog.instances[fn].lookupOrCreate(targs, cr)
+ return prog.lookupOrCreateInstance(fn, targs, cr)
+}
+
+// lookupOrCreateInstance returns a Function that is the instantiation of fn with the type arguments targs.
+//
+// Any CREATEd instance is added to cr.
+//
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodMu)
+func (prog *Program) lookupOrCreateInstance(fn *Function, targs []types.Type, cr *creator) *Function {
+ return prog.instances[fn].lookupOrCreate(targs, &prog.parameterized, cr)
}
// lookupOrCreate returns the instantiation of insts.fn using targs.
-// If the instantiation is reported, this is added to cr.
-func (insts *instanceSet) lookupOrCreate(targs []types.Type, cr *creator) *Function {
+// If the instantiation is created, this is added to cr.
+func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWalker, cr *creator) *Function {
if insts.instances == nil {
insts.instances = make(map[*typeList]*Function)
}
+ fn := insts.fn
+ prog := fn.Prog
+
// canonicalize on a tuple of targs. Sig is not unique.
//
// func A[T any]() {
// var x T
// fmt.Println("%T", x)
// }
- key := insts.fn.Prog.canon.List(targs)
+ key := prog.canon.List(targs)
if inst, ok := insts.instances[key]; ok {
return inst
}
+ // CREATE instance/instantiation wrapper
var syntax ast.Node
if insts.syntax != nil {
syntax = insts.syntax
}
- instance := createInstance(insts.fn, targs, insts.info, syntax, cr)
- insts.instances[key] = instance
- return instance
-}
-// createInstance returns an CREATEd instantiation of fn using targs.
-//
-// Function is added to cr.
-func createInstance(fn *Function, targs []types.Type, info *types.Info, syntax ast.Node, cr *creator) *Function {
- prog := fn.Prog
var sig *types.Signature
var obj *types.Func
if recv := fn.Signature.Recv(); recv != nil {
@@ -137,25 +141,36 @@ func createInstance(fn *Function, targs []types.Type, info *types.Info, syntax a
sig = prog.canon.Type(instance).(*types.Signature)
}
+ var synthetic string
+ var subst *subster
+
+ concrete := !parameterized.anyParameterized(targs)
+
+ if prog.mode&InstantiateGenerics != 0 && concrete {
+ synthetic = fmt.Sprintf("instance of %s", fn.Name())
+ subst = makeSubster(prog.ctxt, fn.typeparams, targs, false)
+ } else {
+ synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name())
+ }
+
name := fmt.Sprintf("%s%s", fn.Name(), targs) // may not be unique
- synthetic := fmt.Sprintf("instantiation of %s", fn.Name())
instance := &Function{
- name: name,
- object: obj,
- Signature: sig,
- Synthetic: synthetic,
- _Origin: fn,
- pos: obj.Pos(),
- Pkg: nil,
- Prog: fn.Prog,
- _TypeParams: fn._TypeParams,
- _TypeArgs: targs,
- info: info, // on synthetic packages info is nil.
- subst: makeSubster(prog.ctxt, fn._TypeParams, targs, false),
- }
- if prog.mode&InstantiateGenerics != 0 {
- instance.syntax = syntax // otherwise treat instance as an external function.
+ name: name,
+ object: obj,
+ Signature: sig,
+ Synthetic: synthetic,
+ syntax: syntax,
+ topLevelOrigin: fn,
+ pos: obj.Pos(),
+ Pkg: nil,
+ Prog: fn.Prog,
+ typeparams: fn.typeparams, // share with origin
+ typeargs: targs,
+ info: insts.info, // on synthetic packages info is nil.
+ subst: subst,
}
+
cr.Add(instance)
+ insts.instances[key] = instance
return instance
}
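A usage sketch (not part of this change) of how the instance versus instantiation-wrapper split above can be observed from client code. It relies on the exported Origin accessor and Synthetic field that appear later in this diff and on go/packages and ssautil APIs; treat it as illustrative rather than authoritative.

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	pkgs, err := packages.Load(cfg, ".")
	if err != nil {
		panic(err)
	}
	// Without InstantiateGenerics an instance is built only as an
	// "instantiation wrapper of ..."; with it, as an "instance of ...".
	for _, mode := range []ssa.BuilderMode{0, ssa.InstantiateGenerics} {
		prog, _ := ssautil.AllPackages(pkgs, mode)
		prog.Build()
		for fn := range ssautil.AllFunctions(prog) {
			if fn.Origin() != nil { // fn is an instance of a generic function
				fmt.Printf("mode=%v\t%s\t%q\n", mode, fn, fn.Synthetic)
			}
		}
	}
}
```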
diff --git a/go/ssa/instantiate_test.go b/go/ssa/instantiate_test.go
index 0da8c63042e..cd33e7e659e 100644
--- a/go/ssa/instantiate_test.go
+++ b/go/ssa/instantiate_test.go
@@ -4,19 +4,52 @@
package ssa
-// Note: Tests use unexported functions.
+// Note: Tests use unexported method _Instances.
import (
"bytes"
+ "fmt"
"go/types"
"reflect"
"sort"
+ "strings"
"testing"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/internal/typeparams"
)
+// loadProgram creates a loader.Program from the package source p.
+func loadProgram(p string) (*loader.Program, error) {
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("", p)
+ if err != nil {
+ return nil, fmt.Errorf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ return nil, fmt.Errorf("Load: %v", err)
+ }
+ return lprog, nil
+}
+
+// buildPackage builds and returns the SSA representation of package pkg of lprog.
+func buildPackage(lprog *loader.Program, pkg string, mode BuilderMode) *Package {
+ prog := NewProgram(lprog.Fset, mode)
+
+ for _, info := range lprog.AllPackages {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+
+ p := prog.Package(lprog.Package(pkg).Pkg)
+ p.Build()
+ return p
+}
+
// TestNeedsInstance ensures that new method instances can be created via needsInstance,
// that TypeArgs are as expected, and can be accessed via _Instances.
func TestNeedsInstance(t *testing.T) {
@@ -45,30 +78,15 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
// func init func()
// var init$guard bool
- // Parse
- var conf loader.Config
- f, err := conf.ParseFile("", input)
- if err != nil {
- t.Fatalf("parse: %v", err)
- }
- conf.CreateFromFiles("p", f)
-
- // Load
- lprog, err := conf.Load()
- if err != nil {
- t.Fatalf("Load: %v", err)
+ lprog, err := loadProgram(input)
+ if err != nil {
+ t.Fatal(err)
}
for _, mode := range []BuilderMode{BuilderMode(0), InstantiateGenerics} {
// Create and build SSA
- prog := NewProgram(lprog.Fset, mode)
-
- for _, info := range lprog.AllPackages {
- prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
- }
-
- p := prog.Package(lprog.Package("p").Pkg)
- p.Build()
+ p := buildPackage(lprog, "p", mode)
+ prog := p.Prog
ptr := p.Type("Pointer").Type().(*types.Named)
if ptr.NumMethods() != 1 {
@@ -88,11 +106,11 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
if len(cr) != 1 {
t.Errorf("Expected first instance to create a function. got %d created functions", len(cr))
}
- if instance._Origin != meth {
- t.Errorf("Expected Origin of %s to be %s. got %s", instance, meth, instance._Origin)
+ if instance.Origin() != meth {
+ t.Errorf("Expected Origin of %s to be %s. got %s", instance, meth, instance.Origin())
}
- if len(instance._TypeArgs) != 1 || !types.Identical(instance._TypeArgs[0], intSliceTyp) {
- t.Errorf("Expected TypeArgs of %s to be %v. got %v", instance, []types.Type{intSliceTyp}, instance._TypeArgs)
+ if len(instance.TypeArgs()) != 1 || !types.Identical(instance.TypeArgs()[0], intSliceTyp) {
+ t.Errorf("Expected TypeArgs of %s to be %v. got %v", instance, []types.Type{intSliceTyp}, instance.typeargs)
}
instances := prog._Instances(meth)
if want := []*Function{instance}; !reflect.DeepEqual(instances, want) {
@@ -126,3 +144,218 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
}
}
}
+
+// TestCallsToInstances checks that callees of calls to generic functions,
+// without monomorphization, are wrappers around the origin generic function.
+func TestCallsToInstances(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+ const input = `
+package p
+
+type I interface {
+ Foo()
+}
+
+type A int
+func (a A) Foo() {}
+
+type J[T any] interface{ Bar() T }
+type K[T any] struct{ J[T] }
+
+func Id[T any] (t T) T {
+ return t
+}
+
+func Lambda[T I]() func() func(T) {
+ return func() func(T) {
+ return T.Foo
+ }
+}
+
+func NoOp[T any]() {}
+
+func Bar[T interface { Foo(); ~int | ~string }, U any] (t T, u U) {
+ Id[U](u)
+ Id[T](t)
+}
+
+func Make[T any]() interface{} {
+ NoOp[K[T]]()
+ return nil
+}
+
+func entry(i int, a A) int {
+ Lambda[A]()()(a)
+
+ x := Make[int]()
+ if j, ok := x.(interface{ Bar() int }); ok {
+ print(j)
+ }
+
+ Bar[A, int](a, i)
+
+ return Id[int](i)
+}
+`
+ lprog, err := loadProgram(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p := buildPackage(lprog, "p", SanityCheckFunctions)
+ prog := p.Prog
+
+ for _, ti := range []struct {
+ orig string
+ instance string
+ tparams string
+ targs string
+ chTypeInstrs int // number of ChangeType instructions in f's body
+ }{
+ {"Id", "Id[int]", "[T]", "[int]", 2},
+ {"Lambda", "Lambda[p.A]", "[T]", "[p.A]", 1},
+ {"Make", "Make[int]", "[T]", "[int]", 0},
+ {"NoOp", "NoOp[p.K[T]]", "[T]", "[p.K[T]]", 0},
+ } {
+ test := ti
+ t.Run(test.instance, func(t *testing.T) {
+ f := p.Members[test.orig].(*Function)
+ if f == nil {
+ t.Fatalf("origin function not found")
+ }
+
+ i := instanceOf(f, test.instance, prog)
+ if i == nil {
+ t.Fatalf("instance not found")
+ }
+
+ // for logging on failures
+ var body strings.Builder
+ i.WriteTo(&body)
+ t.Log(body.String())
+
+ if len(i.Blocks) != 1 {
+ t.Fatalf("body has more than 1 block")
+ }
+
+ if instrs := changeTypeInstrs(i.Blocks[0]); instrs != test.chTypeInstrs {
+ t.Errorf("want %v instructions; got %v", test.chTypeInstrs, instrs)
+ }
+
+ if test.tparams != tparams(i) {
+ t.Errorf("want %v type params; got %v", test.tparams, tparams(i))
+ }
+
+ if test.targs != targs(i) {
+ t.Errorf("want %v type arguments; got %v", test.targs, targs(i))
+ }
+ })
+ }
+}
+
+func instanceOf(f *Function, name string, prog *Program) *Function {
+ for _, i := range prog._Instances(f) {
+ if i.Name() == name {
+ return i
+ }
+ }
+ return nil
+}
+
+func tparams(f *Function) string {
+ tplist := f.TypeParams()
+ var tps []string
+ for i := 0; i < tplist.Len(); i++ {
+ tps = append(tps, tplist.At(i).String())
+ }
+ return fmt.Sprint(tps)
+}
+
+func targs(f *Function) string {
+ var tas []string
+ for _, ta := range f.TypeArgs() {
+ tas = append(tas, ta.String())
+ }
+ return fmt.Sprint(tas)
+}
+
+func changeTypeInstrs(b *BasicBlock) int {
+ cnt := 0
+ for _, i := range b.Instrs {
+ if _, ok := i.(*ChangeType); ok {
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func TestInstanceUniqueness(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+ const input = `
+package p
+
+func H[T any](t T) {
+ print(t)
+}
+
+func F[T any](t T) {
+ H[T](t)
+ H[T](t)
+ H[T](t)
+}
+
+func G[T any](t T) {
+ H[T](t)
+ H[T](t)
+}
+
+func Foo[T any, S any](t T, s S) {
+ Foo[S, T](s, t)
+ Foo[T, S](t, s)
+}
+`
+ lprog, err := loadProgram(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p := buildPackage(lprog, "p", SanityCheckFunctions)
+ prog := p.Prog
+
+ for _, test := range []struct {
+ orig string
+ instances string
+ }{
+ {"H", "[p.H[T] p.H[T]]"},
+ {"Foo", "[p.Foo[S T] p.Foo[T S]]"},
+ } {
+ t.Run(test.orig, func(t *testing.T) {
+ f := p.Members[test.orig].(*Function)
+ if f == nil {
+ t.Fatalf("origin function not found")
+ }
+
+ instances := prog._Instances(f)
+ sort.Slice(instances, func(i, j int) bool { return instances[i].Name() < instances[j].Name() })
+
+ if got := fmt.Sprintf("%v", instances); !reflect.DeepEqual(got, test.instances) {
+ t.Errorf("got %v instances, want %v", got, test.instances)
+ }
+ })
+ }
+}
+
+// instancesStr returns a sorted slice of string
+// representations of instances.
+func instancesStr(instances []*Function) []string {
+ var is []string
+ for _, i := range instances {
+ is = append(is, fmt.Sprintf("%v", i))
+ }
+ sort.Strings(is)
+ return is
+}
diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go
index 2b21aad708b..58cac464241 100644
--- a/go/ssa/interp/interp.go
+++ b/go/ssa/interp/interp.go
@@ -51,7 +51,6 @@ import (
"os"
"reflect"
"runtime"
- "strings"
"sync/atomic"
"golang.org/x/tools/go/ssa"
@@ -335,7 +334,17 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation {
}
case *ssa.Index:
- fr.env[instr] = fr.get(instr.X).(array)[asInt64(fr.get(instr.Index))]
+ x := fr.get(instr.X)
+ idx := fr.get(instr.Index)
+
+ switch x := x.(type) {
+ case array:
+ fr.env[instr] = x[asInt64(idx)]
+ case string:
+ fr.env[instr] = x[asInt64(idx)]
+ default:
+ panic(fmt.Sprintf("unexpected x type in Index: %T", x))
+ }
case *ssa.Lookup:
fr.env[instr] = lookup(instr, fr.get(instr.X), fr.get(instr.Index))
@@ -506,13 +515,15 @@ func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function,
return ext(fr, args)
}
if fn.Blocks == nil {
- var reason string // empty by default
- if strings.HasPrefix(fn.Synthetic, "instantiation") {
- reason = " (interp requires ssa.BuilderMode to include InstantiateGenerics on generics)"
- }
- panic("no code for function: " + name + reason)
+ panic("no code for function: " + name)
}
}
+
+ // generic function body?
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ panic("interp requires ssa.BuilderMode to include InstantiateGenerics to execute generics")
+ }
+
fr.env = make(map[ssa.Value]value)
fr.block = fn.Blocks[0]
fr.locals = make([]value, len(fn.Locals))
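For context on the new Index case above (my reading, not text from the change): with Lookup now reserved for maps, ordinary string indexing, including indexing a value whose type is a type parameter with a string core type, reaches the interpreter as an ssa.Index instruction.

```go
package p

// Both index expressions below should be built as ssa.Index after this
// change; neither reaches Lookup, which handles maps only.
func First(s string) byte { return s[0] }

func At[S ~string](s S, i int) byte { return s[i] }
```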
diff --git a/go/ssa/interp/interp_go120_test.go b/go/ssa/interp/interp_go120_test.go
new file mode 100644
index 00000000000..d8eb2c21341
--- /dev/null
+++ b/go/ssa/interp/interp_go120_test.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package interp_test
+
+func init() {
+ testdataTests = append(testdataTests, "slice2array.go")
+}
diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go
index a0acf2f968a..c893d83e753 100644
--- a/go/ssa/interp/interp_test.go
+++ b/go/ssa/interp/interp_test.go
@@ -132,6 +132,9 @@ var testdataTests = []string{
func init() {
if typeparams.Enabled {
testdataTests = append(testdataTests, "fixedbugs/issue52835.go")
+ testdataTests = append(testdataTests, "fixedbugs/issue55086.go")
+ testdataTests = append(testdataTests, "typeassert.go")
+ testdataTests = append(testdataTests, "zeros.go")
}
}
diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go
index 8f031384f03..0ea10da9e02 100644
--- a/go/ssa/interp/ops.go
+++ b/go/ssa/interp/ops.go
@@ -34,9 +34,10 @@ type exitPanic int
// constValue returns the value of the constant with the
// dynamic type tag appropriate for c.Type().
func constValue(c *ssa.Const) value {
- if c.IsNil() {
- return zero(c.Type()) // typed nil
+ if c.Value == nil {
+ return zero(c.Type()) // typed zero
}
+ // c is not a type parameter so its underlying type is basic.
if t, ok := c.Type().Underlying().(*types.Basic); ok {
// TODO(adonovan): eliminate untyped constants from SSA form.
@@ -307,7 +308,7 @@ func slice(x, lo, hi, max value) value {
panic(fmt.Sprintf("slice: unexpected X type: %T", x))
}
-// lookup returns x[idx] where x is a map or string.
+// lookup returns x[idx] where x is a map.
func lookup(instr *ssa.Lookup, x, idx value) value {
switch x := x.(type) { // map or string
case map[value]value, *hashmap:
@@ -327,8 +328,6 @@ func lookup(instr *ssa.Lookup, x, idx value) value {
v = tuple{v, ok}
}
return v
- case string:
- return x[asInt64(idx)]
}
panic(fmt.Sprintf("unexpected x type in Lookup: %T", x))
}
@@ -1396,18 +1395,15 @@ func conv(t_dst, t_src types.Type, x value) value {
// sliceToArrayPointer converts the value x of type slice to type t_dst
// a pointer to array and returns the result.
func sliceToArrayPointer(t_dst, t_src types.Type, x value) value {
- utSrc := t_src.Underlying()
- utDst := t_dst.Underlying()
-
- if _, ok := utSrc.(*types.Slice); ok {
- if utSrc, ok := utDst.(*types.Pointer); ok {
- if arr, ok := utSrc.Elem().(*types.Array); ok {
+ if _, ok := t_src.Underlying().(*types.Slice); ok {
+ if ptr, ok := t_dst.Underlying().(*types.Pointer); ok {
+ if arr, ok := ptr.Elem().Underlying().(*types.Array); ok {
x := x.([]value)
if arr.Len() > int64(len(x)) {
panic("array length is greater than slice length")
}
if x == nil {
- return zero(utSrc)
+ return zero(t_dst)
}
v := value(array(x[:arr.Len()]))
return &v
diff --git a/go/ssa/interp/testdata/boundmeth.go b/go/ssa/interp/testdata/boundmeth.go
index 69937f9d3c7..47b94068591 100644
--- a/go/ssa/interp/testdata/boundmeth.go
+++ b/go/ssa/interp/testdata/boundmeth.go
@@ -123,7 +123,8 @@ func nilInterfaceMethodValue() {
r := fmt.Sprint(recover())
// runtime panic string varies across toolchains
if r != "interface conversion: interface is nil, not error" &&
- r != "runtime error: invalid memory address or nil pointer dereference" {
+ r != "runtime error: invalid memory address or nil pointer dereference" &&
+ r != "method value: interface is nil" {
panic("want runtime panic from nil interface method value, got " + r)
}
}()
diff --git a/go/ssa/interp/testdata/fixedbugs/issue55086.go b/go/ssa/interp/testdata/fixedbugs/issue55086.go
new file mode 100644
index 00000000000..84c81e91a26
--- /dev/null
+++ b/go/ssa/interp/testdata/fixedbugs/issue55086.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func a() (r string) {
+ s := "initial"
+ var p *struct{ i int }
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p.i = "set", 2 // s must be set before p.i panics
+ return "unreachable"
+}
+
+func b() (r string) {
+ s := "initial"
+ fn := func() []int { panic("") }
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, fn()[0] = "set", 2 // fn() panics before any assignment occurs
+ return "unreachable"
+}
+
+func c() (r string) {
+ s := "initial"
+ var p map[int]int
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p[0] = "set", 2 //s must be set before p[0] index panics"
+ return "unreachable"
+}
+
+func d() (r string) {
+ s := "initial"
+ var p map[int]int
+ defer func() {
+ recover()
+ r = s
+ }()
+ fn := func() int { panic("") }
+
+ s, p[0] = "set", fn() // fn() panics before s is set
+ return "unreachable"
+}
+
+func e() (r string) {
+ s := "initial"
+ p := map[int]int{}
+ defer func() {
+ recover()
+ r = s
+ }()
+ fn := func() int { panic("") }
+
+ s, p[fn()] = "set", 0 // fn() panics before any assignment occurs
+ return "unreachable"
+}
+
+func f() (r string) {
+ s := "initial"
+ p := []int{}
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p[1] = "set", 0 // p[1] panics after s is set
+ return "unreachable"
+}
+
+func g() (r string) {
+ s := "initial"
+ p := map[any]any{}
+ defer func() {
+ recover()
+ r = s
+ }()
+ var i any = func() {}
+ s, p[i] = "set", 0 // p[i] panics after s is set
+ return "unreachable"
+}
+
+func h() (r string) {
+ fail := false
+ defer func() {
+ recover()
+ if fail {
+ r = "fail"
+ } else {
+ r = "success"
+ }
+ }()
+
+ type T struct{ f int }
+ var p *struct{ *T }
+
+ // The implicit "p.T" operand should be evaluated in phase 1 (and panic),
+ // before the "fail = true" assignment in phase 2.
+ fail, p.f = true, 0
+ return "unreachable"
+}
+
+func main() {
+ for _, test := range []struct {
+ fn func() string
+ want string
+ desc string
+ }{
+ {a, "set", "s must be set before p.i panics"},
+ {b, "initial", "p() panics before s is set"},
+ {c, "set", "s must be set before p[0] index panics"},
+ {d, "initial", "fn() panics before s is set"},
+ {e, "initial", "fn() panics before s is set"},
+ {f, "set", "p[1] panics after s is set"},
+ {g, "set", "p[i] panics after s is set"},
+ {h, "success", "p.T panics before fail is set"},
+ } {
+ if test.fn() != test.want {
+ panic(test.desc)
+ }
+ }
+}
diff --git a/go/ssa/interp/testdata/slice2array.go b/go/ssa/interp/testdata/slice2array.go
new file mode 100644
index 00000000000..84e6b733008
--- /dev/null
+++ b/go/ssa/interp/testdata/slice2array.go
@@ -0,0 +1,92 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test for slice to array conversion introduced in go1.20
+// See: https://tip.golang.org/ref/spec#Conversions_from_slice_to_array_pointer
+
+package main
+
+func main() {
+ s := make([]byte, 3, 4)
+ s[0], s[1], s[2] = 2, 3, 5
+ a := ([2]byte)(s)
+ s[0] = 7
+
+ if a != [2]byte{2, 3} {
+ panic("converted from non-nil slice to array")
+ }
+
+ {
+ var s []int
+ a := ([0]int)(s)
+ if a != [0]int{} {
+ panic("zero len array is not equal")
+ }
+ }
+
+ if emptyToEmptyDoesNotPanic() {
+ panic("no panic expected from emptyToEmptyDoesNotPanic()")
+ }
+ if !threeToFourDoesPanic() {
+ panic("panic expected from threeToFourDoesPanic()")
+ }
+
+ if !fourPanicsWhileOneDoesNot[[4]int]() {
+ panic("panic expected from fourPanicsWhileOneDoesNot[[4]int]()")
+ }
+ if fourPanicsWhileOneDoesNot[[1]int]() {
+ panic("no panic expected from fourPanicsWhileOneDoesNot[[1]int]()")
+ }
+
+ if !fourPanicsWhileZeroDoesNot[[4]int]() {
+ panic("panic expected from fourPanicsWhileZeroDoesNot[[4]int]()")
+ }
+ if fourPanicsWhileZeroDoesNot[[0]int]() {
+ panic("no panic expected from fourPanicsWhileZeroDoesNot[[0]int]()")
+ }
+}
+
+func emptyToEmptyDoesNotPanic() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ var s []int
+ _ = ([0]int)(s)
+ return false
+}
+
+func threeToFourDoesPanic() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ s := make([]int, 3, 5)
+ _ = ([4]int)(s)
+ return false
+}
+
+func fourPanicsWhileOneDoesNot[T [1]int | [4]int]() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ s := make([]int, 3, 5)
+ _ = T(s)
+ return false
+}
+
+func fourPanicsWhileZeroDoesNot[T [0]int | [4]int]() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ var s []int
+ _ = T(s)
+ return false
+}
diff --git a/go/ssa/interp/testdata/slice2arrayptr.go b/go/ssa/interp/testdata/slice2arrayptr.go
index ff2d9b55ccd..d9d8804d36a 100644
--- a/go/ssa/interp/testdata/slice2arrayptr.go
+++ b/go/ssa/interp/testdata/slice2arrayptr.go
@@ -32,6 +32,8 @@ func main() {
},
"runtime error: array length is greater than slice length",
)
+
+ f()
}
type arr [2]int
diff --git a/go/ssa/interp/testdata/typeassert.go b/go/ssa/interp/testdata/typeassert.go
new file mode 100644
index 00000000000..792a7558f61
--- /dev/null
+++ b/go/ssa/interp/testdata/typeassert.go
@@ -0,0 +1,32 @@
+// Tests of type asserts.
+// Requires type parameters.
+package typeassert
+
+type fooer interface{ foo() string }
+
+type X int
+
+func (_ X) foo() string { return "x" }
+
+func f[T fooer](x T) func() string {
+ return x.foo
+}
+
+func main() {
+ if f[X](0)() != "x" {
+ panic("f[X]() != 'x'")
+ }
+
+ p := false
+ func() {
+ defer func() {
+ if recover() != nil {
+ p = true
+ }
+ }()
+ f[fooer](nil) // panics on x.foo when T is an interface and nil.
+ }()
+ if !p {
+ panic("f[fooer] did not panic")
+ }
+}
diff --git a/go/ssa/interp/testdata/zeros.go b/go/ssa/interp/testdata/zeros.go
new file mode 100644
index 00000000000..509c78a36ec
--- /dev/null
+++ b/go/ssa/interp/testdata/zeros.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test interpretation on zero values with type params.
+package zeros
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic(msg)
+ }
+}
+
+func tp0[T int | string | float64]() T { return T(0) }
+
+func tpFalse[T ~bool]() T { return T(false) }
+
+func tpEmptyString[T string | []byte]() T { return T("") }
+
+func tpNil[T *int | []byte]() T { return T(nil) }
+
+func main() {
+ // zero values
+ var zi int
+ var zf float64
+ var zs string
+
+ assert(zi == int(0), "zero value of int is int(0)")
+ assert(zf == float64(0), "zero value of float64 is float64(0)")
+ assert(zs != string(0), "zero value of string is not string(0)")
+
+ assert(zi == tp0[int](), "zero value of int is int(0)")
+ assert(zf == tp0[float64](), "zero value of float64 is float64(0)")
+ assert(zs != tp0[string](), "zero value of string is not string(0)")
+
+ assert(zf == -0.0, "constant -0.0 is converted to 0.0")
+
+ assert(!tpFalse[bool](), "zero value of bool is false")
+
+ assert(tpEmptyString[string]() == zs, `zero value of string is string("")`)
+ assert(len(tpEmptyString[[]byte]()) == 0, `[]byte("") is empty`)
+
+ assert(tpNil[*int]() == nil, "nil is nil")
+ assert(tpNil[[]byte]() == nil, "nil is nil")
+}
diff --git a/go/ssa/lift.go b/go/ssa/lift.go
index c350481db76..945536bbbf4 100644
--- a/go/ssa/lift.go
+++ b/go/ssa/lift.go
@@ -44,6 +44,8 @@ import (
"go/types"
"math/big"
"os"
+
+ "golang.org/x/tools/internal/typeparams"
)
// If true, show diagnostic information at each step of lifting.
@@ -381,10 +383,9 @@ type newPhiMap map[*BasicBlock][]newPhi
//
// fresh is a source of fresh ids for phi nodes.
func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool {
- // Don't lift aggregates into registers, because we don't have
- // a way to express their zero-constants.
+ // TODO(taking): zero constants of aggregate types can now be lifted.
switch deref(alloc.Type()).Underlying().(type) {
- case *types.Array, *types.Struct:
+ case *types.Array, *types.Struct, *typeparams.TypeParam:
return false
}
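A tiny illustration (mine) of the allocation that the added *typeparams.TypeParam case refuses to lift: a local of type-parameter type has no representable zero constant yet, so its Alloc stays in memory form.

```go
package p

func Zero[T any]() T {
	var x T // an Alloc of a type parameter type; liftAlloc returns false for it
	return x
}
```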
diff --git a/go/ssa/lvalue.go b/go/ssa/lvalue.go
index 64262def8b2..51122b8e85e 100644
--- a/go/ssa/lvalue.go
+++ b/go/ssa/lvalue.go
@@ -56,12 +56,12 @@ func (a *address) typ() types.Type {
}
// An element is an lvalue represented by m[k], the location of an
-// element of a map or string. These locations are not addressable
+// element of a map. These locations are not addressable
// since pointers cannot be formed from them, but they do support
-// load(), and in the case of maps, store().
+// load() and store().
type element struct {
- m, k Value // map or string
- t types.Type // map element type or string byte type
+ m, k Value // map
+ t types.Type // map element type
pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v)
}
@@ -86,13 +86,49 @@ func (e *element) store(fn *Function, v Value) {
}
func (e *element) address(fn *Function) Value {
- panic("map/string elements are not addressable")
+ panic("map elements are not addressable")
}
func (e *element) typ() types.Type {
return e.t
}
+// A lazyAddress is an lvalue whose address is the result of an instruction.
+// These work like an *address, except that a new address Value
+// is computed on each load, store, and address call.
+// A lazyAddress can be used to control when a side effect (nil pointer
+// dereference, index out of bounds) of using a location happens.
+type lazyAddress struct {
+ addr func(fn *Function) Value // emit to fn the computation of the address
+ t types.Type // type of the location
+ pos token.Pos // source position
+ expr ast.Expr // source syntax of the value (not address) [debug mode]
+}
+
+func (l *lazyAddress) load(fn *Function) Value {
+ load := emitLoad(fn, l.addr(fn))
+ load.pos = l.pos
+ return load
+}
+
+func (l *lazyAddress) store(fn *Function, v Value) {
+ store := emitStore(fn, l.addr(fn), v, l.pos)
+ if l.expr != nil {
+ // store.Val is v, converted for assignability.
+ emitDebugRef(fn, l.expr, store.Val, false)
+ }
+}
+
+func (l *lazyAddress) address(fn *Function) Value {
+ addr := l.addr(fn)
+ if l.expr != nil {
+ emitDebugRef(fn, l.expr, addr, true)
+ }
+ return addr
+}
+
+func (l *lazyAddress) typ() types.Type { return l.t }
+
// A blank is a dummy variable whose name is "_".
// It is not reified: loads are illegal and stores are ignored.
type blank struct{}
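To motivate lazyAddress (my reading, echoing the issue55086 testdata earlier in this diff): in a multiple assignment the spec evaluates index and selector operands in phase one, but the assignments themselves, and hence the bounds check or nil-pointer dereference of a left-hand location, happen left to right in phase two. Deferring the address computation lets the builder reproduce that ordering. A minimal sketch:

```go
package main

import "fmt"

func main() {
	s := "initial"
	var p []int
	defer func() {
		fmt.Println(recover()) // index out of range
		fmt.Println(s)         // "set": s was assigned before p[1] panicked
	}()
	s, p[1] = "set", 0 // the address of p[1] is computed lazily, after s is stored
}
```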
diff --git a/go/ssa/methods.go b/go/ssa/methods.go
index 6954e17b772..4185618cdd6 100644
--- a/go/ssa/methods.go
+++ b/go/ssa/methods.go
@@ -27,8 +27,8 @@ func (prog *Program) MethodValue(sel *types.Selection) *Function {
panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
}
T := sel.Recv()
- if isInterface(T) {
- return nil // abstract method (interface)
+ if types.IsInterface(T) {
+ return nil // abstract method (interface, possibly type param)
}
if prog.mode&LogSource != 0 {
defer logStack("MethodValue %s %v", T, sel)()
@@ -76,7 +76,7 @@ type methodSet struct {
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) createMethodSet(T types.Type) *methodSet {
if prog.mode&SanityCheckFunctions != 0 {
- if isInterface(T) || prog.parameterized.isParameterized(T) {
+ if types.IsInterface(T) || prog.parameterized.isParameterized(T) {
panic("type is interface or parameterized")
}
}
@@ -107,9 +107,9 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creato
fn = makeWrapper(prog, sel, cr)
} else {
fn = prog.originFunc(obj)
- if len(fn._TypeParams) > 0 { // instantiate
+ if fn.typeparams.Len() > 0 { // instantiate
targs := receiverTypeArgs(obj)
- fn = prog.instances[fn].lookupOrCreate(targs, cr)
+ fn = prog.lookupOrCreateInstance(fn, targs, cr)
}
}
if fn.Signature.Recv() == nil {
@@ -190,7 +190,7 @@ func (prog *Program) needMethods(T types.Type, skip bool, cr *creator) {
tmset := prog.MethodSets.MethodSet(T)
- if !skip && !isInterface(T) && tmset.Len() > 0 {
+ if !skip && !types.IsInterface(T) && tmset.Len() > 0 {
// Create methods of T.
mset := prog.createMethodSet(T)
if !mset.complete {
diff --git a/go/ssa/parameterized.go b/go/ssa/parameterized.go
index 956718cd723..b11413c8184 100644
--- a/go/ssa/parameterized.go
+++ b/go/ssa/parameterized.go
@@ -111,3 +111,12 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
return false
}
+
+func (w *tpWalker) anyParameterized(ts []types.Type) bool {
+ for _, t := range ts {
+ if w.isParameterized(t) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/go/ssa/print.go b/go/ssa/print.go
index b8e53923a17..e40bbfa2d21 100644
--- a/go/ssa/print.go
+++ b/go/ssa/print.go
@@ -17,6 +17,7 @@ import (
"strings"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// relName returns the name of v relative to i.
@@ -232,7 +233,7 @@ func (v *MakeChan) String() string {
}
func (v *FieldAddr) String() string {
- st := deref(v.X.Type()).Underlying().(*types.Struct)
+ st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct)
// Be robust against a bad index.
name := "?"
if 0 <= v.Field && v.Field < st.NumFields() {
@@ -242,7 +243,7 @@ func (v *FieldAddr) String() string {
}
func (v *Field) String() string {
- st := v.X.Type().Underlying().(*types.Struct)
+ st := typeparams.CoreType(v.X.Type()).(*types.Struct)
// Be robust against a bad index.
name := "?"
if 0 <= v.Field && v.Field < st.NumFields() {
diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go
index 7d71302756e..3fb3f394e87 100644
--- a/go/ssa/sanity.go
+++ b/go/ssa/sanity.go
@@ -132,9 +132,9 @@ func (s *sanity) checkInstr(idx int, instr Instruction) {
case *ChangeType:
case *SliceToArrayPointer:
case *Convert:
- if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
- if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
- s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
+ if from := instr.X.Type(); !isBasicConvTypes(typeSetOf(from)) {
+ if to := instr.Type(); !isBasicConvTypes(typeSetOf(to)) {
+ s.errorf("convert %s -> %s: at least one type must be basic (or all basic, []byte, or []rune)", from, to)
}
}
@@ -403,7 +403,7 @@ func (s *sanity) checkFunction(fn *Function) bool {
// - check transient fields are nil
// - warn if any fn.Locals do not appear among block instructions.
- // TODO(taking): Sanity check _Origin, _TypeParams, and _TypeArgs.
+ // TODO(taking): Sanity check origin, typeparams, and typeargs.
s.fn = fn
if fn.Prog == nil {
s.errorf("nil Prog")
@@ -420,16 +420,19 @@ func (s *sanity) checkFunction(fn *Function) bool {
strings.HasPrefix(fn.Synthetic, "bound ") ||
strings.HasPrefix(fn.Synthetic, "thunk ") ||
strings.HasSuffix(fn.name, "Error") ||
- strings.HasPrefix(fn.Synthetic, "instantiation") ||
- (fn.parent != nil && len(fn._TypeArgs) > 0) /* anon fun in instance */ {
+ strings.HasPrefix(fn.Synthetic, "instance ") ||
+ strings.HasPrefix(fn.Synthetic, "instantiation ") ||
+ (fn.parent != nil && len(fn.typeargs) > 0) /* anon fun in instance */ {
// ok
} else {
s.errorf("nil Pkg")
}
}
if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
- if strings.HasPrefix(fn.Synthetic, "instantiation") && fn.Prog.mode&InstantiateGenerics != 0 {
- // ok
+ if len(fn.typeargs) > 0 && fn.Prog.mode&InstantiateGenerics != 0 {
+ // ok (instantiation with InstantiateGenerics on)
+ } else if fn.topLevelOrigin != nil && len(fn.typeargs) > 0 {
+ // ok (we always have the syntax set for instantiation)
} else {
s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
}
@@ -494,6 +497,9 @@ func (s *sanity) checkFunction(fn *Function) bool {
if anon.Parent() != fn {
s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
}
+ if i != int(anon.anonIdx) {
+ s.errorf("AnonFuncs[%d]=%s but %s.anonIdx=%d", i, anon, anon, anon.anonIdx)
+ }
}
s.fn = nil
return !s.insane
diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go
index cbc638c81a8..3108de2e4a8 100644
--- a/go/ssa/ssa.go
+++ b/go/ssa/ssa.go
@@ -294,16 +294,15 @@ type Node interface {
//
// Type() returns the function's Signature.
//
-// A function is generic iff it has a non-empty TypeParams list and an
-// empty TypeArgs list. TypeParams lists the type parameters of the
-// function's Signature or the receiver's type parameters for a method.
-//
-// The instantiation of a generic function is a concrete function. These
-// are a list of n>0 TypeParams and n TypeArgs. An instantiation will
-// have a generic Origin function. There is at most one instantiation
-// of each origin type per Identical() type list. Instantiations do not
-// belong to any Pkg. The generic function and the instantiations will
-// share the same source Pos for the functions and the instructions.
+// A generic function is a function or method that has uninstantiated type
+// parameters (TypeParams() != nil). Consider a hypothetical generic
+// method, (*Map[K,V]).Get. It may be instantiated with all ground
+// (non-parameterized) types as (*Map[string,int]).Get or with
+// parameterized types as (*Map[string,U]).Get, where U is a type parameter.
+// In both instantiations, Origin() refers to the generic
+// method (*Map[K,V]).Get, and TypeParams() refers to its parameters [K,V].
+// TypeArgs() refers to [string,int] or [string,U],
+// respectively, and is nil in the generic method itself.
type Function struct {
name string
object types.Object // a declared *types.Func or one of its wrappers
@@ -324,10 +323,11 @@ type Function struct {
AnonFuncs []*Function // anonymous functions directly beneath this one
referrers []Instruction // referring instructions (iff Parent() != nil)
built bool // function has completed both CREATE and BUILD phase.
+ anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFuncs[fn.anonIdx] == fn.
- _Origin *Function // the origin function if this the instantiation of a generic function. nil if Parent() != nil.
- _TypeParams []*typeparams.TypeParam // the type paramaters of this function. len(TypeParams) == len(_TypeArgs) => runtime function
- _TypeArgs []types.Type // type arguments for for an instantiation. len(_TypeArgs) != 0 => instantiation
+ typeparams *typeparams.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function
+ typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function
+ topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil.
// The following fields are set transiently during building,
// then cleared.
@@ -337,7 +337,7 @@ type Function struct {
targets *targets // linked stack of branch targets
lblocks map[types.Object]*lblock // labelled blocks
info *types.Info // *types.Info to build from. nil for wrappers.
- subst *subster // type substitution cache
+ subst *subster // non-nil => expand generic body using this type substitution of ground types
}
// BasicBlock represents an SSA basic block.
@@ -409,26 +409,28 @@ type Parameter struct {
referrers []Instruction
}
-// A Const represents the value of a constant expression.
+// A Const represents a value known at build time.
//
-// The underlying type of a constant may be any boolean, numeric, or
-// string type. In addition, a Const may represent the nil value of
-// any reference type---interface, map, channel, pointer, slice, or
-// function---but not "untyped nil".
+// Consts include true constants of boolean, numeric, and string types, as
+// defined by the Go spec; these are represented by a non-nil Value field.
//
-// All source-level constant expressions are represented by a Const
-// of the same type and value.
-//
-// Value holds the value of the constant, independent of its Type(),
-// using go/constant representation, or nil for a typed nil value.
+// Consts also include the "zero" value of any type, of which the nil values
+// of various pointer-like types are a special case; these are represented
+// by a nil Value field.
//
// Pos() returns token.NoPos.
//
-// Example printed form:
-//
-// 42:int
-// "hello":untyped string
-// 3+4i:MyComplex
+// Example printed forms:
+//
+// 42:int
+// "hello":untyped string
+// 3+4i:MyComplex
+// nil:*int
+// nil:[]string
+// [3]int{}:[3]int
+// struct{x string}{}:struct{x string}
+// 0:interface{int|int64}
+// nil:interface{bool|int} // no go/constant representation
type Const struct {
typ types.Type
Value constant.Value
@@ -603,9 +605,17 @@ type UnOp struct {
// - between (possibly named) pointers to identical base types.
// - from a bidirectional channel to a read- or write-channel,
// optionally adding/removing a name.
+// - between a type (t) and an instance of the type (tσ), i.e.
+// Type() == σ(X.Type()) (or X.Type() == σ(Type())) where
+// σ is the type substitution of Parent().TypeParams by
+// Parent().TypeArgs.
//
// This operation cannot fail dynamically.
//
+// Type changes may be to or from a type parameter (or both). All
+// types in the type set of X.Type() have a value-preserving type
+// change to all types in the type set of Type().
+//
// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
// from an explicit conversion in the source.
//
@@ -631,6 +641,10 @@ type ChangeType struct {
//
// A conversion may imply a type name change also.
//
+// Conversions may be to or from a type parameter. All types in
+// the type set of X.Type() can be converted to all types in the type
+// set of Type().
+//
// This operation cannot fail dynamically.
//
// Conversions of untyped string/number/bool constants to a specific
@@ -670,6 +684,11 @@ type ChangeInterface struct {
// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
// from an explicit conversion in the source.
//
+// The conversion may be to or from a type parameter. All types in
+// the type set of X.Type() must be slice types that can be converted to
+// all types in the type set of Type(), which must all be pointer-to-array
+// types.
+//
// Example printed form:
//
// t1 = slice to array pointer *[4]byte <- []byte (t0)
@@ -809,7 +828,9 @@ type Slice struct {
//
// Pos() returns the position of the ast.SelectorExpr.Sel for the
// field, if explicit in the source. For implicit selections, returns
-// the position of the inducing explicit selection.
+// the position of the inducing explicit selection. If produced for a
+// struct literal S{f: e}, it returns the position of the colon; for
+// S{e} it returns the start of expression e.
//
// Example printed form:
//
@@ -817,7 +838,7 @@ type Slice struct {
type FieldAddr struct {
register
X Value // *struct
- Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field)
+ Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field)
}
// The Field instruction yields the Field of struct X.
@@ -836,14 +857,14 @@ type FieldAddr struct {
type Field struct {
register
X Value // struct
- Field int // index into X.Type().(*types.Struct).Fields
+ Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields
}
// The IndexAddr instruction yields the address of the element at
// index Index of collection X. Index is an integer expression.
//
-// The elements of maps and strings are not addressable; use Lookup or
-// MapUpdate instead.
+// The elements of maps and strings are not addressable; use Lookup (map),
+// Index (string), or MapUpdate instead.
//
// Dynamically, this instruction panics if X evaluates to a nil *array
// pointer.
@@ -858,11 +879,13 @@ type Field struct {
// t2 = &t0[t1]
type IndexAddr struct {
register
- X Value // slice or *array,
+ X Value // *array, slice or type parameter with types array, *array, or slice.
Index Value // numeric index
}
-// The Index instruction yields element Index of array X.
+// The Index instruction yields element Index of collection X, an array,
+// string or type parameter containing an array, a string, a pointer to an
+// array, or a slice.
//
// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
// explicit in the source.
@@ -872,13 +895,12 @@ type IndexAddr struct {
// t2 = t0[t1]
type Index struct {
register
- X Value // array
+ X Value // array, string or type parameter with types array, *array, slice, or string.
Index Value // integer index
}
-// The Lookup instruction yields element Index of collection X, a map
-// or string. Index is an integer expression if X is a string or the
-// appropriate key type if X is a map.
+// The Lookup instruction yields element Index of collection X, a map.
+// Index is the appropriate key type.
//
// If CommaOk, the result is a 2-tuple of the value above and a
// boolean indicating the result of a map membership test for the key.
@@ -892,8 +914,8 @@ type Index struct {
// t5 = t3[t4],ok
type Lookup struct {
register
- X Value // string or map
- Index Value // numeric or key-typed index
+ X Value // map
+ Index Value // key-typed index
CommaOk bool // return a value,ok pair
}
@@ -1337,9 +1359,10 @@ type anInstruction struct {
// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon
// represents a dynamically dispatched call to an interface method.
// In this mode, Value is the interface value and Method is the
-// interface's abstract method. Note: an abstract method may be
-// shared by multiple interfaces due to embedding; Value.Type()
-// provides the specific interface used for this call.
+// interface's abstract method. The interface value may be a type
+// parameter. Note: an abstract method may be shared by multiple
+// interfaces due to embedding; Value.Type() provides the specific
+// interface used for this call.
//
// Value is implicitly supplied to the concrete method implementation
// as the receiver parameter; in other words, Args[0] holds not the
@@ -1378,7 +1401,7 @@ func (c *CallCommon) Signature() *types.Signature {
if c.Method != nil {
return c.Method.Type().(*types.Signature)
}
- return c.Value.Type().Underlying().(*types.Signature)
+ return typeparams.CoreType(c.Value.Type()).(*types.Signature)
}
// StaticCallee returns the callee if this is a trivially static
@@ -1469,6 +1492,29 @@ func (v *Function) Referrers() *[]Instruction {
return nil
}
+// TypeParams are the function's type parameters if generic or the
+// type parameters that were instantiated if fn is an instantiation.
+//
+// TODO(taking): declare result type as *types.TypeParamList
+// after we drop support for go1.17.
+func (fn *Function) TypeParams() *typeparams.TypeParamList {
+ return fn.typeparams
+}
+
+// TypeArgs are the types that TypeParams() were instantiated by to create fn
+// from fn.Origin().
+func (fn *Function) TypeArgs() []types.Type { return fn.typeargs }
+
+// Origin is the function fn is an instantiation of. Returns nil if fn is not
+// an instantiation.
+func (fn *Function) Origin() *Function {
+ if fn.parent != nil && len(fn.typeargs) > 0 {
+ // Nested functions are BUILT at a different time than their instances.
+ return fn.parent.Origin().AnonFuncs[fn.anonIdx]
+ }
+ return fn.topLevelOrigin
+}
+
func (v *Parameter) Type() types.Type { return v.typ }
func (v *Parameter) Name() string { return v.name }
func (v *Parameter) Object() types.Object { return v.object }
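A hedged sketch of how the new accessors relate for client code; it restates the doc comments added above, and the helper describe is my own invention.

```go
package inspect

import (
	"fmt"

	"golang.org/x/tools/go/ssa"
)

// describe classifies fn using the accessors introduced in this change.
func describe(fn *ssa.Function) string {
	switch {
	case fn.TypeParams().Len() == 0:
		return "non-generic"
	case len(fn.TypeArgs()) == 0:
		return "generic origin" // e.g. (*Map[K,V]).Get; Origin() is nil here
	default:
		// An instance: it shares TypeParams with its Origin() and records
		// the instantiating types, e.g. [string,int], in TypeArgs.
		return fmt.Sprintf("instance of %s with %v", fn.Origin(), fn.TypeArgs())
	}
}
```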
diff --git a/go/ssa/ssautil/load.go b/go/ssa/ssautil/load.go
index 58d185f6727..96d69a20a17 100644
--- a/go/ssa/ssautil/load.go
+++ b/go/ssa/ssautil/load.go
@@ -77,10 +77,12 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*
packages.Visit(initial, nil, func(p *packages.Package) {
if p.Types != nil && !p.IllTyped {
var files []*ast.File
+ var info *types.Info
if deps || isInitial[p] {
files = p.Syntax
+ info = p.TypesInfo
}
- ssamap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true)
+ ssamap[p] = prog.CreatePackage(p.Types, files, info, true)
}
})
diff --git a/go/ssa/ssautil/load_test.go b/go/ssa/ssautil/load_test.go
index f769be273bb..efa2ba40a8b 100644
--- a/go/ssa/ssautil/load_test.go
+++ b/go/ssa/ssautil/load_test.go
@@ -12,10 +12,12 @@ import (
"go/token"
"go/types"
"os"
+ "path"
"strings"
"testing"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/packages/packagestest"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/internal/testenv"
@@ -31,6 +33,8 @@ func main() {
`
func TestBuildPackage(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
// There is a more substantial test of BuildPackage and the
// SSA program it builds in ../ssa/builder_test.go.
@@ -135,3 +139,57 @@ func TestIssue28106(t *testing.T) {
prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0))
prog.Build() // no crash
}
+
+func TestIssue53604(t *testing.T) {
+ // Tests that variable initializers are not added to init() when syntax
+ // is not present but types.Info is available.
+ //
+ // Packages x, y, z are loaded with mode `packages.LoadSyntax`.
+ // Package x imports y, and y imports z.
+ // Packages are built using ssautil.Packages() with x and z as roots.
+ // This setup creates y using CreatePackage(pkg, files, info, ...)
+ // where len(files) == 0 but info != nil.
+ //
+ // Tests that globals from y are not initialized.
+ e := packagestest.Export(t, packagestest.Modules, []packagestest.Module{
+ {
+ Name: "golang.org/fake",
+ Files: map[string]interface{}{
+ "x/x.go": `package x; import "golang.org/fake/y"; var V = y.F()`,
+ "y/y.go": `package y; import "golang.org/fake/z"; var F = func () *int { return &z.Z } `,
+ "z/z.go": `package z; var Z int`,
+ },
+ },
+ })
+ defer e.Cleanup()
+
+ // Load x and z as entry packages using packages.LoadSyntax
+ e.Config.Mode = packages.LoadSyntax
+ pkgs, err := packages.Load(e.Config, path.Join(e.Temp(), "fake/x"), path.Join(e.Temp(), "fake/z"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, p := range pkgs {
+ if len(p.Errors) > 0 {
+ t.Fatalf("%v", p.Errors)
+ }
+ }
+
+ prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0))
+ prog.Build()
+
+ // y does not initialize F.
+ y := prog.ImportedPackage("golang.org/fake/y")
+ if y == nil {
+ t.Fatal("Failed to load intermediate package y")
+ }
+ yinit := y.Members["init"].(*ssa.Function)
+ for _, bb := range yinit.Blocks {
+ for _, i := range bb.Instrs {
+ if store, ok := i.(*ssa.Store); ok && store.Addr == y.Var("F") {
+ t.Errorf("y.init() stores to F %v", store)
+ }
+ }
+ }
+
+}
diff --git a/go/ssa/stdlib_test.go b/go/ssa/stdlib_test.go
index 7e02f97a7ed..8b9f4238da8 100644
--- a/go/ssa/stdlib_test.go
+++ b/go/ssa/stdlib_test.go
@@ -21,12 +21,10 @@ import (
"testing"
"time"
- "golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/typeparams/genericfeatures"
)
func bytesAllocated() uint64 {
@@ -51,22 +49,6 @@ func TestStdlib(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- var nonGeneric int
- for i := 0; i < len(pkgs); i++ {
- pkg := pkgs[i]
- inspect := inspector.New(pkg.Syntax)
- features := genericfeatures.ForPackage(inspect, pkg.TypesInfo)
- // Skip standard library packages that use generics. This won't be
- // sufficient if any standard library packages start _importing_ packages
- // that use generics.
- if features != 0 {
- t.Logf("skipping package %q which uses generics", pkg.PkgPath)
- continue
- }
- pkgs[nonGeneric] = pkg
- nonGeneric++
- }
- pkgs = pkgs[:nonGeneric]
t1 := time.Now()
alloc1 := bytesAllocated()
diff --git a/go/ssa/subst.go b/go/ssa/subst.go
index b29130ea0cb..396626befca 100644
--- a/go/ssa/subst.go
+++ b/go/ssa/subst.go
@@ -18,6 +18,8 @@ import (
//
// Not concurrency-safe.
type subster struct {
+ // TODO(zpavlinovic): replacements can contain type params
+ // when generating instances inside of a generic function body.
replacements map[*typeparams.TypeParam]types.Type // values should contain no type params
cache map[types.Type]types.Type // cache of subst results
ctxt *typeparams.Context
@@ -27,17 +29,17 @@ type subster struct {
// Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache.
// targs should not contain any types in tparams.
-func makeSubster(ctxt *typeparams.Context, tparams []*typeparams.TypeParam, targs []types.Type, debug bool) *subster {
- assert(len(tparams) == len(targs), "makeSubster argument count must match")
+func makeSubster(ctxt *typeparams.Context, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster {
+ assert(tparams.Len() == len(targs), "makeSubster argument count must match")
subst := &subster{
- replacements: make(map[*typeparams.TypeParam]types.Type, len(tparams)),
+ replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()),
cache: make(map[types.Type]types.Type),
ctxt: ctxt,
debug: debug,
}
- for i, tpar := range tparams {
- subst.replacements[tpar] = targs[i]
+ for i := 0; i < tparams.Len(); i++ {
+ subst.replacements[tparams.At(i)] = targs[i]
}
if subst.debug {
if err := subst.wellFormed(); err != nil {
@@ -331,9 +333,9 @@ func (subst *subster) named(t *types.Named) types.Type {
// type N[A any] func() A
// func Foo[T](g N[T]) {}
// To instantiate Foo[string], one goes through {T->string}. To get the type of g
- // one subsitutes T with string in {N with TypeArgs == {T} and TypeParams == {A} }
- // to get {N with TypeArgs == {string} and TypeParams == {A} }.
- assert(targs.Len() == tparams.Len(), "TypeArgs().Len() must match TypeParams().Len() if present")
+ // one substitutes T with string in {N with typeargs == {T} and typeparams == {A} }
+ // to get {N with typeargs == {string} and typeparams == {A} }.
+ assert(targs.Len() == tparams.Len(), "typeargs.Len() must match typeparams.Len() if present")
for i, n := 0, targs.Len(); i < n; i++ {
inst := subst.typ(targs.At(i)) // TODO(generic): Check with rfindley for mutual recursion
insts[i] = inst
diff --git a/go/ssa/subst_test.go b/go/ssa/subst_test.go
index fe84adcc3da..5fa88270004 100644
--- a/go/ssa/subst_test.go
+++ b/go/ssa/subst_test.go
@@ -99,12 +99,8 @@ var _ L[int] = Fn0[L[int]](nil)
}
T := tv.Type.(*types.Named)
- var tparams []*typeparams.TypeParam
- for i, l := 0, typeparams.ForNamed(T); i < l.Len(); i++ {
- tparams = append(tparams, l.At(i))
- }
- subst := makeSubster(typeparams.NewContext(), tparams, targs, true)
+ subst := makeSubster(typeparams.NewContext(), typeparams.ForNamed(T), targs, true)
sub := subst.typ(T.Underlying())
if got := sub.String(); got != test.want {
t.Errorf("subst{%v->%v}.typ(%s) = %v, want %v", test.expr, test.args, T.Underlying(), got, test.want)
diff --git a/go/ssa/testdata/valueforexpr.go b/go/ssa/testdata/valueforexpr.go
index da76f13a392..243ec614f64 100644
--- a/go/ssa/testdata/valueforexpr.go
+++ b/go/ssa/testdata/valueforexpr.go
@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore
package main
diff --git a/go/ssa/util.go b/go/ssa/util.go
index 80c7d5cbec0..6b0aada9e8a 100644
--- a/go/ssa/util.go
+++ b/go/ssa/util.go
@@ -49,7 +49,56 @@ func isPointer(typ types.Type) bool {
return ok
}
-func isInterface(T types.Type) bool { return types.IsInterface(T) }
+// isNonTypeParamInterface reports whether t is an interface type but not a type parameter.
+func isNonTypeParamInterface(t types.Type) bool {
+ return !typeparams.IsTypeParam(t) && types.IsInterface(t)
+}
+
+// isBasic reports whether t is a basic type.
+func isBasic(t types.Type) bool {
+ _, ok := t.(*types.Basic)
+ return ok
+}
+
+// isString reports whether t is exactly a string type.
+func isString(t types.Type) bool {
+ return isBasic(t) && t.(*types.Basic).Info()&types.IsString != 0
+}
+
+// isByteSlice reports whether t is []byte.
+func isByteSlice(t types.Type) bool {
+ if b, ok := t.(*types.Slice); ok {
+ e, _ := b.Elem().(*types.Basic)
+ return e != nil && e.Kind() == types.Byte
+ }
+ return false
+}
+
+// isRuneSlice reports whether t is []rune.
+func isRuneSlice(t types.Type) bool {
+ if b, ok := t.(*types.Slice); ok {
+ e, _ := b.Elem().(*types.Basic)
+ return e != nil && e.Kind() == types.Rune
+ }
+ return false
+}
+
+// isBasicConvTypes returns true when a type set can be
+// one side of a Convert operation. This is when:
+// - All are basic, []byte, or []rune.
+// - At least 1 is basic.
+// - At most 1 is []byte or []rune.
+func isBasicConvTypes(tset termList) bool {
+ basics := 0
+ all := underIs(tset, func(t types.Type) bool {
+ if isBasic(t) {
+ basics++
+ return true
+ }
+ return isByteSlice(t) || isRuneSlice(t)
+ })
+ return all && basics >= 1 && tset.Len()-basics <= 1
+}
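For illustration only (these generic functions are mine, not from the repo), conversions whose operand type sets satisfy the relaxed rule above: every term is basic, []byte, or []rune; at least one term is basic; and at most one is a slice.

```go
package p

// string | []byte: one basic term and one byte-slice term.
func ToString[B ~[]byte | ~string](b B) string { return string(b) }

// int | float64: all basic terms.
func FromInt[T ~int | ~float64](x int) T { return T(x) }
```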
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
@@ -113,7 +162,7 @@ func nonbasicTypes(ts []types.Type) []types.Type {
added := make(map[types.Type]bool) // additionally filter duplicates
var filtered []types.Type
for _, T := range ts {
- if _, basic := T.(*types.Basic); !basic {
+ if !isBasic(T) {
if !added[T] {
added[T] = true
filtered = append(filtered, T)
@@ -123,22 +172,6 @@ func nonbasicTypes(ts []types.Type) []types.Type {
return filtered
}
-// isGeneric returns true if a package-level member is generic.
-func isGeneric(m Member) bool {
- switch m := m.(type) {
- case *NamedConst, *Global:
- return false
- case *Type:
- // lifted from types.isGeneric.
- named, _ := m.Type().(*types.Named)
- return named != nil && named.Obj() != nil && typeparams.NamedTypeArgs(named) == nil && typeparams.ForNamed(named) != nil
- case *Function:
- return len(m._TypeParams) != len(m._TypeArgs)
- default:
- panic("unreachable")
- }
-}
-
// receiverTypeArgs returns the type arguments to a function's receiver.
// Returns an empty list if obj does not have a receiver or its receiver does not have type arguments.
func receiverTypeArgs(obj *types.Func) []types.Type {
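
As an aside (not part of the change above), a minimal sketch of the kind of generic conversion the new `isBasicConvTypes` helper is meant to admit: a constraint whose type set mixes basic terms with at most one `[]byte` or `[]rune` term.

```go
package main

import "fmt"

// toString converts a string-like or []byte-like value to string. Its
// constraint's type set {~string, ~[]byte} has the shape isBasicConvTypes
// accepts: every term is basic, []byte, or []rune; at least one term is
// basic; and at most one term is []byte or []rune.
func toString[T ~string | ~[]byte](x T) string { return string(x) }

func main() {
	fmt.Println(toString("hi"), toString([]byte("there")))
}
```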
diff --git a/go/ssa/wrappers.go b/go/ssa/wrappers.go
index 3f2267c8a1b..228daf6158a 100644
--- a/go/ssa/wrappers.go
+++ b/go/ssa/wrappers.go
@@ -120,19 +120,19 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function {
// address of implicit C field.
var c Call
- if r := recvType(obj); !isInterface(r) { // concrete method
+ if r := recvType(obj); !types.IsInterface(r) { // concrete method
if !isPointer(r) {
v = emitLoad(fn, v)
}
callee := prog.originFunc(obj)
- if len(callee._TypeParams) > 0 {
- callee = prog.instances[callee].lookupOrCreate(receiverTypeArgs(obj), cr)
+ if callee.typeparams.Len() > 0 {
+ callee = prog.lookupOrCreateInstance(callee, receiverTypeArgs(obj), cr)
}
c.Call.Value = callee
c.Call.Args = append(c.Call.Args, v)
} else {
c.Call.Method = obj
- c.Call.Value = emitLoad(fn, v)
+ c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam)
}
for _, arg := range fn.Params[1:] {
c.Call.Args = append(c.Call.Args, arg)
@@ -208,16 +208,16 @@ func makeBound(prog *Program, obj *types.Func, cr *creator) *Function {
createParams(fn, 0)
var c Call
- if !isInterface(recvType(obj)) { // concrete
+ if !types.IsInterface(recvType(obj)) { // concrete
callee := prog.originFunc(obj)
- if len(callee._TypeParams) > 0 {
- callee = prog.instances[callee].lookupOrCreate(targs, cr)
+ if callee.typeparams.Len() > 0 {
+ callee = prog.lookupOrCreateInstance(callee, targs, cr)
}
c.Call.Value = callee
c.Call.Args = []Value{fv}
} else {
- c.Call.Value = fv
c.Call.Method = obj
+ c.Call.Value = fv // interface (possibly a typeparam)
}
for _, arg := range fn.Params {
c.Call.Args = append(c.Call.Args, arg)
@@ -324,3 +324,63 @@ func toSelection(sel *types.Selection) *selection {
indirect: sel.Indirect(),
}
}
+
+// -- instantiations --------------------------------------------------
+
+// buildInstantiationWrapper creates a body for an instantiation
+// wrapper fn. The body calls the original generic function,
+// bracketed by ChangeType conversions on its arguments and results.
+func buildInstantiationWrapper(fn *Function) {
+ orig := fn.topLevelOrigin
+ sig := fn.Signature
+
+ fn.startBody()
+ if sig.Recv() != nil {
+ fn.addParamObj(sig.Recv())
+ }
+ createParams(fn, 0)
+
+	// Create the body: add a call to the original generic function,
+	// inserting type conversions between the arguments and parameters,
+	// as well as between the results and return values.
+ var c Call
+ c.Call.Value = orig
+ if res := orig.Signature.Results(); res.Len() == 1 {
+ c.typ = res.At(0).Type()
+ } else {
+ c.typ = res
+ }
+
+	// Each parameter of the instance becomes an argument to the call
+	// to the original generic function.
+ argOffset := 0
+ for i, arg := range fn.Params {
+ var typ types.Type
+ if i == 0 && sig.Recv() != nil {
+ typ = orig.Signature.Recv().Type()
+ argOffset = 1
+ } else {
+ typ = orig.Signature.Params().At(i - argOffset).Type()
+ }
+ c.Call.Args = append(c.Call.Args, emitTypeCoercion(fn, arg, typ))
+ }
+
+ results := fn.emit(&c)
+ var ret Return
+ switch res := sig.Results(); res.Len() {
+ case 0:
+ // no results, do nothing.
+ case 1:
+ ret.Results = []Value{emitTypeCoercion(fn, results, res.At(0).Type())}
+ default:
+ for i := 0; i < sig.Results().Len(); i++ {
+ v := emitExtract(fn, results, i)
+ ret.Results = append(ret.Results, emitTypeCoercion(fn, v, res.At(i).Type()))
+ }
+ }
+
+ fn.emit(&ret)
+ fn.currentBlock = nil
+
+ fn.finishBody()
+}
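
For context on the wrapper changes above (the `// interface (possibly a typeparam)` cases and the new instance lookup), here is a small illustrative program, not taken from this change, showing a bound-method value whose receiver type is a type parameter constrained to an interface:

```go
package main

import (
	"fmt"
	"time"
)

// stringerOf returns the bound-method value p.String. When P is a type
// parameter, the synthetic wrapper behind the method value must dispatch
// through the interface constraint, which is the case the comments above
// flag as "interface (possibly a typeparam)".
func stringerOf[P fmt.Stringer](p P) func() string {
	return p.String
}

func main() {
	f := stringerOf(time.Second) // P is inferred as time.Duration
	fmt.Println(f())             // "1s"
}
```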
diff --git a/go/types/typeutil/map.go b/go/types/typeutil/map.go
index dcc029b8733..7bd2fdb38be 100644
--- a/go/types/typeutil/map.go
+++ b/go/types/typeutil/map.go
@@ -332,7 +332,9 @@ func (h Hasher) hashFor(t types.Type) uint32 {
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
- hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ // Use shallow hash on method signature to
+ // avoid anonymous interface cycles.
+ hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
}
// Hash type restrictions.
@@ -434,3 +436,76 @@ func (h Hasher) hashPtr(ptr interface{}) uint32 {
h.ptrMap[ptr] = hash
return hash
}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+// type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h Hasher) shallowHash(t types.Type) uint32 {
+ // t is the type of an interface method (Signature),
+ // its params or results (Tuples), or their immediate
+ // elements (mostly Slice, Pointer, Basic, Named),
+ // so there's no need to optimize anything else.
+ switch t := t.(type) {
+ case *types.Signature:
+ var hash uint32 = 604171
+ if t.Variadic() {
+ hash *= 971767
+ }
+ // The Signature/Tuple recursion is always finite
+ // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+ case *types.Tuple:
+ n := t.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 53471161 * h.shallowHash(t.At(i).Type())
+ }
+ return hash
+
+ case *types.Basic:
+ return 45212177 * uint32(t.Kind())
+
+ case *types.Array:
+ return 1524181 + 2*uint32(t.Len())
+
+ case *types.Slice:
+ return 2690201
+
+ case *types.Struct:
+ return 3326489
+
+ case *types.Pointer:
+ return 4393139
+
+ case *typeparams.Union:
+ return 562448657
+
+ case *types.Interface:
+ return 2124679 // no recursion here
+
+ case *types.Map:
+ return 9109
+
+ case *types.Chan:
+ return 9127
+
+ case *types.Named:
+ return h.hashPtr(t.Obj())
+
+ case *typeparams.TypeParam:
+ return h.hashPtr(t.Obj())
+ }
+ panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
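
To make the motivation concrete, here is a small self-contained sketch (mirroring the new Issue56048 test cases below, but not part of this change) that stores the type of a method value on a cyclic anonymous interface in a `typeutil.Map`; with the shallow hashing above, hashing it no longer recurses without bound:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p

// X's method m mentions an anonymous interface that embeds X itself, so a
// deep hash of m's signature has no named type at which to break the cycle.
type X interface{ m() []*interface{ X } }

var V = X.m // method expression; its type reaches the anonymous interface
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	t := pkg.Scope().Lookup("V").Type()

	var m typeutil.Map // uses Hasher (and shallowHash) internally
	m.Set(t, "type of X.m")
	fmt.Println(m.At(t)) // "type of X.m"
}
```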
diff --git a/go/types/typeutil/map_test.go b/go/types/typeutil/map_test.go
index 8cd643e5b48..ee73ff9cfd5 100644
--- a/go/types/typeutil/map_test.go
+++ b/go/types/typeutil/map_test.go
@@ -244,6 +244,14 @@ func Bar[P Constraint[P]]() {}
func Baz[Q any]() {} // The underlying type of Constraint[P] is any.
// But Quux is not.
func Quux[Q interface{ quux() }]() {}
+
+
+type Issue56048_I interface{ m() interface { Issue56048_I } }
+var Issue56048 = Issue56048_I.m
+
+type Issue56048_Ib interface{ m() chan []*interface { Issue56048_Ib } }
+var Issue56048b = Issue56048_Ib.m
+
`
fset := token.NewFileSet()
@@ -296,12 +304,14 @@ func Quux[Q interface{ quux() }]() {}
ME1Type = scope.Lookup("ME1Type").Type()
ME2 = scope.Lookup("ME2").Type()
- Constraint = scope.Lookup("Constraint").Type()
- Foo = scope.Lookup("Foo").Type()
- Fn = scope.Lookup("Fn").Type()
- Bar = scope.Lookup("Foo").Type()
- Baz = scope.Lookup("Foo").Type()
- Quux = scope.Lookup("Quux").Type()
+ Constraint = scope.Lookup("Constraint").Type()
+ Foo = scope.Lookup("Foo").Type()
+ Fn = scope.Lookup("Fn").Type()
+ Bar = scope.Lookup("Foo").Type()
+ Baz = scope.Lookup("Foo").Type()
+ Quux = scope.Lookup("Quux").Type()
+ Issue56048 = scope.Lookup("Issue56048").Type()
+ Issue56048b = scope.Lookup("Issue56048b").Type()
)
tmap := new(typeutil.Map)
@@ -371,6 +381,9 @@ func Quux[Q interface{ quux() }]() {}
{Bar, "Bar", false},
{Baz, "Baz", false},
{Quux, "Quux", true},
+
+ {Issue56048, "Issue56048", true}, // (not actually about generics)
+ {Issue56048b, "Issue56048b", true}, // (not actually about generics)
}
for _, step := range steps {
diff --git a/godoc/godoc.go b/godoc/godoc.go
index 7ff2eab6239..dfac2111a67 100644
--- a/godoc/godoc.go
+++ b/godoc/godoc.go
@@ -345,11 +345,16 @@ func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
}
-func comment_htmlFunc(comment string) string {
+func comment_htmlFunc(info *PageInfo, comment string) string {
var buf bytes.Buffer
// TODO(gri) Provide list of words (e.g. function parameters)
// to be emphasized by ToHTML.
- doc.ToHTML(&buf, comment, nil) // does html-escaping
+
+ // godocToHTML is:
+ // - buf.Write(info.PDoc.HTML(comment)) on go1.19
+ // - go/doc.ToHTML(&buf, comment, nil) on other versions
+ godocToHTML(&buf, info.PDoc, comment)
+
return buf.String()
}
@@ -448,7 +453,7 @@ func srcToPkgLinkFunc(relpath string) string {
	return fmt.Sprintf(`<a href="/%s">%s</a>`, relpath, relpath[len("pkg/"):])
}
-// srcBreadcrumbFun converts each segment of relpath to a HTML <a>.
+// srcBreadcrumbFunc converts each segment of relpath to a HTML <a>.
// Each segment links to its corresponding src directories.
func srcBreadcrumbFunc(relpath string) string {
segments := strings.Split(relpath, "/")
@@ -658,7 +663,7 @@ func (p *Presentation) example_suffixFunc(name string) string {
return suffix
}
-// implements_html returns the "> Implements" toggle for a package-level named type.
+// implements_htmlFunc returns the "> Implements" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) string {
if p.ImplementsHTML == nil {
@@ -676,7 +681,7 @@ func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) stri
return buf.String()
}
-// methodset_html returns the "> Method set" toggle for a package-level named type.
+// methodset_htmlFunc returns the "> Method set" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) string {
if p.MethodSetHTML == nil {
@@ -694,7 +699,7 @@ func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) strin
return buf.String()
}
-// callgraph_html returns the "> Call graph" toggle for a package-level func.
+// callgraph_htmlFunc returns the "> Call graph" toggle for a package-level func.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) callgraph_htmlFunc(info *PageInfo, recv, name string) string {
if p.CallGraphHTML == nil {
diff --git a/godoc/index.go b/godoc/index.go
index d3f9f64fc5c..4dc3362a7e2 100644
--- a/godoc/index.go
+++ b/godoc/index.go
@@ -50,6 +50,7 @@ import (
"index/suffixarray"
"io"
"log"
+ "math"
"os"
pathpkg "path"
"path/filepath"
@@ -161,7 +162,7 @@ func newKindRun(h RunList) interface{} {
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0
- prev := SpotInfo(1<<32 - 1) // an unlikely value
+ prev := SpotInfo(math.MaxUint32) // an unlikely value
for _, x := range run {
if x != prev {
run[k] = x
diff --git a/godoc/redirect/hash.go b/godoc/redirect/hash.go
deleted file mode 100644
index d5a1e3eb67b..00000000000
--- a/godoc/redirect/hash.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides a compact encoding of
-// a map of Mercurial hashes to Git hashes.
-
-package redirect
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// hashMap is a map of Mercurial hashes to Git hashes.
-type hashMap struct {
- file *os.File
- entries int
-}
-
-// newHashMap takes a file handle that contains a map of Mercurial to Git
-// hashes. The file should be a sequence of pairs of little-endian encoded
-// uint32s, representing a hgHash and a gitHash respectively.
-// The sequence must be sorted by hgHash.
-// The file must remain open for as long as the returned hashMap is used.
-func newHashMap(f *os.File) (*hashMap, error) {
- fi, err := f.Stat()
- if err != nil {
- return nil, err
- }
- return &hashMap{file: f, entries: int(fi.Size() / 8)}, nil
-}
-
-// Lookup finds an hgHash in the map that matches the given prefix, and returns
-// its corresponding gitHash. The prefix must be at least 8 characters long.
-func (m *hashMap) Lookup(s string) gitHash {
- if m == nil {
- return 0
- }
- hg, err := hgHashFromString(s)
- if err != nil {
- return 0
- }
- var git gitHash
- b := make([]byte, 8)
- sort.Search(m.entries, func(i int) bool {
- n, err := m.file.ReadAt(b, int64(i*8))
- if err != nil {
- panic(err)
- }
- if n != 8 {
- panic(io.ErrUnexpectedEOF)
- }
- v := hgHash(binary.LittleEndian.Uint32(b[:4]))
- if v == hg {
- git = gitHash(binary.LittleEndian.Uint32(b[4:]))
- }
- return v >= hg
- })
- return git
-}
-
-// hgHash represents the lower (leftmost) 32 bits of a Mercurial hash.
-type hgHash uint32
-
-func (h hgHash) String() string {
- return intToHash(int64(h))
-}
-
-func hgHashFromString(s string) (hgHash, error) {
- if len(s) < 8 {
- return 0, fmt.Errorf("string too small: len(s) = %d", len(s))
- }
- hash := s[:8]
- i, err := strconv.ParseInt(hash, 16, 64)
- if err != nil {
- return 0, err
- }
- return hgHash(i), nil
-}
-
-// gitHash represents the leftmost 28 bits of a Git hash in its upper 28 bits,
-// and it encodes hash's repository in the lower 4 bits.
-type gitHash uint32
-
-func (h gitHash) Hash() string {
- return intToHash(int64(h))[:7]
-}
-
-func (h gitHash) Repo() string {
- return repo(h & 0xF).String()
-}
-
-func intToHash(i int64) string {
- s := strconv.FormatInt(i, 16)
- if len(s) < 8 {
- s = strings.Repeat("0", 8-len(s)) + s
- }
- return s
-}
-
-// repo represents a Go Git repository.
-type repo byte
-
-const (
- repoGo repo = iota
- repoBlog
- repoCrypto
- repoExp
- repoImage
- repoMobile
- repoNet
- repoSys
- repoTalks
- repoText
- repoTools
-)
-
-func (r repo) String() string {
- return map[repo]string{
- repoGo: "go",
- repoBlog: "blog",
- repoCrypto: "crypto",
- repoExp: "exp",
- repoImage: "image",
- repoMobile: "mobile",
- repoNet: "net",
- repoSys: "sys",
- repoTalks: "talks",
- repoText: "text",
- repoTools: "tools",
- }[r]
-}
diff --git a/godoc/redirect/redirect.go b/godoc/redirect/redirect.go
index 57d779ccb41..d0145ee183b 100644
--- a/godoc/redirect/redirect.go
+++ b/godoc/redirect/redirect.go
@@ -3,147 +3,22 @@
// license that can be found in the LICENSE file.
// Package redirect provides hooks to register HTTP handlers that redirect old
-// godoc paths to their new equivalents and assist in accessing the issue
-// tracker, wiki, code review system, etc.
+// godoc paths to their new equivalents.
package redirect // import "golang.org/x/tools/godoc/redirect"
import (
- "context"
- "fmt"
- "html/template"
"net/http"
- "os"
"regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context/ctxhttp"
)
-// Register registers HTTP handlers that redirect old godoc paths to their new
-// equivalents and assist in accessing the issue tracker, wiki, code review
-// system, etc. If mux is nil it uses http.DefaultServeMux.
+// Register registers HTTP handlers that redirect old godoc paths to their new equivalents.
+// If mux is nil it uses http.DefaultServeMux.
func Register(mux *http.ServeMux) {
if mux == nil {
mux = http.DefaultServeMux
}
- handlePathRedirects(mux, pkgRedirects, "/pkg/")
- handlePathRedirects(mux, cmdRedirects, "/cmd/")
- for prefix, redirect := range prefixHelpers {
- p := "/" + prefix + "/"
- mux.Handle(p, PrefixHandler(p, redirect))
- }
- for path, redirect := range redirects {
- mux.Handle(path, Handler(redirect))
- }
// NB: /src/pkg (sans trailing slash) is the index of packages.
mux.HandleFunc("/src/pkg/", srcPkgHandler)
- mux.HandleFunc("/cl/", clHandler)
- mux.HandleFunc("/change/", changeHandler)
- mux.HandleFunc("/design/", designHandler)
-}
-
-func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) {
- for source, target := range redirects {
- h := Handler(prefix + target + "/")
- p := prefix + source
- mux.Handle(p, h)
- mux.Handle(p+"/", h)
- }
-}
-
-// Packages that were renamed between r60 and go1.
-var pkgRedirects = map[string]string{
- "asn1": "encoding/asn1",
- "big": "math/big",
- "cmath": "math/cmplx",
- "csv": "encoding/csv",
- "exec": "os/exec",
- "exp/template/html": "html/template",
- "gob": "encoding/gob",
- "http": "net/http",
- "http/cgi": "net/http/cgi",
- "http/fcgi": "net/http/fcgi",
- "http/httptest": "net/http/httptest",
- "http/pprof": "net/http/pprof",
- "json": "encoding/json",
- "mail": "net/mail",
- "rand": "math/rand",
- "rpc": "net/rpc",
- "rpc/jsonrpc": "net/rpc/jsonrpc",
- "scanner": "text/scanner",
- "smtp": "net/smtp",
- "tabwriter": "text/tabwriter",
- "template": "text/template",
- "template/parse": "text/template/parse",
- "url": "net/url",
- "utf16": "unicode/utf16",
- "utf8": "unicode/utf8",
- "xml": "encoding/xml",
-}
-
-// Commands that were renamed between r60 and go1.
-var cmdRedirects = map[string]string{
- "gofix": "fix",
- "goinstall": "go",
- "gopack": "pack",
- "gotest": "go",
- "govet": "vet",
- "goyacc": "yacc",
-}
-
-var redirects = map[string]string{
- "/blog": "/blog/",
- "/build": "http://build.golang.org",
- "/change": "https://go.googlesource.com/go",
- "/cl": "https://go-review.googlesource.com",
- "/cmd/godoc/": "https://pkg.go.dev/golang.org/x/tools/cmd/godoc",
- "/issue": "https://github.com/golang/go/issues",
- "/issue/new": "https://github.com/golang/go/issues/new",
- "/issues": "https://github.com/golang/go/issues",
- "/issues/new": "https://github.com/golang/go/issues/new",
- "/play": "http://play.golang.org",
- "/design": "https://go.googlesource.com/proposal/+/master/design",
-
- // In Go 1.2 the references page is part of /doc/.
- "/ref": "/doc/#references",
- // This next rule clobbers /ref/spec and /ref/mem.
- // TODO(adg): figure out what to do here, if anything.
- // "/ref/": "/doc/#references",
-
- // Be nice to people who are looking in the wrong place.
- "/doc/mem": "/ref/mem",
- "/doc/spec": "/ref/spec",
-
- "/talks": "http://talks.golang.org",
- "/tour": "http://tour.golang.org",
- "/wiki": "https://github.com/golang/go/wiki",
-
- "/doc/articles/c_go_cgo.html": "/blog/c-go-cgo",
- "/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and",
- "/doc/articles/defer_panic_recover.html": "/blog/defer-panic-and-recover",
- "/doc/articles/error_handling.html": "/blog/error-handling-and-go",
- "/doc/articles/gobs_of_data.html": "/blog/gobs-of-data",
- "/doc/articles/godoc_documenting_go_code.html": "/blog/godoc-documenting-go-code",
- "/doc/articles/gos_declaration_syntax.html": "/blog/gos-declaration-syntax",
- "/doc/articles/image_draw.html": "/blog/go-imagedraw-package",
- "/doc/articles/image_package.html": "/blog/go-image-package",
- "/doc/articles/json_and_go.html": "/blog/json-and-go",
- "/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces",
- "/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection",
- "/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals",
- "/doc/go_for_cpp_programmers.html": "/wiki/GoForCPPProgrammers",
- "/doc/go_tutorial.html": "http://tour.golang.org/",
-}
-
-var prefixHelpers = map[string]string{
- "issue": "https://github.com/golang/go/issues/",
- "issues": "https://github.com/golang/go/issues/",
- "play": "http://play.golang.org/",
- "talks": "http://talks.golang.org/",
- "wiki": "https://github.com/golang/go/wiki/",
}
func Handler(target string) http.Handler {
@@ -181,144 +56,3 @@ func srcPkgHandler(w http.ResponseWriter, r *http.Request) {
r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):]
http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
}
-
-func clHandler(w http.ResponseWriter, r *http.Request) {
- const prefix = "/cl/"
- if p := r.URL.Path; p == prefix {
- // redirect /prefix/ to /prefix
- http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
- return
- }
- id := r.URL.Path[len(prefix):]
- // support /cl/152700045/, which is used in commit 0edafefc36.
- id = strings.TrimSuffix(id, "/")
- if !validID.MatchString(id) {
- http.Error(w, "Not found", http.StatusNotFound)
- return
- }
- target := ""
-
- if n, err := strconv.Atoi(id); err == nil && isRietveldCL(n) {
- // Issue 28836: if this Rietveld CL happens to
- // also be a Gerrit CL, render a disambiguation HTML
- // page with two links instead. We need to make a
- // Gerrit API call to figure that out, but we cache
- // known Gerrit CLs so it's done at most once per CL.
- if ok, err := isGerritCL(r.Context(), n); err == nil && ok {
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- clDisambiguationHTML.Execute(w, n)
- return
- }
-
- target = "https://codereview.appspot.com/" + id
- } else {
- target = "https://go-review.googlesource.com/" + id
- }
- http.Redirect(w, r, target, http.StatusFound)
-}
-
-var clDisambiguationHTML = template.Must(template.New("").Parse(`
-
-
- Go CL {{.}} Disambiguation
-
-
-
- CL number {{.}} exists in both Gerrit (the current code review system)
- and Rietveld (the previous code review system). Please make a choice:
-
-
\x0a\x09\x0a{{end}}\x0a",
diff --git a/godoc/tohtml_go119.go b/godoc/tohtml_go119.go
new file mode 100644
index 00000000000..6dbf7212b9a
--- /dev/null
+++ b/godoc/tohtml_go119.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package godoc
+
+import (
+ "bytes"
+ "go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+ buf.Write(pkg.HTML(comment))
+}
diff --git a/godoc/tohtml_other.go b/godoc/tohtml_other.go
new file mode 100644
index 00000000000..a1dcf2e195b
--- /dev/null
+++ b/godoc/tohtml_other.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package godoc
+
+import (
+ "bytes"
+ "go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+ doc.ToHTML(buf, comment, nil)
+}
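
As a rough illustration of the pre-go1.19 path above (this snippet is not part of the change), `doc.ToHTML` renders a plain doc comment to escaped HTML, whereas the go1.19 file uses `(*doc.Package).HTML`, which understands the newer doc comment syntax such as links and lists:

```go
package main

import (
	"bytes"
	"fmt"
	"go/doc"
)

func main() {
	var buf bytes.Buffer
	// Same call shape as tohtml_other.go: write the HTML for a comment into
	// a buffer. (On Go 1.19+, doc.ToHTML is deprecated in favor of the
	// doc.Package methods used by tohtml_go119.go.)
	doc.ToHTML(&buf, "Deprecated: use NewThing instead. See https://go.dev for details.", nil)
	fmt.Println(buf.String())
}
```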
diff --git a/gopls/README.md b/gopls/README.md
index 9afc2e48c1e..56d15921a70 100644
--- a/gopls/README.md
+++ b/gopls/README.md
@@ -5,56 +5,57 @@
`gopls` (pronounced "Go please") is the official Go [language server] developed
by the Go team. It provides IDE features to any [LSP]-compatible editor.
-
+
You should not need to interact with `gopls` directly--it will be automatically
integrated into your editor. The specific features and settings vary slightly
-by editor, so we recommend that you proceed to the [documentation for your
-editor](#editors) below.
+by editor, so we recommend that you proceed to the
+[documentation for your editor](#editors) below.
## Editors
To get started with `gopls`, install an LSP plugin in your editor of choice.
-* [VSCode](https://github.com/golang/vscode-go/blob/master/README.md)
+* [VS Code](https://github.com/golang/vscode-go/blob/master/README.md)
* [Vim / Neovim](doc/vim.md)
* [Emacs](doc/emacs.md)
* [Atom](https://github.com/MordFustang21/ide-gopls)
* [Sublime Text](doc/subl.md)
* [Acme](https://github.com/fhs/acme-lsp)
+* [Lapce](https://github.com/lapce-community/lapce-go)
-If you use `gopls` with an editor that is not on this list, please let us know
-by [filing an issue](#new-issue) or [modifying this documentation](doc/contributing.md).
+If you use `gopls` with an editor that is not on this list, please send us a CL
+[updating this documentation](doc/contributing.md).
## Installation
For the most part, you should not need to install or update `gopls`. Your
editor should handle that step for you.
-If you do want to get the latest stable version of `gopls`, change to any
-directory that is both outside of your `GOPATH` and outside of a module (a temp
-directory is fine), and run:
+If you do want to get the latest stable version of `gopls`, run the following
+command:
```sh
go install golang.org/x/tools/gopls@latest
```
-Learn more in the [advanced installation
-instructions](doc/advanced.md#installing-unreleased-versions).
+Learn more in the
+[advanced installation instructions](doc/advanced.md#installing-unreleased-versions).
+
+Learn more about gopls releases in the [release policy](doc/releases.md).
## Setting up your workspace
-`gopls` supports both Go module and GOPATH modes, but if you are working with
-multiple modules or uncommon project layouts, you will need to specifically
-configure your workspace. See the [Workspace document](doc/workspace.md) for
-information on supported workspace layouts.
+`gopls` supports Go module, multi-module, and GOPATH modes. See the
+[workspace documentation](doc/workspace.md) for information on supported
+workspace layouts.
## Configuration
You can configure `gopls` to change your editor experience or view additional
debugging information. Configuration options will be made available by your
editor, so see your [editor's instructions](#editors) for specific details. A
-full list of `gopls` settings can be found in the [Settings documentation](doc/settings.md).
+full list of `gopls` settings can be found in the [settings documentation](doc/settings.md).
### Environment variables
@@ -62,27 +63,36 @@ full list of `gopls` settings can be found in the [Settings documentation](doc/s
variables you configure. Some editors, such as VS Code, allow users to
selectively override the values of some environment variables.
-## Troubleshooting
+## Support Policy
-If you are having issues with `gopls`, please follow the steps described in the
-[troubleshooting guide](doc/troubleshooting.md).
+Gopls is maintained by engineers on the
+[Go tools team](https://github.com/orgs/golang/teams/tools-team/members),
+who actively monitor the
+[Go](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+label%3Agopls)
+and
+[VS Code Go](https://github.com/golang/vscode-go/issues) issue trackers.
-## Supported Go versions and build systems
+### Supported Go versions
`gopls` follows the
[Go Release Policy](https://golang.org/doc/devel/release.html#policy),
meaning that it officially supports the last 2 major Go releases. Per
-[issue #39146](golang.org/issues/39146), we attempt to maintain best-effort
+[issue #39146](https://go.dev/issues/39146), we attempt to maintain best-effort
support for the last 4 major Go releases, but this support extends only to not
breaking the build and avoiding easily fixable regressions.
-The following table shows the final gopls version that supports being built at
-a given Go Version. Any more recent Go versions missing from this table can
-still be built with the latest version of gopls.
+In the context of this discussion, gopls "supports" a Go version if it supports
+being built with that Go version as well as integrating with the `go` command
+of that Go version.
-| Go Version | Final gopls Version With Support |
-| ----------- | -------------------------------- |
+The following table shows the final gopls version that supports a given Go
+version. Go releases more recent than any in the table can be used with any
+version of gopls.
+
+| Go Version | Final gopls version with support (without warnings) |
+| ----------- | --------------------------------------------------- |
| Go 1.12 | [gopls@v0.7.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.7.5) |
+| Go 1.15 | [gopls@v0.9.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.9.5) |
Our extended support is enforced via [continuous integration with older Go
versions](doc/contributing.md#ci). This legacy Go CI may not block releases:
@@ -90,13 +100,22 @@ test failures may be skipped rather than fixed. Furthermore, if a regression in
an older Go version causes irreconcilable CI failures, we may drop support for
that Go version in CI if it is 3 or 4 Go versions old.
-`gopls` currently only supports the `go` command, so if you are using a
-different build system, `gopls` will not work well. Bazel is not officially
-supported, but Bazel support is in development (see
-[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512)).
+### Supported build systems
+
+`gopls` currently only supports the `go` command, so if you are using
+a different build system, `gopls` will not work well. Bazel is not officially
+supported, but may be made to work with an appropriately configured
+`go/packages` driver. See
+[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512)
+for more information.
You can follow [these instructions](https://github.com/bazelbuild/rules_go/wiki/Editor-setup)
to configure your `gopls` to work with Bazel.
+### Troubleshooting
+
+If you are having issues with `gopls`, please follow the steps described in the
+[troubleshooting guide](doc/troubleshooting.md).
+
## Additional information
* [Features](doc/features.md)
@@ -110,4 +129,3 @@ to configure your `gopls` to work with Bazel.
[language server]: https://langserver.org
[LSP]: https://microsoft.github.io/language-server-protocol/
-[Gophers Slack]: https://gophers.slack.com/
diff --git a/gopls/api-diff/api_diff.go b/gopls/api-diff/api_diff.go
index 167bdbd1b9f..8bb54186bab 100644
--- a/gopls/api-diff/api_diff.go
+++ b/gopls/api-diff/api_diff.go
@@ -13,262 +13,77 @@ import (
"encoding/json"
"flag"
"fmt"
- "io"
- "io/ioutil"
"log"
"os"
"os/exec"
- "path/filepath"
- "strings"
- "golang.org/x/tools/internal/gocommand"
- difflib "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/source"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
-var (
- previousVersionFlag = flag.String("prev", "", "version to compare against")
- versionFlag = flag.String("version", "", "version being tagged, or current version if omitted")
-)
+const usage = `api-diff <old version> [<new version>]
+
+Compare the API of two gopls versions. If the second argument is provided, it
+will be used as the new version to compare against. Otherwise, compare against
+the current API.
+`
func main() {
flag.Parse()
- apiDiff, err := diffAPI(*versionFlag, *previousVersionFlag)
+ if flag.NArg() < 1 || flag.NArg() > 2 {
+ fmt.Fprint(os.Stderr, usage)
+ os.Exit(2)
+ }
+
+ oldVer := flag.Arg(0)
+ newVer := ""
+ if flag.NArg() == 2 {
+ newVer = flag.Arg(1)
+ }
+
+ apiDiff, err := diffAPI(oldVer, newVer)
if err != nil {
log.Fatal(err)
}
- fmt.Printf(`
-%s
-`, apiDiff)
-}
-
-type JSON interface {
- String() string
- Write(io.Writer)
+ fmt.Println("\n" + apiDiff)
}
-func diffAPI(version, prev string) (string, error) {
+func diffAPI(oldVer, newVer string) (string, error) {
ctx := context.Background()
- previousApi, err := loadAPI(ctx, prev)
+ previousAPI, err := loadAPI(ctx, oldVer)
if err != nil {
- return "", fmt.Errorf("load previous API: %v", err)
+ return "", fmt.Errorf("loading %s: %v", oldVer, err)
}
- var currentApi *source.APIJSON
- if version == "" {
- currentApi = source.GeneratedAPIJSON
+ var currentAPI *source.APIJSON
+ if newVer == "" {
+ currentAPI = source.GeneratedAPIJSON
} else {
var err error
- currentApi, err = loadAPI(ctx, version)
+ currentAPI, err = loadAPI(ctx, newVer)
if err != nil {
- return "", fmt.Errorf("load current API: %v", err)
- }
- }
-
- b := &strings.Builder{}
- if err := diff(b, previousApi.Commands, currentApi.Commands, "command", func(c *source.CommandJSON) string {
- return c.Command
- }, diffCommands); err != nil {
- return "", fmt.Errorf("diff commands: %v", err)
- }
- if diff(b, previousApi.Analyzers, currentApi.Analyzers, "analyzer", func(a *source.AnalyzerJSON) string {
- return a.Name
- }, diffAnalyzers); err != nil {
- return "", fmt.Errorf("diff analyzers: %v", err)
- }
- if err := diff(b, previousApi.Lenses, currentApi.Lenses, "code lens", func(l *source.LensJSON) string {
- return l.Lens
- }, diffLenses); err != nil {
- return "", fmt.Errorf("diff lenses: %v", err)
- }
- for key, prev := range previousApi.Options {
- current, ok := currentApi.Options[key]
- if !ok {
- panic(fmt.Sprintf("unexpected option key: %s", key))
- }
- if err := diff(b, prev, current, "option", func(o *source.OptionJSON) string {
- return o.Name
- }, diffOptions); err != nil {
- return "", fmt.Errorf("diff options (%s): %v", key, err)
+ return "", fmt.Errorf("loading %s: %v", newVer, err)
}
}
- return b.String(), nil
+ return cmp.Diff(previousAPI, currentAPI), nil
}
-func diff[T JSON](b *strings.Builder, previous, new []T, kind string, uniqueKey func(T) string, diffFunc func(*strings.Builder, T, T)) error {
- prevJSON := collect(previous, uniqueKey)
- newJSON := collect(new, uniqueKey)
- for k := range newJSON {
- delete(prevJSON, k)
- }
- for _, deleted := range prevJSON {
- b.WriteString(fmt.Sprintf("%s %s was deleted.\n", kind, deleted))
- }
- for _, prev := range previous {
- delete(newJSON, uniqueKey(prev))
- }
- if len(newJSON) > 0 {
- b.WriteString("The following commands were added:\n")
- for _, n := range newJSON {
- n.Write(b)
- b.WriteByte('\n')
- }
- }
- previousMap := collect(previous, uniqueKey)
- for _, current := range new {
- prev, ok := previousMap[uniqueKey(current)]
- if !ok {
- continue
- }
- c, p := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
- prev.Write(p)
- current.Write(c)
- if diff, err := diffStr(p.String(), c.String()); err == nil && diff != "" {
- diffFunc(b, prev, current)
- b.WriteString("\n--\n")
- }
- }
- return nil
-}
-
-func collect[T JSON](args []T, uniqueKey func(T) string) map[string]T {
- m := map[string]T{}
- for _, arg := range args {
- m[uniqueKey(arg)] = arg
- }
- return m
-}
-
-var goCmdRunner = gocommand.Runner{}
-
func loadAPI(ctx context.Context, version string) (*source.APIJSON, error) {
- tmpGopath, err := ioutil.TempDir("", "gopath*")
- if err != nil {
- return nil, fmt.Errorf("temp dir: %v", err)
- }
- defer os.RemoveAll(tmpGopath)
+ ver := fmt.Sprintf("golang.org/x/tools/gopls@%s", version)
+ cmd := exec.Command("go", "run", ver, "api-json")
- exampleDir := fmt.Sprintf("%s/src/example.com", tmpGopath)
- if err := os.MkdirAll(exampleDir, 0776); err != nil {
- return nil, fmt.Errorf("mkdir: %v", err)
- }
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
- if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "mod",
- Args: []string{"init", "example.com"},
- WorkingDir: exampleDir,
- Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)),
- }); err != nil {
- return nil, fmt.Errorf("go mod init failed: %v (stdout: %v)", err, stdout)
- }
- if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "install",
- Args: []string{fmt.Sprintf("golang.org/x/tools/gopls@%s", version)},
- WorkingDir: exampleDir,
- Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)),
- }); err != nil {
- return nil, fmt.Errorf("go install failed: %v (stdout: %v)", err, stdout.String())
- }
- cmd := exec.Cmd{
- Path: filepath.Join(tmpGopath, "bin", "gopls"),
- Args: []string{"gopls", "api-json"},
- Dir: tmpGopath,
- }
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("output: %v", err)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("go run failed: %v; stderr:\n%s", err, stderr)
}
apiJson := &source.APIJSON{}
- if err := json.Unmarshal(out, apiJson); err != nil {
+ if err := json.Unmarshal(stdout.Bytes(), apiJson); err != nil {
return nil, fmt.Errorf("unmarshal: %v", err)
}
return apiJson, nil
}
-
-func diffCommands(b *strings.Builder, prev, current *source.CommandJSON) {
- if prev.Title != current.Title {
- b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", prev.Title, current.Title))
- }
- if prev.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", prev.Doc, current.Doc))
- }
- if prev.ArgDoc != current.ArgDoc {
- b.WriteString("Arguments changed from " + formatBlock(prev.ArgDoc) + " to " + formatBlock(current.ArgDoc))
- }
- if prev.ResultDoc != current.ResultDoc {
- b.WriteString("Results changed from " + formatBlock(prev.ResultDoc) + " to " + formatBlock(current.ResultDoc))
- }
-}
-
-func diffAnalyzers(b *strings.Builder, previous, current *source.AnalyzerJSON) {
- b.WriteString(fmt.Sprintf("Changes to analyzer %s:\n\n", current.Name))
- if previous.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc))
- }
- if previous.Default != current.Default {
- b.WriteString(fmt.Sprintf("Default changed from %v to %v\n", previous.Default, current.Default))
- }
-}
-
-func diffLenses(b *strings.Builder, previous, current *source.LensJSON) {
- b.WriteString(fmt.Sprintf("Changes to code lens %s:\n\n", current.Title))
- if previous.Title != current.Title {
- b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", previous.Title, current.Title))
- }
- if previous.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc))
- }
-}
-
-func diffOptions(b *strings.Builder, previous, current *source.OptionJSON) {
- b.WriteString(fmt.Sprintf("Changes to option %s:\n\n", current.Name))
- if previous.Doc != current.Doc {
- diff, err := diffStr(previous.Doc, current.Doc)
- if err != nil {
- panic(err)
- }
- b.WriteString(fmt.Sprintf("Documentation changed:\n%s\n", diff))
- }
- if previous.Default != current.Default {
- b.WriteString(fmt.Sprintf("Default changed from %q to %q\n", previous.Default, current.Default))
- }
- if previous.Hierarchy != current.Hierarchy {
- b.WriteString(fmt.Sprintf("Categorization changed from %q to %q\n", previous.Hierarchy, current.Hierarchy))
- }
- if previous.Status != current.Status {
- b.WriteString(fmt.Sprintf("Status changed from %q to %q\n", previous.Status, current.Status))
- }
- if previous.Type != current.Type {
- b.WriteString(fmt.Sprintf("Type changed from %q to %q\n", previous.Type, current.Type))
- }
- // TODO(rstambler): Handle possibility of same number but different keys/values.
- if len(previous.EnumKeys.Keys) != len(current.EnumKeys.Keys) {
- b.WriteString(fmt.Sprintf("Enum keys changed from\n%s\n to \n%s\n", previous.EnumKeys, current.EnumKeys))
- }
- if len(previous.EnumValues) != len(current.EnumValues) {
- b.WriteString(fmt.Sprintf("Enum values changed from\n%s\n to \n%s\n", previous.EnumValues, current.EnumValues))
- }
-}
-
-func formatBlock(str string) string {
- if str == "" {
- return `""`
- }
- return "\n```\n" + str + "\n```\n"
-}
-
-func diffStr(before, after string) (string, error) {
- // Add newlines to avoid newline messages in diff.
- if before == after {
- return "", nil
- }
- before += "\n"
- after += "\n"
- d, err := myers.ComputeEdits("", before, after)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%q", difflib.ToUnified("previous", "current", before, d)), err
-}
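
A tiny sketch of the new comparison strategy (illustrative only; `optionJSON` is a hypothetical stand-in for the real `source.APIJSON` types, which have many more fields):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// optionJSON stands in for the exported API structures that api-diff now
// compares field by field.
type optionJSON struct {
	Name, Default string
}

func main() {
	prev := []optionJSON{{Name: "directoryFilters", Default: `["-node_modules"]`}}
	curr := []optionJSON{{Name: "directoryFilters", Default: `["-**/node_modules"]`}}

	// cmp.Diff returns "" when the values are equal and a human-readable
	// diff otherwise, which replaces the hand-written diffing logic deleted above.
	fmt.Println(cmp.Diff(prev, curr))
}
```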
diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md
index f5c83d5771d..28bf1deae83 100644
--- a/gopls/doc/analyzers.md
+++ b/gopls/doc/analyzers.md
@@ -131,7 +131,7 @@ of the second argument is not a pointer to a type implementing error.
find structs that would use less memory if their fields were sorted
This analyzer finds structs that can be rearranged to use less memory, and provides
-a suggested edit with the optimal order.
+a suggested edit with the most compact order.
Note that there are two different diagnostics reported. One checks struct size,
and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the
@@ -150,6 +150,11 @@ has 24 pointer bytes because it has to scan further through the *uint32.
has 8 because it can stop immediately after the string pointer.
+Be aware that the most compact order is not always the most efficient.
+In rare cases it may cause two variables each updated by its own goroutine
+to occupy the same CPU cache line, inducing a form of memory contention
+known as "false sharing" that slows down both goroutines.
+
**Disabled by default. Enable it by setting `"analyses": {"fieldalignment": true}`.**
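
A hypothetical example of the kind of rearrangement the analyzer suggests (not taken from its documentation): ordering fields from largest to smallest removes alignment padding.

```go
package main

import (
	"fmt"
	"unsafe"
)

// bad wastes space to padding on 64-bit platforms:
// bool(1) + pad(7) + float64(8) + int32(4) + pad(4) = 24 bytes.
type bad struct {
	b bool
	f float64
	i int32
}

// good holds the same data in 16 bytes by ordering fields largest first.
type good struct {
	f float64
	i int32
	b bool
}

func main() {
	fmt.Println(unsafe.Sizeof(bad{}), unsafe.Sizeof(good{})) // 24 16 on 64-bit platforms
}
```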
@@ -213,19 +218,60 @@ inferred from function arguments, or from other type arguments:
check references to loop variables from within nested functions
-This analyzer checks for references to loop variables from within a
-function literal inside the loop body. It checks only instances where
-the function literal is called in a defer or go statement that is the
-last statement in the loop body, as otherwise we would need whole
-program analysis.
-
-For example:
-
- for i, v := range s {
- go func() {
- println(i, v) // not what you might expect
- }()
- }
+This analyzer reports places where a function literal references the
+iteration variable of an enclosing loop, and the loop calls the function
+in such a way (e.g. with go or defer) that it may outlive the loop
+iteration and possibly observe the wrong value of the variable.
+
+In this example, all the deferred functions run after the loop has
+completed, so all observe the final value of v.
+
+ for _, v := range list {
+ defer func() {
+ use(v) // incorrect
+ }()
+ }
+
+One fix is to create a new variable for each iteration of the loop:
+
+ for _, v := range list {
+ v := v // new var per iteration
+ defer func() {
+ use(v) // ok
+ }()
+ }
+
+The next example uses a go statement and has a similar problem.
+In addition, it has a data race because the loop updates v
+concurrent with the goroutines accessing it.
+
+ for _, v := range elem {
+ go func() {
+ use(v) // incorrect, and a data race
+ }()
+ }
+
+A fix is the same as before. The checker also reports problems
+in goroutines started by golang.org/x/sync/errgroup.Group.
+A hard-to-spot variant of this form is common in parallel tests:
+
+ func Test(t *testing.T) {
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ use(test) // incorrect, and a data race
+ })
+ }
+ }
+
+The t.Parallel() call causes the rest of the function to execute
+concurrent with the loop.
+
+The analyzer reports references only in the last statement,
+as it is not deep enough to understand the effects of subsequent
+statements that might render the reference benign.
+("Last statement" is defined recursively in compound
+statements such as if, switch, and select.)
See: https://golang.org/doc/go_faq.html#closures_and_goroutines
@@ -490,6 +536,17 @@ identifiers.
Please see the documentation for package testing in golang.org/pkg/testing
for the conventions that are enforced for Tests, Benchmarks, and Examples.
+**Enabled by default.**
+
+## **timeformat**
+
+check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+
+
**Enabled by default.**
## **unmarshal**
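
A small illustration of the mistake the timeformat analyzer described above catches (not part of its documentation):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, time.July, 4, 0, 0, 0, 0, time.UTC)
	fmt.Println(t.Format("2006-02-01")) // "2023-04-07": yyyy-dd-mm, almost certainly a mistake
	fmt.Println(t.Format("2006-01-02")) // "2023-07-04": yyyy-mm-dd (ISO 8601), what was likely intended
}
```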
@@ -652,6 +709,15 @@ func <>(inferred parameters) {
**Enabled by default.**
+## **unusedvariable**
+
+check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+
+
+**Disabled by default. Enable it by setting `"analyses": {"unusedvariable": true}`.**
+
## **fillstruct**
note incomplete struct initializations
diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md
index f868a48936e..9d5e851ccac 100644
--- a/gopls/doc/commands.md
+++ b/gopls/doc/commands.md
@@ -100,6 +100,26 @@ Args:
}
```
+### **Get known vulncheck result**
+Identifier: `gopls.fetch_vulncheck_result`
+
+Fetch the result of latest vulnerability check (`govulncheck`).
+
+Args:
+
+```
+{
+ // The file URI.
+ "URI": string,
+}
+```
+
+Result:
+
+```
+map[golang.org/x/tools/gopls/internal/lsp/protocol.DocumentURI]*golang.org/x/tools/gopls/internal/govulncheck.Result
+```
+
### **Toggle gc_details**
Identifier: `gopls.gc_details`
@@ -247,26 +267,26 @@ Args:
}
```
-### **Run test(s)**
-Identifier: `gopls.run_tests`
+### **Reset go.mod diagnostics**
+Identifier: `gopls.reset_go_mod_diagnostics`
-Runs `go test` for a specific set of test or benchmark functions.
+Reset diagnostics in the go.mod file of a module.
Args:
```
{
- // The test file containing the tests to run.
- "URI": string,
- // Specific test names to run, e.g. TestFoo.
- "Tests": []string,
- // Specific benchmarks to run, e.g. BenchmarkFoo.
- "Benchmarks": []string,
+ "URIArg": {
+ "URI": string,
+ },
+ // Optional: source of the diagnostics to reset.
+ // If not set, all resettable go.mod diagnostics will be cleared.
+ "DiagnosticSource": string,
}
```
-### **Run vulncheck (experimental)**
-Identifier: `gopls.run_vulncheck_exp`
+### **Run govulncheck.**
+Identifier: `gopls.run_govulncheck`
Run vulnerability check (`govulncheck`).
@@ -274,8 +294,8 @@ Args:
```
{
- // Dir is the directory from which vulncheck will run from.
- "Dir": string,
+ // Any document in the directory from which govulncheck will run.
+ "URI": string,
// Package pattern. E.g. "", ".", "./...".
"Pattern": string,
}
@@ -285,19 +305,27 @@ Result:
```
{
- "Vuln": []{
- "ID": string,
- "Details": string,
- "Aliases": []string,
- "Symbol": string,
- "PkgPath": string,
- "ModPath": string,
- "URL": string,
- "CurrentVersion": string,
- "FixedVersion": string,
- "CallStacks": [][]golang.org/x/tools/internal/lsp/command.StackEntry,
- "CallStackSummaries": []string,
- },
+ // Token holds the progress token for LSP workDone reporting of the vulncheck
+ // invocation.
+ "Token": interface{},
+}
+```
+
+### **Run test(s)**
+Identifier: `gopls.run_tests`
+
+Runs `go test` for a specific set of test or benchmark functions.
+
+Args:
+
+```
+{
+ // The test file containing the tests to run.
+ "URI": string,
+ // Specific test names to run, e.g. TestFoo.
+ "Tests": []string,
+ // Specific benchmarks to run, e.g. BenchmarkFoo.
+ "Benchmarks": []string,
}
```
diff --git a/gopls/doc/contributing.md b/gopls/doc/contributing.md
index 99e45292296..367280f53e3 100644
--- a/gopls/doc/contributing.md
+++ b/gopls/doc/contributing.md
@@ -18,8 +18,8 @@ claiming it.
## Getting started
-Most of the `gopls` logic is actually in the `golang.org/x/tools/internal/lsp`
-directory, so you are most likely to develop in the golang.org/x/tools module.
+Most of the `gopls` logic is in the `golang.org/x/tools/gopls/internal/lsp`
+directory.
## Build
diff --git a/gopls/doc/design/integrating.md b/gopls/doc/design/integrating.md
index 845f9eb007f..ba2cc07aa71 100644
--- a/gopls/doc/design/integrating.md
+++ b/gopls/doc/design/integrating.md
@@ -20,7 +20,7 @@ Many LSP requests pass position or range information. This is described in the [
This means that integrators will need to calculate UTF-16 based column offsets.
-[`golang.org/x/tools/internal/span`] has the code to do this in go.
+[`golang.org/x/tools/gopls/internal/span`] has the code to do this in go.
[#31080] tracks making `span` and other useful packages non-internal.
## Edits
@@ -61,9 +61,9 @@ For instance, files that are needed to do correct type checking are modified by
Monitoring files inside gopls directly has a lot of awkward problems, but the [LSP specification] has methods that allow gopls to request that the client notify it of file system changes, specifically [`workspace/didChangeWatchedFiles`].
This is currently being added to gopls by a community member, and tracked in [#31553]
-[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#InitializeResult
-[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#ServerCapabilities
-[`golang.org/x/tools/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint
+[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#InitializeResult
+[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#ServerCapabilities
+[`golang.org/x/tools/gopls/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint
[LSP specification]: https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/
[lsp-response]: https://github.com/Microsoft/language-server-protocol/blob/gh-pages/_specifications/specification-3-14.md#response-message
diff --git a/gopls/doc/generate.go b/gopls/doc/generate.go
index e63653de6bc..d674bfce489 100644
--- a/gopls/doc/generate.go
+++ b/gopls/doc/generate.go
@@ -20,6 +20,7 @@ import (
"io"
"io/ioutil"
"os"
+ "os/exec"
"path/filepath"
"reflect"
"regexp"
@@ -32,47 +33,71 @@ import (
"github.com/jba/printsrc"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/command/commandmeta"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/command/commandmeta"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
func main() {
- if _, err := doMain("..", true); err != nil {
+ if _, err := doMain(true); err != nil {
fmt.Fprintf(os.Stderr, "Generation failed: %v\n", err)
os.Exit(1)
}
}
-func doMain(baseDir string, write bool) (bool, error) {
+func doMain(write bool) (bool, error) {
api, err := loadAPI()
if err != nil {
return false, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "internal/lsp/source/api_json.go"), api, write, rewriteAPI); !ok || err != nil {
+ sourceDir, err := pkgDir("golang.org/x/tools/gopls/internal/lsp/source")
+ if err != nil {
+ return false, err
+ }
+
+ if ok, err := rewriteFile(filepath.Join(sourceDir, "api_json.go"), api, write, rewriteAPI); !ok || err != nil {
+ return ok, err
+ }
+
+ goplsDir, err := pkgDir("golang.org/x/tools/gopls")
+ if err != nil {
+ return false, err
+ }
+
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "settings.md"), api, write, rewriteSettings); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/settings.md"), api, write, rewriteSettings); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "commands.md"), api, write, rewriteCommands); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/commands.md"), api, write, rewriteCommands); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "inlayHints.md"), api, write, rewriteInlayHints); !ok || err != nil {
return ok, err
}
return true, nil
}
+// pkgDir returns the directory corresponding to the import path pkgPath.
+func pkgDir(pkgPath string) (string, error) {
+ out, err := exec.Command("go", "list", "-f", "{{.Dir}}", pkgPath).Output()
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
func loadAPI() (*source.APIJSON, error) {
pkgs, err := packages.Load(
&packages.Config{
Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedDeps,
},
- "golang.org/x/tools/internal/lsp/source",
+ "golang.org/x/tools/gopls/internal/lsp/source",
)
if err != nil {
return nil, err
@@ -102,6 +127,7 @@ func loadAPI() (*source.APIJSON, error) {
} {
api.Analyzers = append(api.Analyzers, loadAnalyzers(m)...)
}
+ api.Hints = loadHints(source.AllInlayHints)
for _, category := range []reflect.Value{
reflect.ValueOf(defaults.UserOptions),
} {
@@ -146,6 +172,14 @@ func loadAPI() (*source.APIJSON, error) {
Default: def,
})
}
+ case "hints":
+ for _, a := range api.Hints {
+ opt.EnumKeys.Keys = append(opt.EnumKeys.Keys, source.EnumKey{
+ Name: fmt.Sprintf("%q", a.Name),
+ Doc: a.Doc,
+ Default: strconv.FormatBool(a.Default),
+ })
+ }
}
}
}
@@ -488,6 +522,23 @@ func loadAnalyzers(m map[string]*source.Analyzer) []*source.AnalyzerJSON {
return json
}
+func loadHints(m map[string]*source.Hint) []*source.HintJSON {
+ var sorted []string
+ for _, h := range m {
+ sorted = append(sorted, h.Name)
+ }
+ sort.Strings(sorted)
+ var json []*source.HintJSON
+ for _, name := range sorted {
+ h := m[name]
+ json = append(json, &source.HintJSON{
+ Name: h.Name,
+ Doc: h.Doc,
+ })
+ }
+ return json
+}
+
func lowerFirst(x string) string {
if x == "" {
return x
@@ -505,7 +556,7 @@ func upperFirst(x string) string {
func fileForPos(pkg *packages.Package, pos token.Pos) (*ast.File, error) {
fset := pkg.Fset
for _, f := range pkg.Syntax {
- if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename {
+ if safetoken.StartPosition(fset, f.Pos()).Filename == safetoken.StartPosition(fset, pos).Filename {
return f, nil
}
}
@@ -537,7 +588,7 @@ func rewriteFile(file string, api *source.APIJSON, write bool, rewrite func([]by
func rewriteAPI(_ []byte, api *source.APIJSON) ([]byte, error) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by \"golang.org/x/tools/gopls/doc/generate\"; DO NOT EDIT.\n\npackage source\n\nvar GeneratedAPIJSON = ")
- if err := printsrc.NewPrinter("golang.org/x/tools/internal/lsp/source").Fprint(&buf, api); err != nil {
+ if err := printsrc.NewPrinter("golang.org/x/tools/gopls/internal/lsp/source").Fprint(&buf, api); err != nil {
return nil, err
}
return format.Source(buf.Bytes())
@@ -571,7 +622,7 @@ func rewriteSettings(doc []byte, api *source.APIJSON) ([]byte, error) {
writeTitle(section, h.final, level)
for _, opt := range h.options {
header := strMultiply("#", level+1)
- section.Write([]byte(fmt.Sprintf("%s ", header)))
+ fmt.Fprintf(section, "%s ", header)
opt.Write(section)
}
}
@@ -699,6 +750,21 @@ func rewriteAnalyzers(doc []byte, api *source.APIJSON) ([]byte, error) {
return replaceSection(doc, "Analyzers", section.Bytes())
}
+func rewriteInlayHints(doc []byte, api *source.APIJSON) ([]byte, error) {
+ section := bytes.NewBuffer(nil)
+ for _, hint := range api.Hints {
+ fmt.Fprintf(section, "## **%v**\n\n", hint.Name)
+ fmt.Fprintf(section, "%s\n\n", hint.Doc)
+ switch hint.Default {
+ case true:
+ fmt.Fprintf(section, "**Enabled by default.**\n\n")
+ case false:
+ fmt.Fprintf(section, "**Disabled by default. Enable it by setting `\"hints\": {\"%s\": true}`.**\n\n", hint.Name)
+ }
+ }
+ return replaceSection(doc, "Hints", section.Bytes())
+}
+
func replaceSection(doc []byte, sectionName string, replacement []byte) ([]byte, error) {
	re := regexp.MustCompile(fmt.Sprintf(`(?s)<!-- BEGIN %v.* -->\n(.*?)<!-- END %v.* -->`, sectionName, sectionName))
idx := re.FindSubmatchIndex(doc)
diff --git a/gopls/doc/generate_test.go b/gopls/doc/generate_test.go
index 137a646cd8d..d33594d6159 100644
--- a/gopls/doc/generate_test.go
+++ b/gopls/doc/generate_test.go
@@ -16,7 +16,7 @@ import (
func TestGenerated(t *testing.T) {
testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
- ok, err := doMain("../..", false)
+ ok, err := doMain(false)
if err != nil {
t.Fatal(err)
}
diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md
new file mode 100644
index 00000000000..2ae9a2828af
--- /dev/null
+++ b/gopls/doc/inlayHints.md
@@ -0,0 +1,80 @@
+# Hints
+
+This document describes the inlay hints that `gopls` uses inside the editor.
+
+<!-- BEGIN Hints: DO NOT MANUALLY EDIT THIS SECTION -->
+## **assignVariableTypes**
+
+Enable/disable inlay hints for variable types in assign statements:
+```go
+ i/* int*/, j/* int*/ := 0, len(r)-1
+```
+
+**Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.**
+
+## **compositeLiteralFields**
+
+Enable/disable inlay hints for composite literal field names:
+```go
+ {/*in: */"Hello, world", /*want: */"dlrow ,olleH"}
+```
+
+**Disabled by default. Enable it by setting `"hints": {"compositeLiteralFields": true}`.**
+
+## **compositeLiteralTypes**
+
+Enable/disable inlay hints for composite literal types:
+```go
+ for _, c := range []struct {
+ in, want string
+ }{
+ /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"},
+ }
+```
+
+**Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.**
+
+## **constantValues**
+
+Enable/disable inlay hints for constant values:
+```go
+ const (
+ KindNone Kind = iota/* = 0*/
+ KindPrint/* = 1*/
+ KindPrintf/* = 2*/
+ KindErrorf/* = 3*/
+ )
+```
+
+**Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.**
+
+## **functionTypeParameters**
+
+Enable/disable inlay hints for implicit type parameters on generic functions:
+```go
+ myFoo/*[int, string]*/(1, "hello")
+```
+
+**Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.**
+
+## **parameterNames**
+
+Enable/disable inlay hints for parameter names:
+```go
+ parseInt(/* str: */ "123", /* radix: */ 8)
+```
+
+**Disabled by default. Enable it by setting `"hints": {"parameterNames": true}`.**
+
+## **rangeVariableTypes**
+
+Enable/disable inlay hints for variable types in range statements:
+```go
+ for k/* int*/, v/* string*/ := range []string{} {
+ fmt.Println(k, v)
+ }
+```
+
+**Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.**
+
+
diff --git a/gopls/doc/releases.md b/gopls/doc/releases.md
new file mode 100644
index 00000000000..befb92c3966
--- /dev/null
+++ b/gopls/doc/releases.md
@@ -0,0 +1,25 @@
+# Gopls release policy
+
+Gopls releases follow [semver](http://semver.org), with major changes and new
+features introduced only in new minor versions (i.e. versions of the form
+`v*.N.0` for some N). Subsequent patch releases contain only cherry-picked
+fixes or superficial updates.
+
+In order to align with the
+[Go release timeline](https://github.com/golang/go/wiki/Go-Release-Cycle#timeline),
+we aim to release a new minor version of Gopls approximately every three
+months, with patch releases approximately every month, according to the
+following table:
+
+| Month | Version(s) |
+| ---- | ------- |
+| Jan     | `v*.<N>.0`   |
+| Jan-Mar | `v*.<N>.*`   |
+| Apr     | `v*.<N+1>.0` |
+| Apr-Jun | `v*.<N+1>.*` |
+| Jul     | `v*.<N+2>.0` |
+| Jul-Sep | `v*.<N+2>.*` |
+| Oct     | `v*.<N+3>.0` |
+| Oct-Dec | `v*.<N+3>.*` |
+
+For more background on this policy, see https://go.dev/issue/55267.
diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md
index 092a3c7cfaf..6816967ee21 100644
--- a/gopls/doc/settings.md
+++ b/gopls/doc/settings.md
@@ -1,6 +1,6 @@
# Settings
-
+
This document describes the global settings for `gopls` inside the editor.
The settings block will be called `"gopls"` and contains a collection of
@@ -35,6 +35,7 @@ still be able to independently override specific experimental features.
* [Completion](#completion)
* [Diagnostic](#diagnostic)
* [Documentation](#documentation)
+ * [Inlayhint](#inlayhint)
* [Navigation](#navigation)
### Build
@@ -62,15 +63,19 @@ relative to the workspace folder. They are evaluated in order, and
the last filter that applies to a path controls whether it is included.
The path prefix can be empty, so an initial `-` excludes everything.
+DirectoryFilters also supports the `**` operator to match 0 or more directories.
+
Examples:
-Exclude node_modules: `-node_modules`
+Exclude node_modules at current depth: `-node_modules`
+
+Exclude node_modules at any depth: `-**/node_modules`
Include only project_a: `-` (exclude everything), `+project_a`
Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`
-Default: `["-node_modules"]`.
+Default: `["-**/node_modules"]`.
#### **templateExtensions** *[]string*
@@ -118,6 +123,9 @@ Default: `true`.
experimentalWorkspaceModule opts a user into the experimental support
for multi-module workspaces.
+Deprecated: this feature is deprecated and will be removed in a future
+version of gopls (https://go.dev/issue/55331).
+
Default: `false`.
#### **experimentalPackageCacheKey** *bool*
@@ -153,16 +161,28 @@ be removed.
Default: `false`.
-#### **experimentalUseInvalidMetadata** *bool*
+#### **standaloneTags** *[]string*
-**This setting is experimental and may be deleted.**
+standaloneTags specifies a set of build constraints that identify
+individual Go source files that make up the entire main package of an
+executable.
-experimentalUseInvalidMetadata enables gopls to fall back on outdated
-package metadata to provide editor features if the go command fails to
-load packages for some reason (like an invalid go.mod file). This will
-eventually be the default behavior, and this setting will be removed.
+A common example of standalone main files is the convention of using the
+directive `//go:build ignore` to denote files that are not intended to be
+included in any package, for example because they are invoked directly by
+the developer using `go run`.
-Default: `false`.
+Gopls considers a file to be a standalone main file if and only if it has
+package name "main" and has a build directive of the exact form
+"//go:build tag" or "// +build tag", where tag is among the list of tags
+configured by this setting. Notably, if the build constraint is more
+complicated than a simple tag (such as the composite constraint
+`//go:build tag && go1.18`), the file is not considered to be a standalone
+main file.
+
+This setting is only supported when gopls is built with Go 1.16 or later.
+
+Default: `["ignore"]`.
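
To make the rule concrete, here is a hypothetical `gen.go` that the default `standaloneTags` value would classify as a standalone main file; the file name and contents are illustrative only:

```go
//go:build ignore
// +build ignore

// Invoked directly with `go run gen.go`. The bare "ignore" build tag
// keeps the file out of the enclosing package, and because its package
// name is "main" and the constraint is a single plain tag, gopls treats
// it as a standalone main file.
package main

import "fmt"

func main() {
	fmt.Println("generating...")
}
```
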
### Formatting
@@ -214,6 +234,22 @@ semantic tokens to the client.
Default: `false`.
+#### **noSemanticString** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticString turns off the sending of the semantic token 'string'
+
+Default: `false`.
+
+#### **noSemanticNumber** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticNumber turns off the sending of the semantic token 'number'
+
+Default: `false`.
+
#### Completion
##### **usePlaceholders** *bool*
@@ -265,8 +301,8 @@ Default: `true`.
analyses specify analyses that the user would like to enable or disable.
A map of the names of analysis passes that should be enabled/disabled.
-A full list of analyzers that gopls uses can be found
-[here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
+A full list of analyzers that gopls uses can be found in
+[analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
Example Usage:
@@ -286,6 +322,8 @@ Default: `{}`.
**This setting is experimental and may be deleted.**
staticcheck enables additional analyses from staticcheck.io.
+These analyses are documented on
+[Staticcheck's website](https://staticcheck.io/docs/checks/).
Default: `false`.
@@ -305,6 +343,20 @@ Can contain any of:
Default: `{"bounds":true,"escape":true,"inline":true,"nil":true}`.
+##### **vulncheck** *enum*
+
+**This setting is experimental and may be deleted.**
+
+vulncheck enables vulnerability scanning.
+
+Must be one of:
+
+* `"Imports"`: In Imports mode, `gopls` will report vulnerabilities that affect packages
+directly and indirectly used by the analyzed main module.
+* `"Off"`: Disable vulnerability analysis.
+
+Default: `"Off"`.
+
##### **diagnosticsDelay** *time.Duration*
**This is an advanced setting and should not be configured by most `gopls` users.**
@@ -330,6 +382,9 @@ file system notifications.
This option must be set to a valid duration string, for example `"100ms"`.
+Deprecated: this setting is deprecated and will be removed in a future
+version of gopls (https://go.dev/issue/55332)
+
Default: `"0s"`.
#### Documentation
@@ -362,6 +417,9 @@ It might be one of:
If company chooses to use its own `godoc.org`, its address can be used as well.
+Modules matching the GOPRIVATE environment variable will not have
+documentation links in hover.
+
Default: `"pkg.go.dev"`.
##### **linksInHover** *bool*
@@ -370,6 +428,18 @@ linksInHover toggles the presence of links to documentation in hover.
Default: `true`.
+#### Inlayhint
+
+##### **hints** *map[string]bool*
+
+**This setting is experimental and may be deleted.**
+
+hints specify inlay hints that users want to see. A full list of hints
+that gopls uses can be found in
+[inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).
+
+Default: `{}`.
+
#### Navigation
##### **importShortcut** *enum*
@@ -439,6 +509,17 @@ Default: `false`.
+#### **newDiff** *string*
+
+newDiff enables the new diff implementation. If this is "both", for now both
+diffs will be run and statistics will be generated in a file in $TMPDIR. This
+is a risky setting; help in trying it is appreciated. If it is "old" the old
+implementation is used, and if it is "new", just the new implementation is
+used. This setting will eventually be deleted, once gopls has fully migrated to
+the new diff algorithm.
+
+Default: 'both'.
+
## Code Lenses
These are the code lenses that `gopls` currently supports. They can be enabled
@@ -461,6 +542,11 @@ Runs `go generate` for a given directory.
Identifier: `regenerate_cgo`
Regenerates cgo definitions.
+### **Run govulncheck.**
+
+Identifier: `run_govulncheck`
+
+Run vulnerability check (`govulncheck`).
### **Run test(s) (legacy)**
Identifier: `test`
diff --git a/gopls/doc/vim.md b/gopls/doc/vim.md
index d9b33ac34dc..af54a7e088e 100644
--- a/gopls/doc/vim.md
+++ b/gopls/doc/vim.md
@@ -175,23 +175,22 @@ a helper function in Lua:
lua < ../
diff --git a/gopls/go.sum b/gopls/go.sum
index 5873afa1968..6f6866773d6 100644
--- a/gopls/go.sum
+++ b/gopls/go.sum
@@ -1,23 +1,25 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
-github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns=
-github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
-github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM=
github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
+github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8=
+github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
github.com/jba/printsrc v0.2.2 h1:9OHK51UT+/iMAEBlQIIXW04qvKyF3/vvLuwW/hL8tDU=
github.com/jba/printsrc v0.2.2/go.mod h1:1xULjw59sL0dPdWpDoVU06TIEO/Wnfv6AHRpiElTwYM=
github.com/jba/templatecheck v0.6.0 h1:SwM8C4hlK/YNLsdcXStfnHWE2HKkuTVwy5FKQHt5ro8=
@@ -33,62 +35,65 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338 h1:2O2DON6y3XMJiQRAS1UWU+54aec2uopH3x7MAiqGW6Y=
+golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be h1:jokAF1mfylAi1iTQx7C44B7vyXUcSEMw8eDv0PzNu8s=
-golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be/go.mod h1:twca1SxmF6/i2wHY/mj1vLIkkHdp+nil/yA32ZOP4kg=
+golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/vuln v0.0.0-20221212182831-af59454a8a0a h1:KWIh6uTTw7r3PEz1N1OIEM8pr5bf1uP1n6JL5Ml56X8=
+golang.org/x/vuln v0.0.0-20221212182831-af59454a8a0a/go.mod h1:54iI0rrZVM8VdIvTrT/sdlVfMUJWOgvTRQN24CEtZk0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
-honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU=
-honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70=
-mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=
-mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo=
+honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA=
+honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
+mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
+mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY=
mvdan.cc/xurls/v2 v2.4.0 h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc=
diff --git a/gopls/internal/coverage/coverage.go b/gopls/internal/coverage/coverage.go
index 7bb3640bdbd..9a7d219945e 100644
--- a/gopls/internal/coverage/coverage.go
+++ b/gopls/internal/coverage/coverage.go
@@ -12,9 +12,13 @@
// -o controls where the coverage file is written, defaulting to /tmp/cover.out
// -i coverage-file will generate the report from an existing coverage file
// -v controls verbosity (0: only report coverage, 1: report as each directory is finished,
-// 2: report on each test, 3: more details, 4: too much)
+//
+// 2: report on each test, 3: more details, 4: too much)
+//
// -t tests only tests packages in the given comma-separated list of directories in gopls.
-// The names should start with ., as in ./internal/regtest/bench
+//
+// The names should start with ., as in ./internal/regtest/bench
+//
// -run tests. If set, -run tests is passed on to the go test command.
//
// Despite gopls' use of goroutines, the counts are almost deterministic.
@@ -60,7 +64,7 @@ func main() {
tests = realTestName(tests)
// report coverage for packages under internal/lsp
- parg := "golang.org/x/tools/internal/lsp/..."
+ parg := "golang.org/x/tools/gopls/internal/lsp/..."
accum := []string{}
seen := make(map[string]bool)
@@ -184,7 +188,12 @@ func maybePrint(m result) {
if *verbose > 3 {
fmt.Printf("%s %s %q %.3f\n", m.Action, m.Test, m.Output, m.Elapsed)
}
+ case "pause", "cont":
+ if *verbose > 2 {
+ fmt.Printf("%s %s %.3f\n", m.Action, m.Test, m.Elapsed)
+ }
default:
+ fmt.Printf("%#v\n", m)
log.Fatalf("unknown action %s\n", m.Action)
}
}
@@ -224,7 +233,7 @@ func checkCwd() {
if err != nil {
log.Fatal(err)
}
- // we expect to be a the root of golang.org/x/tools
+ // we expect to be at the root of golang.org/x/tools
cmd := exec.Command("go", "list", "-m", "-f", "{{.Dir}}", "golang.org/x/tools")
buf, err := cmd.Output()
buf = bytes.Trim(buf, "\n \t") // remove \n at end
@@ -239,10 +248,6 @@ func checkCwd() {
if err != nil {
log.Fatalf("expected a gopls directory, %v", err)
}
- _, err = os.Stat("internal/lsp")
- if err != nil {
- log.Fatalf("expected to see internal/lsp, %v", err)
- }
}
func listDirs(dir string) []string {
diff --git a/gopls/internal/govulncheck/README.md b/gopls/internal/govulncheck/README.md
deleted file mode 100644
index d8339c506f6..00000000000
--- a/gopls/internal/govulncheck/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# internal/govulncheck package
-
-This package is a literal copy of the cmd/govulncheck/internal/govulncheck
-package in the vuln repo (https://go.googlesource.com/vuln).
-
-The `copy.sh` does the copying, after removing all .go files here. To use it:
-
-1. Clone the vuln repo to a directory next to the directory holding this repo
- (tools). After doing that your directory structure should look something like
- ```
- ~/repos/x/tools/gopls/...
- ~/repos/x/vuln/...
- ```
-
-2. cd to this directory.
-
-3. Run `copy.sh`.
diff --git a/gopls/internal/govulncheck/cache.go b/gopls/internal/govulncheck/cache.go
deleted file mode 100644
index 404c3567320..00000000000
--- a/gopls/internal/govulncheck/cache.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-// Package govulncheck supports the govulncheck command.
-package govulncheck
-
-import (
- "encoding/json"
- "go/build"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "golang.org/x/vuln/client"
- "golang.org/x/vuln/osv"
-)
-
-// The cache uses a single JSON index file for each vulnerability database
-// which contains the map from packages to the time the last
-// vulnerability for that package was added/modified and the time that
-// the index was retrieved from the vulnerability database. The JSON
-// format is as follows:
-//
-// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/indexes/index.json
-// {
-// Retrieved time.Time
-// Index client.DBIndex
-// }
-//
-// Each package also has a JSON file which contains the array of vulnerability
-// entries for the package. The JSON format is as follows:
-//
-// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/{import path}/vulns.json
-// []*osv.Entry
-
-// FSCache is a thread-safe file-system cache implementing osv.Cache
-//
-// TODO: use something like cmd/go/internal/lockedfile for thread safety?
-type FSCache struct {
- mu sync.Mutex
- rootDir string
-}
-
-// Assert that *FSCache implements client.Cache.
-var _ client.Cache = (*FSCache)(nil)
-
-// use cfg.GOMODCACHE available in cmd/go/internal?
-var defaultCacheRoot = filepath.Join(build.Default.GOPATH, "/pkg/mod/cache/download/vulndb")
-
-func DefaultCache() *FSCache {
- return &FSCache{rootDir: defaultCacheRoot}
-}
-
-type cachedIndex struct {
- Retrieved time.Time
- Index client.DBIndex
-}
-
-func (c *FSCache) ReadIndex(dbName string) (client.DBIndex, time.Time, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, "index.json"))
- if err != nil {
- if os.IsNotExist(err) {
- return nil, time.Time{}, nil
- }
- return nil, time.Time{}, err
- }
- var index cachedIndex
- if err := json.Unmarshal(b, &index); err != nil {
- return nil, time.Time{}, err
- }
- return index.Index, index.Retrieved, nil
-}
-
-func (c *FSCache) WriteIndex(dbName string, index client.DBIndex, retrieved time.Time) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- path := filepath.Join(c.rootDir, dbName)
- if err := os.MkdirAll(path, 0755); err != nil {
- return err
- }
- j, err := json.Marshal(cachedIndex{
- Index: index,
- Retrieved: retrieved,
- })
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(path, "index.json"), j, 0666); err != nil {
- return err
- }
- return nil
-}
-
-func (c *FSCache) ReadEntries(dbName string, p string) ([]*osv.Entry, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, p, "vulns.json"))
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
- var entries []*osv.Entry
- if err := json.Unmarshal(b, &entries); err != nil {
- return nil, err
- }
- return entries, nil
-}
-
-func (c *FSCache) WriteEntries(dbName string, p string, entries []*osv.Entry) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- path := filepath.Join(c.rootDir, dbName, p)
- if err := os.MkdirAll(path, 0777); err != nil {
- return err
- }
- j, err := json.Marshal(entries)
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(path, "vulns.json"), j, 0666); err != nil {
- return err
- }
- return nil
-}
diff --git a/gopls/internal/govulncheck/cache_test.go b/gopls/internal/govulncheck/cache_test.go
deleted file mode 100644
index 5a25c781020..00000000000
--- a/gopls/internal/govulncheck/cache_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package govulncheck
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "testing"
- "time"
-
- "golang.org/x/sync/errgroup"
- "golang.org/x/vuln/client"
- "golang.org/x/vuln/osv"
-)
-
-func TestCache(t *testing.T) {
- tmpDir := t.TempDir()
-
- cache := &FSCache{rootDir: tmpDir}
- dbName := "vulndb.golang.org"
-
- _, _, err := cache.ReadIndex(dbName)
- if err != nil {
- t.Fatalf("ReadIndex failed for non-existent database: %v", err)
- }
-
- if err = os.Mkdir(filepath.Join(tmpDir, dbName), 0777); err != nil {
- t.Fatalf("os.Mkdir failed: %v", err)
- }
- _, _, err = cache.ReadIndex(dbName)
- if err != nil {
- t.Fatalf("ReadIndex failed for database without cached index: %v", err)
- }
-
- now := time.Now()
- expectedIdx := client.DBIndex{
- "a.vuln.example.com": time.Time{}.Add(time.Hour),
- "b.vuln.example.com": time.Time{}.Add(time.Hour * 2),
- "c.vuln.example.com": time.Time{}.Add(time.Hour * 3),
- }
- if err = cache.WriteIndex(dbName, expectedIdx, now); err != nil {
- t.Fatalf("WriteIndex failed to write index: %v", err)
- }
-
- idx, retrieved, err := cache.ReadIndex(dbName)
- if err != nil {
- t.Fatalf("ReadIndex failed for database with cached index: %v", err)
- }
- if !reflect.DeepEqual(idx, expectedIdx) {
- t.Errorf("ReadIndex returned unexpected index, got:\n%s\nwant:\n%s", idx, expectedIdx)
- }
- if !retrieved.Equal(now) {
- t.Errorf("ReadIndex returned unexpected retrieved: got %s, want %s", retrieved, now)
- }
-
- if _, err = cache.ReadEntries(dbName, "vuln.example.com"); err != nil {
- t.Fatalf("ReadEntires failed for non-existent package: %v", err)
- }
-
- expectedEntries := []*osv.Entry{
- {ID: "001"},
- {ID: "002"},
- {ID: "003"},
- }
- if err := cache.WriteEntries(dbName, "vuln.example.com", expectedEntries); err != nil {
- t.Fatalf("WriteEntries failed: %v", err)
- }
-
- entries, err := cache.ReadEntries(dbName, "vuln.example.com")
- if err != nil {
- t.Fatalf("ReadEntries failed for cached package: %v", err)
- }
- if !reflect.DeepEqual(entries, expectedEntries) {
- t.Errorf("ReadEntries returned unexpected entries, got:\n%v\nwant:\n%v", entries, expectedEntries)
- }
-}
-
-func TestConcurrency(t *testing.T) {
- tmpDir := t.TempDir()
-
- cache := &FSCache{rootDir: tmpDir}
- dbName := "vulndb.golang.org"
-
- g := new(errgroup.Group)
- for i := 0; i < 1000; i++ {
- i := i
- g.Go(func() error {
- id := i % 5
- p := fmt.Sprintf("package%d", id)
-
- entries, err := cache.ReadEntries(dbName, p)
- if err != nil {
- return err
- }
-
- err = cache.WriteEntries(dbName, p, append(entries, &osv.Entry{ID: fmt.Sprint(id)}))
- if err != nil {
- return err
- }
- return nil
- })
- }
-
- if err := g.Wait(); err != nil {
- t.Errorf("error in parallel cache entries read/write: %v", err)
- }
-
- // sanity checking
- for i := 0; i < 5; i++ {
- id := fmt.Sprint(i)
- p := fmt.Sprintf("package%s", id)
-
- es, err := cache.ReadEntries(dbName, p)
- if err != nil {
- t.Fatalf("failed to read entries: %v", err)
- }
- for _, e := range es {
- if e.ID != id {
- t.Errorf("want %s ID for vuln entry; got %s", id, e.ID)
- }
- }
- }
-
- // do similar for cache index
- start := time.Now()
- for i := 0; i < 1000; i++ {
- i := i
- g.Go(func() error {
- id := i % 5
- p := fmt.Sprintf("package%v", id)
-
- idx, _, err := cache.ReadIndex(dbName)
- if err != nil {
- return err
- }
-
- if idx == nil {
- idx = client.DBIndex{}
- }
-
- // sanity checking
- if rt, ok := idx[p]; ok && rt.Before(start) {
- return fmt.Errorf("unexpected past time in index: %v before start %v", rt, start)
- }
-
- now := time.Now()
- idx[p] = now
- if err := cache.WriteIndex(dbName, idx, now); err != nil {
- return err
- }
- return nil
- })
- }
-
- if err := g.Wait(); err != nil {
- t.Errorf("error in parallel cache index read/write: %v", err)
- }
-}
diff --git a/gopls/internal/govulncheck/copy.sh b/gopls/internal/govulncheck/copy.sh
deleted file mode 100755
index 24ed45bfe5a..00000000000
--- a/gopls/internal/govulncheck/copy.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash -eu
-
-# Copyright 2020 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-set -o pipefail
-
-# Copy golang.org/x/vuln/cmd/govulncheck/internal/govulncheck into this directory.
-# Assume the x/vuln repo is a sibling of the tools repo.
-
-rm -f *.go
-cp ../../../../vuln/cmd/govulncheck/internal/govulncheck/*.go .
diff --git a/gopls/internal/govulncheck/semver/semver.go b/gopls/internal/govulncheck/semver/semver.go
new file mode 100644
index 00000000000..4ab298d137b
--- /dev/null
+++ b/gopls/internal/govulncheck/semver/semver.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+// Package semver provides shared utilities for manipulating
+// Go semantic versions.
+package semver
+
+import (
+ "regexp"
+ "strings"
+)
+
+// addSemverPrefix adds a 'v' prefix to s if it isn't already prefixed
+// with 'v' or 'go'. This allows us to easily test go-style SEMVER
+// strings against normal SEMVER strings.
+func addSemverPrefix(s string) string {
+ if !strings.HasPrefix(s, "v") && !strings.HasPrefix(s, "go") {
+ return "v" + s
+ }
+ return s
+}
+
+// removeSemverPrefix removes the 'v' or 'go' prefixes from go-style
+// SEMVER strings, for usage in the public vulnerability format.
+func removeSemverPrefix(s string) string {
+ s = strings.TrimPrefix(s, "v")
+ s = strings.TrimPrefix(s, "go")
+ return s
+}
+
+// CanonicalizeSemverPrefix turns a SEMVER string into the canonical
+// representation using the 'v' prefix, as used by the OSV format.
+// Input may be a bare SEMVER ("1.2.3"), Go prefixed SEMVER ("go1.2.3"),
+// or already canonical SEMVER ("v1.2.3").
+func CanonicalizeSemverPrefix(s string) string {
+ return addSemverPrefix(removeSemverPrefix(s))
+}
+
+var (
+ // Regexp for matching go tags. The groups are:
+ // 1 the major.minor version
+ // 2 the patch version, or empty if none
+ // 3 the entire prerelease, if present
+ // 4 the prerelease type ("beta" or "rc")
+ // 5 the prerelease number
+ tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`)
+)
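
For reference, a small sketch of what the documented capture groups of `tagRegexp` yield for a typical pre-release tag (the surrounding `main` package is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

var tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`)

func main() {
	m := tagRegexp.FindStringSubmatch("go1.18beta2")
	// m[1] = "1.18" (major.minor), m[2] = "" (no patch version),
	// m[3] = "beta2" (entire prerelease), m[4] = "beta", m[5] = "2".
	fmt.Printf("%q\n", m[1:])
}
```
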
diff --git a/gopls/internal/govulncheck/semver/semver_test.go b/gopls/internal/govulncheck/semver/semver_test.go
new file mode 100644
index 00000000000..6daead6855b
--- /dev/null
+++ b/gopls/internal/govulncheck/semver/semver_test.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package semver
+
+import (
+ "testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+ for _, test := range []struct {
+ v string
+ want string
+ }{
+ {"v1.2.3", "v1.2.3"},
+ {"1.2.3", "v1.2.3"},
+ {"go1.2.3", "v1.2.3"},
+ } {
+ got := CanonicalizeSemverPrefix(test.v)
+ if got != test.want {
+ t.Errorf("want %s; got %s", test.want, got)
+ }
+ }
+}
diff --git a/gopls/internal/govulncheck/source.go b/gopls/internal/govulncheck/source.go
deleted file mode 100644
index 752a8313091..00000000000
--- a/gopls/internal/govulncheck/source.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package govulncheck
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
-
- "golang.org/x/tools/go/packages"
- "golang.org/x/vuln/client"
- "golang.org/x/vuln/vulncheck"
-)
-
-// A PackageError contains errors from loading a set of packages.
-type PackageError struct {
- Errors []packages.Error
-}
-
-func (e *PackageError) Error() string {
- var b strings.Builder
- fmt.Fprintln(&b, "Packages contain errors:")
- for _, e := range e.Errors {
- fmt.Println(&b, e)
- }
- return b.String()
-}
-
-// LoadPackages loads the packages matching patterns using cfg, after setting
-// the cfg mode flags that vulncheck needs for analysis.
-// If the packages contain errors, a PackageError is returned containing a list of the errors,
-// along with the packages themselves.
-func LoadPackages(cfg *packages.Config, patterns ...string) ([]*vulncheck.Package, error) {
- cfg.Mode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles |
- packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes |
- packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps |
- packages.NeedModule
-
- pkgs, err := packages.Load(cfg, patterns...)
- vpkgs := vulncheck.Convert(pkgs)
- if err != nil {
- return nil, err
- }
- var perrs []packages.Error
- packages.Visit(pkgs, nil, func(p *packages.Package) {
- perrs = append(perrs, p.Errors...)
- })
- if len(perrs) > 0 {
- err = &PackageError{perrs}
- }
- return vpkgs, err
-}
-
-// Source calls vulncheck.Source on the Go source in pkgs. It returns the result
-// with Vulns trimmed to those that are actually called.
-func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*vulncheck.Result, error) {
- r, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{Client: c})
- if err != nil {
- return nil, err
- }
- // Keep only the vulns that are called.
- var vulns []*vulncheck.Vuln
- for _, v := range r.Vulns {
- if v.CallSink != 0 {
- vulns = append(vulns, v)
- }
- }
- r.Vulns = vulns
- return r, nil
-}
-
-// CallInfo is information about calls to vulnerable functions.
-type CallInfo struct {
- CallStacks map[*vulncheck.Vuln][]vulncheck.CallStack // all call stacks
- VulnGroups [][]*vulncheck.Vuln // vulns grouped by ID and package
- ModuleVersions map[string]string // map from module paths to versions
- TopPackages map[string]bool // top-level packages
-}
-
-// GetCallInfo computes call stacks and related information from a vulncheck.Result.
-// I also makes a set of top-level packages from pkgs.
-func GetCallInfo(r *vulncheck.Result, pkgs []*vulncheck.Package) *CallInfo {
- pset := map[string]bool{}
- for _, p := range pkgs {
- pset[p.PkgPath] = true
- }
- return &CallInfo{
- CallStacks: vulncheck.CallStacks(r),
- VulnGroups: groupByIDAndPackage(r.Vulns),
- ModuleVersions: moduleVersionMap(r.Modules),
- TopPackages: pset,
- }
-}
-
-func groupByIDAndPackage(vs []*vulncheck.Vuln) [][]*vulncheck.Vuln {
- groups := map[[2]string][]*vulncheck.Vuln{}
- for _, v := range vs {
- key := [2]string{v.OSV.ID, v.PkgPath}
- groups[key] = append(groups[key], v)
- }
-
- var res [][]*vulncheck.Vuln
- for _, g := range groups {
- res = append(res, g)
- }
- sort.Slice(res, func(i, j int) bool {
- return res[i][0].PkgPath < res[j][0].PkgPath
- })
- return res
-}
-
-// moduleVersionMap builds a map from module paths to versions.
-func moduleVersionMap(mods []*vulncheck.Module) map[string]string {
- moduleVersions := map[string]string{}
- for _, m := range mods {
- v := m.Version
- if m.Replace != nil {
- v = m.Replace.Version
- }
- moduleVersions[m.Path] = v
- }
- return moduleVersions
-}
diff --git a/gopls/internal/govulncheck/types.go b/gopls/internal/govulncheck/types.go
new file mode 100644
index 00000000000..71984519e02
--- /dev/null
+++ b/gopls/internal/govulncheck/types.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package govulncheck
+
+import "time"
+
+// Result is the result of vulnerability scanning.
+type Result struct {
+ // Vulns contains all vulnerabilities that are called or imported by
+ // the analyzed module.
+ Vulns []*Vuln `json:",omitempty"`
+
+ // Mode contains the source of the vulnerability info.
+ // Clients of the gopls.fetch_vulncheck_result command may need
+	// to interpret the vulnerabilities differently based on the
+	// analysis mode. For example, in 'govulncheck' mode a Vuln
+	// without call stack traces indicates a vulnerability that is
+	// not actually called. In 'imports' mode, by contrast, a Vuln
+	// without call stack traces only means that the package with the
+	// vulnerability is known to the workspace; whether the vulnerable
+	// symbols are actually used is not known.
+ Mode AnalysisMode `json:",omitempty"`
+
+ // AsOf describes when this Result was computed using govulncheck.
+ // It is valid only with the govulncheck analysis mode.
+ AsOf time.Time `json:",omitempty"`
+}
+
+type AnalysisMode string
+
+const (
+ ModeInvalid AnalysisMode = "" // zero value
+ ModeGovulncheck AnalysisMode = "govulncheck"
+ ModeImports AnalysisMode = "imports"
+)
diff --git a/gopls/internal/govulncheck/types_118.go b/gopls/internal/govulncheck/types_118.go
new file mode 100644
index 00000000000..7b354d622a8
--- /dev/null
+++ b/gopls/internal/govulncheck/types_118.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+// Package govulncheck provides an experimental govulncheck API.
+package govulncheck
+
+import (
+ "golang.org/x/vuln/exp/govulncheck"
+)
+
+var (
+ // Source reports vulnerabilities that affect the analyzed packages.
+ Source = govulncheck.Source
+
+ // DefaultCache constructs cache for a vulnerability database client.
+ DefaultCache = govulncheck.DefaultCache
+)
+
+type (
+ // Config is the configuration for Main.
+ Config = govulncheck.Config
+
+ // Vuln represents a single OSV entry.
+ Vuln = govulncheck.Vuln
+
+ // Module represents a specific vulnerability relevant to a
+ // single module or package.
+ Module = govulncheck.Module
+
+ // Package is a Go package with known vulnerable symbols.
+ Package = govulncheck.Package
+
+ // CallStacks contains a representative call stack for each
+ // vulnerable symbol that is called.
+ CallStack = govulncheck.CallStack
+
+ // StackFrame represents a call stack entry.
+ StackFrame = govulncheck.StackFrame
+)
diff --git a/gopls/internal/govulncheck/types_not118.go b/gopls/internal/govulncheck/types_not118.go
new file mode 100644
index 00000000000..faf5a7055b5
--- /dev/null
+++ b/gopls/internal/govulncheck/types_not118.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package govulncheck
+
+import (
+ "go/token"
+
+ "golang.org/x/vuln/osv"
+)
+
+// Vuln represents a single OSV entry.
+type Vuln struct {
+ // OSV contains all data from the OSV entry for this vulnerability.
+ OSV *osv.Entry
+
+ // Modules contains all of the modules in the OSV entry where a
+ // vulnerable package is imported by the target source code or binary.
+ //
+ // For example, a module M with two packages M/p1 and M/p2, where only p1
+ // is vulnerable, will appear in this list if and only if p1 is imported by
+ // the target source code or binary.
+ Modules []*Module
+}
+
+func (v *Vuln) IsCalled() bool {
+ return false
+}
+
+// Module represents a specific vulnerability relevant to a single module.
+type Module struct {
+ // Path is the module path of the module containing the vulnerability.
+ //
+ // Importable packages in the standard library will have the path "stdlib".
+ Path string
+
+ // FoundVersion is the module version where the vulnerability was found.
+ FoundVersion string
+
+ // FixedVersion is the module version where the vulnerability was
+ // fixed. If there are multiple fixed versions in the OSV report, this will
+ // be the latest fixed version.
+ //
+ // This is empty if a fix is not available.
+ FixedVersion string
+
+ // Packages contains all the vulnerable packages in OSV entry that are
+ // imported by the target source code or binary.
+ //
+ // For example, given a module M with two packages M/p1 and M/p2, where
+	// both p1 and p2 are vulnerable, p1 and p2 will each appear in this
+	// list only if they are individually imported by the target source code or binary.
+ Packages []*Package
+}
+
+// Package is a Go package with known vulnerable symbols.
+type Package struct {
+ // Path is the import path of the package containing the vulnerability.
+ Path string
+
+ // CallStacks contains a representative call stack for each
+ // vulnerable symbol that is called.
+ //
+ // For vulnerabilities found from binary analysis, only CallStack.Symbol
+ // will be provided.
+ //
+ // For non-affecting vulnerabilities reported from the source mode
+ // analysis, this will be empty.
+ CallStacks []CallStack
+}
+
+// CallStacks contains a representative call stack for a vulnerable
+// symbol.
+type CallStack struct {
+ // Symbol is the name of the detected vulnerable function
+ // or method.
+ //
+ // This follows the naming convention in the OSV report.
+ Symbol string
+
+ // Summary is a one-line description of the callstack, used by the
+ // default govulncheck mode.
+ //
+ // Example: module3.main calls github.com/shiyanhui/dht.DHT.Run
+ Summary string
+
+	// Frames contains an entry for each frame in the call stack.
+ //
+ // Frames are sorted starting from the entry point to the
+ // imported vulnerable symbol. The last frame in Frames should match
+ // Symbol.
+ Frames []*StackFrame
+}
+
+// StackFrame represents a call stack entry.
+type StackFrame struct {
+ // PackagePath is the import path.
+ PkgPath string
+
+ // FuncName is the function name.
+ FuncName string
+
+ // RecvType is the fully qualified receiver type,
+ // if the called symbol is a method.
+ //
+ // The client can create the final symbol name by
+ // prepending RecvType to FuncName.
+ RecvType string
+
+ // Position describes an arbitrary source position
+ // including the file, line, and column location.
+ // A Position is valid if the line number is > 0.
+ Position token.Position
+}
+
+func (sf *StackFrame) Name() string {
+ return ""
+}
+
+func (sf *StackFrame) Pos() string {
+ return ""
+}
diff --git a/gopls/internal/govulncheck/util.go b/gopls/internal/govulncheck/util.go
index baa2d961329..544fba2a593 100644
--- a/gopls/internal/govulncheck/util.go
+++ b/gopls/internal/govulncheck/util.go
@@ -8,23 +8,24 @@
package govulncheck
import (
- "fmt"
- "strings"
-
"golang.org/x/mod/semver"
+ isem "golang.org/x/tools/gopls/internal/govulncheck/semver"
"golang.org/x/vuln/osv"
- "golang.org/x/vuln/vulncheck"
)
// LatestFixed returns the latest fixed version in the list of affected ranges,
// or the empty string if there are no fixed versions.
-func LatestFixed(as []osv.Affected) string {
+func LatestFixed(modulePath string, as []osv.Affected) string {
v := ""
for _, a := range as {
+ if a.Package.Name != modulePath {
+ continue
+ }
for _, r := range a.Ranges {
if r.Type == osv.TypeSemver {
for _, e := range r.Events {
- if e.Fixed != "" && (v == "" || semver.Compare(e.Fixed, v) > 0) {
+ if e.Fixed != "" && (v == "" ||
+ semver.Compare(isem.CanonicalizeSemverPrefix(e.Fixed), isem.CanonicalizeSemverPrefix(v)) > 0) {
v = e.Fixed
}
}
@@ -33,77 +34,3 @@ func LatestFixed(as []osv.Affected) string {
}
return v
}
-
-// SummarizeCallStack returns a short description of the call stack.
-// It uses one of two forms, depending on what the lowest function F in topPkgs
-// calls:
-// - If it calls a function V from the vulnerable package, then summarizeCallStack
-// returns "F calls V".
-// - If it calls a function G in some other package, which eventually calls V,
-// it returns "F calls G, which eventually calls V".
-//
-// If it can't find any of these functions, summarizeCallStack returns the empty string.
-func SummarizeCallStack(cs vulncheck.CallStack, topPkgs map[string]bool, vulnPkg string) string {
- // Find the lowest function in the top packages.
- iTop := lowest(cs, func(e vulncheck.StackEntry) bool {
- return topPkgs[PkgPath(e.Function)]
- })
- if iTop < 0 {
- return ""
- }
- // Find the highest function in the vulnerable package that is below iTop.
- iVuln := highest(cs[iTop+1:], func(e vulncheck.StackEntry) bool {
- return PkgPath(e.Function) == vulnPkg
- })
- if iVuln < 0 {
- return ""
- }
- iVuln += iTop + 1 // adjust for slice in call to highest.
- topName := FuncName(cs[iTop].Function)
- vulnName := FuncName(cs[iVuln].Function)
- if iVuln == iTop+1 {
- return fmt.Sprintf("%s calls %s", topName, vulnName)
- }
- return fmt.Sprintf("%s calls %s, which eventually calls %s",
- topName, FuncName(cs[iTop+1].Function), vulnName)
-}
-
-// highest returns the highest (one with the smallest index) entry in the call
-// stack for which f returns true.
-func highest(cs vulncheck.CallStack, f func(e vulncheck.StackEntry) bool) int {
- for i := 0; i < len(cs); i++ {
- if f(cs[i]) {
- return i
- }
- }
- return -1
-}
-
-// lowest returns the lowest (one with the largets index) entry in the call
-// stack for which f returns true.
-func lowest(cs vulncheck.CallStack, f func(e vulncheck.StackEntry) bool) int {
- for i := len(cs) - 1; i >= 0; i-- {
- if f(cs[i]) {
- return i
- }
- }
- return -1
-}
-
-// PkgPath returns the package path from fn.
-func PkgPath(fn *vulncheck.FuncNode) string {
- if fn.PkgPath != "" {
- return fn.PkgPath
- }
- s := strings.TrimPrefix(fn.RecvType, "*")
- if i := strings.LastIndexByte(s, '.'); i > 0 {
- s = s[:i]
- }
- return s
-}
-
-// FuncName returns the function name from fn, adjusted
-// to remove pointer annotations.
-func FuncName(fn *vulncheck.FuncNode) string {
- return strings.TrimPrefix(fn.String(), "*")
-}
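
The canonicalization in `LatestFixed` matters because `golang.org/x/mod/semver` only recognizes versions with a leading `v`; anything else is treated as invalid and compares lower than every valid version. A short sketch, using a simplified stand-in for `CanonicalizeSemverPrefix`:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

// canonicalize is a simplified stand-in for CanonicalizeSemverPrefix:
// drop a "v" or "go" prefix, then add the "v" prefix x/mod/semver needs.
func canonicalize(s string) string {
	s = strings.TrimPrefix(s, "v")
	s = strings.TrimPrefix(s, "go")
	return "v" + s
}

func main() {
	// "go1.2.4" is not valid semver, so it compares below every valid version.
	fmt.Println(semver.Compare("go1.2.4", "v1.2.3")) // -1 (misleading)

	// After canonicalization the comparison reflects the real ordering.
	fmt.Println(semver.Compare(canonicalize("go1.2.4"), canonicalize("1.2.3"))) // 1
}
```
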
diff --git a/gopls/internal/govulncheck/util_test.go b/gopls/internal/govulncheck/util_test.go
deleted file mode 100644
index 3288cd84c83..00000000000
--- a/gopls/internal/govulncheck/util_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package govulncheck
-
-import (
- "strings"
- "testing"
-
- "golang.org/x/vuln/vulncheck"
-)
-
-func TestPkgPath(t *testing.T) {
- for _, test := range []struct {
- in vulncheck.FuncNode
- want string
- }{
- {
- vulncheck.FuncNode{PkgPath: "math", Name: "Floor"},
- "math",
- },
- {
- vulncheck.FuncNode{RecvType: "a.com/b.T", Name: "M"},
- "a.com/b",
- },
- {
- vulncheck.FuncNode{RecvType: "*a.com/b.T", Name: "M"},
- "a.com/b",
- },
- } {
- got := PkgPath(&test.in)
- if got != test.want {
- t.Errorf("%+v: got %q, want %q", test.in, got, test.want)
- }
- }
-}
-
-func TestSummarizeCallStack(t *testing.T) {
- topPkgs := map[string]bool{"t1": true, "t2": true}
- vulnPkg := "v"
-
- for _, test := range []struct {
- in, want string
- }{
- {"a.F", ""},
- {"t1.F", ""},
- {"v.V", ""},
- {
- "t1.F v.V",
- "t1.F calls v.V",
- },
- {
- "t1.F t2.G v.V1 v.v2",
- "t2.G calls v.V1",
- },
- {
- "t1.F x.Y t2.G a.H b.I c.J v.V",
- "t2.G calls a.H, which eventually calls v.V",
- },
- } {
- in := stringToCallStack(test.in)
- got := SummarizeCallStack(in, topPkgs, vulnPkg)
- if got != test.want {
- t.Errorf("%s:\ngot %s\nwant %s", test.in, got, test.want)
- }
- }
-}
-
-func stringToCallStack(s string) vulncheck.CallStack {
- var cs vulncheck.CallStack
- for _, e := range strings.Fields(s) {
- parts := strings.Split(e, ".")
- cs = append(cs, vulncheck.StackEntry{
- Function: &vulncheck.FuncNode{
- PkgPath: parts[0],
- Name: parts[1],
- },
- })
- }
- return cs
-}
diff --git a/gopls/internal/govulncheck/vulncache.go b/gopls/internal/govulncheck/vulncache.go
new file mode 100644
index 00000000000..a259f027336
--- /dev/null
+++ b/gopls/internal/govulncheck/vulncache.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package govulncheck
+
+import (
+ "sync"
+ "time"
+
+ vulnc "golang.org/x/vuln/client"
+ "golang.org/x/vuln/osv"
+)
+
+// inMemoryCache is an implementation of the [client.Cache] interface
+// that "decorates" another instance of that interface to provide
+// an additional layer of (memory-based) caching.
+type inMemoryCache struct {
+ mu sync.Mutex
+ underlying vulnc.Cache
+ db map[string]*db
+}
+
+var _ vulnc.Cache = &inMemoryCache{}
+
+type db struct {
+ retrieved time.Time
+ index vulnc.DBIndex
+ entry map[string][]*osv.Entry
+}
+
+// NewInMemoryCache returns a new memory-based cache that decorates
+// the provided cache (file-based, perhaps).
+func NewInMemoryCache(underlying vulnc.Cache) *inMemoryCache {
+ return &inMemoryCache{
+ underlying: underlying,
+ db: make(map[string]*db),
+ }
+}
+
+func (c *inMemoryCache) lookupDBLocked(dbName string) *db {
+ cached := c.db[dbName]
+ if cached == nil {
+ cached = &db{entry: make(map[string][]*osv.Entry)}
+ c.db[dbName] = cached
+ }
+ return cached
+}
+
+// ReadIndex returns the index for dbName from the cache, or returns zero values
+// if it is not present.
+func (c *inMemoryCache) ReadIndex(dbName string) (vulnc.DBIndex, time.Time, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+
+ if cached.retrieved.IsZero() {
+ // First time ReadIndex is called.
+ index, retrieved, err := c.underlying.ReadIndex(dbName)
+ if err != nil {
+ return index, retrieved, err
+ }
+ cached.index, cached.retrieved = index, retrieved
+ }
+ return cached.index, cached.retrieved, nil
+}
+
+// WriteIndex puts the index and retrieved time into the cache.
+func (c *inMemoryCache) WriteIndex(dbName string, index vulnc.DBIndex, retrieved time.Time) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+ cached.index, cached.retrieved = index, retrieved
+ // TODO(hyangah): shouldn't we invalidate all cached entries?
+ return c.underlying.WriteIndex(dbName, index, retrieved)
+}
+
+// ReadEntries returns the vulndb entries for path from the cache.
+func (c *inMemoryCache) ReadEntries(dbName, path string) ([]*osv.Entry, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+	entries, ok := cached.entry[path]
+	if !ok { // cache miss: read from the underlying cache and remember the result
+		var err error
+		entries, err = c.underlying.ReadEntries(dbName, path)
+		if err != nil {
+			return nil, err
+		}
+		cached.entry[path] = entries
+	}
+	return entries, nil
+}
+
+// WriteEntries puts the entries for path into the cache.
+func (c *inMemoryCache) WriteEntries(dbName, path string, entries []*osv.Entry) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+ cached.entry[path] = entries
+ return c.underlying.WriteEntries(dbName, path, entries)
+}
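
Because `inMemoryCache` only decorates another `client.Cache`, its behavior can be sketched with a trivial stub as the underlying layer. A hypothetical in-package test (the `nopCache` stub and test name are illustrative; only `NewInMemoryCache` comes from this change):

```go
package govulncheck

import (
	"testing"
	"time"

	vulnc "golang.org/x/vuln/client"
	"golang.org/x/vuln/osv"
)

// nopCache is an always-empty stand-in for the file-system cache.
type nopCache struct{}

func (nopCache) ReadIndex(string) (vulnc.DBIndex, time.Time, error) { return nil, time.Time{}, nil }
func (nopCache) WriteIndex(string, vulnc.DBIndex, time.Time) error  { return nil }
func (nopCache) ReadEntries(string, string) ([]*osv.Entry, error)   { return nil, nil }
func (nopCache) WriteEntries(string, string, []*osv.Entry) error    { return nil }

func TestInMemoryCacheSketch(t *testing.T) {
	c := NewInMemoryCache(nopCache{})

	want := []*osv.Entry{{ID: "GO-2022-0001"}}
	if err := c.WriteEntries("vuln.go.dev", "example.com/mod", want); err != nil {
		t.Fatal(err)
	}
	// The read is served from the in-memory layer, not the empty stub.
	got, err := c.ReadEntries("vuln.go.dev", "example.com/mod")
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 1 || got[0].ID != "GO-2022-0001" {
		t.Errorf("ReadEntries = %v, want %v", got, want)
	}
}
```
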
diff --git a/gopls/internal/hooks/analysis.go b/gopls/internal/hooks/analysis.go
deleted file mode 100644
index 51048991d5a..00000000000
--- a/gopls/internal/hooks/analysis.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.17
-// +build go1.17
-
-package hooks
-
-import (
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "honnef.co/go/tools/analysis/lint"
- "honnef.co/go/tools/quickfix"
- "honnef.co/go/tools/simple"
- "honnef.co/go/tools/staticcheck"
- "honnef.co/go/tools/stylecheck"
-)
-
-func updateAnalyzers(options *source.Options) {
- options.StaticcheckSupported = true
-
- mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity {
- switch severity {
- case lint.SeverityError:
- return protocol.SeverityError
- case lint.SeverityDeprecated:
- // TODO(dh): in LSP, deprecated is a tag, not a severity.
- // We'll want to support this once we enable SA5011.
- return protocol.SeverityWarning
- case lint.SeverityWarning:
- return protocol.SeverityWarning
- case lint.SeverityInfo:
- return protocol.SeverityInformation
- case lint.SeverityHint:
- return protocol.SeverityHint
- default:
- return protocol.SeverityWarning
- }
- }
- add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) {
- for _, a := range analyzers {
- if _, ok := skip[a.Analyzer.Name]; ok {
- continue
- }
-
- enabled := !a.Doc.NonDefault
- options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity))
- }
- }
-
- add(simple.Analyzers, nil)
- add(staticcheck.Analyzers, map[string]struct{}{
- // This check conflicts with the vet printf check (golang/go#34494).
- "SA5009": {},
- // This check relies on facts from dependencies, which
- // we don't currently compute.
- "SA5011": {},
- })
- add(stylecheck.Analyzers, nil)
- add(quickfix.Analyzers, nil)
-}
diff --git a/gopls/internal/hooks/analysis_116.go b/gopls/internal/hooks/analysis_116.go
new file mode 100644
index 00000000000..dd429dea898
--- /dev/null
+++ b/gopls/internal/hooks/analysis_116.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.17
+// +build !go1.17
+
+package hooks
+
+import "golang.org/x/tools/gopls/internal/lsp/source"
+
+func updateAnalyzers(options *source.Options) {
+ options.StaticcheckSupported = false
+}
diff --git a/gopls/internal/hooks/analysis_117.go b/gopls/internal/hooks/analysis_117.go
index 02f9170ab63..27ab9a699f9 100644
--- a/gopls/internal/hooks/analysis_117.go
+++ b/gopls/internal/hooks/analysis_117.go
@@ -1,14 +1,62 @@
-// Copyright 2021 The Go Authors. All rights reserved.
+// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !go1.17
-// +build !go1.17
+//go:build go1.17
+// +build go1.17
package hooks
-import "golang.org/x/tools/internal/lsp/source"
+import (
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "honnef.co/go/tools/analysis/lint"
+ "honnef.co/go/tools/quickfix"
+ "honnef.co/go/tools/simple"
+ "honnef.co/go/tools/staticcheck"
+ "honnef.co/go/tools/stylecheck"
+)
func updateAnalyzers(options *source.Options) {
- options.StaticcheckSupported = false
+ options.StaticcheckSupported = true
+
+ mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity {
+ switch severity {
+ case lint.SeverityError:
+ return protocol.SeverityError
+ case lint.SeverityDeprecated:
+ // TODO(dh): in LSP, deprecated is a tag, not a severity.
+ // We'll want to support this once we enable SA5011.
+ return protocol.SeverityWarning
+ case lint.SeverityWarning:
+ return protocol.SeverityWarning
+ case lint.SeverityInfo:
+ return protocol.SeverityInformation
+ case lint.SeverityHint:
+ return protocol.SeverityHint
+ default:
+ return protocol.SeverityWarning
+ }
+ }
+ add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) {
+ for _, a := range analyzers {
+ if _, ok := skip[a.Analyzer.Name]; ok {
+ continue
+ }
+
+ enabled := !a.Doc.NonDefault
+ options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity))
+ }
+ }
+
+ add(simple.Analyzers, nil)
+ add(staticcheck.Analyzers, map[string]struct{}{
+ // This check conflicts with the vet printf check (golang/go#34494).
+ "SA5009": {},
+ // This check relies on facts from dependencies, which
+ // we don't currently compute.
+ "SA5011": {},
+ })
+ add(stylecheck.Analyzers, nil)
+ add(quickfix.Analyzers, nil)
}
diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go
index a307ba77fd6..f7fec5a7bb2 100644
--- a/gopls/internal/hooks/diff.go
+++ b/gopls/internal/hooks/diff.go
@@ -5,37 +5,165 @@
package hooks
import (
+ "encoding/json"
"fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "time"
"github.com/sergi/go-diff/diffmatchpatch"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/diff"
)
-func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) {
+// diffstat records information about a single diff request,
+// collected while the new diff code is being rolled out.
+type diffstat struct {
+ Before, After int
+ Oldedits, Newedits int
+ Oldtime, Newtime time.Duration
+ Stack string
+ Msg string `json:",omitempty"` // for errors
+	Ignored int `json:",omitempty"` // number of skipped records with 0 edits
+}
+
+var (
+ ignoredMu sync.Mutex
+ ignored int // counter of diff requests on equal strings
+
+ diffStatsOnce sync.Once
+ diffStats *os.File // never closed
+)
+
+// save writes a JSON record of statistics about diff requests to a temporary file.
+func (s *diffstat) save() {
+ diffStatsOnce.Do(func() {
+ f, err := ioutil.TempFile("", "gopls-diff-stats-*")
+ if err != nil {
+ log.Printf("can't create diff stats temp file: %v", err) // e.g. disk full
+ return
+ }
+ diffStats = f
+ })
+ if diffStats == nil {
+ return
+ }
+
+ // diff is frequently called with equal strings,
+ // so we count repeated instances but only print every 15th.
+ ignoredMu.Lock()
+ if s.Oldedits == 0 && s.Newedits == 0 {
+ ignored++
+ if ignored < 15 {
+ ignoredMu.Unlock()
+ return
+ }
+ }
+ s.Ignored = ignored
+ ignored = 0
+ ignoredMu.Unlock()
+
+ // Record the name of the file in which diff was called.
+ // There aren't many calls, so only the base name is needed.
+ if _, file, line, ok := runtime.Caller(2); ok {
+ s.Stack = fmt.Sprintf("%s:%d", filepath.Base(file), line)
+ }
+ x, err := json.Marshal(s)
+ if err != nil {
+ log.Fatalf("internal error marshalling JSON: %v", err)
+ }
+ fmt.Fprintf(diffStats, "%s\n", x)
+}
+
+// disaster is called when the diff algorithm panics or produces a
+// diff that cannot be applied. It saves the broken input in a
+// new temporary file and logs the file name, which is returned.
+func disaster(before, after string) string {
+	// We use the pid to salt the name rather than ioutil.TempFile,
+ // so that each process creates at most one file.
+ // One is sufficient for a bug report.
+ filename := fmt.Sprintf("%s/gopls-diff-bug-%x", os.TempDir(), os.Getpid())
+
+ // We use NUL as a separator: it should never appear in Go source.
+ data := before + "\x00" + after
+
+ if err := ioutil.WriteFile(filename, []byte(data), 0600); err != nil {
+ log.Printf("failed to write diff bug report: %v", err)
+ return ""
+ }
+
+ bug.Reportf("Bug detected in diff algorithm! Please send file %s to the maintainers of gopls if you are comfortable sharing its contents.", filename)
+
+ return filename
+}
+
+// BothDiffs calls both the new and old diff implementations, checks that the
+// new edits transform before into after, and records some statistics.
+func BothDiffs(before, after string) (edits []diff.Edit) {
+	// The new diff code contains a lot of internal checks that panic when they
+	// fail. This code catches the panics (or other failures), tries to save
+	// the failing example, and asks the user to send it back to us; it would
+	// also change options.newDiff to 'old', if only we could figure out how.
+ stat := diffstat{Before: len(before), After: len(after)}
+ now := time.Now()
+ oldedits := ComputeEdits(before, after)
+ stat.Oldedits = len(oldedits)
+ stat.Oldtime = time.Since(now)
+ defer func() {
+ if r := recover(); r != nil {
+ disaster(before, after)
+ edits = oldedits
+ }
+ }()
+ now = time.Now()
+ newedits := diff.Strings(before, after)
+ stat.Newedits = len(newedits)
+	stat.Newtime = time.Since(now)
+ got, err := diff.Apply(before, newedits)
+ if err != nil || got != after {
+ stat.Msg += "FAIL"
+ disaster(before, after)
+ stat.save()
+ return oldedits
+ }
+ stat.save()
+ return newedits
+}
+
+// ComputeEdits computes a diff using the github.com/sergi/go-diff implementation.
+func ComputeEdits(before, after string) (edits []diff.Edit) {
// The go-diff library has an unresolved panic (see golang/go#278774).
// TODO(rstambler): Remove the recover once the issue has been fixed
// upstream.
defer func() {
if r := recover(); r != nil {
- edits = nil
- err = fmt.Errorf("unable to compute edits for %s: %s", uri.Filename(), r)
+ bug.Reportf("unable to compute edits: %s", r)
+ // Report one big edit for the whole file.
+ edits = []diff.Edit{{
+ Start: 0,
+ End: len(before),
+ New: after,
+ }}
}
}()
diffs := diffmatchpatch.New().DiffMain(before, after, true)
- edits = make([]diff.TextEdit, 0, len(diffs))
+ edits = make([]diff.Edit, 0, len(diffs))
offset := 0
for _, d := range diffs {
- start := span.NewPoint(0, 0, offset)
+ start := offset
switch d.Type {
case diffmatchpatch.DiffDelete:
offset += len(d.Text)
- edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.NewPoint(0, 0, offset))})
+ edits = append(edits, diff.Edit{Start: start, End: offset})
case diffmatchpatch.DiffEqual:
offset += len(d.Text)
case diffmatchpatch.DiffInsert:
- edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.Point{}), NewText: d.Text})
+ edits = append(edits, diff.Edit{Start: start, End: start, New: d.Text})
}
}
- return edits, nil
+ return edits
}
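
// Editor's note: not part of the patch. A minimal sketch of how the edits
// produced above are meant to be used, assuming the diff.Edit, diff.Strings,
// and diff.Apply API shown in this hunk. Both packages are internal, so this
// only builds inside the x/tools module; it is illustration, not a test.
package main

import (
	"fmt"

	"golang.org/x/tools/gopls/internal/hooks"
	"golang.org/x/tools/internal/diff"
)

func main() {
	before := "hello\nworld\n"
	after := "hello\nGo\n"

	// Old (go-diff based) implementation: byte-offset edits into before.
	for _, e := range hooks.ComputeEdits(before, after) {
		fmt.Printf("replace [%d,%d) with %q\n", e.Start, e.End, e.New)
	}

	// New implementation: applying its edits to before must reproduce after,
	// which is exactly the invariant BothDiffs checks.
	edits := diff.Strings(before, after)
	if got, err := diff.Apply(before, edits); err != nil || got != after {
		panic("diff did not round-trip")
	}
}
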
diff --git a/gopls/internal/hooks/diff_test.go b/gopls/internal/hooks/diff_test.go
index d979be78dbe..a46bf3b2d28 100644
--- a/gopls/internal/hooks/diff_test.go
+++ b/gopls/internal/hooks/diff_test.go
@@ -2,15 +2,32 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package hooks_test
+package hooks
import (
+ "io/ioutil"
+ "os"
"testing"
- "golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/diff/difftest"
+ "golang.org/x/tools/internal/diff/difftest"
)
func TestDiff(t *testing.T) {
- difftest.DiffTest(t, hooks.ComputeEdits)
+ difftest.DiffTest(t, ComputeEdits)
+}
+
+func TestDisaster(t *testing.T) {
+ a := "This is a string,(\u0995) just for basic\nfunctionality"
+ b := "This is another string, (\u0996) to see if disaster will store stuff correctly"
+ fname := disaster(a, b)
+ buf, err := ioutil.ReadFile(fname)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != a+"\x00"+b {
+ t.Error("failed to record original strings")
+ }
+ if err := os.Remove(fname); err != nil {
+ t.Error(err)
+ }
}
diff --git a/gopls/internal/hooks/gen-licenses.sh b/gopls/internal/hooks/gen-licenses.sh
index 7d6bab79f54..c35c91260d4 100755
--- a/gopls/internal/hooks/gen-licenses.sh
+++ b/gopls/internal/hooks/gen-licenses.sh
@@ -27,7 +27,7 @@ mods=$(go list -deps -f '{{with .Module}}{{.Path}}{{end}}' golang.org/x/tools/go
for mod in $mods; do
# Find the license file, either LICENSE or COPYING, and add it to the result.
dir=$(go list -m -f {{.Dir}} $mod)
- license=$(ls -1 $dir | egrep -i '^(LICENSE|COPYING)$')
+ license=$(ls -1 $dir | grep -E -i '^(LICENSE|COPYING)$')
echo "-- $mod $license --" >> $tempfile
echo >> $tempfile
sed 's/^-- / &/' $dir/$license >> $tempfile
diff --git a/gopls/internal/hooks/gofumpt_117.go b/gopls/internal/hooks/gofumpt_117.go
new file mode 100644
index 00000000000..71886357704
--- /dev/null
+++ b/gopls/internal/hooks/gofumpt_117.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package hooks
+
+import "golang.org/x/tools/gopls/internal/lsp/source"
+
+func updateGofumpt(options *source.Options) {
+}
diff --git a/gopls/internal/hooks/gofumpt_118.go b/gopls/internal/hooks/gofumpt_118.go
new file mode 100644
index 00000000000..4eb523261dc
--- /dev/null
+++ b/gopls/internal/hooks/gofumpt_118.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package hooks
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "mvdan.cc/gofumpt/format"
+)
+
+func updateGofumpt(options *source.Options) {
+ options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) {
+ return format.Source(src, format.Options{
+ LangVersion: langVersion,
+ ModulePath: modulePath,
+ })
+ }
+}
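
// Editor's note: not part of the patch. A minimal sketch of calling gofumpt
// the same way updateGofumpt wires it into gopls above; the format.Source and
// format.Options usage is taken from the hunk, and the module path below is a
// hypothetical placeholder.
package main

import (
	"fmt"

	"mvdan.cc/gofumpt/format"
)

func main() {
	src := []byte("package p\n\nfunc f() {\n\treturn\n}\n")
	out, err := format.Source(src, format.Options{
		LangVersion: "1.18",
		ModulePath:  "example.com/p", // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
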
diff --git a/gopls/internal/hooks/hooks.go b/gopls/internal/hooks/hooks.go
index 023aefeab98..5624a5eb386 100644
--- a/gopls/internal/hooks/hooks.go
+++ b/gopls/internal/hooks/hooks.go
@@ -8,27 +8,24 @@
package hooks // import "golang.org/x/tools/gopls/internal/hooks"
import (
- "context"
-
- "golang.org/x/tools/gopls/internal/vulncheck"
- "golang.org/x/tools/internal/lsp/source"
- "mvdan.cc/gofumpt/format"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/diff"
"mvdan.cc/xurls/v2"
)
func Options(options *source.Options) {
options.LicensesText = licensesText
if options.GoDiff {
- options.ComputeEdits = ComputeEdits
+ switch options.NewDiff {
+ case "old":
+ options.ComputeEdits = ComputeEdits
+ case "new":
+ options.ComputeEdits = diff.Strings
+ default:
+ options.ComputeEdits = BothDiffs
+ }
}
options.URLRegexp = xurls.Relaxed()
- options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) {
- return format.Source(src, format.Options{
- LangVersion: langVersion,
- ModulePath: modulePath,
- })
- }
updateAnalyzers(options)
-
- options.Govulncheck = vulncheck.Govulncheck
+ updateGofumpt(options)
}
diff --git a/gopls/internal/hooks/licenses_test.go b/gopls/internal/hooks/licenses_test.go
index 3b61d348d95..b10d7e2b36c 100644
--- a/gopls/internal/hooks/licenses_test.go
+++ b/gopls/internal/hooks/licenses_test.go
@@ -15,9 +15,9 @@ import (
)
func TestLicenses(t *testing.T) {
- // License text differs for older Go versions because staticcheck isn't
- // supported for those versions.
- testenv.NeedsGo1Point(t, 17)
+ // License text differs for older Go versions because staticcheck or gofumpt
+ // isn't supported for those versions.
+ testenv.NeedsGo1Point(t, 18)
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
t.Skip("generating licenses only works on Unixes")
diff --git a/internal/lsp/README.md b/gopls/internal/lsp/README.md
similarity index 100%
rename from internal/lsp/README.md
rename to gopls/internal/lsp/README.md
diff --git a/internal/lsp/analysis/embeddirective/embeddirective.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective.go
similarity index 100%
rename from internal/lsp/analysis/embeddirective/embeddirective.go
rename to gopls/internal/lsp/analysis/embeddirective/embeddirective.go
diff --git a/internal/lsp/analysis/embeddirective/embeddirective_test.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go
similarity index 100%
rename from internal/lsp/analysis/embeddirective/embeddirective_test.go
rename to gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go
diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/a.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/embeddirective/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/b.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go
similarity index 100%
rename from internal/lsp/analysis/embeddirective/testdata/src/a/b.go
rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go
diff --git a/internal/lsp/analysis/embeddirective/testdata/src/a/embedText b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText
similarity index 100%
rename from internal/lsp/analysis/embeddirective/testdata/src/a/embedText
rename to gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText
diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go
similarity index 87%
rename from internal/lsp/analysis/fillreturns/fillreturns.go
rename to gopls/internal/lsp/analysis/fillreturns/fillreturns.go
index 72fe65d79ca..c8146df2dd0 100644
--- a/internal/lsp/analysis/fillreturns/fillreturns.go
+++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go
@@ -19,6 +19,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/fuzzy"
"golang.org/x/tools/internal/typeparams"
)
@@ -51,9 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, fmt.Errorf("nil TypeInfo")
}
- errors := analysisinternal.GetTypeErrors(pass)
outer:
- for _, typeErr := range errors {
+ for _, typeErr := range pass.TypeErrors {
// Filter out the errors that are not relevant to this analyzer.
if !FixesError(typeErr) {
continue
@@ -70,6 +70,8 @@ outer:
}
// Get the end position of the error.
+ // (This heuristic assumes that the buffer is formatted,
+ // at least up to the end position of the error.)
var buf bytes.Buffer
if err := format.Node(&buf, pass.Fset, file); err != nil {
continue
@@ -112,7 +114,7 @@ outer:
break
}
}
- if enclosingFunc == nil {
+ if enclosingFunc == nil || enclosingFunc.Results == nil {
continue
}
@@ -155,19 +157,23 @@ outer:
fixed := make([]ast.Expr, len(enclosingFunc.Results.List))
// For each value in the return function declaration, find the leftmost element
- // in the return statement that has the desired type. If no such element exits,
+ // in the return statement that has the desired type. If no such element exists,
// fill in the missing value with the appropriate "zero" value.
+ // Beware that type information may be incomplete.
var retTyps []types.Type
for _, ret := range enclosingFunc.Results.List {
- retTyps = append(retTyps, info.TypeOf(ret.Type))
+ retTyp := info.TypeOf(ret.Type)
+ if retTyp == nil {
+ return nil, nil
+ }
+ retTyps = append(retTyps, retTyp)
}
- matches :=
- analysisinternal.FindMatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg)
+ matches := analysisinternal.MatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg)
for i, retTyp := range retTyps {
var match ast.Expr
var idx int
for j, val := range remaining {
- if !matchingTypes(info.TypeOf(val), retTyp) {
+ if t := info.TypeOf(val); t == nil || !matchingTypes(t, retTyp) {
continue
}
if !analysisinternal.IsZeroValue(val) {
@@ -184,21 +190,19 @@ outer:
fixed[i] = match
remaining = append(remaining[:idx], remaining[idx+1:]...)
} else {
- idents, ok := matches[retTyp]
+ names, ok := matches[retTyp]
if !ok {
return nil, fmt.Errorf("invalid return type: %v", retTyp)
}
- // Find the identifier whose name is most similar to the return type.
- // If we do not find any identifier that matches the pattern,
- // generate a zero value.
- value := analysisinternal.FindBestMatch(retTyp.String(), idents)
- if value == nil {
- value = analysisinternal.ZeroValue(file, pass.Pkg, retTyp)
- }
- if value == nil {
+ // Find the identifier most similar to the return type.
+ // If no identifier matches the pattern, generate a zero value.
+ if best := fuzzy.BestMatch(retTyp.String(), names); best != "" {
+ fixed[i] = ast.NewIdent(best)
+ } else if zero := analysisinternal.ZeroValue(file, pass.Pkg, retTyp); zero != nil {
+ fixed[i] = zero
+ } else {
return nil, nil
}
- fixed[i] = value
}
}
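
// Editor's note: not part of the patch. A sketch of the candidate-selection
// step the fillreturns hunk above now uses: fuzzy.BestMatch picks the
// identifier whose name best matches the wanted type's name and returns ""
// when nothing matches, in which case the analyzer falls back to a zero
// value. The BestMatch signature is assumed from its use in the hunk;
// internal/fuzzy only builds inside the x/tools module.
package main

import (
	"fmt"

	"golang.org/x/tools/internal/fuzzy"
)

func main() {
	candidates := []string{"err", "n", "buf"}
	if best := fuzzy.BestMatch("error", candidates); best != "" {
		fmt.Println("reuse identifier:", best) // expected: err
	} else {
		fmt.Println("fall back to a zero value")
	}
}
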
diff --git a/internal/lsp/analysis/fillreturns/fillreturns_test.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go
similarity index 89%
rename from internal/lsp/analysis/fillreturns/fillreturns_test.go
rename to gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go
index 7ef0d46792e..1f7627551a0 100644
--- a/internal/lsp/analysis/fillreturns/fillreturns_test.go
+++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/fillreturns"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillreturns"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/fillreturns/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
similarity index 100%
rename from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
rename to gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go
similarity index 77%
rename from internal/lsp/analysis/fillstruct/fillstruct.go
rename to gopls/internal/lsp/analysis/fillstruct/fillstruct.go
index f160d4422ae..00857a0953e 100644
--- a/internal/lsp/analysis/fillstruct/fillstruct.go
+++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go
@@ -4,6 +4,12 @@
// Package fillstruct defines an Analyzer that automatically
// fills in a struct declaration with zero value elements for each field.
+//
+// The analyzer's diagnostic is merely a prompt.
+// The actual fix is created by a separate direct call from gopls to
+// the SuggestedFixes function.
+// Tests of Analyzer.Run can be found in ./testdata/src.
+// Tests of the SuggestedFixes logic live in ../../testdata/fillstruct.
package fillstruct
import (
@@ -20,8 +26,10 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/fuzzy"
"golang.org/x/tools/internal/typeparams"
)
@@ -45,12 +53,10 @@ func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{(*ast.CompositeLit)(nil)}
inspect.Preorder(nodeFilter, func(n ast.Node) {
- info := pass.TypesInfo
- if info == nil {
- return
- }
expr := n.(*ast.CompositeLit)
+ // Find enclosing file.
+ // TODO(adonovan): use inspect.WithStack?
var file *ast.File
for _, f := range pass.Files {
if f.Pos() <= expr.Pos() && expr.Pos() <= f.End() {
@@ -62,73 +68,49 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
- typ := info.TypeOf(expr)
+ typ := pass.TypesInfo.TypeOf(expr)
if typ == nil {
return
}
- // Ignore types that have type parameters for now.
- // TODO: support type params.
- if typ, ok := typ.(*types.Named); ok {
- if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 {
- return
- }
- }
-
// Find reference to the type declaration of the struct being initialized.
- for {
- p, ok := typ.Underlying().(*types.Pointer)
- if !ok {
- break
- }
- typ = p.Elem()
- }
- typ = typ.Underlying()
-
- obj, ok := typ.(*types.Struct)
+ typ = deref(typ)
+ tStruct, ok := typ.Underlying().(*types.Struct)
if !ok {
return
}
- fieldCount := obj.NumFields()
+ // Inv: typ is the possibly-named struct type.
+
+ fieldCount := tStruct.NumFields()
// Skip any struct that is already populated or that has no fields.
if fieldCount == 0 || fieldCount == len(expr.Elts) {
return
}
- var fillable bool
+ // Are any fields in need of filling?
var fillableFields []string
for i := 0; i < fieldCount; i++ {
- field := obj.Field(i)
+ field := tStruct.Field(i)
// Ignore fields that are not accessible in the current package.
if field.Pkg() != nil && field.Pkg() != pass.Pkg && !field.Exported() {
continue
}
- // Ignore structs containing fields that have type parameters for now.
- // TODO: support type params.
- if typ, ok := field.Type().(*types.Named); ok {
- if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 {
- return
- }
- }
- if _, ok := field.Type().(*typeparams.TypeParam); ok {
- return
- }
- fillable = true
fillableFields = append(fillableFields, fmt.Sprintf("%s: %s", field.Name(), field.Type().String()))
}
- if !fillable {
+ if len(fillableFields) == 0 {
return
}
+
+ // Derive a name for the struct type.
var name string
- switch typ := expr.Type.(type) {
- case *ast.Ident:
- name = typ.Name
- case *ast.SelectorExpr:
- name = fmt.Sprintf("%s.%s", typ.X, typ.Sel.Name)
- default:
+ if typ != tStruct {
+ // named struct type (e.g. pkg.S[T])
+ name = types.TypeString(typ, types.RelativeTo(pass.Pkg))
+ } else {
+ // anonymous struct type
totalFields := len(fillableFields)
- maxLen := 20
+ const maxLen = 20
// Find the index to cut off printing of fields.
var i, fieldLen int
for i = range fillableFields {
@@ -152,7 +134,13 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
+// SuggestedFix computes the suggested fix for the kinds of
+// diagnostics produced by the Analyzer above.
func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ if info == nil {
+ return nil, fmt.Errorf("nil types.Info")
+ }
+
pos := rng.Start // don't use the end
// TODO(rstambler): Using ast.Inspect would probably be more efficient than
@@ -169,37 +157,29 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
}
}
- if info == nil {
- return nil, fmt.Errorf("nil types.Info")
- }
typ := info.TypeOf(expr)
if typ == nil {
return nil, fmt.Errorf("no composite literal")
}
// Find reference to the type declaration of the struct being initialized.
- for {
- p, ok := typ.Underlying().(*types.Pointer)
- if !ok {
- break
- }
- typ = p.Elem()
- }
- typ = typ.Underlying()
-
- obj, ok := typ.(*types.Struct)
+ typ = deref(typ)
+ tStruct, ok := typ.Underlying().(*types.Struct)
if !ok {
- return nil, fmt.Errorf("unexpected type %v (%T), expected *types.Struct", typ, typ)
+ return nil, fmt.Errorf("%s is not a (pointer to) struct type",
+ types.TypeString(typ, types.RelativeTo(pkg)))
}
- fieldCount := obj.NumFields()
+	// Inv: typ is the possibly-named struct type.
+
+ fieldCount := tStruct.NumFields()
// Check which types have already been filled in. (we only want to fill in
// the unfilled types, or else we'll blat user-supplied details)
- prefilledTypes := map[string]ast.Expr{}
+ prefilledFields := map[string]ast.Expr{}
for _, e := range expr.Elts {
if kv, ok := e.(*ast.KeyValueExpr); ok {
if key, ok := kv.Key.(*ast.Ident); ok {
- prefilledTypes[key.Name] = kv.Value
+ prefilledFields[key.Name] = kv.Value
}
}
}
@@ -209,14 +189,16 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
// each field we're going to set. format.Node only cares about line
// numbers, so we don't need to set columns, and each line can be
// 1 byte long.
+ // TODO(adonovan): why is this necessary? The position information
+ // is going to be wrong for the existing trees in prefilledFields.
+ // Can't the formatter just do its best with an empty fileset?
fakeFset := token.NewFileSet()
tok := fakeFset.AddFile("", -1, fieldCount+2)
line := 2 // account for 1-based lines and the left brace
- var elts []ast.Expr
var fieldTyps []types.Type
for i := 0; i < fieldCount; i++ {
- field := obj.Field(i)
+ field := tStruct.Field(i)
// Ignore fields that are not accessible in the current package.
if field.Pkg() != nil && field.Pkg() != pkg && !field.Exported() {
fieldTyps = append(fieldTyps, nil)
@@ -224,11 +206,13 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
}
fieldTyps = append(fieldTyps, field.Type())
}
- matches := analysisinternal.FindMatchingIdents(fieldTyps, file, rng.Start, info, pkg)
+ matches := analysisinternal.MatchingIdents(fieldTyps, file, rng.Start, info, pkg)
+ var elts []ast.Expr
for i, fieldTyp := range fieldTyps {
if fieldTyp == nil {
- continue
+ continue // TODO(adonovan): is this reachable?
}
+ fieldName := tStruct.Field(i).Name()
tok.AddLine(line - 1) // add 1 byte per line
if line > tok.LineCount() {
@@ -239,30 +223,28 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
kv := &ast.KeyValueExpr{
Key: &ast.Ident{
NamePos: pos,
- Name: obj.Field(i).Name(),
+ Name: fieldName,
},
Colon: pos,
}
- if expr, ok := prefilledTypes[obj.Field(i).Name()]; ok {
+ if expr, ok := prefilledFields[fieldName]; ok {
kv.Value = expr
} else {
- idents, ok := matches[fieldTyp]
+ names, ok := matches[fieldTyp]
if !ok {
return nil, fmt.Errorf("invalid struct field type: %v", fieldTyp)
}
- // Find the identifier whose name is most similar to the name of the field's key.
- // If we do not find any identifier that matches the pattern, generate a new value.
+ // Find the name most similar to the field name.
+ // If no name matches the pattern, generate a zero value.
// NOTE: We currently match on the name of the field key rather than the field type.
- value := analysisinternal.FindBestMatch(obj.Field(i).Name(), idents)
- if value == nil {
- value = populateValue(file, pkg, fieldTyp)
- }
- if value == nil {
+ if best := fuzzy.BestMatch(fieldName, names); best != "" {
+ kv.Value = ast.NewIdent(best)
+ } else if v := populateValue(file, pkg, fieldTyp); v != nil {
+ kv.Value = v
+ } else {
return nil, nil
}
-
- kv.Value = value
}
elts = append(elts, kv)
line++
@@ -290,7 +272,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
// Find the line on which the composite literal is declared.
split := bytes.Split(content, []byte("\n"))
- lineNumber := fset.Position(expr.Lbrace).Line
+ lineNumber := safetoken.StartPosition(fset, expr.Lbrace).Line
firstLine := split[lineNumber-1] // lines are 1-indexed
// Trim the whitespace from the left of the line, and use the index
@@ -306,7 +288,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
}
sug := indent(formatBuf.Bytes(), whitespace)
- if len(prefilledTypes) > 0 {
+ if len(prefilledFields) > 0 {
// Attempt a second pass through the formatter to line up columns.
sourced, err := format.Source(sug)
if err == nil {
@@ -350,16 +332,12 @@ func indent(str, ind []byte) []byte {
//
// When the type of a struct field is a basic literal or interface, we return
// default values. For other types, such as maps, slices, and channels, we create
-// expressions rather than using default values.
+// empty expressions such as []T{} or make(chan T) rather than using default values.
//
// The reasoning here is that users will call fillstruct with the intention of
// initializing the struct, in which case setting these fields to nil has no effect.
func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- under := typ
- if n, ok := typ.(*types.Named); ok {
- under = n.Underlying()
- }
- switch u := under.(type) {
+ switch u := typ.Underlying().(type) {
case *types.Basic:
switch {
case u.Info()&types.IsNumeric != 0:
@@ -373,6 +351,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
default:
panic("unknown basic type")
}
+
case *types.Map:
k := analysisinternal.TypeExpr(f, pkg, u.Key())
v := analysisinternal.TypeExpr(f, pkg, u.Elem())
@@ -395,6 +374,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Elt: s,
},
}
+
case *types.Array:
a := analysisinternal.TypeExpr(f, pkg, u.Elem())
if a == nil {
@@ -408,6 +388,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
},
}
+
case *types.Chan:
v := analysisinternal.TypeExpr(f, pkg, u.Elem())
if v == nil {
@@ -426,6 +407,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
},
}
+
case *types.Struct:
s := analysisinternal.TypeExpr(f, pkg, typ)
if s == nil {
@@ -434,6 +416,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
return &ast.CompositeLit{
Type: s,
}
+
case *types.Signature:
var params []*ast.Field
for i := 0; i < u.Params().Len(); i++ {
@@ -471,6 +454,7 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
Body: &ast.BlockStmt{},
}
+
case *types.Pointer:
switch u.Elem().(type) {
case *types.Basic:
@@ -490,8 +474,34 @@ func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
X: populateValue(f, pkg, u.Elem()),
}
}
+
case *types.Interface:
+ if param, ok := typ.(*typeparams.TypeParam); ok {
+ // *new(T) is the zero value of a type parameter T.
+ // TODO(adonovan): one could give a more specific zero
+ // value if the type has a core type that is, say,
+ // always a number or a pointer. See go/ssa for details.
+ return &ast.StarExpr{
+ X: &ast.CallExpr{
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(param.Obj().Name()),
+ },
+ },
+ }
+ }
+
return ast.NewIdent("nil")
}
return nil
}
+
+func deref(t types.Type) types.Type {
+ for {
+ ptr, ok := t.Underlying().(*types.Pointer)
+ if !ok {
+ return t
+ }
+ t = ptr.Elem()
+ }
+}
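
// Editor's note: not part of the patch. A tiny illustration of the deref
// helper added above: it peels away any number of pointer indirections
// (including through named pointer types, via Underlying) and returns the
// pointed-to type. Standard go/types only.
package main

import (
	"fmt"
	"go/types"
)

func deref(t types.Type) types.Type {
	for {
		ptr, ok := t.Underlying().(*types.Pointer)
		if !ok {
			return t
		}
		t = ptr.Elem()
	}
}

func main() {
	s := types.NewStruct(nil, nil)              // struct{}
	pp := types.NewPointer(types.NewPointer(s)) // **struct{}
	fmt.Println(deref(pp) == s)                 // true
}
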
diff --git a/internal/lsp/analysis/fillstruct/fillstruct_test.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go
similarity index 89%
rename from internal/lsp/analysis/fillstruct/fillstruct_test.go
rename to gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go
index 51a516cdfdb..66642b7ab59 100644
--- a/internal/lsp/analysis/fillstruct/fillstruct_test.go
+++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/fillstruct"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
similarity index 58%
rename from internal/lsp/analysis/fillstruct/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
index 68560092105..9ee3860fcae 100644
--- a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
@@ -19,16 +19,16 @@ type basicStruct struct {
foo int
}
-var _ = basicStruct{} // want ""
+var _ = basicStruct{} // want `Fill basicStruct`
type twoArgStruct struct {
foo int
bar string
}
-var _ = twoArgStruct{} // want ""
+var _ = twoArgStruct{} // want `Fill twoArgStruct`
-var _ = twoArgStruct{ // want ""
+var _ = twoArgStruct{ // want `Fill twoArgStruct`
bar: "bar",
}
@@ -37,9 +37,9 @@ type nestedStruct struct {
basic basicStruct
}
-var _ = nestedStruct{} // want ""
+var _ = nestedStruct{} // want `Fill nestedStruct`
-var _ = data.B{} // want ""
+var _ = data.B{} // want `Fill b.B`
type typedStruct struct {
m map[string]int
@@ -49,25 +49,25 @@ type typedStruct struct {
a [2]string
}
-var _ = typedStruct{} // want ""
+var _ = typedStruct{} // want `Fill typedStruct`
type funStruct struct {
fn func(i int) int
}
-var _ = funStruct{} // want ""
+var _ = funStruct{} // want `Fill funStruct`
-type funStructCompex struct {
+type funStructComplex struct {
fn func(i int, s string) (string, int)
}
-var _ = funStructCompex{} // want ""
+var _ = funStructComplex{} // want `Fill funStructComplex`
type funStructEmpty struct {
fn func()
}
-var _ = funStructEmpty{} // want ""
+var _ = funStructEmpty{} // want `Fill funStructEmpty`
type Foo struct {
A int
@@ -78,7 +78,7 @@ type Bar struct {
Y *Foo
}
-var _ = Bar{} // want ""
+var _ = Bar{} // want `Fill Bar`
type importedStruct struct {
m map[*ast.CompositeLit]ast.Field
@@ -89,7 +89,7 @@ type importedStruct struct {
st ast.CompositeLit
}
-var _ = importedStruct{} // want ""
+var _ = importedStruct{} // want `Fill importedStruct`
type pointerBuiltinStruct struct {
b *bool
@@ -97,17 +97,17 @@ type pointerBuiltinStruct struct {
i *int
}
-var _ = pointerBuiltinStruct{} // want ""
+var _ = pointerBuiltinStruct{} // want `Fill pointerBuiltinStruct`
var _ = []ast.BasicLit{
- {}, // want ""
+ {}, // want `Fill go/ast.BasicLit`
}
-var _ = []ast.BasicLit{{}, // want ""
+var _ = []ast.BasicLit{{}, // want "go/ast.BasicLit"
}
type unsafeStruct struct {
foo unsafe.Pointer
}
-var _ = unsafeStruct{} // want ""
+var _ = unsafeStruct{} // want `Fill unsafeStruct`
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/b/b.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go
similarity index 100%
rename from internal/lsp/analysis/fillstruct/testdata/src/b/b.go
rename to gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go
diff --git a/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
new file mode 100644
index 00000000000..46bb8ae4027
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
@@ -0,0 +1,50 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fillstruct
+
+type emptyStruct[A any] struct{}
+
+var _ = emptyStruct[int]{}
+
+type basicStruct[T any] struct {
+ foo T
+}
+
+var _ = basicStruct[int]{} // want `Fill basicStruct\[int\]`
+
+type twoArgStruct[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStruct[string, int]{} // want `Fill twoArgStruct\[string, int\]`
+
+var _ = twoArgStruct[int, string]{ // want `Fill twoArgStruct\[int, string\]`
+ bar: "bar",
+}
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct[int]
+}
+
+var _ = nestedStruct{} // want "Fill nestedStruct"
+
+func _[T any]() {
+ type S struct{ t T }
+ x := S{} // want "Fill S"
+ _ = x
+}
+
+func Test() {
+ var tests = []struct {
+ a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p string
+ }{
+ {}, // want "Fill anonymous struct { a: string, b: string, c: string, ... }"
+ }
+ for _, test := range tests {
+ _ = test
+ }
+}
diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/infertypeargs.go
rename to gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go
diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
similarity index 80%
rename from internal/lsp/analysis/infertypeargs/infertypeargs_test.go
rename to gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
index 2957f46e367..70855e1ab3e 100644
--- a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
@@ -8,13 +8,11 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/infertypeargs"
- "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/infertypeargs"
"golang.org/x/tools/internal/typeparams"
)
func Test(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
if !typeparams.Enabled {
t.Skip("type params are not enabled")
}
diff --git a/internal/lsp/analysis/infertypeargs/run_go117.go b/gopls/internal/lsp/analysis/infertypeargs/run_go117.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/run_go117.go
rename to gopls/internal/lsp/analysis/infertypeargs/run_go117.go
diff --git a/internal/lsp/analysis/infertypeargs/run_go118.go b/gopls/internal/lsp/analysis/infertypeargs/run_go118.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/run_go118.go
rename to gopls/internal/lsp/analysis/infertypeargs/run_go118.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
similarity index 100%
rename from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
rename to gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
diff --git a/internal/lsp/analysis/nonewvars/nonewvars.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go
similarity index 94%
rename from internal/lsp/analysis/nonewvars/nonewvars.go
rename to gopls/internal/lsp/analysis/nonewvars/nonewvars.go
index e7fa430cc53..6937b36d1f5 100644
--- a/internal/lsp/analysis/nonewvars/nonewvars.go
+++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go
@@ -30,7 +30,7 @@ will turn into
`
var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.NoNewVars),
+ Name: "nonewvars",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -39,7 +39,9 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- errors := analysisinternal.GetTypeErrors(pass)
+ if len(pass.TypeErrors) == 0 {
+ return nil, nil
+ }
nodeFilter := []ast.Node{(*ast.AssignStmt)(nil)}
inspect.Preorder(nodeFilter, func(n ast.Node) {
@@ -60,7 +62,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
- for _, err := range errors {
+ for _, err := range pass.TypeErrors {
if !FixesError(err.Msg) {
continue
}
diff --git a/internal/lsp/analysis/nonewvars/nonewvars_test.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go
similarity index 89%
rename from internal/lsp/analysis/nonewvars/nonewvars_test.go
rename to gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go
index dc58ab0ff5e..8f6f0a51fb4 100644
--- a/internal/lsp/analysis/nonewvars/nonewvars_test.go
+++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/nonewvars"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/nonewvars"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/nonewvars/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
similarity index 100%
rename from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
rename to gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go
similarity index 94%
rename from internal/lsp/analysis/noresultvalues/noresultvalues.go
rename to gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go
index b9f21f3135e..41952a5479e 100644
--- a/internal/lsp/analysis/noresultvalues/noresultvalues.go
+++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go
@@ -29,7 +29,7 @@ will turn into
`
var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.NoResultValues),
+ Name: "noresultvalues",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -38,7 +38,9 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (interface{}, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- errors := analysisinternal.GetTypeErrors(pass)
+ if len(pass.TypeErrors) == 0 {
+ return nil, nil
+ }
nodeFilter := []ast.Node{(*ast.ReturnStmt)(nil)}
inspect.Preorder(nodeFilter, func(n ast.Node) {
@@ -55,7 +57,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
- for _, err := range errors {
+ for _, err := range pass.TypeErrors {
if !FixesError(err.Msg) {
continue
}
diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
similarity index 89%
rename from internal/lsp/analysis/noresultvalues/noresultvalues_test.go
rename to gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
index 12198a1c130..24ce39207ee 100644
--- a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
+++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/noresultvalues"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/noresultvalues"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
similarity index 100%
rename from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
rename to gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
similarity index 100%
rename from internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
rename to gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
similarity index 85%
rename from internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
rename to gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
index e60f7d6b055..b0365a6b3da 100644
--- a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
+++ b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifycompositelit"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifycompositelit"
)
func Test(t *testing.T) {
diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go
similarity index 100%
rename from internal/lsp/analysis/simplifyrange/simplifyrange.go
rename to gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go
diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
similarity index 86%
rename from internal/lsp/analysis/simplifyrange/simplifyrange_test.go
rename to gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
index ecc7a969257..fbd57ec2d65 100644
--- a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
+++ b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifyrange"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyrange"
)
func Test(t *testing.T) {
diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go
similarity index 100%
rename from internal/lsp/analysis/simplifyslice/simplifyslice.go
rename to gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go
diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
similarity index 89%
rename from internal/lsp/analysis/simplifyslice/simplifyslice_test.go
rename to gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
index cff6267c679..41914ba3170 100644
--- a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
+++ b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifyslice"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyslice"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
similarity index 100%
rename from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
similarity index 100%
rename from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
rename to gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
diff --git a/internal/lsp/analysis/stubmethods/stubmethods.go b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go
similarity index 91%
rename from internal/lsp/analysis/stubmethods/stubmethods.go
rename to gopls/internal/lsp/analysis/stubmethods/stubmethods.go
index f9dc69a9652..9ff869225ff 100644
--- a/internal/lsp/analysis/stubmethods/stubmethods.go
+++ b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go
@@ -35,7 +35,7 @@ var Analyzer = &analysis.Analyzer{
}
func run(pass *analysis.Pass) (interface{}, error) {
- for _, err := range analysisinternal.GetTypeErrors(pass) {
+ for _, err := range pass.TypeErrors {
ifaceErr := strings.Contains(err.Msg, "missing method") || strings.HasPrefix(err.Msg, "cannot convert")
if !ifaceErr {
continue
@@ -84,7 +84,7 @@ type StubInfo struct {
// in the case where the concrete type file requires a new import that happens to be renamed
// in the interface file.
// TODO(marwan-at-work): implement interface literals.
- Interface types.Object
+ Interface *types.TypeName
Concrete *types.Named
Pointer bool
}
@@ -269,19 +269,21 @@ func fromAssignStmt(ti *types.Info, as *ast.AssignStmt, pos token.Pos) *StubInfo
}
}
-// RelativeToFiles returns a types.Qualifier that formats package names
-// according to the files where the concrete and interface types are defined.
+// RelativeToFiles returns a types.Qualifier that formats package
+// names according to the import environments of the files that define
+// the concrete type and the interface type. (Only the imports of the
+// latter file are provided.)
//
// This is similar to types.RelativeTo except if a file imports the package with a different name,
// then it will use it. And if the file does import the package but it is ignored,
-// then it will return the original name. It also prefers package names in ifaceFile in case
-// an import is missing from concFile but is present in ifaceFile.
+// then it will return the original name. It also prefers package names in ifaceImports in case
+// an import is missing from concFile but is present among ifaceImports.
//
// Additionally, if missingImport is not nil, the function will be called whenever the concFile
// is presented with a package that is not imported. This is useful so that as types.TypeString is
// formatting a function signature, it is identifying packages that will need to be imported when
// stubbing an interface.
-func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, missingImport func(name, path string)) types.Qualifier {
+func RelativeToFiles(concPkg *types.Package, concFile *ast.File, ifaceImports []*ast.ImportSpec, missingImport func(name, path string)) types.Qualifier {
return func(other *types.Package) string {
if other == concPkg {
return ""
@@ -292,6 +294,7 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss
for _, imp := range concFile.Imports {
impPath, _ := strconv.Unquote(imp.Path.Value)
isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
+ // TODO(adonovan): this comparison disregards a vendor prefix in 'other'.
if impPath == other.Path() && !isIgnored {
importName := other.Name()
if imp.Name != nil {
@@ -304,16 +307,15 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss
// If the concrete file does not have the import, check if the package
// is renamed in the interface file and prefer that.
var importName string
- if ifaceFile != nil {
- for _, imp := range ifaceFile.Imports {
- impPath, _ := strconv.Unquote(imp.Path.Value)
- isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
- if impPath == other.Path() && !isIgnored {
- if imp.Name != nil && imp.Name.Name != concPkg.Name() {
- importName = imp.Name.Name
- }
- break
+ for _, imp := range ifaceImports {
+ impPath, _ := strconv.Unquote(imp.Path.Value)
+ isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
+ // TODO(adonovan): this comparison disregards a vendor prefix in 'other'.
+ if impPath == other.Path() && !isIgnored {
+ if imp.Name != nil && imp.Name.Name != concPkg.Name() {
+ importName = imp.Name.Name
}
+ break
}
}
@@ -333,7 +335,7 @@ func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, miss
// ifaceType will try to extract the types.Object that defines
// the interface given the ast.Expr where the "missing method"
// or "conversion" errors happen.
-func ifaceType(n ast.Expr, ti *types.Info) types.Object {
+func ifaceType(n ast.Expr, ti *types.Info) *types.TypeName {
tv, ok := ti.Types[n]
if !ok {
return nil
@@ -341,7 +343,7 @@ func ifaceType(n ast.Expr, ti *types.Info) types.Object {
return ifaceObjFromType(tv.Type)
}
-func ifaceObjFromType(t types.Type) types.Object {
+func ifaceObjFromType(t types.Type) *types.TypeName {
named, ok := t.(*types.Named)
if !ok {
return nil
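For illustration, here is a minimal, self-contained sketch (invented package path and import name; not part of this change) of what the qualifier returned by RelativeToFiles ultimately controls: how package-level names are rendered by types.TypeString when a method signature is formatted for the stub.

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/somepkg", "somepkg")
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	// Default: fully qualified by package path.
	fmt.Println(types.TypeString(named, nil)) // example.com/somepkg.T

	// Qualify by package name, as types.RelativeTo would for another package.
	fmt.Println(types.TypeString(named, (*types.Package).Name)) // somepkg.T

	// Prefer the name bound by a renamed import in the relevant file,
	// e.g. `import renamed "example.com/somepkg"`.
	fmt.Println(types.TypeString(named, func(p *types.Package) string {
		return "renamed"
	})) // renamed.T
}
```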
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
similarity index 58%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
index 81c732001af..c5d8a2d789c 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
@@ -6,22 +6,22 @@ package undeclared
func x() int {
var z int
- z = y // want "undeclared name: y"
+ z = y // want "(undeclared name|undefined): y"
- if z == m { // want "undeclared name: m"
+ if z == m { // want "(undeclared name|undefined): m"
z = 1
}
if z == 1 {
z = 1
- } else if z == n+1 { // want "undeclared name: n"
+ } else if z == n+1 { // want "(undeclared name|undefined): n"
z = 1
}
switch z {
case 10:
z = 1
- case a: // want "undeclared name: a"
+ case a: // want "(undeclared name|undefined): a"
z = 1
}
return z
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
similarity index 78%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
index ecf00ecfc20..76c7ba685e1 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
@@ -5,7 +5,7 @@
package undeclared
func channels(s string) {
- undefinedChannels(c()) // want "undeclared name: undefinedChannels"
+ undefinedChannels(c()) // want "(undeclared name|undefined): undefinedChannels"
}
func c() (<-chan string, chan string) {
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
similarity index 69%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
index ab7b2ba5c18..73beace102c 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
@@ -6,5 +6,5 @@ package undeclared
func consecutiveParams() {
var s string
- undefinedConsecutiveParams(s, s) // want "undeclared name: undefinedConsecutiveParams"
+ undefinedConsecutiveParams(s, s) // want "(undeclared name|undefined): undefinedConsecutiveParams"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
similarity index 71%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
index 341a9d2a453..5de9254112d 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
@@ -6,5 +6,5 @@ package undeclared
func errorParam() {
var err error
- undefinedErrorParam(err) // want "undeclared name: undefinedErrorParam"
+ undefinedErrorParam(err) // want "(undeclared name|undefined): undefinedErrorParam"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
similarity index 67%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
index ab82463d00e..c62174ec947 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
@@ -7,5 +7,5 @@ package undeclared
type T struct{}
func literals() {
- undefinedLiterals("hey compiler", T{}, &T{}) // want "undeclared name: undefinedLiterals"
+ undefinedLiterals("hey compiler", T{}, &T{}) // want "(undeclared name|undefined): undefinedLiterals"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
similarity index 69%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
index 9a543821ee6..9396da4bd9d 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
@@ -7,5 +7,5 @@ package undeclared
import "time"
func operation() {
- undefinedOperation(10 * time.Second) // want "undeclared name: undefinedOperation"
+ undefinedOperation(10 * time.Second) // want "(undeclared name|undefined): undefinedOperation"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
similarity index 72%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
index 9ed09a27f24..a4ed290d466 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
@@ -6,5 +6,5 @@ package undeclared
func selector() {
m := map[int]bool{}
- undefinedSelector(m[1]) // want "undeclared name: undefinedSelector"
+ undefinedSelector(m[1]) // want "(undeclared name|undefined): undefinedSelector"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
similarity index 70%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
index d741c68f68d..5cde299add3 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
@@ -5,5 +5,5 @@
package undeclared
func slice() {
- undefinedSlice([]int{1, 2}) // want "undeclared name: undefinedSlice"
+ undefinedSlice([]int{1, 2}) // want "(undeclared name|undefined): undefinedSlice"
}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
similarity index 76%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
index 3148e8f4d4c..9e91c59c25e 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
@@ -5,7 +5,7 @@
package undeclared
func tuple() {
- undefinedTuple(b()) // want "undeclared name: undefinedTuple"
+ undefinedTuple(b()) // want "(undeclared name|undefined): undefinedTuple"
}
func b() (string, error) {
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
similarity index 70%
rename from internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
rename to gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
index 98f77a43cd1..5b4241425e5 100644
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
@@ -7,5 +7,5 @@ package undeclared
func uniqueArguments() {
var s string
var i int
- undefinedUniqueArguments(s, i, s) // want "undeclared name: undefinedUniqueArguments"
+ undefinedUniqueArguments(s, i, s) // want "(undeclared name|undefined): undefinedUniqueArguments"
}
diff --git a/internal/lsp/analysis/undeclaredname/undeclared.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go
similarity index 91%
rename from internal/lsp/analysis/undeclaredname/undeclared.go
rename to gopls/internal/lsp/analysis/undeclaredname/undeclared.go
index faa14091aee..3e42f08d55b 100644
--- a/internal/lsp/analysis/undeclaredname/undeclared.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go
@@ -18,8 +18,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
)
const Doc = `suggested fixes for "undeclared name: <>"
@@ -38,27 +39,34 @@ func <>(inferred parameters) {
`
var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.UndeclaredName),
+ Name: "undeclaredname",
Doc: Doc,
Requires: []*analysis.Analyzer{},
Run: run,
RunDespiteErrors: true,
}
-const undeclaredNamePrefix = "undeclared name: "
+// The prefix for this error message changed in Go 1.20.
+var undeclaredNamePrefixes = []string{"undeclared name: ", "undefined: "}
func run(pass *analysis.Pass) (interface{}, error) {
- for _, err := range analysisinternal.GetTypeErrors(pass) {
+ for _, err := range pass.TypeErrors {
runForError(pass, err)
}
return nil, nil
}
func runForError(pass *analysis.Pass, err types.Error) {
- if !strings.HasPrefix(err.Msg, undeclaredNamePrefix) {
+ var name string
+ for _, prefix := range undeclaredNamePrefixes {
+ if !strings.HasPrefix(err.Msg, prefix) {
+ continue
+ }
+ name = strings.TrimPrefix(err.Msg, prefix)
+ }
+ if name == "" {
return
}
- name := strings.TrimPrefix(err.Msg, undeclaredNamePrefix)
var file *ast.File
for _, f := range pass.Files {
if f.Pos() <= err.Pos && err.Pos < f.End() {
@@ -105,8 +113,8 @@ func runForError(pass *analysis.Pass, err types.Error) {
if tok == nil {
return
}
- offset := pass.Fset.Position(err.Pos).Offset
- end := tok.Pos(offset + len(name))
+ offset := safetoken.StartPosition(pass.Fset, err.Pos).Offset
+ end := tok.Pos(offset + len(name)) // TODO(adonovan): dubious! err.Pos + len(name)??
pass.Report(analysis.Diagnostic{
Pos: err.Pos,
End: end,
@@ -139,7 +147,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast
return nil, fmt.Errorf("could not locate insertion point")
}
- insertBefore := fset.Position(insertBeforeStmt.Pos()).Offset
+ insertBefore := safetoken.StartPosition(fset, insertBeforeStmt.Pos()).Offset
// Get the indent to add on the line after the new statement.
// Since this will have a parse error, we can not use format.Source().
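The list of accepted prefixes above exists because the go/types message changed from "undeclared name: x" to "undefined: x" in Go 1.20. A standalone sketch of the same prefix-matching idea (the helper name is hypothetical; the prefixes mirror the ones in the analyzer):

```go
package main

import (
	"fmt"
	"strings"
)

// Both message forms: Go < 1.20 and Go >= 1.20.
var prefixes = []string{"undeclared name: ", "undefined: "}

// nameFromMessage extracts the offending identifier, or "" if the
// message is not an undeclared-name error at all.
func nameFromMessage(msg string) string {
	for _, p := range prefixes {
		if strings.HasPrefix(msg, p) {
			return strings.TrimPrefix(msg, p)
		}
	}
	return ""
}

func main() {
	fmt.Printf("%q\n", nameFromMessage("undeclared name: foo")) // "foo"
	fmt.Printf("%q\n", nameFromMessage("undefined: foo"))       // "foo"
	fmt.Printf("%q\n", nameFromMessage("some other error"))     // ""
}
```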
diff --git a/internal/lsp/analysis/undeclaredname/undeclared_test.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go
similarity index 85%
rename from internal/lsp/analysis/undeclaredname/undeclared_test.go
rename to gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go
index b7154393742..306c3f03941 100644
--- a/internal/lsp/analysis/undeclaredname/undeclared_test.go
+++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/undeclaredname"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname"
)
func Test(t *testing.T) {
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/unusedparams/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
similarity index 100%
rename from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
similarity index 100%
rename from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
rename to gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
diff --git a/internal/lsp/analysis/unusedparams/unusedparams.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams.go
similarity index 100%
rename from internal/lsp/analysis/unusedparams/unusedparams.go
rename to gopls/internal/lsp/analysis/unusedparams/unusedparams.go
diff --git a/internal/lsp/analysis/unusedparams/unusedparams_test.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go
similarity index 89%
rename from internal/lsp/analysis/unusedparams/unusedparams_test.go
rename to gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go
index dff17c95e5d..fdd43b821fe 100644
--- a/internal/lsp/analysis/unusedparams/unusedparams_test.go
+++ b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/unusedparams"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedparams"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
new file mode 100644
index 00000000000..aa9f46e5b31
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+ "os"
+)
+
+type A struct {
+ b int
+}
+
+func singleAssignment() {
+ v := "s" // want `v declared (and|but) not used`
+
+ s := []int{ // want `s declared (and|but) not used`
+ 1,
+ 2,
+ }
+
+ a := func(s string) bool { // want `a declared (and|but) not used`
+ return false
+ }
+
+ if 1 == 1 {
+ s := "v" // want `s declared (and|but) not used`
+ }
+
+ panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+ v := "s" // want `v declared (and|but) not used`
+}
+
+func partOfMultiAssignment() {
+ f, err := os.Open("file") // want `f declared (and|but) not used`
+ panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+ b := <-c // want `b declared (and|but) not used`
+ s := fmt.Sprint("") // want `s declared (and|but) not used`
+ a := A{ // want `a declared (and|but) not used`
+ b: func() int {
+ return 1
+ }(),
+ }
+ c := A{<-cInt} // want `c declared (and|but) not used`
+ d := fInt() + <-cInt // want `d declared (and|but) not used`
+ e := fBool() && <-cBool // want `e declared (and|but) not used`
+ f := map[int]int{ // want `f declared (and|but) not used`
+ fInt(): <-cInt,
+ }
+ g := []int{<-cInt} // want `g declared (and|but) not used`
+ h := func(s string) {} // want `h declared (and|but) not used`
+ i := func(s string) {}() // want `i declared (and|but) not used`
+}
+
+func commentAbove() {
+ // v is a variable
+ v := "s" // want `v declared (and|but) not used`
+}
+
+func fBool() bool {
+ return true
+}
+
+func fInt() int {
+ return 1
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
new file mode 100644
index 00000000000..18173ce0bf9
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
@@ -0,0 +1,59 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+ "os"
+)
+
+type A struct {
+ b int
+}
+
+func singleAssignment() {
+ if 1 == 1 {
+ }
+
+ panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+}
+
+func partOfMultiAssignment() {
+ _, err := os.Open("file") // want `f declared (and|but) not used`
+ panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+ <-c // want `b declared (and|but) not used`
+ fmt.Sprint("") // want `s declared (and|but) not used`
+ A{ // want `a declared (and|but) not used`
+ b: func() int {
+ return 1
+ }(),
+ }
+ A{<-cInt} // want `c declared (and|but) not used`
+ fInt() + <-cInt // want `d declared (and|but) not used`
+ fBool() && <-cBool // want `e declared (and|but) not used`
+ map[int]int{ // want `f declared (and|but) not used`
+ fInt(): <-cInt,
+ }
+ []int{<-cInt} // want `g declared (and|but) not used`
+ func(s string) {}() // want `i declared (and|but) not used`
+}
+
+func commentAbove() {
+ // v is a variable
+}
+
+func fBool() bool {
+ return true
+}
+
+func fInt() int {
+ return 1
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
new file mode 100644
index 00000000000..8e843024a54
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+ var b, c bool // want `b declared (and|but) not used`
+ panic(c)
+
+ if 1 == 1 {
+ var s string // want `s declared (and|but) not used`
+ }
+}
+
+func b() {
+ // b is a variable
+ var b bool // want `b declared (and|but) not used`
+}
+
+func c() {
+ var (
+ d string
+
+ // some comment for c
+ c bool // want `c declared (and|but) not used`
+ )
+
+ panic(d)
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
new file mode 100644
index 00000000000..6ed97332eea
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+ var c bool // want `b declared (and|but) not used`
+ panic(c)
+
+ if 1 == 1 {
+ }
+}
+
+func b() {
+ // b is a variable
+}
+
+func c() {
+ var (
+ d string
+ )
+ panic(d)
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go
new file mode 100644
index 00000000000..904016be71e
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go
@@ -0,0 +1,300 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unusedvariable defines an analyzer that checks for unused variables.
+package unusedvariable
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+const Doc = `check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "unusedvariable",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{},
+ Run: run,
+ RunDespiteErrors: true, // an unusedvariable diagnostic is a compile error
+}
+
+// The suffix for this error message changed in Go 1.20.
+var unusedVariableSuffixes = []string{" declared and not used", " declared but not used"}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, typeErr := range pass.TypeErrors {
+ for _, suffix := range unusedVariableSuffixes {
+ if strings.HasSuffix(typeErr.Msg, suffix) {
+ varName := strings.TrimSuffix(typeErr.Msg, suffix)
+ err := runForError(pass, typeErr, varName)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+func runForError(pass *analysis.Pass, err types.Error, name string) error {
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= err.Pos && err.Pos < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return nil
+ }
+
+ path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos)
+ if len(path) < 2 {
+ return nil
+ }
+
+ ident, ok := path[0].(*ast.Ident)
+ if !ok || ident.Name != name {
+ return nil
+ }
+
+ diag := analysis.Diagnostic{
+ Pos: ident.Pos(),
+ End: ident.End(),
+ Message: err.Msg,
+ }
+
+ for i := range path {
+ switch stmt := path[i].(type) {
+ case *ast.ValueSpec:
+ // Find GenDecl to which offending ValueSpec belongs.
+ if decl, ok := path[i+1].(*ast.GenDecl); ok {
+ fixes := removeVariableFromSpec(pass, path, stmt, decl, ident)
+ // fixes may be nil
+ if len(fixes) > 0 {
+ diag.SuggestedFixes = fixes
+ pass.Report(diag)
+ }
+ }
+
+ case *ast.AssignStmt:
+ if stmt.Tok != token.DEFINE {
+ continue
+ }
+
+ containsIdent := false
+ for _, expr := range stmt.Lhs {
+ if expr == ident {
+ containsIdent = true
+ }
+ }
+ if !containsIdent {
+ continue
+ }
+
+ fixes := removeVariableFromAssignment(pass, path, stmt, ident)
+ // fixes may be nil
+ if len(fixes) > 0 {
+ diag.SuggestedFixes = fixes
+ pass.Report(diag)
+ }
+ }
+ }
+
+ return nil
+}
+
+func removeVariableFromSpec(pass *analysis.Pass, path []ast.Node, stmt *ast.ValueSpec, decl *ast.GenDecl, ident *ast.Ident) []analysis.SuggestedFix {
+ newDecl := new(ast.GenDecl)
+ *newDecl = *decl
+ newDecl.Specs = nil
+
+ for _, spec := range decl.Specs {
+ if spec != stmt {
+ newDecl.Specs = append(newDecl.Specs, spec)
+ continue
+ }
+
+ newSpec := new(ast.ValueSpec)
+ *newSpec = *stmt
+ newSpec.Names = nil
+
+ for _, n := range stmt.Names {
+ if n != ident {
+ newSpec.Names = append(newSpec.Names, n)
+ }
+ }
+
+ if len(newSpec.Names) > 0 {
+ newDecl.Specs = append(newDecl.Specs, newSpec)
+ }
+ }
+
+ // decl.End() does not include any comments, so if a comment is present we
+ // need to account for it when we delete the statement
+ end := decl.End()
+ if stmt.Comment != nil && stmt.Comment.End() > end {
+ end = stmt.Comment.End()
+ }
+
+ // There are no other specs left in the declaration, the whole statement can
+ // be deleted
+ if len(newDecl.Specs) == 0 {
+ // Find parent DeclStmt and delete it
+ for _, node := range path {
+ if declStmt, ok := node.(*ast.DeclStmt); ok {
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: deleteStmtFromBlock(path, declStmt),
+ },
+ }
+ }
+ }
+ }
+
+ var b bytes.Buffer
+ if err := format.Node(&b, pass.Fset, newDecl); err != nil {
+ return nil
+ }
+
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: decl.Pos(),
+ // Avoid adding a new empty line
+ End: end + 1,
+ NewText: b.Bytes(),
+ },
+ },
+ },
+ }
+}
+
+func removeVariableFromAssignment(pass *analysis.Pass, path []ast.Node, stmt *ast.AssignStmt, ident *ast.Ident) []analysis.SuggestedFix {
+ // The only variable in the assignment is unused
+ if len(stmt.Lhs) == 1 {
+		// If the LHS has only one expression, then to be valid the assignment
+		// must have exactly one expression on the RHS.
+ //
+ // RHS may have side effects, preserve RHS
+ if exprMayHaveSideEffects(stmt.Rhs[0]) {
+ // Delete until RHS
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: ident.Pos(),
+ End: stmt.Rhs[0].Pos(),
+ },
+ },
+ },
+ }
+ }
+
+ // RHS does not have any side effects, delete the whole statement
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: deleteStmtFromBlock(path, stmt),
+ },
+ }
+ }
+
+ // Otherwise replace ident with `_`
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: ident.Pos(),
+ End: ident.End(),
+ NewText: []byte("_"),
+ },
+ },
+ },
+ }
+}
+
+func suggestedFixMessage(name string) string {
+ return fmt.Sprintf("Remove variable %s", name)
+}
+
+func deleteStmtFromBlock(path []ast.Node, stmt ast.Stmt) []analysis.TextEdit {
+ // Find innermost enclosing BlockStmt.
+ var block *ast.BlockStmt
+ for i := range path {
+ if blockStmt, ok := path[i].(*ast.BlockStmt); ok {
+ block = blockStmt
+ break
+ }
+ }
+
+ nodeIndex := -1
+ for i, blockStmt := range block.List {
+ if blockStmt == stmt {
+ nodeIndex = i
+ break
+ }
+ }
+
+ // The statement we need to delete was not found in BlockStmt
+ if nodeIndex == -1 {
+ return nil
+ }
+
+ // Delete until the end of the block unless there is another statement after
+ // the one we are trying to delete
+ end := block.Rbrace
+ if nodeIndex < len(block.List)-1 {
+ end = block.List[nodeIndex+1].Pos()
+ }
+
+ return []analysis.TextEdit{
+ {
+ Pos: stmt.Pos(),
+ End: end,
+ },
+ }
+}
+
+// exprMayHaveSideEffects reports whether the expression may have side effects
+// (because it contains a function call or channel receive). We disregard
+// runtime panics, as well-written programs should not encounter them.
+func exprMayHaveSideEffects(expr ast.Expr) bool {
+ var mayHaveSideEffects bool
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch n := n.(type) {
+ case *ast.CallExpr: // possible function call
+ mayHaveSideEffects = true
+ return false
+ case *ast.UnaryExpr:
+ if n.Op == token.ARROW { // channel receive
+ mayHaveSideEffects = true
+ return false
+ }
+ case *ast.FuncLit:
+ return false // evaluating what's inside a FuncLit has no effect
+ }
+ return true
+ })
+
+ return mayHaveSideEffects
+}
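A small standalone sketch (hypothetical helper and inputs, built with go/parser) mirroring the classification done by exprMayHaveSideEffects; it shows which right-hand sides cause the fix to keep the expression rather than delete the whole statement:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// mayHaveSideEffects reports whether expr contains a call or channel receive,
// mirroring the analyzer's logic, including skipping uncalled func literals.
func mayHaveSideEffects(expr ast.Expr) bool {
	found := false
	ast.Inspect(expr, func(n ast.Node) bool {
		switch n := n.(type) {
		case *ast.CallExpr: // possible function call
			found = true
			return false
		case *ast.UnaryExpr:
			if n.Op == token.ARROW { // channel receive
				found = true
				return false
			}
		case *ast.FuncLit:
			return false // an uncalled func literal is inert
		}
		return true
	})
	return found
}

func main() {
	for _, src := range []string{`1 + 2`, `f(x)`, `<-ch`, `func() { f() }`} {
		e, err := parser.ParseExpr(src)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-16s %v\n", src, mayHaveSideEffects(e))
	}
}
```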
diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
new file mode 100644
index 00000000000..08223155f6e
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unusedvariable_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedvariable"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+
+ t.Run("decl", func(t *testing.T) {
+ analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "decl")
+ })
+
+ t.Run("assign", func(t *testing.T) {
+ analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "assign")
+ })
+}
diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go
similarity index 100%
rename from internal/lsp/analysis/useany/testdata/src/a/a.go
rename to gopls/internal/lsp/analysis/useany/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden
similarity index 100%
rename from internal/lsp/analysis/useany/testdata/src/a/a.go.golden
rename to gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/useany/useany.go b/gopls/internal/lsp/analysis/useany/useany.go
similarity index 100%
rename from internal/lsp/analysis/useany/useany.go
rename to gopls/internal/lsp/analysis/useany/useany.go
diff --git a/internal/lsp/analysis/useany/useany_test.go b/gopls/internal/lsp/analysis/useany/useany_test.go
similarity index 89%
rename from internal/lsp/analysis/useany/useany_test.go
rename to gopls/internal/lsp/analysis/useany/useany_test.go
index 535d9152665..083c3d54fd4 100644
--- a/internal/lsp/analysis/useany/useany_test.go
+++ b/gopls/internal/lsp/analysis/useany/useany_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/useany"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/useany"
"golang.org/x/tools/internal/typeparams"
)
diff --git a/internal/lsp/browser/README.md b/gopls/internal/lsp/browser/README.md
similarity index 100%
rename from internal/lsp/browser/README.md
rename to gopls/internal/lsp/browser/README.md
diff --git a/internal/lsp/browser/browser.go b/gopls/internal/lsp/browser/browser.go
similarity index 100%
rename from internal/lsp/browser/browser.go
rename to gopls/internal/lsp/browser/browser.go
diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go
new file mode 100644
index 00000000000..61e868c5e17
--- /dev/null
+++ b/gopls/internal/lsp/cache/analysis.go
@@ -0,0 +1,1235 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+// This file defines gopls' driver for modular static analysis (go/analysis).
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "log"
+ "reflect"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/gopls/internal/lsp/filecache"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/facts"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+/*
+
+ DESIGN
+
+ An analysis request is for a set of analyzers and an individual
+ package ID, notated (a*, p). The result is the set of diagnostics
+ for that package. It could easily be generalized to a set of
+ packages, (a*, p*), and perhaps should be, to improve performance
+ versus calling it in a loop.
+
+ The snapshot holds a cache (persistent.Map) of entries keyed by
+ (a*, p) pairs ("analysisKey") that have been requested so far. Some
+ of these entries may be invalidated during snapshot cloning after a
+ modification event. The cache maps each (a*, p) to a promise of
+ the analysis result or "analysisSummary". The summary contains the
+ results of analysis (e.g. diagnostics) as well as the intermediate
+ results required by the recursion, such as serialized types and
+ facts.
+
+ The promise represents the result of a call to analyzeImpl, which
+ type-checks a package and then applies a graph of analyzers to it
+ in parallel postorder. (These graph edges are "horizontal": within
+ the same package.) First, analyzeImpl reads the source files of
+ package p, and obtains (recursively) the results of the "vertical"
+ dependencies (i.e. analyzers applied to the packages imported by
+ p). Only the subset of analyzers that use facts need be executed
+ recursively, but even if this subset is empty, the step is still
+ necessary because it provides type information. It is possible that
+ a package may need to be type-checked and analyzed twice, for
+ different subsets of analyzers, but the overlap is typically
+ insignificant.
+
+ With the file contents and the results of vertical dependencies,
+ analyzeImpl is then in a position to produce a key representing the
+ unit of work (parsing, type-checking, and analysis) that it has to
+ do. The key is a cryptographic hash of the "recipe" for this step,
+ including the Metadata, the file contents, the set of analyzers,
+ and the type and fact information from the vertical dependencies.
+
+ The key is sought in a machine-global persistent file-system based
+ cache. If this gopls process, or another gopls process on the same
+ machine, has already performed this analysis step, analyzeImpl will
+ make a cache hit and load the serialized summary of the results. If
+ not, it will have to proceed to type-checking and analysis, and
+ write a new cache entry. The entry contains serialized types
+ (export data) and analysis facts.
+
+ For types, we use "shallow" export data. Historically, the Go
+ compiler always produced a summary of the types for a given package
+ that included types from other packages that it indirectly
+ referenced: "deep" export data. This had the advantage that the
+ compiler (and analogous tools such as gopls) need only load one
+ file per direct import. However, it meant that the files tended to
+ get larger based on the level of the package in the import
+ graph. For example, higher-level packages in the kubernetes module
+ have over 1MB of "deep" export data, even when they have almost no
+ content of their own, merely because they mention a major type that
+ references many others. In pathological cases the export data was
+ 300x larger than the source for a package due to this quadratic
+ growth.
+
+ "Shallow" export data means that the serialized types describe only
+ a single package. If those types mention types from other packages,
+ the type checker may need to request additional packages beyond
+ just the direct imports. This means type information for the entire
+ transitive closure of imports may need to be available just in
+ case. After a cache hit or a cache miss, the summary is
+ postprocessed so that it contains the union of export data payloads
+ of all its direct dependencies.
+
+ For correct dependency analysis, the digest used as a cache key
+ must reflect the "deep" export data, so it is derived recursively
+ from the transitive closure. As an optimization, we needn't include
+ every package of the transitive closure in the deep hash, only the
+ packages that were actually requested by the type checker. This
+ allows changes to a package that have no effect on its export data
+ to be "pruned". The direct consumer will need to be re-executed,
+ but if its export data is unchanged as a result, then indirect
+ consumers may not need to be re-executed. This allows, for example,
+ one to insert a print statement in a function and not "rebuild" the
+ whole application (though export data does record line numbers of
+ types which may be perturbed by otherwise insignificant changes.)
+
+ The summary must record whether a package is transitively
+ error-free (whether it would compile) because many analyzers are
+ not safe to run on packages with inconsistent types.
+
+ For fact encoding, we use the same fact set as the unitchecker
+ (vet) to record and serialize analysis facts. The fact
+ serialization mechanism is analogous to "deep" export data.
+
+*/
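As a rough illustration of the hash-then-lookup-or-compute flow described above, here is a minimal sketch (toy in-memory cache and invented names, not the gopls filecache API):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

type cache map[[sha256.Size]byte][]byte

// summaryFor returns the cached result for the given inputs, computing and
// storing it on a miss. Any caller that hashes identical inputs gets a hit.
func summaryFor(c cache, inputs ...[]byte) []byte {
	hasher := sha256.New()
	for _, in := range inputs {
		fmt.Fprintf(hasher, "%d\n", len(in)) // length-prefix keeps the encoding unambiguous
		hasher.Write(in)
	}
	var key [sha256.Size]byte
	hasher.Sum(key[:0])

	if v, ok := c[key]; ok {
		return v // cache hit: reuse the serialized summary
	}
	// Stand-in for the real work (parsing, type checking, analysis).
	v := append([]byte("summary of: "), bytes.Join(inputs, []byte(" "))...)
	c[key] = v
	return v
}

func main() {
	c := cache{}
	fmt.Printf("%s\n", summaryFor(c, []byte("metadata"), []byte("file contents")))
	fmt.Printf("%s\n", summaryFor(c, []byte("metadata"), []byte("file contents"))) // second call hits the cache
}
```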
+
+// TODO(adonovan):
+// - Profile + optimize:
+// - on a cold run, mostly type checking + export data, unsurprisingly.
+// - on a hot-disk run, mostly type checking the IWL.
+// Would be nice to have a benchmark that separates this out.
+// - measure and record in the code the typical operation times
+// and file sizes (export data + facts = cache entries).
+// - Do "port the old logic" tasks (see TODO in actuallyAnalyze).
+// - Add a (white-box) test of pruning when a change doesn't affect export data.
+// - Optimise pruning based on subset of packages mentioned in exportdata.
+// - Better logging so that it is possible to deduce why an analyzer
+// is not being run--often due to very indirect failures.
+// Even if the ultimate consumer decides to ignore errors,
+// tests and other situations want to be assured of freedom from
+// errors, not just missing results. This should be recorded.
+// - Check that the event trace is intelligible.
+// - Split this into a subpackage, gopls/internal/lsp/cache/driver,
+// consisting of this file and three helpers from errors.go.
+// The (*snapshot).Analyze method would stay behind and make calls
+// to the driver package.
+// Steps:
+// - define a narrow driver.Snapshot interface with only these methods:
+// Metadata(PackageID) source.Metadata
+// GetFile(Context, URI) (source.FileHandle, error)
+// View() *View // for Options
+// - define a State type that encapsulates the persistent map
+// (with its own mutex), and has methods:
+// New() *State
+// Clone(invalidate map[PackageID]bool) *State
+// Destroy()
+// - share cache.{goVersionRx,parseGoImpl}
+
+var born = time.Now()
+
+// Analyze applies a set of analyzers to the package denoted by id,
+// and returns their diagnostics for that package.
+//
+// The analyzers list must be duplicate free; order does not matter.
+//
+// Precondition: all analyzers within the process have distinct names.
+// (The names are relied on by the serialization logic.)
+func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) {
+ if false { // debugging
+ log.Println("Analyze@", time.Since(born)) // called after the 7s IWL in k8s
+ }
+
+ // Filter and sort enabled root analyzers.
+ // A disabled analyzer may still be run if required by another.
+ toSrc := make(map[*analysis.Analyzer]*source.Analyzer)
+ var enabled []*analysis.Analyzer
+ for _, a := range analyzers {
+ if a.IsEnabled(s.view.Options()) {
+ toSrc[a.Analyzer] = a
+ enabled = append(enabled, a.Analyzer)
+ }
+ }
+ sort.Slice(enabled, func(i, j int) bool {
+ return enabled[i].Name < enabled[j].Name
+ })
+
+ // Register fact types of required analyzers.
+ for _, a := range requiredAnalyzers(enabled) {
+ for _, f := range a.FactTypes {
+ gob.Register(f)
+ }
+ }
+
+ if false { // debugging
+ // TODO(adonovan): use proper tracing.
+ t0 := time.Now()
+ defer func() {
+ log.Printf("%v for analyze(%s, %s)", time.Since(t0), id, enabled)
+ }()
+ }
+
+ // Run the analysis.
+ res, err := s.analyze(ctx, id, enabled)
+ if err != nil {
+ return nil, err
+ }
+
+ // Report diagnostics only from enabled actions that succeeded.
+ // Errors from creating or analyzing packages are ignored.
+ // Diagnostics are reported in the order of the analyzers argument.
+ //
+ // TODO(adonovan): ignoring action errors gives the caller no way
+ // to distinguish "there are no problems in this code" from
+ // "the code (or analyzers!) are so broken that we couldn't even
+ // begin the analysis you asked for".
+ // Even if current callers choose to discard the
+ // results, we should propagate the per-action errors.
+ var results []*source.Diagnostic
+ for _, a := range enabled {
+ summary := res.Actions[a.Name]
+ if summary.Err != "" {
+ continue // action failed
+ }
+ for _, gobDiag := range summary.Diagnostics {
+ results = append(results, toSourceDiagnostic(toSrc[a], &gobDiag))
+ }
+ }
+ return results, nil
+}
+
+// analysisKey is the type of keys in the snapshot.analyses map.
+type analysisKey struct {
+ analyzerNames string
+ pkgid PackageID
+}
+
+func (key analysisKey) String() string {
+ return fmt.Sprintf("%s@%s", key.analyzerNames, key.pkgid)
+}
+
+// analyzeSummary is a gob-serializable summary of successfully
+// applying a list of analyzers to a package.
+type analyzeSummary struct {
+ PkgPath PackagePath // types.Package.Path() (needed to decode export data)
+ Export []byte
+ DeepExportHash source.Hash // hash of reflexive transitive closure of export data
+ Compiles bool // transitively free of list/parse/type errors
+ Actions actionsMap // map from analyzer name to analysis results (*actionSummary)
+
+ // Not serialized: populated after the summary is computed or deserialized.
+ allExport map[PackagePath][]byte // transitive export data
+}
+
+// actionsMap defines a stable Gob encoding for a map.
+// TODO(adonovan): generalize and move to a library when we can use generics.
+type actionsMap map[string]*actionSummary
+
+var _ gob.GobEncoder = (actionsMap)(nil)
+var _ gob.GobDecoder = (*actionsMap)(nil)
+
+type actionsMapEntry struct {
+ K string
+ V *actionSummary
+}
+
+func (m actionsMap) GobEncode() ([]byte, error) {
+ entries := make([]actionsMapEntry, 0, len(m))
+ for k, v := range m {
+ entries = append(entries, actionsMapEntry{k, v})
+ }
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].K < entries[j].K
+ })
+ var buf bytes.Buffer
+ err := gob.NewEncoder(&buf).Encode(entries)
+ return buf.Bytes(), err
+}
+
+func (m *actionsMap) GobDecode(data []byte) error {
+ var entries []actionsMapEntry
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil {
+ return err
+ }
+ *m = make(actionsMap, len(entries))
+ for _, e := range entries {
+ (*m)[e.K] = e.V
+ }
+ return nil
+}
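The custom Gob encoding above exists because encoding a Go map directly iterates it in nondeterministic order, so the serialized bytes, and any hash derived from them, would not be stable. A small standalone demonstration of the sorted-entries technique (toy types, not the actionsMap itself):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"sort"
)

type entry struct {
	K string
	V int
}

// stableEncode gob-encodes a map as a slice of entries sorted by key,
// so repeated encodings are byte-for-byte identical.
func stableEncode(m map[string]int) []byte {
	entries := make([]entry, 0, len(m))
	for k, v := range m {
		entries = append(entries, entry{k, v})
	}
	sort.Slice(entries, func(i, j int) bool { return entries[i].K < entries[j].K })
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(entries); err != nil {
		panic(err)
	}
	return buf.Bytes()
}

func main() {
	m := map[string]int{"vet": 1, "printf": 2, "shadow": 3}
	a, b := stableEncode(m), stableEncode(m)
	fmt.Println(bytes.Equal(a, b)) // always true
}
```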
+
+// actionSummary is a gob-serializable summary of one possibly failed analysis action.
+// If Err is non-empty, the other fields are undefined.
+type actionSummary struct {
+ Facts []byte // the encoded facts.Set
+ FactsHash source.Hash // hash(Facts)
+ Diagnostics []gobDiagnostic
+ Err string // "" => success
+}
+
+// analyze is a memoization of analyzeImpl.
+func (s *snapshot) analyze(ctx context.Context, id PackageID, analyzers []*analysis.Analyzer) (*analyzeSummary, error) {
+ // Use the sorted list of names of analyzers in the key.
+ //
+ // TODO(adonovan): opt: account for analysis results at a
+ // finer grain to avoid duplicate work when a
+	// proper subset of analyzers is requested?
+ // In particular, TypeErrorAnalyzers don't use facts
+ // but need to request vdeps just for type information.
+ names := make([]string, 0, len(analyzers))
+ for _, a := range analyzers {
+ names = append(names, a.Name)
+ }
+ // This key describes the result of applying a list of analyzers to a package.
+ key := analysisKey{strings.Join(names, ","), id}
+
+ // An analysisPromise represents the result of loading, parsing,
+ // type-checking and analyzing a single package.
+ type analysisPromise struct {
+ promise *memoize.Promise // [analyzeImplResult]
+ }
+
+ type analyzeImplResult struct {
+ summary *analyzeSummary
+ err error
+ }
+
+ // Access the map once, briefly, and atomically.
+ s.mu.Lock()
+ entry, hit := s.analyses.Get(key)
+ if !hit {
+ entry = analysisPromise{
+ promise: memoize.NewPromise("analysis", func(ctx context.Context, arg interface{}) interface{} {
+ summary, err := analyzeImpl(ctx, arg.(*snapshot), analyzers, id)
+ return analyzeImplResult{summary, err}
+ }),
+ }
+ s.analyses.Set(key, entry, nil) // nothing needs releasing
+ }
+ s.mu.Unlock()
+
+ // Await result.
+ ap := entry.(analysisPromise)
+ v, err := s.awaitPromise(ctx, ap.promise)
+ if err != nil {
+ return nil, err // e.g. cancelled
+ }
+ res := v.(analyzeImplResult)
+ return res.summary, res.err
+}
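A compact sketch (toy promise and map types, not the internal memoize package) of the memoization pattern used by analyze: consult the map briefly under the lock, install a promise on a miss, and await it outside the lock so concurrent requests for the same key share one computation:

```go
package main

import (
	"fmt"
	"sync"
)

type promise struct {
	once   sync.Once
	result string
}

func (p *promise) get(compute func() string) string {
	p.once.Do(func() { p.result = compute() })
	return p.result
}

type memo struct {
	mu sync.Mutex
	m  map[string]*promise
}

func (c *memo) analyze(key string, compute func() string) string {
	// Access the map once, briefly, and atomically.
	c.mu.Lock()
	p, ok := c.m[key]
	if !ok {
		p = &promise{}
		c.m[key] = p
	}
	c.mu.Unlock()
	// Await the result outside the lock.
	return p.get(compute)
}

func main() {
	c := &memo{m: map[string]*promise{}}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c.analyze("unusedparams,unusedvariable@example.com/p", func() string {
				return "summary" // computed only once per key
			}))
		}()
	}
	wg.Wait()
}
```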
+
+// analyzeImpl applies a list of analyzers (plus any others
+// transitively required by them) to a package. It succeeds as long
+// as it could produce a types.Package, even if there were direct or
+// indirect list/parse/type errors, and even if all the analysis
+// actions failed. It usually fails only if the package was unknown,
+// a file was missing, or the operation was cancelled.
+//
+// Postcondition: analyzeImpl must not continue to use the snapshot
+// (in background goroutines) after it has returned; see memoize.RefCounted.
+func analyzeImpl(ctx context.Context, snapshot *snapshot, analyzers []*analysis.Analyzer, id PackageID) (*analyzeSummary, error) {
+ m := snapshot.Metadata(id)
+ if m == nil {
+ return nil, fmt.Errorf("no metadata for %s", id)
+ }
+
+ // Recursively analyze each "vertical" dependency
+ // for its types.Package and (perhaps) analysis.Facts.
+ // If any of them fails to produce a package, we cannot continue.
+ // We request only the analyzers that produce facts.
+ //
+ // Also, load the contents of each "compiled" Go file through
+ // the snapshot's cache.
+ //
+ // Both loops occur in parallel, and parallel with each other.
+ vdeps := make(map[PackageID]*analyzeSummary)
+ compiledGoFiles := make([]source.FileHandle, len(m.CompiledGoFiles))
+ {
+ var group errgroup.Group
+
+ // Analyze vertical dependencies.
+ // We request only the required analyzers that use facts.
+ var useFacts []*analysis.Analyzer
+ for _, a := range requiredAnalyzers(analyzers) {
+ if len(a.FactTypes) > 0 {
+ useFacts = append(useFacts, a)
+ }
+ }
+ var vdepsMu sync.Mutex
+ for _, id := range m.DepsByPkgPath {
+ id := id
+ group.Go(func() error {
+ res, err := snapshot.analyze(ctx, id, useFacts)
+ if err != nil {
+ return err // cancelled, or failed to produce a package
+ }
+
+ vdepsMu.Lock()
+ vdeps[id] = res
+ vdepsMu.Unlock()
+ return nil
+ })
+ }
+
+ // Read file contents.
+ // (In practice these will be cache hits
+ // on reads done by the initial workspace load
+ // or after a change modification event.)
+ for i, uri := range m.CompiledGoFiles {
+ i, uri := i, uri
+ group.Go(func() error {
+ fh, err := snapshot.GetFile(ctx, uri) // ~25us
+ compiledGoFiles[i] = fh
+ return err // e.g. cancelled
+ })
+ }
+
+ if err := group.Wait(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Inv: analyze() of all vdeps succeeded (though some actions may have failed).
+
+ // We no longer depend on the snapshot.
+ snapshot = nil
+
+ // At this point we have the action results (serialized
+ // packages and facts) of our immediate dependencies,
+ // and the metadata and content of this package.
+ //
+ // We now compute a hash for all our inputs, and consult a
+ // global cache of promised results. If nothing material
+ // has changed, we'll make a hit in the shared cache.
+ //
+ // The hash of our inputs is based on the serialized export
+ // data and facts so that immaterial changes can be pruned
+ // without decoding.
+ key := analysisCacheKey(analyzers, m, compiledGoFiles, vdeps)
+
+ // Access the cache.
+ var summary *analyzeSummary
+ const cacheKind = "analysis"
+ if data, err := filecache.Get(cacheKind, key); err == nil {
+ // cache hit
+ mustDecode(data, &summary)
+
+ } else if err != filecache.ErrNotFound {
+ return nil, bug.Errorf("internal error reading shared cache: %v", err)
+
+ } else {
+ // Cache miss: do the work.
+ var err error
+ summary, err = actuallyAnalyze(ctx, analyzers, m, vdeps, compiledGoFiles)
+ if err != nil {
+ return nil, err
+ }
+ data := mustEncode(summary)
+ if false {
+ log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), id)
+ }
+ if err := filecache.Set(cacheKind, key, data); err != nil {
+ return nil, fmt.Errorf("internal error updating shared cache: %v", err)
+ }
+ }
+
+ // Hit or miss, we need to merge the export data from
+ // dependencies so that it includes all the types
+ // that might be summoned by the type checker.
+ //
+ // TODO(adonovan): opt: reduce this set by recording
+ // which packages were actually summoned by insert().
+ // (Just makes map smaller; probably marginal?)
+ allExport := make(map[PackagePath][]byte)
+ for _, vdep := range vdeps {
+ for k, v := range vdep.allExport {
+ allExport[k] = v
+ }
+ }
+ allExport[m.PkgPath] = summary.Export
+ summary.allExport = allExport
+
+ return summary, nil
+}
+
+// analysisCacheKey returns a cache key that is a cryptographic digest
+// of all the values that might affect type checking and analysis:
+// the analyzer names, package metadata, names and contents of
+// compiled Go files, and vdeps information (export data and facts).
+//
+// TODO(adonovan): safety: define our own flavor of Metadata
+// containing just the fields we need, and using it in the subsequent
+// logic, to keep us honest about hashing all parts that matter?
+func analysisCacheKey(analyzers []*analysis.Analyzer, m *source.Metadata, compiledGoFiles []source.FileHandle, vdeps map[PackageID]*analyzeSummary) [sha256.Size]byte {
+ hasher := sha256.New()
+
+ // In principle, a key must be the hash of an
+ // unambiguous encoding of all the relevant data.
+	// If it's ambiguous, we risk collisions.
+
+ // analyzers
+ fmt.Fprintf(hasher, "analyzers: %d\n", len(analyzers))
+ for _, a := range analyzers {
+ fmt.Fprintln(hasher, a.Name)
+ }
+
+ // package metadata
+ fmt.Fprintf(hasher, "package: %s %s %s\n", m.ID, m.Name, m.PkgPath)
+ // We can ignore m.DepsBy{Pkg,Import}Path: although the logic
+ // uses those fields, we account for them by hashing vdeps.
+
+ // type sizes
+ // This assertion is safe, but if a black-box implementation
+ // is ever needed, record Sizeof(*int) and Alignof(int64).
+ sz := m.TypesSizes.(*types.StdSizes)
+ fmt.Fprintf(hasher, "sizes: %d %d\n", sz.WordSize, sz.MaxAlign)
+
+ // metadata errors
+ for _, err := range m.Errors {
+ fmt.Fprintf(hasher, "error: %q", err)
+ }
+
+ // module Go version
+ if m.Module != nil && m.Module.GoVersion != "" {
+ fmt.Fprintf(hasher, "go %s\n", m.Module.GoVersion)
+ }
+
+ // file names and contents
+ fmt.Fprintf(hasher, "files: %d\n", len(compiledGoFiles))
+ for _, fh := range compiledGoFiles {
+ fmt.Fprintln(hasher, fh.FileIdentity())
+ }
+
+ // vdeps, in PackageID order
+ depIDs := make([]string, 0, len(vdeps))
+ for depID := range vdeps {
+ depIDs = append(depIDs, string(depID))
+ }
+ sort.Strings(depIDs)
+ for _, id := range depIDs {
+ vdep := vdeps[PackageID(id)]
+ fmt.Fprintf(hasher, "dep: %s\n", vdep.PkgPath)
+ fmt.Fprintf(hasher, "export: %s\n", vdep.DeepExportHash)
+
+ // action results: errors and facts
+ names := make([]string, 0, len(vdep.Actions))
+ for name := range vdep.Actions {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ summary := vdep.Actions[name]
+ fmt.Fprintf(hasher, "action %s\n", name)
+ if summary.Err != "" {
+ fmt.Fprintf(hasher, "error %s\n", summary.Err)
+ } else {
+ fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash)
+ // We can safely omit summary.diagnostics
+ // from the key since they have no downstream effect.
+ }
+ }
+ }
+
+ var hash [sha256.Size]byte
+ hasher.Sum(hash[:0])
+ return hash
+}
+
+// actuallyAnalyze implements the cache-miss case.
+// This function does not access the snapshot.
+func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *source.Metadata, vdeps map[PackageID]*analyzeSummary, compiledGoFiles []source.FileHandle) (*analyzeSummary, error) {
+
+ // Create a local FileSet for processing this package only.
+ fset := token.NewFileSet()
+
+ // Parse only the "compiled" Go files.
+ // Do the computation in parallel.
+ parsed := make([]*source.ParsedGoFile, len(compiledGoFiles))
+ {
+ var group errgroup.Group
+ for i, fh := range compiledGoFiles {
+ i, fh := i, fh
+ group.Go(func() error {
+ // Call parseGoImpl directly, not the caching wrapper,
+ // as cached ASTs require the global FileSet.
+ pgf, err := parseGoImpl(ctx, fset, fh, source.ParseFull)
+ parsed[i] = pgf
+ return err
+ })
+ }
+ if err := group.Wait(); err != nil {
+ return nil, err // cancelled, or catastrophic error (e.g. missing file)
+ }
+ }
+
+ // Type-check the package.
+ pkg := typeCheckForAnalysis(fset, parsed, m, vdeps)
+
+ // Build a map of PkgPath to *Package for all packages mentioned
+ // in exportdata for use by facts.
+ pkg.factsDecoder = facts.NewDecoder(pkg.types)
+
+ // Poll cancellation state.
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+
+ // TODO(adonovan): port the old logic to:
+ // - gather go/packages diagnostics from m.Errors? (port goPackagesErrorDiagnostics)
+ // - record unparseable file URIs so we can suppress type errors for these files.
+ // - gather diagnostics from expandErrors + typeErrorDiagnostics + depsErrors.
+
+ // -- analysis --
+
+ // Build action graph for this package.
+ // Each graph node (action) is one unit of analysis.
+ actions := make(map[*analysis.Analyzer]*action)
+ var mkAction func(a *analysis.Analyzer) *action
+ mkAction = func(a *analysis.Analyzer) *action {
+ act, ok := actions[a]
+ if !ok {
+ var hdeps []*action
+ for _, req := range a.Requires {
+ hdeps = append(hdeps, mkAction(req))
+ }
+ act = &action{a: a, pkg: pkg, vdeps: vdeps, hdeps: hdeps}
+ actions[a] = act
+ }
+ return act
+ }
+
+ // Build actions for initial package.
+ var roots []*action
+ for _, a := range analyzers {
+ roots = append(roots, mkAction(a))
+ }
+
+ // Execute the graph in parallel.
+ execActions(roots)
+
+ // Don't return (or cache) the result in case of cancellation.
+ if err := ctx.Err(); err != nil {
+ return nil, err // cancelled
+ }
+
+ // Return summaries only for the requested actions.
+ summaries := make(map[string]*actionSummary)
+ for _, act := range roots {
+ summaries[act.a.Name] = act.summary
+ }
+
+ return &analyzeSummary{
+ PkgPath: PackagePath(pkg.types.Path()),
+ Export: pkg.export,
+ DeepExportHash: pkg.deepExportHash,
+ Compiles: pkg.compiles,
+ Actions: summaries,
+ }, nil
+}
+
+func typeCheckForAnalysis(fset *token.FileSet, parsed []*source.ParsedGoFile, m *source.Metadata, vdeps map[PackageID]*analyzeSummary) *analysisPackage {
+ if false { // debugging
+ log.Println("typeCheckForAnalysis", m.PkgPath)
+ }
+
+ pkg := &analysisPackage{
+ m: m,
+ fset: fset,
+ parsed: parsed,
+ files: make([]*ast.File, len(parsed)),
+ compiles: len(m.Errors) == 0, // false => list error
+ types: types.NewPackage(string(m.PkgPath), string(m.Name)),
+ typesInfo: &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Scopes: make(map[ast.Node]*types.Scope),
+ },
+ typesSizes: m.TypesSizes,
+ }
+ typeparams.InitInstanceInfo(pkg.typesInfo)
+
+ for i, p := range parsed {
+ pkg.files[i] = p.File
+ if p.ParseErr != nil {
+ pkg.compiles = false // parse error
+ }
+ }
+
+ // Unsafe is special.
+ if m.PkgPath == "unsafe" {
+ pkg.types = types.Unsafe
+ return pkg
+ }
+
+ // Compute the union of transitive export data.
+ // (The actual values are shared, and not serialized.)
+ allExport := make(map[PackagePath][]byte)
+ for _, vdep := range vdeps {
+ for k, v := range vdep.allExport {
+ allExport[k] = v
+ }
+
+ if !vdep.Compiles {
+ pkg.compiles = false // transitive error
+ }
+ }
+
+ // exportHasher computes a hash of the names and export data of
+ // each package that was actually loaded during type checking.
+ //
+ // Because we use shallow export data, the hash for dependency
+ // analysis must incorporate indirect dependencies. As an
+ // optimization, we include only those that were actually
+ // used, which may be a small subset of those available.
+ //
+ // TODO(adonovan): opt: even better would be to implement a
+ // traversal over the package API like facts.NewDecoder does
+ // and only mention that set of packages in the hash.
+ // Perhaps there's a way to do that more efficiently.
+ //
+ // TODO(adonovan): opt: record the shallow hash alongside the
+ // shallow export data in the allExport map to avoid repeatedly
+ // hashing the export data.
+ //
+ // The writes to hasher below assume that type checking imports
+ // packages in a deterministic order.
+ exportHasher := sha256.New()
+ hashExport := func(pkgPath PackagePath, export []byte) {
+ fmt.Fprintf(exportHasher, "%s %d ", pkgPath, len(export))
+ exportHasher.Write(export)
+ }
+
+ // importer state
+ var (
+ insert func(p *types.Package, name string)
+ importMap = make(map[string]*types.Package) // keys are PackagePaths
+ )
+ loadFromExportData := func(pkgPath PackagePath) (*types.Package, error) {
+ export, ok := allExport[pkgPath]
+ if !ok {
+ return nil, bug.Errorf("missing export data for %q", pkgPath)
+ }
+ hashExport(pkgPath, export)
+ imported, err := gcimporter.IImportShallow(fset, importMap, export, string(pkgPath), insert)
+ if err != nil {
+ return nil, bug.Errorf("invalid export data for %q: %v", pkgPath, err)
+ }
+ return imported, nil
+ }
+ insert = func(p *types.Package, name string) {
+ imported, err := loadFromExportData(PackagePath(p.Path()))
+ if err != nil {
+ log.Fatalf("internal error: %v", err)
+ }
+ if imported != p {
+ log.Fatalf("internal error: inconsistent packages")
+ }
+ }
+
+ cfg := &types.Config{
+ Error: func(e error) {
+ pkg.compiles = false // type error
+ pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
+ },
+ Importer: importerFunc(func(importPath string) (*types.Package, error) {
+ if importPath == "unsafe" {
+ return types.Unsafe, nil // unsafe has no export data
+ }
+
+ // Beware that returning an error from this function
+ // will cause the type checker to synthesize a fake
+ // package whose Path is importPath, potentially
+ // losing a vendor/ prefix. If type-checking errors
+ // are swallowed, these packages may be confusing.
+
+ id, ok := m.DepsByImpPath[ImportPath(importPath)]
+ if !ok {
+ // The import syntax is inconsistent with the metadata.
+ // This could be because the import declaration was
+ // incomplete and the metadata only includes complete
+ // imports; or because the metadata ignores import
+ // edges that would lead to cycles in the graph.
+ return nil, fmt.Errorf("missing metadata for import of %q", importPath)
+ }
+
+ depResult, ok := vdeps[id] // id may be ""
+ if !ok {
+ // Analogous to (*snapshot).missingPkgError
+ // in the logic for regular type-checking,
+ // but without a snapshot we can't provide
+ // such detail, and anyway most analysis
+ // failures aren't surfaced in the UI.
+ return nil, fmt.Errorf("no required module provides package %q (id=%q)", importPath, id)
+ }
+
+ // (Duplicates logic from check.go.)
+ if !source.IsValidImport(m.PkgPath, depResult.PkgPath) {
+ return nil, fmt.Errorf("invalid use of internal package %s", importPath)
+ }
+
+ return loadFromExportData(depResult.PkgPath)
+ }),
+ }
+
+ // Set Go dialect.
+ if m.Module != nil && m.Module.GoVersion != "" {
+ goVersion := "go" + m.Module.GoVersion
+ // types.NewChecker panics if GoVersion is invalid.
+ // An unparsable mod file should probably stop us
+ // before we get here, but double check just in case.
+ if goVersionRx.MatchString(goVersion) {
+ typesinternal.SetGoVersion(cfg, goVersion)
+ }
+ }
+
+ // We want to type check cgo code if go/types supports it.
+ // We passed typecheckCgo to go/packages when we Loaded.
+ // TODO(adonovan): do we actually need this??
+ typesinternal.SetUsesCgo(cfg)
+
+ check := types.NewChecker(cfg, fset, pkg.types, pkg.typesInfo)
+
+ // Type checking errors are handled via the config, so ignore them here.
+ _ = check.Files(pkg.files)
+
+ // debugging (type errors are quite normal)
+ if false {
+ if pkg.typeErrors != nil {
+ log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors)
+ }
+ }
+
+ // Emit the export data and compute the deep hash.
+ export, err := gcimporter.IExportShallow(pkg.fset, pkg.types)
+ if err != nil {
+ log.Fatalf("internal error writing shallow export data: %v", err)
+ }
+ pkg.export = export
+ hashExport(m.PkgPath, export)
+ exportHasher.Sum(pkg.deepExportHash[:0])
+
+ return pkg
+}
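An illustrative aside, not part of the patch: typeCheckForAnalysis frames each dependency's export data with its package path and length before feeding it to a SHA-256 hasher, so distinct sequences of blobs cannot collide by concatenation, and then snapshots the digest with Sum. A minimal standalone sketch of that framing pattern, with hypothetical names:

```
package main

import (
	"crypto/sha256"
	"fmt"
)

// hashBlobs returns a digest over a sequence of (name, data) pairs.
// Prefixing each blob with its name and length makes the framing
// unambiguous: ("a", "bc") and ("ab", "c") hash differently.
// The caller supplies an explicit order, since map iteration order
// is nondeterministic and the digest must be stable.
func hashBlobs(blobs map[string][]byte, order []string) [sha256.Size]byte {
	h := sha256.New()
	for _, name := range order {
		data := blobs[name]
		fmt.Fprintf(h, "%s %d ", name, len(data))
		h.Write(data)
	}
	var sum [sha256.Size]byte
	h.Sum(sum[:0]) // write the digest into sum, as exportHasher.Sum does above
	return sum
}

func main() {
	blobs := map[string][]byte{
		"p": []byte("export-data-p"),
		"q": []byte("export-data-q"),
	}
	fmt.Printf("%x\n", hashBlobs(blobs, []string{"p", "q"}))
}
```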
+
+// analysisPackage contains information about a package, including
+// syntax trees, used transiently during its type-checking and analysis.
+type analysisPackage struct {
+ m *source.Metadata
+ fset *token.FileSet // local to this package
+ parsed []*source.ParsedGoFile
+ files []*ast.File // same as parsed[i].File
+ types *types.Package
+ compiles bool // package is transitively free of list/parse/type errors
+ factsDecoder *facts.Decoder
+ export []byte // encoding of types.Package
+ deepExportHash source.Hash // reflexive transitive hash of export data
+ typesInfo *types.Info
+ typeErrors []types.Error
+ typesSizes types.Sizes
+}
+
+// An action represents one unit of analysis work: the application of
+// one analysis to one package. Actions form a DAG, both within a
+// package (as different analyzers are applied, either in sequence or
+// parallel), and across packages (as dependencies are analyzed).
+type action struct {
+ once sync.Once
+ a *analysis.Analyzer
+ pkg *analysisPackage
+ hdeps []*action // horizontal dependencies
+ vdeps map[PackageID]*analyzeSummary // vertical dependencies
+
+ // results of action.exec():
+ result interface{} // result of Run function, of type a.ResultType
+ summary *actionSummary
+ err error
+}
+
+func (act *action) String() string {
+ return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.m.ID)
+}
+
+// execActions executes a set of action graph nodes in parallel.
+func execActions(actions []*action) {
+ var wg sync.WaitGroup
+ for _, act := range actions {
+ act := act
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ act.once.Do(func() {
+ execActions(act.hdeps) // analyze "horizontal" dependencies
+ act.result, act.summary, act.err = act.exec()
+ if act.err != nil {
+ act.summary = &actionSummary{Err: act.err.Error()}
+ // TODO(adonovan): suppress logging. But
+ // shouldn't the root error's causal chain
+ // include this information?
+ if false { // debugging
+ log.Printf("act.exec(%v) failed: %v", act, act.err)
+ }
+ }
+ })
+ }()
+ }
+ wg.Wait()
+}
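Aside, for illustration only (not part of the patch): execActions executes a dependency DAG in parallel, using sync.Once so that a node shared by several parents runs exactly once, and a WaitGroup to join the goroutines of each level. A small self-contained sketch of the same pattern over a toy node type:

```
package main

import (
	"fmt"
	"sync"
)

// node is a toy DAG node: its deps must run before it does.
type node struct {
	name string
	deps []*node
	once sync.Once
}

// run executes nodes in parallel; each node runs exactly once,
// and only after all of its dependencies have completed.
func run(nodes []*node) {
	var wg sync.WaitGroup
	for _, n := range nodes {
		n := n
		wg.Add(1)
		go func() {
			defer wg.Done()
			n.once.Do(func() {
				run(n.deps) // recursively complete dependencies first
				fmt.Println("exec", n.name)
			})
		}()
	}
	wg.Wait()
}

func main() {
	a := &node{name: "a"}
	b := &node{name: "b", deps: []*node{a}}
	c := &node{name: "c", deps: []*node{a, b}}
	run([]*node{b, c}) // prints a, then b, then c
}
```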
+
+// exec defines the execution of a single action.
+// It returns the (ephemeral) result of the analyzer's Run function,
+// along with its (serializable) facts and diagnostics.
+// Or it returns an error if the analyzer did not run to
+// completion and deliver a valid result.
+func (act *action) exec() (interface{}, *actionSummary, error) {
+ analyzer := act.a
+ pkg := act.pkg
+
+ hasFacts := len(analyzer.FactTypes) > 0
+
+ // Report an error if any action dependency (vertical or horizontal) failed.
+ // To avoid long error messages describing chains of failure,
+	// we return the dependency's error unadorned.
+ if hasFacts {
+ // TODO(adonovan): use deterministic order.
+ for _, res := range act.vdeps {
+ if vdep := res.Actions[analyzer.Name]; vdep.Err != "" {
+ return nil, nil, errors.New(vdep.Err)
+ }
+ }
+ }
+ for _, dep := range act.hdeps {
+ if dep.err != nil {
+ return nil, nil, dep.err
+ }
+ }
+ // Inv: all action dependencies succeeded.
+
+ // Were there list/parse/type errors that might prevent analysis?
+ if !pkg.compiles && !analyzer.RunDespiteErrors {
+ return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.m.ID)
+ }
+ // Inv: package is well-formed enough to proceed with analysis.
+
+ if false { // debugging
+ log.Println("action.exec", act)
+ }
+
+ // Gather analysis Result values from horizontal dependencies.
+ var inputs = make(map[*analysis.Analyzer]interface{})
+ for _, dep := range act.hdeps {
+ inputs[dep.a] = dep.result
+ }
+
+ // TODO(adonovan): opt: facts.Set works but it may be more
+ // efficient to fork and tailor it to our precise needs.
+ //
+ // We've already sharded the fact encoding by action
+ // so that it can be done in parallel (hoisting the
+ // ImportMap call so that we build the map once per package).
+ // We could eliminate locking.
+ // We could also dovetail more closely with the export data
+ // decoder to obtain a more compact representation of
+ // packages and objects (e.g. its internal IDs, instead
+ // of PkgPaths and objectpaths.)
+
+ // Read and decode analysis facts for each imported package.
+ factset, err := pkg.factsDecoder.Decode(func(imp *types.Package) ([]byte, error) {
+ if !hasFacts {
+ return nil, nil // analyzer doesn't use facts, so no vdeps
+ }
+
+ // Package.Imports() may contain a fake "C" package. Ignore it.
+ if imp.Path() == "C" {
+ return nil, nil
+ }
+
+ id, ok := pkg.m.DepsByPkgPath[PackagePath(imp.Path())]
+ if !ok {
+ // This may mean imp was synthesized by the type
+		// checker because it failed to import the package for some reason
+ // (e.g. bug processing export data; metadata ignoring
+ // a cycle-forming import).
+ // In that case, the fake package's imp.Path
+ // is set to the failed importPath (and thus
+ // it may lack a "vendor/" prefix).
+ //
+ // For now, silently ignore it on the assumption
+ // that the error is already reported elsewhere.
+ // return nil, fmt.Errorf("missing metadata")
+ return nil, nil
+ }
+
+ vdep, ok := act.vdeps[id]
+ if !ok {
+ return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id)
+ }
+ return vdep.Actions[analyzer.Name].Facts, nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err)
+ }
+
+ // TODO(adonovan): make Export*Fact panic rather than discarding
+ // undeclared fact types, so that we discover bugs in analyzers.
+ factFilter := make(map[reflect.Type]bool)
+ for _, f := range analyzer.FactTypes {
+ factFilter[reflect.TypeOf(f)] = true
+ }
+
+ // posToLocation converts from token.Pos to protocol form.
+ // TODO(adonovan): improve error messages.
+ posToLocation := func(start, end token.Pos) (protocol.Location, error) {
+ tokFile := pkg.fset.File(start)
+ for _, p := range pkg.parsed {
+ if p.Tok == tokFile {
+ if end == token.NoPos {
+ end = start
+ }
+ rng, err := p.Mapper.PosRange(start, end)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ return protocol.Location{
+ // TODO(adonovan): is this sound?
+ // See dual conversion in toSourceDiagnostic.
+ URI: protocol.DocumentURI(p.URI),
+ Range: rng,
+ }, nil
+ }
+ }
+ return protocol.Location{},
+ bug.Errorf("internal error: token.Pos not within package")
+ }
+
+ // Now run the (pkg, analyzer) action.
+ var diagnostics []gobDiagnostic
+ pass := &analysis.Pass{
+ Analyzer: analyzer,
+ Fset: pkg.fset,
+ Files: pkg.files,
+ Pkg: pkg.types,
+ TypesInfo: pkg.typesInfo,
+ TypesSizes: pkg.typesSizes,
+ TypeErrors: pkg.typeErrors,
+ ResultOf: inputs,
+ Report: func(d analysis.Diagnostic) {
+ // Prefix the diagnostic category with the analyzer's name.
+ if d.Category == "" {
+ d.Category = analyzer.Name
+ } else {
+ d.Category = analyzer.Name + "." + d.Category
+ }
+
+ diagnostic, err := toGobDiagnostic(posToLocation, d)
+ if err != nil {
+ bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err)
+ return
+ }
+ diagnostics = append(diagnostics, diagnostic)
+ },
+ ImportObjectFact: factset.ImportObjectFact,
+ ExportObjectFact: factset.ExportObjectFact,
+ ImportPackageFact: factset.ImportPackageFact,
+ ExportPackageFact: factset.ExportPackageFact,
+ AllObjectFacts: func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) },
+ AllPackageFacts: func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) },
+ }
+
+ // Recover from panics (only) within the analyzer logic.
+ // (Use an anonymous function to limit the recover scope.)
+ var result interface{}
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ // An Analyzer panicked, likely due to a bug.
+ //
+ // In general we want to discover and fix such panics quickly,
+ // so we don't suppress them, but some bugs in third-party
+ // analyzers cannot be quickly fixed, so we use an allowlist
+ // to suppress panics.
+ const strict = true
+ if strict && bug.PanicOnBugs &&
+ analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343
+ // Uncomment this when debugging suspected failures
+ // in the driver, not the analyzer.
+ if false {
+ debug.SetTraceback("all") // show all goroutines
+ }
+ panic(r)
+ } else {
+ // In production, suppress the panic and press on.
+ err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r)
+ }
+ }
+ }()
+ result, err = pass.Analyzer.Run(pass)
+ }()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
+ return nil, nil, bug.Errorf(
+ "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
+ pass.Pkg.Path(), pass.Analyzer, got, want)
+ }
+
+ // Disallow Export*Fact calls after Run.
+ // (A panic means the Analyzer is abusing concurrency.)
+ pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
+ panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
+ }
+ pass.ExportPackageFact = func(fact analysis.Fact) {
+ panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact))
+ }
+
+ factsdata := factset.Encode()
+ return result, &actionSummary{
+ Diagnostics: diagnostics,
+ Facts: factsdata,
+ FactsHash: source.HashOf(factsdata),
+ }, nil
+}
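An illustrative aside, not part of the patch: the recover above is wrapped in an anonymous function so that only panics raised inside the analyzer's Run call are converted to errors, while panics elsewhere in exec still crash as usual. A minimal sketch of that scoped-recover idiom:

```
package main

import "fmt"

// callSafely runs f, converting a panic inside f into an error while
// leaving panics elsewhere in the caller unaffected. The anonymous
// function limits the scope of the deferred recover to f alone.
func callSafely(f func() int) (result int, err error) {
	func() {
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("function panicked: %v", r)
			}
		}()
		result = f()
	}()
	return result, err
}

func main() {
	_, err := callSafely(func() int { panic("boom") })
	fmt.Println(err) // function panicked: boom

	n, err := callSafely(func() int { return 42 })
	fmt.Println(n, err) // 42 <nil>
}
```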
+
+// requiredAnalyzers returns the transitive closure of required analyzers in preorder.
+func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
+ var result []*analysis.Analyzer
+ seen := make(map[*analysis.Analyzer]bool)
+ var visitAll func([]*analysis.Analyzer)
+ visitAll = func(analyzers []*analysis.Analyzer) {
+ for _, a := range analyzers {
+ if !seen[a] {
+ seen[a] = true
+ result = append(result, a)
+ visitAll(a.Requires)
+ }
+ }
+ }
+ visitAll(analyzers)
+ return result
+}
+
+func mustEncode(x interface{}) []byte {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(x); err != nil {
+ log.Fatalf("internal error encoding %T: %v", x, err)
+ }
+ return buf.Bytes()
+}
+
+func mustDecode(data []byte, ptr interface{}) {
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil {
+ log.Fatalf("internal error decoding %T: %v", ptr, err)
+ }
+}
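Aside (not part of the patch): mustEncode and mustDecode wrap encoding/gob for the serializable summary types defined below; gob encodes only exported fields and requires a pointer target when decoding. A self-contained round-trip sketch using a toy type:

```
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// toySummary stands in for an actionSummary-like value.
type toySummary struct {
	Diagnostics []string
}

func main() {
	in := toySummary{Diagnostics: []string{"unused variable x"}}

	// Encode to a byte slice (as mustEncode does).
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		log.Fatalf("encoding %T: %v", in, err)
	}

	// Decode into a pointer (as mustDecode does).
	var out toySummary
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		log.Fatalf("decoding %T: %v", &out, err)
	}
	fmt.Println(out.Diagnostics) // [unused variable x]
}
```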
+
+// -- data types for serialization of analysis.Diagnostic --
+
+type gobDiagnostic struct {
+ Location protocol.Location
+ Category string
+ Message string
+ SuggestedFixes []gobSuggestedFix
+ Related []gobRelatedInformation
+}
+
+type gobRelatedInformation struct {
+ Location protocol.Location
+ Message string
+}
+
+type gobSuggestedFix struct {
+ Message string
+ TextEdits []gobTextEdit
+}
+
+type gobTextEdit struct {
+ Location protocol.Location
+ NewText []byte
+}
+
+// toGobDiagnostic converts an analysis.Diagnostic to a serializable gobDiagnostic,
+// which requires expanding token.Pos positions into protocol.Location form.
+func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), diag analysis.Diagnostic) (gobDiagnostic, error) {
+ var fixes []gobSuggestedFix
+ for _, fix := range diag.SuggestedFixes {
+ var gobEdits []gobTextEdit
+ for _, textEdit := range fix.TextEdits {
+ loc, err := posToLocation(textEdit.Pos, textEdit.End)
+ if err != nil {
+ return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err)
+ }
+ gobEdits = append(gobEdits, gobTextEdit{
+ Location: loc,
+ NewText: textEdit.NewText,
+ })
+ }
+ fixes = append(fixes, gobSuggestedFix{
+ Message: fix.Message,
+ TextEdits: gobEdits,
+ })
+ }
+
+ var related []gobRelatedInformation
+ for _, r := range diag.Related {
+ loc, err := posToLocation(r.Pos, r.End)
+ if err != nil {
+ return gobDiagnostic{}, fmt.Errorf("in Related: %w", err)
+ }
+ related = append(related, gobRelatedInformation{
+ Location: loc,
+ Message: r.Message,
+ })
+ }
+
+ loc, err := posToLocation(diag.Pos, diag.End)
+ if err != nil {
+ return gobDiagnostic{}, err
+ }
+ return gobDiagnostic{
+ Location: loc,
+ Category: diag.Category,
+ Message: diag.Message,
+ Related: related,
+ SuggestedFixes: fixes,
+ }, nil
+}
diff --git a/internal/lsp/cache/cache.go b/gopls/internal/lsp/cache/cache.go
similarity index 80%
rename from internal/lsp/cache/cache.go
rename to gopls/internal/lsp/cache/cache.go
index ac670b573e5..46b2e26a331 100644
--- a/internal/lsp/cache/cache.go
+++ b/gopls/internal/lsp/cache/cache.go
@@ -6,7 +6,6 @@ package cache
import (
"context"
- "crypto/sha256"
"fmt"
"go/ast"
"go/token"
@@ -21,31 +20,47 @@ import (
"sync/atomic"
"time"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
)
-func New(options func(*source.Options)) *Cache {
+// New creates a new cache for gopls operation results, using the given file
+// set, shared store, and session options.
+//
+// Both the fset and store may be nil, but if store is non-nil then fset must be
+// non-nil too (they must always be used together); otherwise it may be possible to get
+// cached data referencing token.Pos values not mapped by the FileSet.
+func New(fset *token.FileSet, store *memoize.Store) *Cache {
index := atomic.AddInt64(&cacheIndex, 1)
+
+ if store != nil && fset == nil {
+ panic("non-nil store with nil fset")
+ }
+ if fset == nil {
+ fset = token.NewFileSet()
+ }
+ if store == nil {
+ store = &memoize.Store{}
+ }
+
c := &Cache{
id: strconv.FormatInt(index, 10),
- fset: token.NewFileSet(),
- options: options,
+ fset: fset,
+ store: store,
fileContent: map[span.URI]*fileHandle{},
}
return c
}
type Cache struct {
- id string
- fset *token.FileSet
- options func(*source.Options)
+ id string
+ fset *token.FileSet
- store memoize.Store
+ store *memoize.Store
fileMu sync.Mutex
fileContent map[span.URI]*fileHandle
@@ -55,7 +70,7 @@ type fileHandle struct {
modTime time.Time
uri span.URI
bytes []byte
- hash string
+ hash source.Hash
err error
// size is the file length as reported by Stat, for the purpose of
@@ -69,6 +84,7 @@ func (h *fileHandle) Saved() bool {
return true
}
+// GetFile stats and (maybe) reads the file, updates the cache, and returns it.
func (c *Cache) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
return c.getFile(ctx, uri)
}
@@ -101,7 +117,7 @@ func (c *Cache) getFile(ctx context.Context, uri span.URI) (*fileHandle, error)
return fh, nil
}
- fh, err := readFile(ctx, uri, fi)
+ fh, err := readFile(ctx, uri, fi) // ~25us
if err != nil {
return nil, err
}
@@ -126,7 +142,7 @@ func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, e
_ = ctx
defer done()
- data, err := ioutil.ReadFile(uri.Filename())
+ data, err := ioutil.ReadFile(uri.Filename()) // ~20us
if err != nil {
return &fileHandle{
modTime: fi.ModTime(),
@@ -139,39 +155,34 @@ func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, e
size: fi.Size(),
uri: uri,
bytes: data,
- hash: hashContents(data),
+ hash: source.HashOf(data),
}, nil
}
-func (c *Cache) NewSession(ctx context.Context) *Session {
+// NewSession creates a new gopls session with the given cache and options overrides.
+//
+// The provided optionsOverrides may be nil.
+func NewSession(ctx context.Context, c *Cache, optionsOverrides func(*source.Options)) *Session {
index := atomic.AddInt64(&sessionIndex, 1)
options := source.DefaultOptions().Clone()
- if c.options != nil {
- c.options(options)
+ if optionsOverrides != nil {
+ optionsOverrides(options)
}
s := &Session{
- cache: c,
id: strconv.FormatInt(index, 10),
+ cache: c,
+ gocmdRunner: &gocommand.Runner{},
options: options,
overlays: make(map[span.URI]*overlay),
- gocmdRunner: &gocommand.Runner{},
}
event.Log(ctx, "New session", KeyCreateSession.Of(s))
return s
}
-func (c *Cache) FileSet() *token.FileSet {
- return c.fset
-}
-
func (h *fileHandle) URI() span.URI {
return h.uri
}
-func (h *fileHandle) Hash() string {
- return h.hash
-}
-
func (h *fileHandle) FileIdentity() source.FileIdentity {
return source.FileIdentity{
URI: h.uri,
@@ -183,10 +194,6 @@ func (h *fileHandle) Read() ([]byte, error) {
return h.bytes, h.err
}
-func hashContents(contents []byte) string {
- return fmt.Sprintf("%x", sha256.Sum256(contents))
-}
-
var cacheIndex, sessionIndex, viewIndex int64
func (c *Cache) ID() string { return c.id }
@@ -207,17 +214,12 @@ func (c *Cache) PackageStats(withNames bool) template.HTML {
c.store.DebugOnlyIterate(func(k, v interface{}) {
switch k.(type) {
case packageHandleKey:
- v := v.(*packageData)
+ v := v.(typeCheckResult)
if v.pkg == nil {
break
}
- var typsCost, typInfoCost int64
- if v.pkg.types != nil {
- typsCost = typesCost(v.pkg.types.Scope())
- }
- if v.pkg.typesInfo != nil {
- typInfoCost = typesInfoCost(v.pkg.typesInfo)
- }
+ typsCost := typesCost(v.pkg.types.Scope())
+ typInfoCost := typesInfoCost(v.pkg.typesInfo)
stat := packageStat{
id: v.pkg.m.ID,
mode: v.pkg.mode,
diff --git a/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go
similarity index 58%
rename from internal/lsp/cache/check.go
rename to gopls/internal/lsp/cache/check.go
index b8a3655a9d4..cfac5ffb107 100644
--- a/internal/lsp/cache/check.go
+++ b/gopls/internal/lsp/cache/check.go
@@ -11,191 +11,195 @@ import (
"fmt"
"go/ast"
"go/types"
- "path"
"path/filepath"
"regexp"
- "sort"
"strings"
"sync"
"golang.org/x/mod/module"
+ "golang.org/x/sync/errgroup"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/typeparams"
"golang.org/x/tools/internal/typesinternal"
)
-type packageHandleKey string
-
-type packageHandle struct {
- handle *memoize.Handle
+// A packageKey identifies a packageHandle in the snapshot.packages map.
+type packageKey struct {
+ mode source.ParseMode
+ id PackageID
+}
- goFiles, compiledGoFiles []*parseGoHandle
+type packageHandleKey source.Hash
- // mode is the mode the files were parsed in.
- mode source.ParseMode
+// A packageHandle is a handle to the future result of type-checking a package.
+// The resulting package is obtained from the await() method.
+type packageHandle struct {
+ promise *memoize.Promise // [typeCheckResult]
// m is the metadata associated with the package.
- m *KnownMetadata
+ m *source.Metadata
// key is the hashed key for the package.
+ //
+	// It includes all bits of the transitive closure of the
+	// dependencies' sources. This is more than type checking
+ // really depends on: export data of direct deps should be
+ // enough. (The key for analysis actions could similarly
+ // hash only Facts of direct dependencies.)
key packageHandleKey
}
-func (ph *packageHandle) packageKey() packageKey {
- return packageKey{
- id: ph.m.ID,
- mode: ph.mode,
- }
-}
-
-func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) {
- for _, pgh := range ph.goFiles {
- f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader)
- if err != nil {
- continue
- }
- seen := map[string]struct{}{}
- for _, impSpec := range f.File.Imports {
- imp := strings.Trim(impSpec.Path.Value, `"`)
- if _, ok := seen[imp]; !ok {
- seen[imp] = struct{}{}
- result = append(result, imp)
- }
- }
- }
-
- sort.Strings(result)
- return result
-}
-
-// packageData contains the data produced by type-checking a package.
-type packageData struct {
+// typeCheckResult contains the result of a call to
+// typeCheckImpl, which type-checks a package.
+type typeCheckResult struct {
pkg *pkg
err error
}
-// buildPackageHandle returns a packageHandle for a given package and mode.
+// buildPackageHandle returns a handle for the future results of
+// type-checking the package identified by id in the given mode.
// It assumes that the given ID already has metadata available, so it does not
// attempt to reload missing or invalid metadata. The caller must reload
// metadata if needed.
func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) {
- if ph := s.getPackage(id, mode); ph != nil {
- return ph, nil
+ packageKey := packageKey{id: id, mode: mode}
+
+ s.mu.Lock()
+ entry, hit := s.packages.Get(packageKey)
+ m := s.meta.metadata[id]
+ s.mu.Unlock()
+
+ if m == nil {
+ return nil, fmt.Errorf("no metadata for %s", id)
}
- // Build the packageHandle for this ID and its dependencies.
- ph, deps, err := s.buildKey(ctx, id, mode)
- if err != nil {
- return nil, err
+ if hit {
+ return entry.(*packageHandle), nil
}
- // Do not close over the packageHandle or the snapshot in the Bind function.
- // This creates a cycle, which causes the finalizers to never run on the handles.
- // The possible cycles are:
- //
- // packageHandle.h.function -> packageHandle
- // packageHandle.h.function -> snapshot -> packageHandle
+ // Begin computing the key by getting the depKeys for all dependencies.
+ // This requires reading the transitive closure of dependencies' source files.
//
+ // It is tempting to parallelize the recursion here, but
+ // without de-duplication of subtasks this would lead to an
+ // exponential amount of work, and computing the key is
+ // expensive as it reads all the source files transitively.
+ // Notably, we don't update the s.packages cache until the
+ // entire key has been computed.
+ // TODO(adonovan): use a promise cache to ensure that the key
+ // for each package is computed by at most one thread, then do
+ // the recursive key building of dependencies in parallel.
+ deps := make(map[PackageID]*packageHandle)
+ var depKey source.Hash // XOR of all unique deps
+ for _, depID := range m.DepsByPkgPath {
+ depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
+ // Don't use invalid metadata for dependencies if the top-level
+ // metadata is valid. We only load top-level packages, so if the
+ // top-level is valid, all of its dependencies should be as well.
+ if err != nil {
+ event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, source.SnapshotLabels(s)...)
- m := ph.m
- key := ph.key
-
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
+ // This check ensures we break out of the slow
+ // buildPackageHandle recursion quickly when
+ // context cancelation is detected within GetFile.
+ if ctx.Err() != nil {
+ return nil, ctx.Err() // cancelled
+ }
- // Begin loading the direct dependencies, in parallel.
- var wg sync.WaitGroup
- for _, dep := range deps {
- wg.Add(1)
- go func(dep *packageHandle) {
- dep.check(ctx, snapshot)
- wg.Done()
- }(dep)
+ // One bad dependency should not prevent us from
+ // checking the entire package. Leave depKeys[i] unset.
+ continue
}
- data := &packageData{}
- data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
- // Make sure that the workers above have finished before we return,
- // especially in case of cancellation.
- wg.Wait()
+ depKey.XORWith(source.Hash(depHandle.key))
- return data
- }, nil)
- ph.handle = h
+ deps[depID] = depHandle
+ }
- // Cache the handle in the snapshot. If a package handle has already
- // been cached, addPackage will return the cached value. This is fine,
- // since the original package handle above will have no references and be
- // garbage collected.
- ph = s.addPackageHandle(ph)
+ // Read both lists of files of this package, in parallel.
+ //
+ // goFiles aren't presented to the type checker--nor
+ // are they included in the key, unsoundly--but their
+ // syntax trees are available from (*pkg).File(URI).
+ // TODO(adonovan): consider parsing them on demand?
+ // The need should be rare.
+ goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m)
+ if err != nil {
+ return nil, err
+ }
- return ph, nil
-}
+ // All the file reading has now been done.
+ // Create a handle for the result of type checking.
+ experimentalKey := s.View().Options().ExperimentalPackageCacheKey
+ phKey := computePackageKey(m.ID, compiledGoFiles, m, depKey, mode, experimentalKey)
+ promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} {
+ pkg, err := typeCheckImpl(ctx, arg.(*snapshot), goFiles, compiledGoFiles, m, mode, deps)
+ return typeCheckResult{pkg, err}
+ })
-// buildKey computes the key for a given packageHandle.
-func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) {
- m := s.getMetadata(id)
- if m == nil {
- return nil, nil, fmt.Errorf("no metadata for %s", id)
+ ph := &packageHandle{
+ promise: promise,
+ m: m,
+ key: phKey,
}
- goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode)
- if err != nil {
- return nil, nil, err
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Check that the metadata has not changed
+ // (which should invalidate this handle).
+ //
+ // (In future, handles should form a graph with edges from a
+ // packageHandle to the handles for parsing its files and the
+ // handles for type-checking its immediate deps, at which
+ // point there will be no need to even access s.meta.)
+ if s.meta.metadata[ph.m.ID] != ph.m {
+ return nil, fmt.Errorf("stale metadata for %s", ph.m.ID)
}
- compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode)
- if err != nil {
- return nil, nil, err
+
+ // Check cache again in case another thread got there first.
+ if prev, ok := s.packages.Get(packageKey); ok {
+ prevPH := prev.(*packageHandle)
+ release()
+ if prevPH.m != ph.m {
+ return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID)
+ }
+ return prevPH, nil
}
- ph := &packageHandle{
- m: m,
- goFiles: goFiles,
- compiledGoFiles: compiledGoFiles,
- mode: mode,
- }
- // Make sure all of the depList are sorted.
- depList := append([]PackageID{}, m.Deps...)
- sort.Slice(depList, func(i, j int) bool {
- return depList[i] < depList[j]
- })
- deps := make(map[PackagePath]*packageHandle)
+ // Update the map.
+ s.packages.Set(packageKey, ph, func(_, _ interface{}) { release() })
- // Begin computing the key by getting the depKeys for all dependencies.
- var depKeys []packageHandleKey
- for _, depID := range depList {
- depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
- // Don't use invalid metadata for dependencies if the top-level
- // metadata is valid. We only load top-level packages, so if the
- // top-level is valid, all of its dependencies should be as well.
- if err != nil || m.Valid && !depHandle.m.Valid {
- if err != nil {
- event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
- } else {
- event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
- }
+ return ph, nil
+}
- if ctx.Err() != nil {
- return nil, nil, ctx.Err()
- }
- // One bad dependency should not prevent us from checking the entire package.
- // Add a special key to mark a bad dependency.
- depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID)))
- continue
+// readGoFiles reads the content of Metadata.GoFiles and
+// Metadata.CompiledGoFiles, in parallel.
+func readGoFiles(ctx context.Context, s *snapshot, m *source.Metadata) (goFiles, compiledGoFiles []source.FileHandle, err error) {
+ var group errgroup.Group
+ getFileHandles := func(files []span.URI) []source.FileHandle {
+ fhs := make([]source.FileHandle, len(files))
+ for i, uri := range files {
+ i, uri := i, uri
+ group.Go(func() (err error) {
+ fhs[i], err = s.GetFile(ctx, uri) // ~25us
+ return
+ })
}
- deps[depHandle.m.PkgPath] = depHandle
- depKeys = append(depKeys, depHandle.key)
+ return fhs
}
- experimentalKey := s.View().Options().ExperimentalPackageCacheKey
- ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey)
- return ph, deps, nil
+ return getFileHandles(m.GoFiles),
+ getFileHandles(m.CompiledGoFiles),
+ group.Wait()
}
func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode {
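Aside, for illustration only (not part of the patch): readGoFiles above fans out one goroutine per file with golang.org/x/sync/errgroup and joins them via group.Wait, which reports the first error. A standalone sketch of that fan-out pattern using a hypothetical readAll helper:

```
package main

import (
	"fmt"
	"io/ioutil"

	"golang.org/x/sync/errgroup"
)

// readAll reads the named files in parallel, preserving input order.
// The first error (if any) is returned by group.Wait.
func readAll(paths []string) ([][]byte, error) {
	var group errgroup.Group
	contents := make([][]byte, len(paths))
	for i, path := range paths {
		i, path := i, path // capture loop variables
		group.Go(func() (err error) {
			contents[i], err = ioutil.ReadFile(path)
			return
		})
	}
	return contents, group.Wait()
}

func main() {
	data, err := readAll([]string{"go.mod", "go.sum"})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, b := range data {
		fmt.Println(len(b), "bytes")
	}
}
```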
@@ -208,13 +212,18 @@ func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode {
if s.view.Options().MemoryMode == source.ModeNormal {
return source.ParseFull
}
- if s.isActiveLocked(id, nil) {
+ if s.isActiveLocked(id) {
return source.ParseFull
}
return source.ParseExported
}
-func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
+// computePackageKey returns a key representing the act of type checking
+// a package named id containing the specified files, metadata, and
+// combined dependency hash.
+func computePackageKey(id PackageID, files []source.FileHandle, m *source.Metadata, depsKey source.Hash, mode source.ParseMode, experimentalKey bool) packageHandleKey {
+	// TODO(adonovan): opt: no need to materialize the bytes; hash them directly.
+ // Also, use field separators to avoid spurious collisions.
b := bytes.NewBuffer(nil)
b.WriteString(string(id))
if m.Module != nil {
@@ -225,38 +234,36 @@ func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps
// files, and deps). It should not otherwise affect the inputs to the type
// checker, so this experiment omits it. This should increase cache hits on
// the daemon as cfg contains the environment and working directory.
- b.WriteString(hashConfig(m.Config))
+ hc := hashConfig(m.Config)
+ b.Write(hc[:])
}
b.WriteByte(byte(mode))
- for _, dep := range deps {
- b.WriteString(string(dep))
- }
- for _, cgf := range pghs {
- b.WriteString(cgf.file.FileIdentity().String())
+ b.Write(depsKey[:])
+ for _, file := range files {
+ b.WriteString(file.FileIdentity().String())
}
- return packageHandleKey(hashContents(b.Bytes()))
-}
-
-// hashEnv returns a hash of the snapshot's configuration.
-func hashEnv(s *snapshot) string {
- s.view.optionsMu.Lock()
- env := s.view.options.EnvSlice()
- s.view.optionsMu.Unlock()
-
- b := &bytes.Buffer{}
- for _, e := range env {
- b.WriteString(e)
- }
- return hashContents(b.Bytes())
+ // Metadata errors are interpreted and memoized on the computed package, so
+ // we must hash them into the key here.
+ //
+ // TODO(rfindley): handle metadata diagnostics independently from
+ // type-checking diagnostics.
+ for _, err := range m.Errors {
+ b.WriteString(err.Msg)
+ b.WriteString(err.Pos)
+ b.WriteRune(rune(err.Kind))
+ }
+ return packageHandleKey(source.HashOf(b.Bytes()))
}
// hashConfig returns the hash for the *packages.Config.
-func hashConfig(config *packages.Config) string {
- b := bytes.NewBuffer(nil)
+func hashConfig(config *packages.Config) source.Hash {
+ // TODO(adonovan): opt: don't materialize the bytes; hash them directly.
+ // Also, use sound field separators to avoid collisions.
+ var b bytes.Buffer
// Dir, Mode, Env, BuildFlags are the parts of the config that can change.
b.WriteString(config.Dir)
- b.WriteString(string(rune(config.Mode)))
+ b.WriteRune(rune(config.Mode))
for _, e := range config.Env {
b.WriteString(e)
@@ -264,19 +271,16 @@ func hashConfig(config *packages.Config) string {
for _, f := range config.BuildFlags {
b.WriteString(f)
}
- return hashContents(b.Bytes())
-}
-
-func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) {
- return ph.check(ctx, s.(*snapshot))
+ return source.HashOf(b.Bytes())
}
-func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) {
- v, err := ph.handle.Get(ctx, s.generation, s)
+// await waits for typeCheckImpl to complete and returns its result.
+func (ph *packageHandle) await(ctx context.Context, s *snapshot) (*pkg, error) {
+ v, err := s.awaitPromise(ctx, ph.promise)
if err != nil {
return nil, err
}
- data := v.(*packageData)
+ data := v.(typeCheckResult)
return data.pkg, data.err
}
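An illustrative aside, not part of the patch: in buildPackageHandle above, the keys of all dependencies are combined by XOR (depKey.XORWith), which is commutative and associative, so iterating the DepsByPkgPath map in nondeterministic order still yields a stable combined key. A small sketch of that combining step, assuming a fixed-size digest type similar to source.Hash:

```
package main

import (
	"crypto/sha256"
	"fmt"
)

// hash is a stand-in for source.Hash: a fixed-size content digest.
type hash [sha256.Size]byte

// xorWith folds other into h; because XOR is order-independent,
// the result does not depend on the order of the inputs.
func (h *hash) xorWith(other hash) {
	for i := range h {
		h[i] ^= other[i]
	}
}

func main() {
	deps := []string{"fmt", "strings", "net/http"}

	var combined hash
	for _, dep := range deps {
		combined.xorWith(sha256.Sum256([]byte(dep)))
	}
	fmt.Printf("%x\n", combined)
}
```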
@@ -288,33 +292,44 @@ func (ph *packageHandle) ID() string {
return string(ph.m.ID)
}
-func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) {
- v := ph.handle.Cached(g)
+func (ph *packageHandle) cached() (*pkg, error) {
+ v := ph.promise.Cached()
if v == nil {
return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath)
}
- data := v.(*packageData)
+ data := v.(typeCheckResult)
return data.pkg, data.err
}
-func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) {
- pghs := make([]*parseGoHandle, 0, len(files))
- for _, uri := range files {
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- pghs = append(pghs, s.parseGoHandle(ctx, fh, mode))
- }
- return pghs, nil
-}
+// typeCheckImpl type checks the parsed source files in compiledGoFiles.
+// (The resulting pkg also holds the parsed but not type-checked goFiles.)
+// deps holds the future results of type-checking the direct dependencies.
+func typeCheckImpl(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *source.Metadata, mode source.ParseMode, deps map[PackageID]*packageHandle) (*pkg, error) {
+ // Start type checking of direct dependencies,
+ // in parallel and asynchronously.
+ // As the type checker imports each of these
+ // packages, it will wait for its completion.
+ var wg sync.WaitGroup
+ for _, dep := range deps {
+ wg.Add(1)
+ go func(dep *packageHandle) {
+ dep.await(ctx, snapshot) // ignore result
+ wg.Done()
+ }(dep)
+ }
+ // The 'defer' below is unusual but intentional:
+	// it is not necessary that each call to dep.await
+ // complete before type checking begins, as the type
+ // checker will wait for those it needs. But they do
+ // need to complete before this function returns and
+ // the snapshot is possibly destroyed.
+ defer wg.Wait()
-func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
var filter *unexportedFilter
if mode == source.ParseExported {
filter = &unexportedFilter{uses: map[string]bool{}}
}
- pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter)
+ pkg, err := doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter)
if err != nil {
return nil, err
}
@@ -325,16 +340,14 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source
// time keeping those names.
missing, unexpected := filter.ProcessErrors(pkg.typeErrors)
if len(unexpected) == 0 && len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID)))
- pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter)
+ pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter)
if err != nil {
return nil, err
}
missing, unexpected = filter.ProcessErrors(pkg.typeErrors)
}
if len(unexpected) != 0 || len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID)))
- pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil)
+ pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, nil)
if err != nil {
return nil, err
}
@@ -361,7 +374,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source
for _, e := range m.Errors {
diags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
if err != nil {
- event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(pkg.ID()))
+ event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(string(pkg.ID())))
continue
}
pkg.diagnostics = append(pkg.diagnostics, diags...)
@@ -383,7 +396,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source
for _, e := range pkg.parseErrors {
diags, err := parseErrorDiagnostics(snapshot, pkg, e)
if err != nil {
- event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(pkg.ID()))
+ event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(string(pkg.ID())))
continue
}
for _, diag := range diags {
@@ -401,7 +414,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source
for _, e := range expandErrors(unexpanded, snapshot.View().Options().RelatedInformationSupported) {
diags, err := typeErrorDiagnostics(snapshot, pkg, e)
if err != nil {
- event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(pkg.ID()))
+ event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(string(pkg.ID())))
continue
}
pkg.typeErrors = append(pkg.typeErrors, e.primary)
@@ -426,15 +439,16 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source
var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
-func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
+func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *source.Metadata, mode source.ParseMode, deps map[PackageID]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID)))
defer done()
pkg := &pkg{
- m: m,
- mode: mode,
- imports: make(map[PackagePath]*pkg),
- types: types.NewPackage(string(m.PkgPath), string(m.Name)),
+ m: m,
+ mode: mode,
+ fset: snapshot.FileSet(), // must match parse call below (snapshot.ParseGo for now)
+ deps: make(map[PackageID]*pkg),
+ types: types.NewPackage(string(m.PkgPath), string(m.Name)),
typesInfo: &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
@@ -443,21 +457,18 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
Selections: make(map[*ast.SelectorExpr]*types.Selection),
Scopes: make(map[ast.Node]*types.Scope),
},
- typesSizes: m.TypesSizes,
}
typeparams.InitInstanceInfo(pkg.typesInfo)
- for _, gf := range pkg.m.GoFiles {
- // In the presence of line directives, we may need to report errors in
- // non-compiled Go files, so we need to register them on the package.
- // However, we only need to really parse them in ParseFull mode, when
- // the user might actually be looking at the file.
- fh, err := snapshot.GetFile(ctx, gf)
- if err != nil {
- return nil, err
- }
- goMode := source.ParseFull
- if mode != source.ParseFull {
+ // Parse the non-compiled GoFiles. (These aren't presented to
+ // the type checker but are part of the returned pkg.)
+ // TODO(adonovan): opt: parallelize parsing.
+ for _, fh := range goFiles {
+ goMode := mode
+ if mode == source.ParseExported {
+ // This package is being loaded only for type information,
+ // to which non-compiled Go files are irrelevant,
+ // so parse only the header.
goMode = source.ParseHeader
}
pgf, err := snapshot.ParseGo(ctx, fh, goMode)
@@ -467,7 +478,8 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
pkg.goFiles = append(pkg.goFiles, pgf)
}
- if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil {
+ // Parse the CompiledGoFiles: those seen by the compiler/typechecker.
+ if err := parseCompiledGoFiles(ctx, compiledGoFiles, snapshot, mode, pkg, astFilter); err != nil {
return nil, err
}
@@ -501,23 +513,30 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
Error: func(e error) {
pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
},
- Importer: importerFunc(func(pkgPath string) (*types.Package, error) {
- // If the context was cancelled, we should abort.
- if ctx.Err() != nil {
- return nil, ctx.Err()
+ Importer: importerFunc(func(path string) (*types.Package, error) {
+ // While all of the import errors could be reported
+ // based on the metadata before we start type checking,
+ // reporting them via types.Importer places the errors
+ // at the correct source location.
+ id, ok := pkg.m.DepsByImpPath[ImportPath(path)]
+ if !ok {
+ // If the import declaration is broken,
+ // go list may fail to report metadata about it.
+ // See TestFixImportDecl for an example.
+ return nil, fmt.Errorf("missing metadata for import of %q", path)
}
- dep := resolveImportPath(pkgPath, pkg, deps)
- if dep == nil {
- return nil, snapshot.missingPkgError(ctx, pkgPath)
+ dep, ok := deps[id] // id may be ""
+ if !ok {
+ return nil, snapshot.missingPkgError(path)
}
- if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) {
- return nil, fmt.Errorf("invalid use of internal package %s", pkgPath)
+ if !source.IsValidImport(m.PkgPath, dep.m.PkgPath) {
+ return nil, fmt.Errorf("invalid use of internal package %s", path)
}
- depPkg, err := dep.check(ctx, snapshot)
+ depPkg, err := dep.await(ctx, snapshot)
if err != nil {
return nil, err
}
- pkg.imports[depPkg.m.PkgPath] = depPkg
+ pkg.deps[depPkg.m.ID] = depPkg
return depPkg.types, nil
}),
}
@@ -540,7 +559,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
// We passed typecheckCgo to go/packages when we Loaded.
typesinternal.SetUsesCgo(cfg)
- check := types.NewChecker(cfg, snapshot.FileSet(), pkg.types, pkg.typesInfo)
+ check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
var files []*ast.File
for _, cgf := range pkg.compiledGoFiles {
@@ -548,7 +567,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
}
// Type checking errors are handled via the config, so ignore them here.
- _ = check.Files(files)
+ _ = check.Files(files) // 50us-15ms, depending on size of package
// If the context was cancelled, we may have returned a ton of transient
// errors to the type checker. Swallow them.
@@ -558,22 +577,17 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
return pkg, nil
}
-func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
- for _, cgf := range pkg.m.CompiledGoFiles {
- fh, err := snapshot.GetFile(ctx, cgf)
- if err != nil {
- return err
- }
-
+func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHandle, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
+ // TODO(adonovan): opt: parallelize this loop, which takes 1-25ms.
+ for _, fh := range compiledGoFiles {
var pgf *source.ParsedGoFile
- var fixed bool
+ var err error
// Only parse Full through the cache -- we need to own Exported ASTs
// to prune them.
if mode == source.ParseFull {
- pgf, fixed, err = snapshot.parseGo(ctx, fh, mode)
+ pgf, err = snapshot.ParseGo(ctx, fh, mode)
} else {
- d := parseGo(ctx, snapshot.FileSet(), fh, mode)
- pgf, fixed, err = d.parsed, d.fixed, d.err
+ pgf, err = parseGoImpl(ctx, pkg.fset, fh, mode) // ~20us/KB
}
if err != nil {
return err
@@ -584,22 +598,31 @@ func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.P
}
// If we have fixed parse errors in any of the files, we should hide type
// errors, as they may be completely nonsensical.
- pkg.hasFixedFiles = pkg.hasFixedFiles || fixed
- }
- if mode != source.ParseExported {
- return nil
+ pkg.hasFixedFiles = pkg.hasFixedFiles || pgf.Fixed
}
- if astFilter != nil {
- var files []*ast.File
- for _, cgf := range pkg.compiledGoFiles {
- files = append(files, cgf.File)
- }
- astFilter.Filter(files)
- } else {
- for _, cgf := range pkg.compiledGoFiles {
- trimAST(cgf.File)
+
+ // Optionally remove parts that don't affect the exported API.
+ if mode == source.ParseExported {
+ // TODO(adonovan): opt: experiment with pre-parser
+ // trimming, either a scanner-based implementation
+ // such as https://go.dev/play/p/KUrObH1YkX8 (~31%
+ // speedup), or a byte-oriented implementation (2x
+ // speedup).
+ if astFilter != nil {
+ // aggressive pruning based on reachability
+ var files []*ast.File
+ for _, cgf := range pkg.compiledGoFiles {
+ files = append(files, cgf.File)
+ }
+ astFilter.Filter(files)
+ } else {
+ // simple trimming of function bodies
+ for _, cgf := range pkg.compiledGoFiles {
+ trimAST(cgf.File)
+ }
}
}
+
return nil
}
@@ -607,7 +630,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
// Select packages that can't be found, and were imported in non-workspace packages.
// Workspace packages already show their own errors.
var relevantErrors []*packagesinternal.PackageError
- for _, depsError := range pkg.m.depsErrors {
+ for _, depsError := range pkg.m.DepsErrors {
// Up to Go 1.15, the missing package was included in the stack, which
// was presumably a bug. We want the next one up.
directImporterIdx := len(depsError.ImportStack) - 1
@@ -637,6 +660,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
}
allImports := map[string][]fileImport{}
for _, cgf := range pkg.compiledGoFiles {
+ // TODO(adonovan): modify Imports() to accept a single token.File (cgf.Tok).
for _, group := range astutil.Imports(s.FileSet(), cgf.File) {
for _, imp := range group {
if imp.Path == nil {
@@ -659,7 +683,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
}
for _, imp := range allImports[item] {
- rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
+ rng, err := source.NewMappedRange(imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
if err != nil {
return nil, err
}
@@ -700,7 +724,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
for _, depErr := range relevantErrors {
for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
item := depErr.ImportStack[i]
- m := s.getMetadata(PackageID(item))
+ m := s.Metadata(PackageID(item))
if m == nil || m.Module == nil {
continue
}
@@ -709,7 +733,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
if reference == nil {
continue
}
- rng, err := rangeFromPositions(pm.Mapper, reference.Start, reference.End)
+ rng, err := pm.Mapper.OffsetRange(reference.Start.Byte, reference.End.Byte)
if err != nil {
return nil, err
}
@@ -733,21 +757,19 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost
// missingPkgError returns an error message for a missing package that varies
// based on the user's workspace mode.
-func (s *snapshot) missingPkgError(ctx context.Context, pkgPath string) error {
+func (s *snapshot) missingPkgError(pkgPath string) error {
var b strings.Builder
if s.workspaceMode()&moduleMode == 0 {
gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath))
-
- b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg))
-
+ fmt.Fprintf(&b, "cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg)
for _, gopath := range filepath.SplitList(s.view.gopath) {
gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath))
- b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg))
+ fmt.Fprintf(&b, "\n\t%s (from $GOPATH)", gopathSrcPkg)
}
} else {
- b.WriteString(fmt.Sprintf("no required module provides package %q", pkgPath))
- if err := s.getInitializationError(ctx); err != nil {
- b.WriteString(fmt.Sprintf("(workspace configuration error: %s)", err.MainError))
+ fmt.Fprintf(&b, "no required module provides package %q", pkgPath)
+ if err := s.getInitializationError(); err != nil {
+ fmt.Fprintf(&b, "\n(workspace configuration error: %s)", err.MainError)
}
}
return errors.New(b.String())
@@ -820,41 +842,6 @@ func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extende
return result
}
-// resolveImportPath resolves an import path in pkg to a package from deps.
-// It should produce the same results as resolveImportPath:
-// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990.
-func resolveImportPath(importPath string, pkg *pkg, deps map[PackagePath]*packageHandle) *packageHandle {
- if dep := deps[PackagePath(importPath)]; dep != nil {
- return dep
- }
- // We may be in GOPATH mode, in which case we need to check vendor dirs.
- searchDir := path.Dir(pkg.PkgPath())
- for {
- vdir := PackagePath(path.Join(searchDir, "vendor", importPath))
- if vdep := deps[vdir]; vdep != nil {
- return vdep
- }
-
- // Search until Dir doesn't take us anywhere new, e.g. "." or "/".
- next := path.Dir(searchDir)
- if searchDir == next {
- break
- }
- searchDir = next
- }
-
- // Vendor didn't work. Let's try minimal module compatibility mode.
- // In MMC, the packagePath is the canonical (.../vN/...) path, which
- // is hard to calculate. But the go command has already resolved the ID
- // to the non-versioned path, and we can take advantage of that.
- for _, dep := range deps {
- if dep.ID() == importPath {
- return dep
- }
- }
- return nil
-}
-
// An importFunc is an implementation of the single-method
// types.Importer interface based on a function value.
type importerFunc func(path string) (*types.Package, error)
diff --git a/gopls/internal/lsp/cache/debug.go b/gopls/internal/lsp/cache/debug.go
new file mode 100644
index 00000000000..fd82aff301e
--- /dev/null
+++ b/gopls/internal/lsp/cache/debug.go
@@ -0,0 +1,53 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// This file contains helpers that can be used to instrument code while
+// debugging.
+
+// debugEnabled toggles the helpers below.
+const debugEnabled = false
+
+// If debugEnabled is true, debugf formats its arguments and prints to stderr.
+// If debugEnabled is false, it is a no-op.
+func debugf(format string, args ...interface{}) {
+ if !debugEnabled {
+ return
+ }
+ if false {
+ _ = fmt.Sprintf(format, args...) // encourage vet to validate format strings
+ }
+ fmt.Fprintf(os.Stderr, ">>> "+format+"\n", args...)
+}
+
+// If debugEnabled is true, dumpWorkspace prints a summary of workspace
+// packages to stderr. If debugEnabled is false, it is a no-op.
+func (s *snapshot) dumpWorkspace(context string) {
+ if !debugEnabled {
+ return
+ }
+
+ debugf("workspace (after %s):", context)
+ var ids []PackageID
+ for id := range s.workspacePackages {
+ ids = append(ids, id)
+ }
+
+ sort.Slice(ids, func(i, j int) bool {
+ return ids[i] < ids[j]
+ })
+
+ for _, id := range ids {
+ pkgPath := s.workspacePackages[id]
+ _, ok := s.meta.metadata[id]
+ debugf(" %s:%s (metadata: %t)", id, pkgPath, ok)
+ }
+}
diff --git a/internal/lsp/cache/error_test.go b/gopls/internal/lsp/cache/error_test.go
similarity index 100%
rename from internal/lsp/cache/error_test.go
rename to gopls/internal/lsp/cache/error_test.go
diff --git a/internal/lsp/cache/errors.go b/gopls/internal/lsp/cache/errors.go
similarity index 75%
rename from internal/lsp/cache/errors.go
rename to gopls/internal/lsp/cache/errors.go
index 342f2bea5d7..7ca4f078eff 100644
--- a/internal/lsp/cache/errors.go
+++ b/gopls/internal/lsp/cache/errors.go
@@ -4,22 +4,27 @@
package cache
+// This file defines routines to convert diagnostics from go list, go
+// get, go/packages, parsing, type checking, and analysis into
+// source.Diagnostic form, and to suggest quick fixes.
+
import (
"fmt"
"go/scanner"
- "go/token"
"go/types"
+ "log"
"regexp"
"strconv"
"strings"
- "golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/typesinternal"
)
@@ -82,7 +87,7 @@ func parseErrorDiagnostics(snapshot *snapshot, pkg *pkg, errList scanner.ErrorLi
return nil, err
}
pos := pgf.Tok.Pos(e.Pos.Offset)
- spn, err := span.NewRange(snapshot.FileSet(), pos, pos).Span()
+ spn, err := span.NewRange(pgf.Tok, pos, pos).Span()
if err != nil {
return nil, err
}
@@ -103,7 +108,7 @@ var importErrorRe = regexp.MustCompile(`could not import ([^\s]+)`)
var unsupportedFeatureRe = regexp.MustCompile(`.*require.* go(\d+\.\d+) or later`)
func typeErrorDiagnostics(snapshot *snapshot, pkg *pkg, e extendedError) ([]*source.Diagnostic, error) {
- code, spn, err := typeErrorData(snapshot.FileSet(), pkg, e.primary)
+ code, spn, err := typeErrorData(pkg, e.primary)
if err != nil {
return nil, err
}
@@ -122,9 +127,13 @@ func typeErrorDiagnostics(snapshot *snapshot, pkg *pkg, e extendedError) ([]*sou
diag.Code = code.String()
diag.CodeHref = typesCodeHref(snapshot, code)
}
+ switch code {
+ case typesinternal.UnusedVar, typesinternal.UnusedImport:
+ diag.Tags = append(diag.Tags, protocol.Unnecessary)
+ }
for _, secondary := range e.secondaries {
- _, secondarySpan, err := typeErrorData(snapshot.FileSet(), pkg, secondary)
+ _, secondarySpan, err := typeErrorData(pkg, secondary)
if err != nil {
return nil, err
}
@@ -187,69 +196,47 @@ func editGoDirectiveQuickFix(snapshot *snapshot, uri span.URI, version string) (
return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil
}
-func analysisDiagnosticDiagnostics(snapshot *snapshot, pkg *pkg, a *analysis.Analyzer, e *analysis.Diagnostic) ([]*source.Diagnostic, error) {
- var srcAnalyzer *source.Analyzer
- // Find the analyzer that generated this diagnostic.
- for _, sa := range source.EnabledAnalyzers(snapshot) {
- if a == sa.Analyzer {
- srcAnalyzer = sa
- break
- }
- }
-
- spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
+// toSourceDiagnostic converts a gobDiagnostic to "source" form.
+func toSourceDiagnostic(srcAnalyzer *source.Analyzer, gobDiag *gobDiagnostic) *source.Diagnostic {
kinds := srcAnalyzer.ActionKind
if len(srcAnalyzer.ActionKind) == 0 {
kinds = append(kinds, protocol.QuickFix)
}
- fixes, err := suggestedAnalysisFixes(snapshot, pkg, e, kinds)
- if err != nil {
- return nil, err
- }
+ fixes := suggestedAnalysisFixes(gobDiag, kinds)
if srcAnalyzer.Fix != "" {
- cmd, err := command.NewApplyFixCommand(e.Message, command.ApplyFixArgs{
- URI: protocol.URIFromSpanURI(spn.URI()),
- Range: rng,
+ cmd, err := command.NewApplyFixCommand(gobDiag.Message, command.ApplyFixArgs{
+ URI: gobDiag.Location.URI,
+ Range: gobDiag.Location.Range,
Fix: srcAnalyzer.Fix,
})
if err != nil {
- return nil, err
+ // JSON marshalling of these argument values cannot fail.
+ log.Fatalf("internal error in NewApplyFixCommand: %v", err)
}
for _, kind := range kinds {
fixes = append(fixes, source.SuggestedFixFromCommand(cmd, kind))
}
}
- related, err := relatedInformation(pkg, snapshot.FileSet(), e)
- if err != nil {
- return nil, err
- }
severity := srcAnalyzer.Severity
if severity == 0 {
severity = protocol.SeverityWarning
}
diag := &source.Diagnostic{
- URI: spn.URI(),
- Range: rng,
+ // TODO(adonovan): is this sound? See dual conversion in posToLocation.
+ URI: span.URI(gobDiag.Location.URI),
+ Range: gobDiag.Location.Range,
Severity: severity,
- Source: source.AnalyzerErrorKind(e.Category),
- Message: e.Message,
- Related: related,
+ Source: source.AnalyzerErrorKind(gobDiag.Category),
+ Message: gobDiag.Message,
+ Related: relatedInformation(gobDiag),
SuggestedFixes: fixes,
- Analyzer: srcAnalyzer,
}
// If the fixes only delete code, assume that the diagnostic is reporting dead code.
if onlyDeletions(fixes) {
diag.Tags = []protocol.DiagnosticTag{protocol.Unnecessary}
}
- return []*source.Diagnostic{diag}, nil
+ return diag
}
// onlyDeletions returns true if all of the suggested fixes are deletions.
@@ -277,21 +264,14 @@ func typesCodeHref(snapshot *snapshot, code typesinternal.ErrorCode) string {
return source.BuildLink(target, "golang.org/x/tools/internal/typesinternal", code.String())
}
-func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnostic, kinds []protocol.CodeActionKind) ([]source.SuggestedFix, error) {
+func suggestedAnalysisFixes(diag *gobDiagnostic, kinds []protocol.CodeActionKind) []source.SuggestedFix {
var fixes []source.SuggestedFix
for _, fix := range diag.SuggestedFixes {
edits := make(map[span.URI][]protocol.TextEdit)
for _, e := range fix.TextEdits {
- spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- edits[spn.URI()] = append(edits[spn.URI()], protocol.TextEdit{
- Range: rng,
+ uri := span.URI(e.Location.URI)
+ edits[uri] = append(edits[uri], protocol.TextEdit{
+ Range: e.Location.Range,
NewText: string(e.NewText),
})
}
@@ -304,36 +284,42 @@ func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnos
}
}
- return fixes, nil
+ return fixes
}
-func relatedInformation(pkg *pkg, fset *token.FileSet, diag *analysis.Diagnostic) ([]source.RelatedInformation, error) {
+func relatedInformation(diag *gobDiagnostic) []source.RelatedInformation {
var out []source.RelatedInformation
for _, related := range diag.Related {
- spn, err := span.NewRange(fset, related.Pos, related.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
out = append(out, source.RelatedInformation{
- URI: spn.URI(),
- Range: rng,
+ URI: span.URI(related.Location.URI),
+ Range: related.Location.Range,
Message: related.Message,
})
}
- return out, nil
+ return out
}
-func typeErrorData(fset *token.FileSet, pkg *pkg, terr types.Error) (typesinternal.ErrorCode, span.Span, error) {
+func typeErrorData(pkg *pkg, terr types.Error) (typesinternal.ErrorCode, span.Span, error) {
ecode, start, end, ok := typesinternal.ReadGo116ErrorData(terr)
if !ok {
start, end = terr.Pos, terr.Pos
ecode = 0
}
- posn := fset.Position(start)
+ // go/types may return invalid positions in some cases, such as
+ // in errors on tokens missing from the syntax tree.
+ if !start.IsValid() {
+ return 0, span.Span{}, fmt.Errorf("type error (%q, code %d, go116=%t) without position", terr.Msg, ecode, ok)
+ }
+ // go/types errors retain their FileSet.
+ // Sanity-check that we're using the right one.
+ fset := pkg.FileSet()
+ if fset != terr.Fset {
+ return 0, span.Span{}, bug.Errorf("wrong FileSet for type error")
+ }
+ posn := safetoken.StartPosition(fset, start)
+ if !posn.IsValid() {
+		return 0, span.Span{}, fmt.Errorf("position %d of type error %q (code %d) not found in FileSet", start, terr.Msg, ecode)
+ }
pgf, err := pkg.File(span.URIFromPath(posn.Filename))
if err != nil {
return 0, span.Span{}, err
@@ -341,17 +327,13 @@ func typeErrorData(fset *token.FileSet, pkg *pkg, terr types.Error) (typesintern
if !end.IsValid() || end == start {
end = analysisinternal.TypeErrorEndPos(fset, pgf.Src, start)
}
- spn, err := parsedGoSpan(pgf, start, end)
+ spn, err := span.FileSpan(pgf.Mapper.TokFile, start, end)
if err != nil {
return 0, span.Span{}, err
}
return ecode, spn, nil
}
-func parsedGoSpan(pgf *source.ParsedGoFile, start, end token.Pos) (span.Span, error) {
- return span.FileSpan(pgf.Mapper.TokFile, pgf.Mapper.TokFile, start, end)
-}
-
// spanToRange converts a span.Span to a protocol.Range,
// assuming that the span belongs to the package whose diagnostics are being computed.
func spanToRange(pkg *pkg, spn span.Span) (protocol.Range, error) {
@@ -397,7 +379,7 @@ func parseGoListImportCycleError(snapshot *snapshot, e packages.Error, pkg *pkg)
// Search file imports for the import that is causing the import cycle.
for _, imp := range cgf.File.Imports {
if imp.Path.Value == circImp {
- spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span()
+ spn, err := span.NewRange(cgf.Tok, imp.Pos(), imp.End()).Span()
if err != nil {
return msg, span.Span{}, false
}
diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go
new file mode 100644
index 00000000000..8e9e5d92c4f
--- /dev/null
+++ b/gopls/internal/lsp/cache/graph.go
@@ -0,0 +1,124 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// A metadataGraph is an immutable and transitively closed import
+// graph of Go packages, as obtained from go/packages.
+type metadataGraph struct {
+ // metadata maps package IDs to their associated metadata.
+ metadata map[PackageID]*source.Metadata
+
+ // importedBy maps package IDs to the list of packages that import them.
+ importedBy map[PackageID][]PackageID
+
+ // ids maps file URIs to package IDs, sorted by (!valid, cli, packageID).
+	// A single file may belong to multiple packages due to test packages.
+ ids map[span.URI][]PackageID
+}
+
+// Clone creates a new metadataGraph, applying the given updates to the
+// receiver.
+func (g *metadataGraph) Clone(updates map[PackageID]*source.Metadata) *metadataGraph {
+ if len(updates) == 0 {
+ // Optimization: since the graph is immutable, we can return the receiver.
+ return g
+ }
+ result := &metadataGraph{metadata: make(map[PackageID]*source.Metadata, len(g.metadata))}
+ // Copy metadata.
+ for id, m := range g.metadata {
+ result.metadata[id] = m
+ }
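+	// Apply the updates: a nil metadata value indicates deletion.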
+ for id, m := range updates {
+ if m == nil {
+ delete(result.metadata, id)
+ } else {
+ result.metadata[id] = m
+ }
+ }
+ result.build()
+ return result
+}
+
+// build constructs g.importedBy and g.ids from g.metadata.
+func (g *metadataGraph) build() {
+ // Build the import graph.
+ g.importedBy = make(map[PackageID][]PackageID)
+ for id, m := range g.metadata {
+ for _, depID := range m.DepsByPkgPath {
+ g.importedBy[depID] = append(g.importedBy[depID], id)
+ }
+ }
+
+ // Collect file associations.
+ g.ids = make(map[span.URI][]PackageID)
+ for id, m := range g.metadata {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+ for uri := range uris {
+ g.ids[uri] = append(g.ids[uri], id)
+ }
+ }
+
+ // Sort and filter file associations.
+ for uri, ids := range g.ids {
+ sort.Slice(ids, func(i, j int) bool {
+ cli := source.IsCommandLineArguments(ids[i])
+ clj := source.IsCommandLineArguments(ids[j])
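+			// 1. packages that are not command-line-arguments appear first.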
+ if cli != clj {
+ return clj
+ }
+
+ // 2. packages appear in name order.
+ return ids[i] < ids[j]
+ })
+
+ // Choose the best IDs for each URI, according to the following rules:
+ // - If there are any valid real packages, choose them.
+		//  - Else, choose the first valid command-line-arguments package, if it exists.
+ //
+ // TODO(rfindley): it might be better to track all IDs here, and exclude
+ // them later when type checking, but this is the existing behavior.
+ for i, id := range ids {
+			// If we've seen *anything* prior to a command-line-arguments package, take
+ // it. Note that ids[0] may itself be command-line-arguments.
+ if i > 0 && source.IsCommandLineArguments(id) {
+ g.ids[uri] = ids[:i]
+ break
+ }
+ }
+ }
+}
+
+// reverseReflexiveTransitiveClosure returns a new mapping, keyed by package ID,
+// containing the metadata of the specified packages and of every package that
+// transitively imports one of them.
+func (g *metadataGraph) reverseReflexiveTransitiveClosure(ids ...PackageID) map[PackageID]*source.Metadata {
+ seen := make(map[PackageID]*source.Metadata)
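+	// visitAll walks the reverse import graph depth-first, recording each
+	// package's metadata the first time it is encountered.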
+ var visitAll func([]PackageID)
+ visitAll = func(ids []PackageID) {
+ for _, id := range ids {
+ if seen[id] == nil {
+ if m := g.metadata[id]; m != nil {
+ seen[id] = m
+ visitAll(g.importedBy[id])
+ }
+ }
+ }
+ }
+ visitAll(ids)
+ return seen
+}
diff --git a/internal/lsp/cache/imports.go b/gopls/internal/lsp/cache/imports.go
similarity index 68%
rename from internal/lsp/cache/imports.go
rename to gopls/internal/lsp/cache/imports.go
index 01a2468ef34..2bda377746d 100644
--- a/internal/lsp/cache/imports.go
+++ b/gopls/internal/lsp/cache/imports.go
@@ -7,61 +7,48 @@ package cache
import (
"context"
"fmt"
+ "os"
"reflect"
"strings"
"sync"
"time"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/source"
)
type importsState struct {
ctx context.Context
- mu sync.Mutex
- processEnv *imports.ProcessEnv
- cleanupProcessEnv func()
- cacheRefreshDuration time.Duration
- cacheRefreshTimer *time.Timer
- cachedModFileHash string
- cachedBuildFlags []string
+ mu sync.Mutex
+ processEnv *imports.ProcessEnv
+ cleanupProcessEnv func()
+ cacheRefreshDuration time.Duration
+ cacheRefreshTimer *time.Timer
+ cachedModFileHash source.Hash
+ cachedBuildFlags []string
+ cachedDirectoryFilters []string
}
func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error {
s.mu.Lock()
defer s.mu.Unlock()
- // Find the hash of the active mod file, if any. Using the unsaved content
+ // Find the hash of active mod files, if any. Using the unsaved content
// is slightly wasteful, since we'll drop caches a little too often, but
// the mod file shouldn't be changing while people are autocompleting.
- var modFileHash string
- // If we are using 'legacyWorkspace' mode, we can just read the modfile from
- // the snapshot. Otherwise, we need to get the synthetic workspace mod file.
//
- // TODO(rfindley): we should be able to just always use the synthetic
- // workspace module, or alternatively use the go.work file.
- if snapshot.workspace.moduleSource == legacyWorkspace {
- for m := range snapshot.workspace.getActiveModFiles() { // range to access the only element
- modFH, err := snapshot.GetFile(ctx, m)
- if err != nil {
- return err
- }
- modFileHash = modFH.FileIdentity().Hash
- }
- } else {
- modFile, err := snapshot.workspace.modFile(ctx, snapshot)
- if err != nil {
- return err
- }
- modBytes, err := modFile.Format()
+ // TODO(rfindley): consider instead hashing on-disk modfiles here.
+ var modFileHash source.Hash
+ for m := range snapshot.workspace.ActiveModFiles() {
+ fh, err := snapshot.GetFile(ctx, m)
if err != nil {
return err
}
- modFileHash = hashContents(modBytes)
+ modFileHash.XORWith(fh.FileIdentity().Hash)
}
// view.goEnv is immutable -- changes make a new view. Options can change.
@@ -69,9 +56,11 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot
snapshot.view.optionsMu.Lock()
localPrefix := snapshot.view.options.Local
currentBuildFlags := snapshot.view.options.BuildFlags
+ currentDirectoryFilters := snapshot.view.options.DirectoryFilters
changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) ||
snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) ||
- modFileHash != s.cachedModFileHash
+ modFileHash != s.cachedModFileHash ||
+ !reflect.DeepEqual(snapshot.view.options.DirectoryFilters, s.cachedDirectoryFilters)
snapshot.view.optionsMu.Unlock()
// If anything relevant to imports has changed, clear caches and
@@ -91,6 +80,7 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot
}
s.cachedModFileHash = modFileHash
s.cachedBuildFlags = currentBuildFlags
+ s.cachedDirectoryFilters = currentDirectoryFilters
var err error
s.cleanupProcessEnv, err = s.populateProcessEnv(ctx, snapshot)
if err != nil {
@@ -141,20 +131,21 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho
pe.Logf = nil
}
- // Take an extra reference to the snapshot so that its workspace directory
- // (if any) isn't destroyed while we're using it.
- release := snapshot.generation.Acquire()
+ // Extract invocation details from the snapshot to use with goimports.
+ //
+ // TODO(rfindley): refactor to extract the necessary invocation logic into
+ // separate functions. Using goCommandInvocation is unnecessarily indirect,
+ // and has led to memory leaks in the past, when the snapshot was
+ // unintentionally held past its lifetime.
_, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{
WorkingDir: snapshot.view.rootURI.Filename(),
})
if err != nil {
return nil, err
}
- pe.WorkingDir = inv.WorkingDir
+
pe.BuildFlags = inv.BuildFlags
- pe.WorkingDir = inv.WorkingDir
- pe.ModFile = inv.ModFile
- pe.ModFlag = inv.ModFlag
+ pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile
pe.Env = map[string]string{}
for _, kv := range inv.Env {
split := strings.SplitN(kv, "=", 2)
@@ -163,11 +154,31 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho
}
pe.Env[split[0]] = split[1]
}
+ // We don't actually use the invocation, so clean it up now.
+ cleanupInvocation()
+
+ // If the snapshot uses a synthetic workspace directory, create a copy for
+ // the lifecycle of the importsState.
+ //
+ // Notably, we cannot use the snapshot invocation working directory, as that
+ // is tied to the lifecycle of the snapshot.
+ //
+ // Otherwise return a no-op cleanup function.
+ cleanup = func() {}
+ if snapshot.usesWorkspaceDir() {
+ tmpDir, err := makeWorkspaceDir(ctx, snapshot.workspace, snapshot)
+ if err != nil {
+ return nil, err
+ }
+ pe.WorkingDir = tmpDir
+ cleanup = func() {
+ os.RemoveAll(tmpDir) // ignore error
+ }
+ } else {
+ pe.WorkingDir = snapshot.view.rootURI.Filename()
+ }
- return func() {
- cleanupInvocation()
- release()
- }, nil
+ return cleanup, nil
}
func (s *importsState) refreshProcessEnv() {
diff --git a/internal/lsp/cache/keys.go b/gopls/internal/lsp/cache/keys.go
similarity index 100%
rename from internal/lsp/cache/keys.go
rename to gopls/internal/lsp/cache/keys.go
diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go
new file mode 100644
index 00000000000..f79109a31d1
--- /dev/null
+++ b/gopls/internal/lsp/cache/load.go
@@ -0,0 +1,788 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+var loadID uint64 // atomic identifier for loads
+
+// errNoPackages indicates that a load query matched no packages.
+var errNoPackages = errors.New("no packages returned")
+
+// load calls packages.Load for the given scopes, updating package metadata,
+// import graph, and mapped files with the result.
+//
+// The resulting error may wrap the moduleErrorMap error type, representing
+// errors associated with specific modules.
+func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadScope) (err error) {
+ id := atomic.AddUint64(&loadID, 1)
+ eventName := fmt.Sprintf("go/packages.Load #%d", id) // unique name for logging
+
+ var query []string
+ var containsDir bool // for logging
+
+	// Keep track of module query -> module path so that we can later correlate
+	// query errors with the modules they refer to.
+ moduleQueries := make(map[string]string)
+ for _, scope := range scopes {
+ switch scope := scope.(type) {
+ case packageLoadScope:
+ // The only time we pass package paths is when we're doing a
+ // partial workspace load. In those cases, the paths came back from
+ // go list and should already be GOPATH-vendorized when appropriate.
+ query = append(query, string(scope))
+
+ case fileLoadScope:
+ uri := span.URI(scope)
+ fh := s.FindFile(uri)
+ if fh == nil || s.View().FileKind(fh) != source.Go {
+ // Don't try to load a file that doesn't exist, or isn't a go file.
+ continue
+ }
+ contents, err := fh.Read()
+ if err != nil {
+ continue
+ }
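+			// A standalone file (e.g. one tagged //go:build ignore) is loaded as
+			// its own ad-hoc package by passing its path directly; otherwise a
+			// file= query loads the package containing the file.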
+ if isStandaloneFile(contents, s.view.Options().StandaloneTags) {
+ query = append(query, uri.Filename())
+ } else {
+ query = append(query, fmt.Sprintf("file=%s", uri.Filename()))
+ }
+
+ case moduleLoadScope:
+ switch scope {
+ case "std", "cmd":
+ query = append(query, string(scope))
+ default:
+ modQuery := fmt.Sprintf("%s/...", scope)
+ query = append(query, modQuery)
+ moduleQueries[modQuery] = string(scope)
+ }
+
+ case viewLoadScope:
+ // If we are outside of GOPATH, a module, or some other known
+ // build system, don't load subdirectories.
+ if !s.ValidBuildConfiguration() {
+ query = append(query, "./")
+ } else {
+ query = append(query, "./...")
+ }
+
+ default:
+ panic(fmt.Sprintf("unknown scope type %T", scope))
+ }
+ switch scope.(type) {
+ case viewLoadScope, moduleLoadScope:
+ containsDir = true
+ }
+ }
+ if len(query) == 0 {
+ return nil
+ }
+ sort.Strings(query) // for determinism
+
+ ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query))
+ defer done()
+
+ flags := source.LoadWorkspace
+ if allowNetwork {
+ flags |= source.AllowNetwork
+ }
+ _, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{
+ WorkingDir: s.view.rootURI.Filename(),
+ })
+ if err != nil {
+ return err
+ }
+
+ // Set a last resort deadline on packages.Load since it calls the go
+ // command, which may hang indefinitely if it has a bug. golang/go#42132
+ // and golang/go#42255 have more context.
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+ defer cancel()
+
+ cfg := s.config(ctx, inv)
+ pkgs, err := packages.Load(cfg, query...)
+ cleanup()
+
+ // If the context was canceled, return early. Otherwise, we might be
+ // type-checking an incomplete result. Check the context directly,
+ // because go/packages adds extra information to the error.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // This log message is sought for by TestReloadOnlyOnce.
+ labels := append(source.SnapshotLabels(s), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
+ if err != nil {
+ event.Error(ctx, eventName, err, labels...)
+ } else {
+ event.Log(ctx, eventName, labels...)
+ }
+
+ if len(pkgs) == 0 {
+ if err == nil {
+ err = errNoPackages
+ }
+ return fmt.Errorf("packages.Load error: %w", err)
+ }
+
+ moduleErrs := make(map[string][]packages.Error) // module path -> errors
+ filterer := buildFilterer(s.view.rootURI.Filename(), s.view.gomodcache, s.view.Options())
+ newMetadata := make(map[PackageID]*source.Metadata)
+ for _, pkg := range pkgs {
+ // The Go command returns synthetic list results for module queries that
+ // encountered module errors.
+ //
+ // For example, given a module path a.mod, we'll query for "a.mod/..." and
+ // the go command will return a package named "a.mod/..." holding this
+ // error. Save it for later interpretation.
+ //
+ // See golang/go#50862 for more details.
+ if mod := moduleQueries[pkg.PkgPath]; mod != "" { // a synthetic result for the unloadable module
+ if len(pkg.Errors) > 0 {
+ moduleErrs[mod] = pkg.Errors
+ }
+ continue
+ }
+
+ if !containsDir || s.view.Options().VerboseOutput {
+ event.Log(ctx, eventName, append(
+ source.SnapshotLabels(s),
+ tag.Package.Of(pkg.ID),
+ tag.Files.Of(pkg.CompiledGoFiles))...)
+ }
+
+ // Ignore packages with no sources, since we will never be able to
+ // correctly invalidate that metadata.
+ if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 {
+ continue
+ }
+ // Special case for the builtin package, as it has no dependencies.
+ if pkg.PkgPath == "builtin" {
+ if len(pkg.GoFiles) != 1 {
+ return fmt.Errorf("only expected 1 file for builtin, got %v", len(pkg.GoFiles))
+ }
+ s.setBuiltin(pkg.GoFiles[0])
+ continue
+ }
+ // Skip test main packages.
+ if isTestMain(pkg, s.view.gocache) {
+ continue
+ }
+ // Skip filtered packages. They may be added anyway if they're
+ // dependencies of non-filtered packages.
+ //
+ // TODO(rfindley): why exclude metadata arbitrarily here? It should be safe
+ // to capture all metadata.
+ if s.view.allFilesExcluded(pkg, filterer) {
+ continue
+ }
+ if err := buildMetadata(ctx, pkg, cfg, query, newMetadata, nil); err != nil {
+ return err
+ }
+ }
+
+ s.mu.Lock()
+
+ // Compute the minimal metadata updates (for Clone)
+ // required to preserve this invariant:
+ // for all id, s.packages.Get(id).m == s.meta.metadata[id].
+ updates := make(map[PackageID]*source.Metadata)
+ for _, m := range newMetadata {
+ if existing := s.meta.metadata[m.ID]; existing == nil {
+ updates[m.ID] = m
+ delete(s.shouldLoad, m.ID)
+ }
+ }
+ // Assert the invariant.
+ s.packages.Range(func(k, v interface{}) {
+ pk, ph := k.(packageKey), v.(*packageHandle)
+ if s.meta.metadata[pk.id] != ph.m {
+ // TODO(adonovan): upgrade to unconditional panic after Jan 2023.
+ bug.Reportf("inconsistent metadata")
+ }
+ })
+
+ event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates)))
+
+ s.meta = s.meta.Clone(updates)
+ s.resetIsActivePackageLocked()
+
+ s.workspacePackages = computeWorkspacePackagesLocked(s, s.meta)
+ s.dumpWorkspace("load")
+ s.mu.Unlock()
+
+ // Recompute the workspace package handle for any packages we invalidated.
+ //
+ // This is (putatively) an optimization since handle
+ // construction prefetches the content of all Go source files.
+ // It is safe to ignore errors, or omit this step entirely.
+ for _, m := range updates {
+ s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)) // ignore error
+ }
+
+ if len(moduleErrs) > 0 {
+ return &moduleErrorMap{moduleErrs}
+ }
+
+ return nil
+}
+
+type moduleErrorMap struct {
+ errs map[string][]packages.Error // module path -> errors
+}
+
+func (m *moduleErrorMap) Error() string {
+ var paths []string // sort for stability
+ for path, errs := range m.errs {
+ if len(errs) > 0 { // should always be true, but be cautious
+ paths = append(paths, path)
+ }
+ }
+ sort.Strings(paths)
+
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%d modules have errors:\n", len(paths))
+ for _, path := range paths {
+ fmt.Fprintf(&buf, "\t%s:%s\n", path, m.errs[path][0].Msg)
+ }
+
+ return buf.String()
+}
+
+// workspaceLayoutError returns an error describing a misconfiguration of the
+// workspace, along with related diagnostics.
+//
+// The unusual argument ordering of results is intentional: if the resulting
+// error is nil, so must be the resulting diagnostics.
+//
+// If ctx is canceled, it may return ctx.Err(), nil.
+//
+// TODO(rfindley): separate workspace diagnostics from critical workspace
+// errors.
+func (s *snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.Diagnostic) {
+ // TODO(rfindley): do we really not want to show a critical error if the user
+ // has no go.mod files?
+ if len(s.workspace.getKnownModFiles()) == 0 {
+ return nil, nil
+ }
+
+ // TODO(rfindley): both of the checks below should be delegated to the workspace.
+ if s.view.effectiveGO111MODULE() == off {
+ return nil, nil
+ }
+ if s.workspace.moduleSource != legacyWorkspace {
+ return nil, nil
+ }
+
+ // If the user has one module per view, there is nothing to warn about.
+ if s.ValidBuildConfiguration() && len(s.workspace.getKnownModFiles()) == 1 {
+ return nil, nil
+ }
+
+ // Apply diagnostics about the workspace configuration to relevant open
+ // files.
+ openFiles := s.openFiles()
+
+ // If the snapshot does not have a valid build configuration, it may be
+ // that the user has opened a directory that contains multiple modules.
+	// Check for that and warn about it.
+ if !s.ValidBuildConfiguration() {
+ var msg string
+ if s.view.goversion >= 18 {
+ msg = `gopls was not able to find modules in your workspace.
+When outside of GOPATH, gopls needs to know which modules you are working on.
+You can fix this by opening your workspace to a folder inside a Go module, or
+by using a go.work file to specify multiple modules.
+See the documentation for more information on setting up your workspace:
+https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
+ } else {
+ msg = `gopls requires a module at the root of your workspace.
+You can work with multiple modules by upgrading to Go 1.18 or later, and using
+go workspaces (go.work files).
+See the documentation for more information on setting up your workspace:
+https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
+ }
+ return fmt.Errorf(msg), s.applyCriticalErrorToFiles(ctx, msg, openFiles)
+ }
+
+ // If the user has one active go.mod file, they may still be editing files
+ // in nested modules. Check the module of each open file and add warnings
+ // that the nested module must be opened as a workspace folder.
+ if len(s.workspace.ActiveModFiles()) == 1 {
+ // Get the active root go.mod file to compare against.
+ var rootMod string
+ for uri := range s.workspace.ActiveModFiles() {
+ rootMod = uri.Filename()
+ }
+ rootDir := filepath.Dir(rootMod)
+ nestedModules := make(map[string][]source.VersionedFileHandle)
+ for _, fh := range openFiles {
+ mod, err := findRootPattern(ctx, filepath.Dir(fh.URI().Filename()), "go.mod", s)
+ if err != nil {
+ if ctx.Err() != nil {
+ return ctx.Err(), nil
+ }
+ continue
+ }
+ if mod == "" {
+ continue
+ }
+ if mod != rootMod && source.InDir(rootDir, mod) {
+ modDir := filepath.Dir(mod)
+ nestedModules[modDir] = append(nestedModules[modDir], fh)
+ }
+ }
+ var multiModuleMsg string
+ if s.view.goversion >= 18 {
+ multiModuleMsg = `To work on multiple modules at once, please use a go.work file.
+See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.`
+ } else {
+ multiModuleMsg = `To work on multiple modules at once, please upgrade to Go 1.18 and use a go.work file.
+See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.`
+ }
+ // Add a diagnostic to each file in a nested module to mark it as
+ // "orphaned". Don't show a general diagnostic in the progress bar,
+ // because the user may still want to edit a file in a nested module.
+ var srcDiags []*source.Diagnostic
+ for modDir, uris := range nestedModules {
+ msg := fmt.Sprintf("This file is in %s, which is a nested module in the %s module.\n%s", modDir, rootMod, multiModuleMsg)
+ srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...)
+ }
+ if len(srcDiags) != 0 {
+ return fmt.Errorf("You have opened a nested module.\n%s", multiModuleMsg), srcDiags
+ }
+ }
+ return nil, nil
+}
+
+func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.VersionedFileHandle) []*source.Diagnostic {
+ var srcDiags []*source.Diagnostic
+ for _, fh := range files {
+ // Place the diagnostics on the package or module declarations.
+ var rng protocol.Range
+ switch s.view.FileKind(fh) {
+ case source.Go:
+ if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil {
+ // Check that we have a valid `package foo` range to use for positioning the error.
+ if pgf.File.Package.IsValid() && pgf.File.Name != nil && pgf.File.Name.End().IsValid() {
+ pkgDecl := span.NewRange(pgf.Tok, pgf.File.Package, pgf.File.Name.End())
+ if spn, err := pkgDecl.Span(); err == nil {
+ rng, _ = pgf.Mapper.Range(spn)
+ }
+ }
+ }
+ case source.Mod:
+ if pmf, err := s.ParseMod(ctx, fh); err == nil {
+ if mod := pmf.File.Module; mod != nil && mod.Syntax != nil {
+ rng, _ = pmf.Mapper.OffsetRange(mod.Syntax.Start.Byte, mod.Syntax.End.Byte)
+ }
+ }
+ }
+ srcDiags = append(srcDiags, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: msg,
+ })
+ }
+ return srcDiags
+}
+
+// getWorkspaceDir returns the URI for the workspace directory
+// associated with this snapshot. The workspace directory is a
+// temporary directory containing the go.mod file computed from all
+// active modules.
+func (s *snapshot) getWorkspaceDir(ctx context.Context) (span.URI, error) {
+ s.mu.Lock()
+ dir, err := s.workspaceDir, s.workspaceDirErr
+ s.mu.Unlock()
+ if dir == "" && err == nil { // cache miss
+ dir, err = makeWorkspaceDir(ctx, s.workspace, s)
+ s.mu.Lock()
+ s.workspaceDir, s.workspaceDirErr = dir, err
+ s.mu.Unlock()
+ }
+ return span.URIFromPath(dir), err
+}
+
+// makeWorkspaceDir creates a temporary directory containing a go.mod
+// and go.sum file for each module in the workspace.
+// Note: snapshot's mutex must be unlocked for it to satisfy FileSource.
+func makeWorkspaceDir(ctx context.Context, workspace *workspace, fs source.FileSource) (string, error) {
+ file, err := workspace.modFile(ctx, fs)
+ if err != nil {
+ return "", err
+ }
+ modContent, err := file.Format()
+ if err != nil {
+ return "", err
+ }
+ sumContent, err := workspace.sumFile(ctx, fs)
+ if err != nil {
+ return "", err
+ }
+ tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod")
+ if err != nil {
+ return "", err
+ }
+ for name, content := range map[string][]byte{
+ "go.mod": modContent,
+ "go.sum": sumContent,
+ } {
+ if err := ioutil.WriteFile(filepath.Join(tmpdir, name), content, 0644); err != nil {
+ os.RemoveAll(tmpdir) // ignore error
+ return "", err
+ }
+ }
+ return tmpdir, nil
+}
+
+// buildMetadata populates the updates map with metadata updates to
+// apply, based on the given pkg. It recurses through pkg.Imports to ensure that
+// metadata exists for all dependencies.
+func buildMetadata(ctx context.Context, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*source.Metadata, path []PackageID) error {
+ // Allow for multiple ad-hoc packages in the workspace (see #47584).
+ pkgPath := PackagePath(pkg.PkgPath)
+ id := PackageID(pkg.ID)
+ if source.IsCommandLineArguments(id) {
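+		// Append the query to the ID and package path so that ad-hoc packages
+		// created by different load queries remain distinct.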
+ suffix := ":" + strings.Join(query, ",")
+ id = PackageID(pkg.ID + suffix)
+ pkgPath = PackagePath(pkg.PkgPath + suffix)
+ }
+
+ if _, ok := updates[id]; ok {
+ // If we've already seen this dependency, there may be an import cycle, or
+ // we may have reached the same package transitively via distinct paths.
+ // Check the path to confirm.
+
+ // TODO(rfindley): this doesn't look sufficient. Any single piece of new
+ // metadata could theoretically introduce import cycles in the metadata
+ // graph. What's the point of this limited check here (and is it even
+ // possible to get an import cycle in data from go/packages)? Consider
+ // simply returning, so that this function need not return an error.
+ //
+ // We should consider doing a more complete guard against import cycles
+ // elsewhere.
+ for _, prev := range path {
+ if prev == id {
+ return fmt.Errorf("import cycle detected: %q", id)
+ }
+ }
+ return nil
+ }
+
+ // Recreate the metadata rather than reusing it to avoid locking.
+ m := &source.Metadata{
+ ID: id,
+ PkgPath: pkgPath,
+ Name: PackageName(pkg.Name),
+ ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
+ TypesSizes: pkg.TypesSizes,
+ Config: cfg,
+ Module: pkg.Module,
+ DepsErrors: packagesinternal.GetDepsErrors(pkg),
+ }
+ updates[id] = m
+
+ for _, err := range pkg.Errors {
+ // Filter out parse errors from go list. We'll get them when we
+ // actually parse, and buggy overlay support may generate spurious
+ // errors. (See TestNewModule_Issue38207.)
+ if strings.Contains(err.Msg, "expected '") {
+ continue
+ }
+ m.Errors = append(m.Errors, err)
+ }
+
+ for _, filename := range pkg.CompiledGoFiles {
+ uri := span.URIFromPath(filename)
+ m.CompiledGoFiles = append(m.CompiledGoFiles, uri)
+ }
+ for _, filename := range pkg.GoFiles {
+ uri := span.URIFromPath(filename)
+ m.GoFiles = append(m.GoFiles, uri)
+ }
+
+ depsByImpPath := make(map[ImportPath]PackageID)
+ depsByPkgPath := make(map[PackagePath]PackageID)
+ for importPath, imported := range pkg.Imports {
+ importPath := ImportPath(importPath)
+
+ // It is not an invariant that importPath == imported.PkgPath.
+ // For example, package "net" imports "golang.org/x/net/dns/dnsmessage"
+ // which refers to the package whose ID and PkgPath are both
+ // "vendor/golang.org/x/net/dns/dnsmessage". Notice the ImportMap,
+ // which maps ImportPaths to PackagePaths:
+ //
+ // $ go list -json net vendor/golang.org/x/net/dns/dnsmessage
+ // {
+ // "ImportPath": "net",
+ // "Name": "net",
+ // "Imports": [
+ // "C",
+ // "vendor/golang.org/x/net/dns/dnsmessage",
+ // "vendor/golang.org/x/net/route",
+ // ...
+ // ],
+ // "ImportMap": {
+ // "golang.org/x/net/dns/dnsmessage": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "golang.org/x/net/route": "vendor/golang.org/x/net/route"
+ // },
+ // ...
+ // }
+ // {
+ // "ImportPath": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "Name": "dnsmessage",
+ // ...
+ // }
+ //
+ // (Beware that, for historical reasons, go list uses
+ // the JSON field "ImportPath" for the package's
+ // path--effectively the linker symbol prefix.)
+ //
+ // The example above is slightly special to go list
+ // because it's in the std module. Otherwise,
+ // vendored modules are simply modules whose directory
+ // is vendor/ instead of GOMODCACHE, and the
+ // import path equals the package path.
+ //
+ // But in GOPATH (non-module) mode, it's possible for
+ // package vendoring to cause a non-identity ImportMap,
+ // as in this example:
+ //
+ // $ cd $HOME/src
+ // $ find . -type f
+ // ./b/b.go
+ // ./vendor/example.com/a/a.go
+ // $ cat ./b/b.go
+ // package b
+ // import _ "example.com/a"
+ // $ cat ./vendor/example.com/a/a.go
+ // package a
+ // $ GOPATH=$HOME GO111MODULE=off go list -json ./b | grep -A2 ImportMap
+ // "ImportMap": {
+ // "example.com/a": "vendor/example.com/a"
+ // },
+
+ // Don't remember any imports with significant errors.
+ //
+ // The len=0 condition is a heuristic check for imports of
+ // non-existent packages (for which go/packages will create
+ // an edge to a synthesized node). The heuristic is unsound
+ // because some valid packages have zero files, for example,
+ // a directory containing only the file p_test.go defines an
+ // empty package p.
+ // TODO(adonovan): clarify this. Perhaps go/packages should
+ // report which nodes were synthesized.
+ if importPath != "unsafe" && len(imported.CompiledGoFiles) == 0 {
+ depsByImpPath[importPath] = "" // missing
+ continue
+ }
+
+ depsByImpPath[importPath] = PackageID(imported.ID)
+ depsByPkgPath[PackagePath(imported.PkgPath)] = PackageID(imported.ID)
+ if err := buildMetadata(ctx, imported, cfg, query, updates, append(path, id)); err != nil {
+ event.Error(ctx, "error in dependency", err)
+ }
+ }
+ m.DepsByImpPath = depsByImpPath
+ m.DepsByPkgPath = depsByPkgPath
+
+ return nil
+}
+
+// containsPackageLocked reports whether m is a workspace package for the
+// snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsPackageLocked(s *snapshot, m *source.Metadata) bool {
+ // In legacy workspace mode, or if a package does not have an associated
+ // module, a package is considered inside the workspace if any of its files
+ // are under the workspace root (and not excluded).
+ //
+ // Otherwise if the package has a module it must be an active module (as
+ // defined by the module root or go.work file) and at least one file must not
+ // be filtered out by directoryFilters.
+ if m.Module != nil && s.workspace.moduleSource != legacyWorkspace {
+ modURI := span.URIFromPath(m.Module.GoMod)
+ _, ok := s.workspace.activeModFiles[modURI]
+ if !ok {
+ return false
+ }
+
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ filterFunc := s.view.filterFunc()
+ for uri := range uris {
+ // Don't use view.contains here. go.work files may include modules
+ // outside of the workspace folder.
+ if !strings.Contains(string(uri), "/vendor/") && !filterFunc(uri) {
+ return true
+ }
+ }
+ return false
+ }
+
+ return containsFileInWorkspaceLocked(s, m)
+}
+
+// containsOpenFileLocked reports whether any file referenced by m is open in
+// the snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsOpenFileLocked(s *snapshot, m *source.Metadata) bool {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ for uri := range uris {
+ if s.isOpenLocked(uri) {
+ return true
+ }
+ }
+ return false
+}
+
+// containsFileInWorkspaceLocked reports whether m contains any file inside the
+// workspace of the snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsFileInWorkspaceLocked(s *snapshot, m *source.Metadata) bool {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ for uri := range uris {
+ // In order for a package to be considered for the workspace, at least one
+ // file must be contained in the workspace and not vendored.
+
+ // The package's files are in this view. It may be a workspace package.
+ // Vendored packages are not likely to be interesting to the user.
+ if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) {
+ return true
+ }
+ }
+ return false
+}
+
+// computeWorkspacePackagesLocked computes workspace packages in the snapshot s
+// for the given metadata graph.
+//
+// s.mu must be held while calling this function.
+func computeWorkspacePackagesLocked(s *snapshot, meta *metadataGraph) map[PackageID]PackagePath {
+ workspacePackages := make(map[PackageID]PackagePath)
+ for _, m := range meta.metadata {
+ if !containsPackageLocked(s, m) {
+ continue
+ }
+
+ if source.IsCommandLineArguments(m.ID) {
+ // If all the files contained in m have a real package, we don't need to
+ // keep m as a workspace package.
+ if allFilesHaveRealPackages(meta, m) {
+ continue
+ }
+
+ // We only care about command-line-arguments packages if they are still
+ // open.
+ if !containsOpenFileLocked(s, m) {
+ continue
+ }
+ }
+
+ switch {
+ case m.ForTest == "":
+ // A normal package.
+ workspacePackages[m.ID] = m.PkgPath
+ case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath:
+ // The test variant of some workspace package or its x_test.
+ // To load it, we need to load the non-test variant with -test.
+ //
+ // Notably, this excludes intermediate test variants from workspace
+ // packages.
+ workspacePackages[m.ID] = m.ForTest
+ }
+ }
+ return workspacePackages
+}
+
+// allFilesHaveRealPackages reports whether all files referenced by m are
+// contained in a "real" package (not command-line-arguments).
+//
+// If m is valid but all "real" packages containing any file are invalid, this
+// function returns false.
+//
+// If m is not a command-line-arguments package, this is trivially true.
+func allFilesHaveRealPackages(g *metadataGraph, m *source.Metadata) bool {
+ n := len(m.CompiledGoFiles)
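+	// The full slice expression (m.CompiledGoFiles[0:n:n]) forces append to
+	// copy rather than mutate m.CompiledGoFiles.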
+checkURIs:
+ for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) {
+ for _, id := range g.ids[uri] {
+ if !source.IsCommandLineArguments(id) {
+ continue checkURIs
+ }
+ }
+ return false
+ }
+ return true
+}
+
+func isTestMain(pkg *packages.Package, gocache string) bool {
+ // Test mains must have an import path that ends with ".test".
+ if !strings.HasSuffix(pkg.PkgPath, ".test") {
+ return false
+ }
+ // Test main packages are always named "main".
+ if pkg.Name != "main" {
+ return false
+ }
+ // Test mains always have exactly one GoFile that is in the build cache.
+	if len(pkg.GoFiles) != 1 {
+ return false
+ }
+ if !source.InDir(gocache, pkg.GoFiles[0]) {
+ return false
+ }
+ return true
+}
diff --git a/gopls/internal/lsp/cache/maps.go b/gopls/internal/lsp/cache/maps.go
new file mode 100644
index 00000000000..5cbcaf78a97
--- /dev/null
+++ b/gopls/internal/lsp/cache/maps.go
@@ -0,0 +1,218 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/persistent"
+)
+
+// TODO(euroelessar): Use generics once support for go1.17 is dropped.
+
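+// filesMap is a persistent map from file URIs to versioned file handles.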
+type filesMap struct {
+ impl *persistent.Map
+}
+
+// uriLessInterface is the < relation for "any" values containing span.URIs.
+func uriLessInterface(a, b interface{}) bool {
+ return a.(span.URI) < b.(span.URI)
+}
+
+func newFilesMap() filesMap {
+ return filesMap{
+ impl: persistent.NewMap(uriLessInterface),
+ }
+}
+
+func (m filesMap) Clone() filesMap {
+ return filesMap{
+ impl: m.impl.Clone(),
+ }
+}
+
+func (m filesMap) Destroy() {
+ m.impl.Destroy()
+}
+
+func (m filesMap) Get(key span.URI) (source.VersionedFileHandle, bool) {
+ value, ok := m.impl.Get(key)
+ if !ok {
+ return nil, false
+ }
+ return value.(source.VersionedFileHandle), true
+}
+
+func (m filesMap) Range(do func(key span.URI, value source.VersionedFileHandle)) {
+ m.impl.Range(func(key, value interface{}) {
+ do(key.(span.URI), value.(source.VersionedFileHandle))
+ })
+}
+
+func (m filesMap) Set(key span.URI, value source.VersionedFileHandle) {
+ m.impl.Set(key, value, nil)
+}
+
+func (m filesMap) Delete(key span.URI) {
+ m.impl.Delete(key)
+}
+
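+// parseKeyLessInterface reports whether a sorts before b; both must hold parseKey values.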
+func parseKeyLessInterface(a, b interface{}) bool {
+ return parseKeyLess(a.(parseKey), b.(parseKey))
+}
+
+func parseKeyLess(a, b parseKey) bool {
+ if a.mode != b.mode {
+ return a.mode < b.mode
+ }
+ if a.file.Hash != b.file.Hash {
+ return a.file.Hash.Less(b.file.Hash)
+ }
+ return a.file.URI < b.file.URI
+}
+
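+// isActivePackageCacheMap is a persistent map from package IDs to a cached
+// "is active package" result.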
+type isActivePackageCacheMap struct {
+ impl *persistent.Map
+}
+
+func newIsActivePackageCacheMap() isActivePackageCacheMap {
+ return isActivePackageCacheMap{
+ impl: persistent.NewMap(func(a, b interface{}) bool {
+ return a.(PackageID) < b.(PackageID)
+ }),
+ }
+}
+
+func (m isActivePackageCacheMap) Clone() isActivePackageCacheMap {
+ return isActivePackageCacheMap{
+ impl: m.impl.Clone(),
+ }
+}
+
+func (m isActivePackageCacheMap) Destroy() {
+ m.impl.Destroy()
+}
+
+func (m isActivePackageCacheMap) Get(key PackageID) (bool, bool) {
+ value, ok := m.impl.Get(key)
+ if !ok {
+ return false, false
+ }
+ return value.(bool), true
+}
+
+func (m isActivePackageCacheMap) Set(key PackageID, value bool) {
+ m.impl.Set(key, value, nil)
+}
+
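+// parseKeysByURIMap is a persistent map from file URIs to the parse keys recorded for them.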
+type parseKeysByURIMap struct {
+ impl *persistent.Map
+}
+
+func newParseKeysByURIMap() parseKeysByURIMap {
+ return parseKeysByURIMap{
+ impl: persistent.NewMap(uriLessInterface),
+ }
+}
+
+func (m parseKeysByURIMap) Clone() parseKeysByURIMap {
+ return parseKeysByURIMap{
+ impl: m.impl.Clone(),
+ }
+}
+
+func (m parseKeysByURIMap) Destroy() {
+ m.impl.Destroy()
+}
+
+func (m parseKeysByURIMap) Get(key span.URI) ([]parseKey, bool) {
+ value, ok := m.impl.Get(key)
+ if !ok {
+ return nil, false
+ }
+ return value.([]parseKey), true
+}
+
+func (m parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) {
+ m.impl.Range(func(key, value interface{}) {
+ do(key.(span.URI), value.([]parseKey))
+ })
+}
+
+func (m parseKeysByURIMap) Set(key span.URI, value []parseKey) {
+ m.impl.Set(key, value, nil)
+}
+
+func (m parseKeysByURIMap) Delete(key span.URI) {
+ m.impl.Delete(key)
+}
+
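+// packageKeyLessInterface reports whether x sorts before y; both must hold packageKey values.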
+func packageKeyLessInterface(x, y interface{}) bool {
+ return packageKeyLess(x.(packageKey), y.(packageKey))
+}
+
+func packageKeyLess(x, y packageKey) bool {
+ if x.mode != y.mode {
+ return x.mode < y.mode
+ }
+ return x.id < y.id
+}
+
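+// knownDirsSet is a persistent set of known directory URIs.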
+type knownDirsSet struct {
+ impl *persistent.Map
+}
+
+func newKnownDirsSet() knownDirsSet {
+ return knownDirsSet{
+ impl: persistent.NewMap(func(a, b interface{}) bool {
+ return a.(span.URI) < b.(span.URI)
+ }),
+ }
+}
+
+func (s knownDirsSet) Clone() knownDirsSet {
+ return knownDirsSet{
+ impl: s.impl.Clone(),
+ }
+}
+
+func (s knownDirsSet) Destroy() {
+ s.impl.Destroy()
+}
+
+func (s knownDirsSet) Contains(key span.URI) bool {
+ _, ok := s.impl.Get(key)
+ return ok
+}
+
+func (s knownDirsSet) Range(do func(key span.URI)) {
+ s.impl.Range(func(key, value interface{}) {
+ do(key.(span.URI))
+ })
+}
+
+func (s knownDirsSet) SetAll(other knownDirsSet) {
+ s.impl.SetAll(other.impl)
+}
+
+func (s knownDirsSet) Insert(key span.URI) {
+ s.impl.Set(key, nil, nil)
+}
+
+func (s knownDirsSet) Remove(key span.URI) {
+ s.impl.Delete(key)
+}
+
+// analysisKeyLessInterface is the less-than relation for analysisKey
+// values wrapped in an interface.
+func analysisKeyLessInterface(a, b interface{}) bool {
+ x, y := a.(analysisKey), b.(analysisKey)
+ if cmp := strings.Compare(x.analyzerNames, y.analyzerNames); cmp != 0 {
+ return cmp < 0
+ }
+ return x.pkgid < y.pkgid
+}
diff --git a/internal/lsp/cache/mod.go b/gopls/internal/lsp/cache/mod.go
similarity index 60%
rename from internal/lsp/cache/mod.go
rename to gopls/internal/lsp/cache/mod.go
index 5ac199bd96b..757bb5e8fca 100644
--- a/internal/lsp/cache/mod.go
+++ b/gopls/internal/lsp/cache/mod.go
@@ -14,162 +14,166 @@ import (
"golang.org/x/mod/modfile"
"golang.org/x/mod/module"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
)
-type parseModHandle struct {
- handle *memoize.Handle
-}
+// ParseMod parses a go.mod file, using a cache. It may return partial results and an error.
+func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+ uri := fh.URI()
-type parseModData struct {
- parsed *source.ParsedModule
+ s.mu.Lock()
+ entry, hit := s.parseModHandles.Get(uri)
+ s.mu.Unlock()
- // err is any error encountered while parsing the file.
- err error
-}
+ type parseModResult struct {
+ parsed *source.ParsedModule
+ err error
+ }
-func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) {
- v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
+ // cache miss?
+ if !hit {
+ promise, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} {
+ parsed, err := parseModImpl(ctx, fh)
+ return parseModResult{parsed, err}
+ })
+
+ entry = promise
+ s.mu.Lock()
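+		// The release function runs when this map entry is later removed or
+		// replaced, releasing the promise's reference.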
+ s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
if err != nil {
return nil, err
}
- data := v.(*parseModData)
- return data.parsed, data.err
+ res := v.(parseModResult)
+ return res.parsed, res.err
}
-func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*source.ParsedModule, error) {
- if handle := s.getParseModHandle(modFH.URI()); handle != nil {
- return handle.parse(ctx, s)
- }
- h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
- _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
- defer done()
+// parseModImpl parses the go.mod file whose name and contents are in fh.
+// It may return partial results and an error.
+func parseModImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+ _, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI()))
+ defer done()
- contents, err := modFH.Read()
- if err != nil {
- return &parseModData{err: err}
- }
- m := protocol.NewColumnMapper(modFH.URI(), contents)
- file, parseErr := modfile.Parse(modFH.URI().Filename(), contents, nil)
- // Attempt to convert the error to a standardized parse error.
- var parseErrors []*source.Diagnostic
- if parseErr != nil {
- mfErrList, ok := parseErr.(modfile.ErrorList)
- if !ok {
- return &parseModData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
- }
- for _, mfErr := range mfErrList {
- rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
- if err != nil {
- return &parseModData{err: err}
- }
- parseErrors = append(parseErrors, &source.Diagnostic{
- URI: modFH.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ParseError,
- Message: mfErr.Err.Error(),
- })
+ contents, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ m := protocol.NewColumnMapper(fh.URI(), contents)
+ file, parseErr := modfile.Parse(fh.URI().Filename(), contents, nil)
+ // Attempt to convert the error to a standardized parse error.
+ var parseErrors []*source.Diagnostic
+ if parseErr != nil {
+ mfErrList, ok := parseErr.(modfile.ErrorList)
+ if !ok {
+ return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+ }
+ for _, mfErr := range mfErrList {
+ rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte)
+ if err != nil {
+ return nil, err
}
+ parseErrors = append(parseErrors, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ParseError,
+ Message: mfErr.Err.Error(),
+ })
}
- return &parseModData{
- parsed: &source.ParsedModule{
- URI: modFH.URI(),
- Mapper: m,
- File: file,
- ParseErrors: parseErrors,
- },
- err: parseErr,
- }
- }, nil)
+ }
+ return &source.ParsedModule{
+ URI: fh.URI(),
+ Mapper: m,
+ File: file,
+ ParseErrors: parseErrors,
+ }, parseErr
+}
+
+// ParseWork parses a go.work file, using a cache. It may return partial results and an error.
+// TODO(adonovan): move to new work.go file.
+func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+ uri := fh.URI()
- pmh := &parseModHandle{handle: h}
s.mu.Lock()
- s.parseModHandles[modFH.URI()] = pmh
+ entry, hit := s.parseWorkHandles.Get(uri)
s.mu.Unlock()
- return pmh.parse(ctx, s)
-}
-
-type parseWorkHandle struct {
- handle *memoize.Handle
-}
+ type parseWorkResult struct {
+ parsed *source.ParsedWorkFile
+ err error
+ }
-type parseWorkData struct {
- parsed *source.ParsedWorkFile
+ // cache miss?
+ if !hit {
+ handle, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} {
+ parsed, err := parseWorkImpl(ctx, fh)
+ return parseWorkResult{parsed, err}
+ })
- // err is any error encountered while parsing the file.
- err error
-}
+ entry = handle
+ s.mu.Lock()
+ s.parseWorkHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
+ }
-func (mh *parseWorkHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedWorkFile, error) {
- v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
if err != nil {
return nil, err
}
- data := v.(*parseWorkData)
- return data.parsed, data.err
+ res := v.(parseWorkResult)
+ return res.parsed, res.err
}
-func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*source.ParsedWorkFile, error) {
- if handle := s.getParseWorkHandle(modFH.URI()); handle != nil {
- return handle.parse(ctx, s)
- }
- h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
- _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
- defer done()
+// parseWorkImpl parses a go.work file. It may return partial results and an error.
+func parseWorkImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+ _, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI()))
+ defer done()
- contents, err := modFH.Read()
- if err != nil {
- return &parseWorkData{err: err}
- }
- m := protocol.NewColumnMapper(modFH.URI(), contents)
- file, parseErr := modfile.ParseWork(modFH.URI().Filename(), contents, nil)
- // Attempt to convert the error to a standardized parse error.
- var parseErrors []*source.Diagnostic
- if parseErr != nil {
- mfErrList, ok := parseErr.(modfile.ErrorList)
- if !ok {
- return &parseWorkData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
- }
- for _, mfErr := range mfErrList {
- rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
- if err != nil {
- return &parseWorkData{err: err}
- }
- parseErrors = append(parseErrors, &source.Diagnostic{
- URI: modFH.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ParseError,
- Message: mfErr.Err.Error(),
- })
+ contents, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ m := protocol.NewColumnMapper(fh.URI(), contents)
+ file, parseErr := modfile.ParseWork(fh.URI().Filename(), contents, nil)
+ // Attempt to convert the error to a standardized parse error.
+ var parseErrors []*source.Diagnostic
+ if parseErr != nil {
+ mfErrList, ok := parseErr.(modfile.ErrorList)
+ if !ok {
+ return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+ }
+ for _, mfErr := range mfErrList {
+ rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte)
+ if err != nil {
+ return nil, err
}
+ parseErrors = append(parseErrors, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ParseError,
+ Message: mfErr.Err.Error(),
+ })
}
- return &parseWorkData{
- parsed: &source.ParsedWorkFile{
- URI: modFH.URI(),
- Mapper: m,
- File: file,
- ParseErrors: parseErrors,
- },
- err: parseErr,
- }
- }, nil)
-
- pwh := &parseWorkHandle{handle: h}
- s.mu.Lock()
- s.parseWorkHandles[modFH.URI()] = pwh
- s.mu.Unlock()
-
- return pwh.parse(ctx, s)
+ }
+ return &source.ParsedWorkFile{
+ URI: fh.URI(),
+ Mapper: m,
+ File: file,
+ ParseErrors: parseErrors,
+ }, parseErr
}
// goSum reads the go.sum file for the go.mod file at modURI, if it exists. If
@@ -182,7 +186,7 @@ func (s *snapshot) goSum(ctx context.Context, modURI span.URI) []byte {
var sumFH source.FileHandle = s.FindFile(sumURI)
if sumFH == nil {
var err error
- sumFH, err = s.view.session.cache.getFile(ctx, sumURI)
+ sumFH, err = s.view.cache.getFile(ctx, sumURI)
if err != nil {
return nil
}
@@ -198,102 +202,82 @@ func sumFilename(modURI span.URI) string {
return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum"
}
-// modKey is uniquely identifies cached data for `go mod why` or dependencies
-// to upgrade.
-type modKey struct {
- sessionID, env, view string
- mod source.FileIdentity
- verb modAction
-}
+// ModWhy returns the "go mod why" result for each module named in a
+// require statement in the go.mod file.
+// TODO(adonovan): move to new mod_why.go file.
+func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
+ uri := fh.URI()
-type modAction int
+ if s.View().FileKind(fh) != source.Mod {
+ return nil, fmt.Errorf("%s is not a go.mod file", uri)
+ }
-const (
- why modAction = iota
- upgrade
-)
+ s.mu.Lock()
+ entry, hit := s.modWhyHandles.Get(uri)
+ s.mu.Unlock()
-type modWhyHandle struct {
- handle *memoize.Handle
-}
+ type modWhyResult struct {
+ why map[string]string
+ err error
+ }
-type modWhyData struct {
- // why keeps track of the `go mod why` results for each require statement
- // in the go.mod file.
- why map[string]string
+ // cache miss?
+ if !hit {
+ handle := memoize.NewPromise("modWhy", func(ctx context.Context, arg interface{}) interface{} {
+ why, err := modWhyImpl(ctx, arg.(*snapshot), fh)
+ return modWhyResult{why, err}
+ })
- err error
-}
+ entry = handle
+ s.mu.Lock()
+ s.modWhyHandles.Set(uri, entry, nil)
+ s.mu.Unlock()
+ }
-func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) {
- v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot)
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
if err != nil {
return nil, err
}
- data := v.(*modWhyData)
- return data.why, data.err
+ res := v.(modWhyResult)
+ return res.why, res.err
}
-func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
- if s.View().FileKind(fh) != source.Mod {
- return nil, fmt.Errorf("%s is not a go.mod file", fh.URI())
+// modWhyImpl returns the result of "go mod why -m" on the specified go.mod file.
+func modWhyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) (map[string]string, error) {
+ ctx, done := event.Start(ctx, "cache.ModWhy", tag.URI.Of(fh.URI()))
+ defer done()
+
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, err
}
- if handle := s.getModWhyHandle(fh.URI()); handle != nil {
- return handle.why(ctx, s)
+ // No requires to explain.
+ if len(pm.File.Require) == 0 {
+ return nil, nil // empty result
}
- key := modKey{
- sessionID: s.view.session.id,
- env: hashEnv(s),
- mod: fh.FileIdentity(),
- view: s.view.rootURI.Filename(),
- verb: why,
+ // Run `go mod why` on all the dependencies.
+ inv := &gocommand.Invocation{
+ Verb: "mod",
+ Args: []string{"why", "-m"},
+ WorkingDir: filepath.Dir(fh.URI().Filename()),
}
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI()))
- defer done()
-
- snapshot := arg.(*snapshot)
-
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- return &modWhyData{err: err}
- }
- // No requires to explain.
- if len(pm.File.Require) == 0 {
- return &modWhyData{}
- }
- // Run `go mod why` on all the dependencies.
- inv := &gocommand.Invocation{
- Verb: "mod",
- Args: []string{"why", "-m"},
- WorkingDir: filepath.Dir(fh.URI().Filename()),
- }
- for _, req := range pm.File.Require {
- inv.Args = append(inv.Args, req.Mod.Path)
- }
- stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
- if err != nil {
- return &modWhyData{err: err}
- }
- whyList := strings.Split(stdout.String(), "\n\n")
- if len(whyList) != len(pm.File.Require) {
- return &modWhyData{
- err: fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)),
- }
- }
- why := make(map[string]string, len(pm.File.Require))
- for i, req := range pm.File.Require {
- why[req.Mod.Path] = whyList[i]
- }
- return &modWhyData{why: why}
- }, nil)
-
- mwh := &modWhyHandle{handle: h}
- s.mu.Lock()
- s.modWhyHandles[fh.URI()] = mwh
- s.mu.Unlock()
-
- return mwh.why(ctx, s)
+ for _, req := range pm.File.Require {
+ inv.Args = append(inv.Args, req.Mod.Path)
+ }
+ stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
+ if err != nil {
+ return nil, err
+ }
+ whyList := strings.Split(stdout.String(), "\n\n")
+ if len(whyList) != len(pm.File.Require) {
+ return nil, fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require))
+ }
+ why := make(map[string]string, len(pm.File.Require))
+ for i, req := range pm.File.Require {
+ why[req.Mod.Path] = whyList[i]
+ }
+ return why, nil
}
// extractGoCommandError tries to parse errors that come from the go command
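
The ModWhy and ParseWork rewrites above both follow the same promise-per-URI caching shape: look up a handle under the snapshot mutex, install a memoize promise on a miss, then await the shared result outside the lock so that concurrent requests for the same URI coalesce into a single computation. The following is a minimal, self-contained sketch of that shape using a sync.Once-based stand-in for memoize.Promise (the real promise additionally supports context cancellation and reference counting, which this toy omits):

```go
package main

import (
	"fmt"
	"sync"
)

// promise is a tiny stand-in for memoize.Promise: its function runs at most
// once, and every waiter receives the same result.
type promise struct {
	once    sync.Once
	compute func() (string, error)
	val     string
	err     error
}

func (p *promise) await() (string, error) {
	p.once.Do(func() { p.val, p.err = p.compute() })
	return p.val, p.err
}

// handleMap mimics the per-URI handle maps (modWhyHandles, parseWorkHandles, ...).
type handleMap struct {
	mu      sync.Mutex
	handles map[string]*promise
}

func (m *handleMap) get(uri string, compute func() (string, error)) (string, error) {
	m.mu.Lock()
	p, hit := m.handles[uri]
	if !hit { // cache miss: install a new promise for this URI
		p = &promise{compute: compute}
		m.handles[uri] = p
	}
	m.mu.Unlock()

	// Await outside the lock so concurrent callers share one computation.
	return p.await()
}

func main() {
	m := &handleMap{handles: make(map[string]*promise)}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _ := m.get("file:///p/go.mod", func() (string, error) {
				fmt.Println("computing once")
				return "result", nil
			})
			fmt.Println(v)
		}()
	}
	wg.Wait()
}
```
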
diff --git a/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go
similarity index 66%
rename from internal/lsp/cache/mod_tidy.go
rename to gopls/internal/lsp/cache/mod_tidy.go
index aa525e7413d..fa30df18e36 100644
--- a/internal/lsp/cache/mod_tidy.go
+++ b/gopls/internal/lsp/cache/mod_tidy.go
@@ -8,170 +8,141 @@ import (
"context"
"fmt"
"go/ast"
+ "go/token"
"io/ioutil"
"os"
"path/filepath"
- "sort"
"strconv"
"strings"
"golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
)
-type modTidyKey struct {
- sessionID string
- env string
- gomod source.FileIdentity
- imports string
- unsavedOverlays string
- view string
-}
+// ModTidy returns the go.mod file that would be obtained by running
+// "go mod tidy". Concurrent requests are combined into a single command.
+func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
+ uri := pm.URI
+ if pm.File == nil {
+ return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", uri)
+ }
-type modTidyHandle struct {
- handle *memoize.Handle
-}
+ s.mu.Lock()
+ entry, hit := s.modTidyHandles.Get(uri)
+ s.mu.Unlock()
-type modTidyData struct {
- tidied *source.TidiedModule
- err error
-}
+ type modTidyResult struct {
+ tidied *source.TidiedModule
+ err error
+ }
+
+ // Cache miss?
+ if !hit {
+ // If the file handle is an overlay, it may not be written to disk.
+ // The go.mod file has to be on disk for `go mod tidy` to work.
+ // TODO(rfindley): is this still true with Go 1.16 overlay support?
+ fh, err := s.GetFile(ctx, pm.URI)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := fh.(*overlay); ok {
+ if info, _ := os.Stat(uri.Filename()); info == nil {
+ return nil, source.ErrNoModOnDisk
+ }
+ }
+
+ if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
+ return &source.TidiedModule{
+ Diagnostics: criticalErr.Diagnostics,
+ }, nil
+ }
+ if ctx.Err() != nil { // must check ctx after GetCriticalError
+ return nil, ctx.Err()
+ }
+
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
-func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) {
- v, err := mth.handle.Get(ctx, snapshot.generation, snapshot)
+ handle := memoize.NewPromise("modTidy", func(ctx context.Context, arg interface{}) interface{} {
+ tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm)
+ return modTidyResult{tidied, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.modTidyHandles.Set(uri, entry, nil)
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
if err != nil {
return nil, err
}
- data := v.(*modTidyData)
- return data.tidied, data.err
+ res := v.(modTidyResult)
+ return res.tidied, res.err
}
-func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
- if pm.File == nil {
- return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", pm.URI)
- }
- if handle := s.getModTidyHandle(pm.URI); handle != nil {
- return handle.tidy(ctx, s)
+// modTidyImpl runs "go mod tidy" on a go.mod file.
+func modTidyImpl(ctx context.Context, snapshot *snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) {
+ ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename))
+ defer done()
+
+ inv := &gocommand.Invocation{
+ Verb: "mod",
+ Args: []string{"tidy"},
+ WorkingDir: filepath.Dir(filename),
}
- fh, err := s.GetFile(ctx, pm.URI)
+ // TODO(adonovan): ensure that unsaved overlays are passed through to 'go'.
+ tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
if err != nil {
return nil, err
}
- // If the file handle is an overlay, it may not be written to disk.
- // The go.mod file has to be on disk for `go mod tidy` to work.
- if _, ok := fh.(*overlay); ok {
- if info, _ := os.Stat(fh.URI().Filename()); info == nil {
- return nil, source.ErrNoModOnDisk
- }
- }
- if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
- return &source.TidiedModule{
- Diagnostics: criticalErr.DiagList,
- }, nil
+ // Keep the temporary go.mod file around long enough to parse it.
+ defer cleanup()
+
+ if _, err := snapshot.view.gocmdRunner.Run(ctx, *inv); err != nil {
+ return nil, err
}
- workspacePkgs, err := s.workspacePackageHandles(ctx)
+
+ // Go directly to disk to get the temporary mod file,
+ // since it is always on disk.
+ tempContents, err := ioutil.ReadFile(tmpURI.Filename())
if err != nil {
return nil, err
}
- importHash, err := s.hashImports(ctx, workspacePkgs)
+ ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
if err != nil {
+ // We do not need to worry about the temporary file's parse errors
+ // since it has been "tidied".
return nil, err
}
- s.mu.Lock()
- overlayHash := hashUnsavedOverlays(s.files)
- s.mu.Unlock()
-
- key := modTidyKey{
- sessionID: s.view.session.id,
- view: s.view.folder.Filename(),
- imports: importHash,
- unsavedOverlays: overlayHash,
- gomod: fh.FileIdentity(),
- env: hashEnv(s),
- }
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(fh.URI()))
- defer done()
-
- snapshot := arg.(*snapshot)
- inv := &gocommand.Invocation{
- Verb: "mod",
- Args: []string{"tidy"},
- WorkingDir: filepath.Dir(fh.URI().Filename()),
- }
- tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
- if err != nil {
- return &modTidyData{err: err}
- }
- // Keep the temporary go.mod file around long enough to parse it.
- defer cleanup()
-
- if _, err := s.view.session.gocmdRunner.Run(ctx, *inv); err != nil {
- return &modTidyData{err: err}
- }
- // Go directly to disk to get the temporary mod file, since it is
- // always on disk.
- tempContents, err := ioutil.ReadFile(tmpURI.Filename())
- if err != nil {
- return &modTidyData{err: err}
- }
- ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
- if err != nil {
- // We do not need to worry about the temporary file's parse errors
- // since it has been "tidied".
- return &modTidyData{err: err}
- }
- // Compare the original and tidied go.mod files to compute errors and
- // suggested fixes.
- diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs)
- if err != nil {
- return &modTidyData{err: err}
- }
- return &modTidyData{
- tidied: &source.TidiedModule{
- Diagnostics: diagnostics,
- TidiedContent: tempContents,
- },
- }
- }, nil)
-
- mth := &modTidyHandle{handle: h}
- s.mu.Lock()
- s.modTidyHandles[fh.URI()] = mth
- s.mu.Unlock()
-
- return mth.tidy(ctx, s)
-}
-
-func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) (string, error) {
- seen := map[string]struct{}{}
- var imports []string
- for _, ph := range wsPackages {
- for _, imp := range ph.imports(ctx, s) {
- if _, ok := seen[imp]; !ok {
- imports = append(imports, imp)
- seen[imp] = struct{}{}
- }
- }
+ // Compare the original and tidied go.mod files to compute errors and
+ // suggested fixes.
+ diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal)
+ if err != nil {
+ return nil, err
}
- sort.Strings(imports)
- hashed := strings.Join(imports, ",")
- return hashContents([]byte(hashed)), nil
+
+ return &source.TidiedModule{
+ Diagnostics: diagnostics,
+ TidiedContent: tempContents,
+ }, nil
}
// modTidyDiagnostics computes the differences between the original and tidied
// go.mod files to produce diagnostic and suggested fixes. Some diagnostics
// may appear on the Go files that import packages from missing modules.
-func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *source.ParsedModule, ideal *modfile.File, workspacePkgs []*packageHandle) (diagnostics []*source.Diagnostic, err error) {
+func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) {
// First, determine which modules are unused and which are missing from the
// original go.mod file.
var (
@@ -220,15 +191,20 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc
}
// Add diagnostics for missing modules anywhere they are imported in the
// workspace.
- for _, ph := range workspacePkgs {
+ // TODO(adonovan): opt: opportunities for parallelism abound.
+ for _, m := range snapshot.workspaceMetadata() {
+ // Read both lists of files of this package, in parallel.
+ goFiles, compiledGoFiles, err := readGoFiles(ctx, snapshot, m)
+ if err != nil {
+ return nil, err
+ }
+
missingImports := map[string]*modfile.Require{}
// If -mod=readonly is not set we may have successfully imported
// packages from missing modules. Otherwise they'll be in
// MissingDependencies. Combine both.
- importedPkgs := ph.imports(ctx, snapshot)
-
- for _, imp := range importedPkgs {
+ for imp := range parseImports(ctx, snapshot, goFiles) {
if req, ok := missing[imp]; ok {
missingImports[imp] = req
break
@@ -257,8 +233,8 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc
if len(missingImports) == 0 {
continue
}
- for _, pgh := range ph.compiledGoFiles {
- pgf, err := snapshot.ParseGo(ctx, pgh.file, source.ParseHeader)
+ for _, goFile := range compiledGoFiles {
+ pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader)
if err != nil {
continue
}
@@ -287,7 +263,7 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc
if !ok {
return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path)
}
- srcErr, err := missingModuleForImport(snapshot, m, imp, req, fixes)
+ srcErr, err := missingModuleForImport(pgf.Tok, m, imp, req, fixes)
if err != nil {
return nil, err
}
@@ -309,7 +285,7 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc
// unusedDiagnostic returns a source.Diagnostic for an unused require.
func unusedDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) {
- rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End)
+ rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
if err != nil {
return nil, err
}
@@ -334,8 +310,8 @@ func unusedDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, onlyDiagno
// directnessDiagnostic extracts errors when a dependency is labeled indirect when
// it should be direct and vice versa.
-func directnessDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, computeEdits diff.ComputeEdits) (*source.Diagnostic, error) {
- rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End)
+func directnessDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, computeEdits source.DiffFunction) (*source.Diagnostic, error) {
+ rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
if err != nil {
return nil, err
}
@@ -347,8 +323,8 @@ func directnessDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, comput
if comments := req.Syntax.Comment(); comments != nil && len(comments.Suffix) > 0 {
end := comments.Suffix[0].Start
end.LineRune += len(comments.Suffix[0].Token)
- end.Byte += len([]byte(comments.Suffix[0].Token))
- rng, err = rangeFromPositions(m, comments.Suffix[0].Start, end)
+ end.Byte += len(comments.Suffix[0].Token)
+ rng, err = m.OffsetRange(comments.Suffix[0].Start.Byte, end.Byte)
if err != nil {
return nil, err
}
@@ -381,7 +357,7 @@ func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*so
if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil {
start, end := pm.File.Module.Syntax.Span()
var err error
- rng, err = rangeFromPositions(pm.Mapper, start, end)
+ rng, err = pm.Mapper.OffsetRange(start.Byte, end.Byte)
if err != nil {
return nil, err
}
@@ -407,7 +383,7 @@ func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*so
// switchDirectness gets the edits needed to change an indirect dependency to
// direct and vice versa.
-func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdits diff.ComputeEdits) ([]protocol.TextEdit, error) {
+func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdits source.DiffFunction) ([]protocol.TextEdit, error) {
// We need a private copy of the parsed go.mod file, since we're going to
// modify it.
copied, err := modfile.Parse("", m.Content, nil)
@@ -441,24 +417,17 @@ func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdi
return nil, err
}
// Calculate the edits to be made due to the change.
- diff, err := computeEdits(m.URI, string(m.Content), string(newContent))
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(m, diff)
+ edits := computeEdits(string(m.Content), string(newContent))
+ return source.ToProtocolEdits(m, edits)
}
// missingModuleForImport creates an error for a given import path that comes
// from a missing module.
-func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
+func missingModuleForImport(file *token.File, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
if req.Syntax == nil {
return nil, fmt.Errorf("no syntax for %v", req)
}
- spn, err := span.NewRange(snapshot.FileSet(), imp.Path.Pos(), imp.Path.End()).Span()
- if err != nil {
- return nil, err
- }
- rng, err := m.Range(spn)
+ rng, err := m.PosRange(imp.Path.Pos(), imp.Path.End())
if err != nil {
return nil, err
}
@@ -472,14 +441,6 @@ func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper,
}, nil
}
-func rangeFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (protocol.Range, error) {
- spn, err := spanFromPositions(m, s, e)
- if err != nil {
- return protocol.Range{}, err
- }
- return m.Range(spn)
-}
-
func spanFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (span.Span, error) {
toPoint := func(offset int) (span.Point, error) {
l, c, err := span.ToPosition(m.TokFile, offset)
@@ -498,3 +459,26 @@ func spanFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (span.Sp
}
return span.New(m.URI, start, end), nil
}
+
+// parseImports parses the headers of the specified files and returns
+// the set of strings that appear in import declarations within
+// GoFiles. Errors are ignored.
+//
+// (We can't simply use Metadata.Imports because it is based on
+// CompiledGoFiles, after cgo processing.)
+func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) map[string]bool {
+ s.mu.Lock() // peekOrParse requires a locked snapshot (!)
+ defer s.mu.Unlock()
+ seen := make(map[string]bool)
+ for _, file := range files {
+ f, err := peekOrParse(ctx, s, file, source.ParseHeader)
+ if err != nil {
+ continue
+ }
+ for _, spec := range f.File.Imports {
+ path, _ := strconv.Unquote(spec.Path.Value)
+ seen[path] = true
+ }
+ }
+ return seen
+}
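
parseImports above collects import paths by parsing only the headers of a package's GoFiles (source.ParseHeader), because Metadata.Imports is derived from CompiledGoFiles after cgo processing. Outside of gopls's FileHandle machinery, the same extraction can be sketched with the standard library's ImportsOnly parse mode; the file contents below are made up for illustration:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strconv"
)

// importPaths returns the set of import paths declared in a Go source file,
// reading only the header (package clause and imports), much as parseImports
// does via source.ParseHeader. Malformed import strings are skipped.
func importPaths(filename string, src []byte) (map[string]bool, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, filename, src, parser.ImportsOnly)
	if err != nil {
		return nil, err
	}
	seen := make(map[string]bool)
	for _, spec := range f.Imports {
		path, err := strconv.Unquote(spec.Path.Value)
		if err != nil {
			continue
		}
		seen[path] = true
	}
	return seen, nil
}

func main() {
	src := []byte(`package p

import (
	"fmt"
	"golang.org/x/mod/modfile"
)
`)
	paths, err := importPaths("p.go", src)
	if err != nil {
		panic(err)
	}
	fmt.Println(paths) // e.g. map[fmt:true golang.org/x/mod/modfile:true]
}
```
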
diff --git a/gopls/internal/lsp/cache/mod_vuln.go b/gopls/internal/lsp/cache/mod_vuln.go
new file mode 100644
index 00000000000..b16c8c57ba7
--- /dev/null
+++ b/gopls/internal/lsp/cache/mod_vuln.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/gopls/internal/vulncheck"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// ModVuln returns import vulnerability analysis for the given go.mod URI.
+// Concurrent requests are combined into a single command.
+func (s *snapshot) ModVuln(ctx context.Context, modURI span.URI) (*govulncheck.Result, error) {
+ s.mu.Lock()
+ entry, hit := s.modVulnHandles.Get(modURI)
+ s.mu.Unlock()
+
+ type modVuln struct {
+ result *govulncheck.Result
+ err error
+ }
+
+ // Cache miss?
+ if !hit {
+ // If the file handle is an overlay, it may not be written to disk.
+ // The go.mod file has to be on disk for vulncheck to work.
+ //
+ // TODO(hyangah): use overlays for vulncheck.
+ fh, err := s.GetFile(ctx, modURI)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := fh.(*overlay); ok {
+ if info, _ := os.Stat(modURI.Filename()); info == nil {
+ return nil, source.ErrNoModOnDisk
+ }
+ }
+
+ handle := memoize.NewPromise("modVuln", func(ctx context.Context, arg interface{}) interface{} {
+ result, err := modVulnImpl(ctx, arg.(*snapshot), modURI)
+ return modVuln{result, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.modVulnHandles.Set(modURI, entry, nil)
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(modVuln)
+ return res.result, res.err
+}
+
+func modVulnImpl(ctx context.Context, s *snapshot, uri span.URI) (*govulncheck.Result, error) {
+ if vulncheck.VulnerablePackages == nil {
+ return &govulncheck.Result{}, nil
+ }
+ fh, err := s.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ return vulncheck.VulnerablePackages(ctx, s, fh)
+}
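
Like ModTidy, ModVuln refuses to run when the go.mod file exists only as an unsaved editor overlay: it checks for a file on disk with os.Stat and returns source.ErrNoModOnDisk otherwise, since the external tools it invokes read go.mod from the filesystem. The check itself amounts to the following (the paths here are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// onDisk reports whether path currently exists on disk. ModTidy and ModVuln
// apply the same test (a nil os.FileInfo) before invoking external tools.
func onDisk(path string) bool {
	info, _ := os.Stat(path)
	return info != nil
}

func main() {
	fmt.Println(onDisk("go.mod"))           // true if run inside a module
	fmt.Println(onDisk("no-such-file.mod")) // false
}
```
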
diff --git a/internal/lsp/cache/os_darwin.go b/gopls/internal/lsp/cache/os_darwin.go
similarity index 100%
rename from internal/lsp/cache/os_darwin.go
rename to gopls/internal/lsp/cache/os_darwin.go
diff --git a/internal/lsp/cache/os_windows.go b/gopls/internal/lsp/cache/os_windows.go
similarity index 100%
rename from internal/lsp/cache/os_windows.go
rename to gopls/internal/lsp/cache/os_windows.go
diff --git a/internal/lsp/cache/parse.go b/gopls/internal/lsp/cache/parse.go
similarity index 77%
rename from internal/lsp/cache/parse.go
rename to gopls/internal/lsp/cache/parse.go
index 668c437f5c9..83f18dabee4 100644
--- a/internal/lsp/cache/parse.go
+++ b/gopls/internal/lsp/cache/parse.go
@@ -18,15 +18,13 @@ import (
"strconv"
"strings"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/safetoken"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
)
// parseKey uniquely identifies a parsed Go file.
@@ -35,233 +33,118 @@ type parseKey struct {
mode source.ParseMode
}
-type parseGoHandle struct {
- handle *memoize.Handle
- file source.FileHandle
- mode source.ParseMode
-}
-
-type parseGoData struct {
- parsed *source.ParsedGoFile
-
- // If true, we adjusted the AST to make it type check better, and
- // it may not match the source code.
- fixed bool
- err error // any other errors
-}
-
-func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode source.ParseMode) *parseGoHandle {
- key := parseKey{
- file: fh.FileIdentity(),
- mode: mode,
- }
- if pgh := s.getGoFile(key); pgh != nil {
- return pgh
- }
- parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
- return parseGo(ctx, snapshot.FileSet(), fh, mode)
- }, nil)
-
- pgh := &parseGoHandle{
- handle: parseHandle,
- file: fh,
- mode: mode,
- }
- return s.addGoFile(key, pgh)
-}
-
-func (pgh *parseGoHandle) String() string {
- return pgh.file.URI().Filename()
-}
-
+// ParseGo parses the file whose contents are provided by fh, using a cache.
+// The resulting tree may have been fixed up.
+//
+// Token position information will be added to the snapshot's FileSet.
+//
+// The parser mode must not be ParseExported: that mode is used during
+// type checking to destructively trim the tree to reduce work,
+// which is not safe for values from a shared cache.
+// TODO(adonovan): opt: shouldn't parseGoImpl do the trimming?
+// Then we can cache the result since it would never change.
+//
+// TODO(adonovan): in the absence of any way to add an existing
+// token.File to a new FileSet (see go.dev/issue/53200), caching ASTs
+// implies a global FileSet.
func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
- pgf, _, err := s.parseGo(ctx, fh, mode)
- return pgf, err
-}
-
-func (s *snapshot) parseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, bool, error) {
if mode == source.ParseExported {
panic("only type checking should use Exported")
}
- pgh := s.parseGoHandle(ctx, fh, mode)
- d, err := pgh.handle.Get(ctx, s.generation, s)
- if err != nil {
- return nil, false, err
+
+ key := parseKey{
+ file: fh.FileIdentity(),
+ mode: mode,
}
- data := d.(*parseGoData)
- return data.parsed, data.fixed, data.err
-}
-// cachedPGF returns the cached ParsedGoFile for the given ParseMode, if it
-// has already been computed. Otherwise, it returns nil.
-func (s *snapshot) cachedPGF(fh source.FileHandle, mode source.ParseMode) *source.ParsedGoFile {
- key := parseKey{file: fh.FileIdentity(), mode: mode}
- if pgh := s.getGoFile(key); pgh != nil {
- cached := pgh.handle.Cached(s.generation)
- if cached != nil {
- cached := cached.(*parseGoData)
- if cached.parsed != nil {
- return cached.parsed
+ s.mu.Lock()
+ entry, hit := s.parsedGoFiles.Get(key)
+ s.mu.Unlock()
+
+ // cache miss?
+ if !hit {
+ promise, release := s.store.Promise(key, func(ctx context.Context, arg interface{}) interface{} {
+ parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode)
+ return parseGoResult{parsed, err}
+ })
+
+ s.mu.Lock()
+ // Check cache again in case another thread got there first.
+ if prev, ok := s.parsedGoFiles.Get(key); ok {
+ entry = prev
+ release()
+ } else {
+ entry = promise
+ s.parsedGoFiles.Set(key, entry, func(_, _ interface{}) { release() })
+
+ // In order to correctly invalidate the key above, we must keep track of
+ // the parse key just created.
+ //
+ // TODO(rfindley): use a two-level map URI->parseKey->promise.
+ keys, _ := s.parseKeysByURI.Get(fh.URI())
+
+ // Only record the new key if it doesn't exist. This is overly cautious:
+ // we should only be setting the key if it doesn't exist. However, this
+ // logic will be replaced soon, and erring on the side of caution seemed
+ // wise.
+ foundKey := false
+ for _, existing := range keys {
+ if existing == key {
+ foundKey = true
+ break
+ }
+ }
+ if !foundKey {
+ keys = append(keys, key)
+ s.parseKeysByURI.Set(fh.URI(), keys)
}
}
+ s.mu.Unlock()
}
- return nil
-}
-
-type astCacheKey struct {
- pkg packageHandleKey
- uri span.URI
-}
-
-func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) {
- pkg := spkg.(*pkg)
- pkgHandle := s.getPackage(pkg.m.ID, pkg.mode)
- if pkgHandle == nil {
- return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID)
- }
- tok := s.FileSet().File(pos)
- if tok == nil {
- return nil, fmt.Errorf("no file for pos %v", pos)
- }
- pgf, err := pkg.File(span.URIFromPath(tok.Name()))
- if err != nil {
- return nil, err
- }
- astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} {
- return buildASTCache(pgf)
- }, nil)
- d, err := astHandle.Get(ctx, s.generation, s)
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
if err != nil {
return nil, err
}
- data := d.(*astCacheData)
- if data.err != nil {
- return nil, data.err
- }
- return data, nil
+ res := v.(parseGoResult)
+ return res.parsed, res.err
}
-func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) {
- data, err := s.astCacheData(ctx, spkg, pos)
- if err != nil {
- return nil, err
+// peekParseGoLocked peeks at the cache used by ParseGo but does not
+// populate it or wait for other threads to do so. On cache hit, it returns
+// the cache result of parseGoImpl; otherwise it returns (nil, nil).
+func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
+ entry, hit := s.parsedGoFiles.Get(parseKey{fh.FileIdentity(), mode})
+ if !hit {
+ return nil, nil // no-one has requested this file
}
- return data.posToDecl[pos], nil
-}
-
-func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos token.Pos) (*ast.Field, error) {
- data, err := s.astCacheData(ctx, spkg, pos)
- if err != nil {
- return nil, err
+ v := entry.(*memoize.Promise).Cached()
+ if v == nil {
+ return nil, nil // parsing is still in progress
}
- return data.posToField[pos], nil
-}
-
-type astCacheData struct {
- err error
-
- posToDecl map[token.Pos]ast.Decl
- posToField map[token.Pos]*ast.Field
+ res := v.(parseGoResult)
+ return res.parsed, res.err
}
-// buildASTCache builds caches to aid in quickly going from the typed
-// world to the syntactic world.
-func buildASTCache(pgf *source.ParsedGoFile) *astCacheData {
- var (
- // path contains all ancestors, including n.
- path []ast.Node
- // decls contains all ancestors that are decls.
- decls []ast.Decl
- )
-
- data := &astCacheData{
- posToDecl: make(map[token.Pos]ast.Decl),
- posToField: make(map[token.Pos]*ast.Field),
- }
-
- ast.Inspect(pgf.File, func(n ast.Node) bool {
- if n == nil {
- lastP := path[len(path)-1]
- path = path[:len(path)-1]
- if len(decls) > 0 && decls[len(decls)-1] == lastP {
- decls = decls[:len(decls)-1]
- }
- return false
- }
-
- path = append(path, n)
-
- switch n := n.(type) {
- case *ast.Field:
- addField := func(f ast.Node) {
- if f.Pos().IsValid() {
- data.posToField[f.Pos()] = n
- if len(decls) > 0 {
- data.posToDecl[f.Pos()] = decls[len(decls)-1]
- }
- }
- }
-
- // Add mapping for *ast.Field itself. This handles embedded
- // fields which have no associated *ast.Ident name.
- addField(n)
-
- // Add mapping for each field name since you can have
- // multiple names for the same type expression.
- for _, name := range n.Names {
- addField(name)
- }
-
- // Also map "X" in "...X" to the containing *ast.Field. This
- // makes it easy to format variadic signature params
- // properly.
- if elips, ok := n.Type.(*ast.Ellipsis); ok && elips.Elt != nil {
- addField(elips.Elt)
- }
- case *ast.FuncDecl:
- decls = append(decls, n)
-
- if n.Name != nil && n.Name.Pos().IsValid() {
- data.posToDecl[n.Name.Pos()] = n
- }
- case *ast.GenDecl:
- decls = append(decls, n)
-
- for _, spec := range n.Specs {
- switch spec := spec.(type) {
- case *ast.TypeSpec:
- if spec.Name != nil && spec.Name.Pos().IsValid() {
- data.posToDecl[spec.Name.Pos()] = n
- }
- case *ast.ValueSpec:
- for _, id := range spec.Names {
- if id != nil && id.Pos().IsValid() {
- data.posToDecl[id.Pos()] = n
- }
- }
- }
- }
- }
-
- return true
- })
-
- return data
+// parseGoResult holds the result of a call to parseGoImpl.
+type parseGoResult struct {
+ parsed *source.ParsedGoFile
+ err error
}
-func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) *parseGoData {
+// parseGoImpl parses the Go source file whose content is provided by fh.
+func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename()))
defer done()
ext := filepath.Ext(fh.URI().Filename())
if ext != ".go" && ext != "" { // files generated by cgo have no extension
- return &parseGoData{err: fmt.Errorf("cannot parse non-Go file %s", fh.URI())}
+ return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI())
}
src, err := fh.Read()
if err != nil {
- return &parseGoData{err: err}
+ return nil, err
}
parserMode := parser.AllErrors | parser.ParseComments
@@ -278,7 +161,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod
tok := fset.File(file.Pos())
if tok == nil {
- // file.Pos is the location of the package declaration. If there was
+ // file.Pos is the location of the package declaration (issue #53202). If there was
// none, we can't find the token.File that ParseFile created, and we
// have no choice but to recreate it.
tok = fset.AddFile(fh.URI().Filename(), -1, len(src))
@@ -289,7 +172,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod
// If there were parse errors, attempt to fix them up.
if parseErr != nil {
// Fix any badly parsed parts of the AST.
- fixed = fixAST(ctx, file, tok, src)
+ fixed = fixAST(file, tok, src)
for i := 0; i < 10; i++ {
// Fix certain syntax errors that render the file unparseable.
@@ -302,13 +185,8 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod
// it is likely we got stuck in a loop somehow. Log out a diff
// of the last changes we made to aid in debugging.
if i == 9 {
- edits, err := myers.ComputeEdits(fh.URI(), string(src), string(newSrc))
- if err != nil {
- event.Error(ctx, "error generating fixSrc diff", err, tag.File.Of(tok.Name()))
- } else {
- unified := diff.ToUnified("before", "after", string(src), edits)
- event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name()))
- }
+ unified := diff.Unified("before", "after", string(src), string(newSrc))
+ event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name()))
}
newFile, _ := parser.ParseFile(fset, fh.URI().Filename(), newSrc, parserMode)
@@ -318,27 +196,25 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod
src = newSrc
tok = fset.File(file.Pos())
- fixed = fixAST(ctx, file, tok, src)
+ fixed = fixAST(file, tok, src)
}
}
}
- return &parseGoData{
- parsed: &source.ParsedGoFile{
- URI: fh.URI(),
- Mode: mode,
- Src: src,
- File: file,
- Tok: tok,
- Mapper: &protocol.ColumnMapper{
- URI: fh.URI(),
- TokFile: tok,
- Content: src,
- },
- ParseErr: parseErr,
+ return &source.ParsedGoFile{
+ URI: fh.URI(),
+ Mode: mode,
+ Src: src,
+ Fixed: fixed,
+ File: file,
+ Tok: tok,
+ Mapper: &protocol.ColumnMapper{
+ URI: fh.URI(),
+ TokFile: tok,
+ Content: src,
},
- fixed: fixed,
- }
+ ParseErr: parseErr,
+ }, nil
}
// An unexportedFilter removes as much unexported AST from a set of Files as possible.
@@ -383,7 +259,7 @@ func (f *unexportedFilter) keep(ident *ast.Ident) bool {
func (f *unexportedFilter) filterDecl(decl ast.Decl) bool {
switch decl := decl.(type) {
case *ast.FuncDecl:
- if ident := recvIdent(decl); ident != nil && !f.keep(ident) {
+ if ident := source.RecvIdent(decl.Recv); ident != nil && !f.keep(ident) {
return false
}
return f.keep(decl.Name)
@@ -425,6 +301,8 @@ func (f *unexportedFilter) filterSpec(spec ast.Spec) bool {
}
switch typ := spec.Type.(type) {
case *ast.StructType:
+ // In practice this no longer filters anything;
+ // see comment at StructType case in recordUses.
f.filterFieldList(typ.Fields)
case *ast.InterfaceType:
f.filterFieldList(typ.Methods)
@@ -456,7 +334,7 @@ func (f *unexportedFilter) recordUses(file *ast.File) {
switch decl := decl.(type) {
case *ast.FuncDecl:
// Ignore methods on dropped types.
- if ident := recvIdent(decl); ident != nil && !f.keep(ident) {
+ if ident := source.RecvIdent(decl.Recv); ident != nil && !f.keep(ident) {
break
}
// Ignore functions with dropped names.
@@ -480,9 +358,19 @@ func (f *unexportedFilter) recordUses(file *ast.File) {
case *ast.TypeSpec:
switch typ := spec.Type.(type) {
case *ast.StructType:
- f.recordFieldUses(false, typ.Fields)
+ // We used to trim unexported fields but this
+ // had observable consequences. For example,
+ // the 'fieldalignment' analyzer would compute
+ // incorrect diagnostics from the size and
+ // offsets, and the UI hover information for
+ // types was inaccurate. So now we keep them.
+ if typ.Fields != nil {
+ for _, field := range typ.Fields.List {
+ f.recordIdents(field.Type)
+ }
+ }
case *ast.InterfaceType:
- f.recordFieldUses(false, typ.Methods)
+ f.recordInterfaceMethodUses(typ.Methods)
}
}
}
@@ -490,21 +378,6 @@ func (f *unexportedFilter) recordUses(file *ast.File) {
}
}
-// recvIdent returns the identifier of a method receiver, e.g. *int.
-func recvIdent(decl *ast.FuncDecl) *ast.Ident {
- if decl.Recv == nil || len(decl.Recv.List) == 0 {
- return nil
- }
- x := decl.Recv.List[0].Type
- if star, ok := x.(*ast.StarExpr); ok {
- x = star.X
- }
- if ident, ok := x.(*ast.Ident); ok {
- return ident
- }
- return nil
-}
-
// recordIdents records unexported identifiers in an Expr in uses.
// These may be types, e.g. in map[key]value, function names, e.g. in foo(),
// or simple variable references. References that will be discarded, such
@@ -531,37 +404,32 @@ func (f *unexportedFilter) recordIdents(x ast.Expr) {
}
// recordFuncType records the types mentioned by a function type.
-func (f *unexportedFilter) recordFuncType(x *ast.FuncType) {
- f.recordFieldUses(true, x.Params)
- f.recordFieldUses(true, x.Results)
-}
-
-// recordFieldUses records unexported identifiers used in fields, which may be
-// struct members, interface members, or function parameter/results.
-func (f *unexportedFilter) recordFieldUses(isParams bool, fields *ast.FieldList) {
- if fields == nil {
- return
- }
- for _, field := range fields.List {
- if isParams {
- // Parameter types of retained functions need to be retained.
+func (f *unexportedFilter) recordFuncType(fn *ast.FuncType) {
+ // Parameter and result types of retained functions need to be retained.
+ if fn.Params != nil {
+ for _, field := range fn.Params.List {
f.recordIdents(field.Type)
- continue
- }
- if ft, ok := field.Type.(*ast.FuncType); ok {
- // Function declarations in interfaces need all their types retained.
- f.recordFuncType(ft)
- continue
}
- if len(field.Names) == 0 {
- // Embedded fields might contribute exported names.
+ }
+ if fn.Results != nil {
+ for _, field := range fn.Results.List {
f.recordIdents(field.Type)
}
- for _, name := range field.Names {
- // We only need normal fields if they're exported.
- if ast.IsExported(name.Name) {
- f.recordIdents(field.Type)
- break
+ }
+}
+
+// recordInterfaceMethodUses records unexported identifiers used in interface methods.
+func (f *unexportedFilter) recordInterfaceMethodUses(methods *ast.FieldList) {
+ if methods != nil {
+ for _, method := range methods.List {
+ if len(method.Names) == 0 {
+ // I, pkg.I, I[T] -- embedded interface:
+ // may contribute exported names.
+ f.recordIdents(method.Type)
+ } else if ft, ok := method.Type.(*ast.FuncType); ok {
+ // f(T) -- ordinary interface method:
+ // needs all its types retained.
+ f.recordFuncType(ft)
}
}
}
@@ -588,32 +456,35 @@ func (f *unexportedFilter) ProcessErrors(errors []types.Error) (map[string]bool,
}
// trimAST clears any part of the AST not relevant to type checking
-// expressions at pos.
+// the package-level declarations.
func trimAST(file *ast.File) {
- ast.Inspect(file, func(n ast.Node) bool {
- if n == nil {
- return false
+ // Eliminate bodies of top-level functions, methods, inits.
+ for _, decl := range file.Decls {
+ if fn, ok := decl.(*ast.FuncDecl); ok {
+ fn.Body = nil
}
+ }
+
+ // Simplify remaining declarations.
+ ast.Inspect(file, func(n ast.Node) bool {
switch n := n.(type) {
- case *ast.FuncDecl:
- n.Body = nil
- case *ast.BlockStmt:
- n.List = nil
- case *ast.CaseClause:
- n.Body = nil
- case *ast.CommClause:
- n.Body = nil
+ case *ast.FuncLit:
+ // Eliminate bodies of literal functions.
+ // func() { ... } => func() {}
+ n.Body.List = nil
case *ast.CompositeLit:
// types.Info.Types for long slice/array literals are particularly
- // expensive. Try to clear them out.
+ // expensive. Try to clear them out: T{e, ..., e} => T{}
at, ok := n.Type.(*ast.ArrayType)
if !ok {
- // Composite literal. No harm removing all its fields.
+ // Map or struct literal: no harm removing all its fields.
n.Elts = nil
break
}
+
// Removing the elements from an ellipsis array changes its type.
// Try to set the length explicitly so we can continue.
+ // [...]T{e, ..., e} => [3]T{}
if _, ok := at.Len.(*ast.Ellipsis); ok {
length, ok := arrayLength(n)
if !ok {
@@ -685,14 +556,14 @@ func arrayLength(array *ast.CompositeLit) (int, bool) {
//
// If fixAST returns true, the resulting AST is considered "fixed", meaning
// positions have been mangled, and type checker errors may not make sense.
-func fixAST(ctx context.Context, n ast.Node, tok *token.File, src []byte) (fixed bool) {
+func fixAST(n ast.Node, tok *token.File, src []byte) (fixed bool) {
var err error
walkASTWithParent(n, func(n, parent ast.Node) bool {
switch n := n.(type) {
case *ast.BadStmt:
if fixed = fixDeferOrGoStmt(n, parent, tok, src); fixed {
// Recursively fix in our fixed node.
- _ = fixAST(ctx, parent, tok, src)
+ _ = fixAST(parent, tok, src)
} else {
err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err)
}
@@ -700,7 +571,7 @@ func fixAST(ctx context.Context, n ast.Node, tok *token.File, src []byte) (fixed
case *ast.BadExpr:
if fixed = fixArrayType(n, parent, tok, src); fixed {
// Recursively fix in our fixed node.
- _ = fixAST(ctx, parent, tok, src)
+ _ = fixAST(parent, tok, src)
return false
}
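
The new trimAST keeps only what is needed to type-check package-level declarations: bodies of top-level functions are dropped, function-literal bodies and map/struct composite-literal elements are emptied, and ellipsis array lengths are made explicit before their elements go. Below is a self-contained, simplified illustration of the effect on function bodies and map literals (it deliberately skips the function-literal and ellipsis-array handling):

```go
package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
)

func main() {
	src := `package p

func F() int { return 40 + 2 }

var m = map[string]int{"a": 1, "b": 2}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Drop bodies of top-level functions, as trimAST does.
	for _, decl := range f.Decls {
		if fn, ok := decl.(*ast.FuncDecl); ok {
			fn.Body = nil
		}
	}
	// Empty the elements of map/struct literals; their types are unaffected.
	ast.Inspect(f, func(n ast.Node) bool {
		if lit, ok := n.(*ast.CompositeLit); ok {
			if _, isArray := lit.Type.(*ast.ArrayType); !isArray {
				lit.Elts = nil
			}
		}
		return true
	})

	var buf bytes.Buffer
	if err := format.Node(&buf, fset, f); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Prints roughly:
	//   package p
	//
	//   func F() int
	//
	//   var m = map[string]int{}
}
```
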
diff --git a/internal/lsp/cache/parse_test.go b/gopls/internal/lsp/cache/parse_test.go
similarity index 98%
rename from internal/lsp/cache/parse_test.go
rename to gopls/internal/lsp/cache/parse_test.go
index cb620f27432..e8db64530e6 100644
--- a/internal/lsp/cache/parse_test.go
+++ b/gopls/internal/lsp/cache/parse_test.go
@@ -149,7 +149,7 @@ type Exported struct {
}
var Var = Exported{foo:1}
`,
- kept: []string{"Exported", "Var"},
+ kept: []string{"Exported", "Var", "x"},
},
{
name: "drop_function_literals",
diff --git a/internal/lsp/cache/parsemode_go116.go b/gopls/internal/lsp/cache/parsemode_go116.go
similarity index 100%
rename from internal/lsp/cache/parsemode_go116.go
rename to gopls/internal/lsp/cache/parsemode_go116.go
diff --git a/internal/lsp/cache/parsemode_go117.go b/gopls/internal/lsp/cache/parsemode_go117.go
similarity index 100%
rename from internal/lsp/cache/parsemode_go117.go
rename to gopls/internal/lsp/cache/parsemode_go117.go
diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go
new file mode 100644
index 00000000000..6d138bea15c
--- /dev/null
+++ b/gopls/internal/lsp/cache/pkg.go
@@ -0,0 +1,173 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// Convenient local aliases for typed strings.
+type (
+ PackageID = source.PackageID
+ PackagePath = source.PackagePath
+ PackageName = source.PackageName
+ ImportPath = source.ImportPath
+)
+
+// pkg contains parse trees and type information for a package.
+type pkg struct {
+ m *source.Metadata
+ mode source.ParseMode
+ fset *token.FileSet // for now, same as the snapshot's FileSet
+ goFiles []*source.ParsedGoFile
+ compiledGoFiles []*source.ParsedGoFile
+ diagnostics []*source.Diagnostic
+ deps map[PackageID]*pkg // use m.DepsBy{Pkg,Imp}Path to look up ID
+ version *module.Version // may be nil; may differ from m.Module.Version
+ parseErrors []scanner.ErrorList
+ typeErrors []types.Error
+ types *types.Package
+ typesInfo *types.Info
+ hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors
+
+ analyses memoize.Store // maps analyzer.Name to Promise[actionResult]
+}
+
+func (p *pkg) String() string { return string(p.ID()) }
+
+// A loadScope defines a package loading scope for use with go/packages.
+type loadScope interface {
+ aScope()
+}
+
+type (
+ fileLoadScope span.URI // load packages containing a file (including command-line-arguments)
+ packageLoadScope string // load a specific package (the value is its PackageID)
+ moduleLoadScope string // load packages in a specific module
+ viewLoadScope span.URI // load the workspace
+)
+
+// Implement the loadScope interface.
+func (fileLoadScope) aScope() {}
+func (packageLoadScope) aScope() {}
+func (moduleLoadScope) aScope() {}
+func (viewLoadScope) aScope() {}
+
+func (p *pkg) ID() PackageID { return p.m.ID }
+func (p *pkg) Name() PackageName { return p.m.Name }
+func (p *pkg) PkgPath() PackagePath { return p.m.PkgPath }
+
+func (p *pkg) ParseMode() source.ParseMode {
+ return p.mode
+}
+
+func (p *pkg) CompiledGoFiles() []*source.ParsedGoFile {
+ return p.compiledGoFiles
+}
+
+func (p *pkg) File(uri span.URI) (*source.ParsedGoFile, error) {
+ for _, cgf := range p.compiledGoFiles {
+ if cgf.URI == uri {
+ return cgf, nil
+ }
+ }
+ for _, gf := range p.goFiles {
+ if gf.URI == uri {
+ return gf, nil
+ }
+ }
+ return nil, fmt.Errorf("no parsed file for %s in %v", uri, p.m.ID)
+}
+
+func (p *pkg) GetSyntax() []*ast.File {
+ var syntax []*ast.File
+ for _, pgf := range p.compiledGoFiles {
+ syntax = append(syntax, pgf.File)
+ }
+ return syntax
+}
+
+func (p *pkg) FileSet() *token.FileSet {
+ return p.fset
+}
+
+func (p *pkg) GetTypes() *types.Package {
+ return p.types
+}
+
+func (p *pkg) GetTypesInfo() *types.Info {
+ return p.typesInfo
+}
+
+func (p *pkg) GetTypesSizes() types.Sizes {
+ return p.m.TypesSizes
+}
+
+func (p *pkg) ForTest() string {
+ return string(p.m.ForTest)
+}
+
+// DirectDep returns the directly imported dependency of this package,
+// given its PackagePath. (If you have an ImportPath, e.g. a string
+// from an import declaration, use ResolveImportPath instead.
+// They may differ in case of vendoring.)
+func (p *pkg) DirectDep(pkgPath PackagePath) (source.Package, error) {
+ if id, ok := p.m.DepsByPkgPath[pkgPath]; ok {
+ if imp := p.deps[id]; imp != nil {
+ return imp, nil
+ }
+ }
+ return nil, fmt.Errorf("package does not import package with path %s", pkgPath)
+}
+
+// ResolveImportPath returns the directly imported dependency of this package,
+// given its ImportPath. See also DirectDep.
+func (p *pkg) ResolveImportPath(importPath ImportPath) (source.Package, error) {
+ if id, ok := p.m.DepsByImpPath[importPath]; ok && id != "" {
+ if imp := p.deps[id]; imp != nil {
+ return imp, nil
+ }
+ }
+ return nil, fmt.Errorf("package does not import %s", importPath)
+}
+
+func (p *pkg) Imports() []source.Package {
+ var result []source.Package // unordered
+ for _, dep := range p.deps {
+ result = append(result, dep)
+ }
+ return result
+}
+
+func (p *pkg) Version() *module.Version {
+ return p.version
+}
+
+func (p *pkg) HasListOrParseErrors() bool {
+ return len(p.m.Errors) != 0 || len(p.parseErrors) != 0
+}
+
+func (p *pkg) HasTypeErrors() bool {
+ return len(p.typeErrors) != 0
+}
+
+func (p *pkg) DiagnosticsForFile(uri span.URI) []*source.Diagnostic {
+ var res []*source.Diagnostic
+ for _, diag := range p.diagnostics {
+ if diag.URI == uri {
+ res = append(res, diag)
+ }
+ }
+ return res
+}
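
DirectDep and ResolveImportPath answer the same question from different starting points: DepsByPkgPath is keyed by the canonical package path, while DepsByImpPath is keyed by the string that appears in an import declaration, and the two can diverge under GOPATH-style vendoring. A hypothetical illustration of the two tables for such a package (the paths are invented; the real maps live on source.Metadata):

```go
package main

import "fmt"

type (
	PackageID   string
	PackagePath string
	ImportPath  string
)

func main() {
	// Hypothetical dependency tables for a package that vendors
	// example.com/util under GOPATH mode: the import path written in source
	// stays the same, but the canonical package path gains the vendor/ prefix.
	depsByImpPath := map[ImportPath]PackageID{
		"example.com/util": "host.test/repo/vendor/example.com/util",
	}
	depsByPkgPath := map[PackagePath]PackageID{
		"host.test/repo/vendor/example.com/util": "host.test/repo/vendor/example.com/util",
	}

	// ResolveImportPath-style lookup starts from the import declaration.
	fmt.Println(depsByImpPath["example.com/util"])
	// DirectDep-style lookup starts from the canonical package path.
	fmt.Println(depsByPkgPath["host.test/repo/vendor/example.com/util"])
}
```
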
diff --git a/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go
similarity index 57%
rename from internal/lsp/cache/session.go
rename to gopls/internal/lsp/cache/session.go
index e018cb33bd8..5cf991bced9 100644
--- a/internal/lsp/cache/session.go
+++ b/gopls/internal/lsp/cache/session.go
@@ -7,44 +7,47 @@ package cache
import (
"context"
"fmt"
+ "os"
"strconv"
+ "strings"
"sync"
"sync/atomic"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/persistent"
"golang.org/x/tools/internal/xcontext"
)
type Session struct {
- cache *Cache
- id string
+ // Unique identifier for this session.
+ id string
+
+ // Immutable attributes shared across views.
+ cache *Cache // shared cache
+ gocmdRunner *gocommand.Runner // limits go command concurrency
optionsMu sync.Mutex
options *source.Options
- viewMu sync.RWMutex
+ viewMu sync.Mutex
views []*View
viewMap map[span.URI]*View // map of URI->best view
overlayMu sync.Mutex
overlays map[span.URI]*overlay
-
- // gocmdRunner guards go command calls from concurrency errors.
- gocmdRunner *gocommand.Runner
-
- progress *progress.Tracker
}
type overlay struct {
session *Session
uri span.URI
text []byte
- hash string
+ hash source.Hash
version int32
kind source.FileKind
@@ -117,26 +120,25 @@ func (c *closedFile) Version() int32 {
return 0
}
+// ID returns the unique identifier for this session on this server.
func (s *Session) ID() string { return s.id }
func (s *Session) String() string { return s.id }
+// Options returns the current options for this session.
func (s *Session) Options() *source.Options {
s.optionsMu.Lock()
defer s.optionsMu.Unlock()
return s.options
}
+// SetOptions sets the options of this session to new values.
func (s *Session) SetOptions(options *source.Options) {
s.optionsMu.Lock()
defer s.optionsMu.Unlock()
s.options = options
}
-func (s *Session) SetProgressTracker(tracker *progress.Tracker) {
- // The progress tracker should be set before any view is initialized.
- s.progress = tracker
-}
-
+// Shutdown the session and all views it has created.
func (s *Session) Shutdown(ctx context.Context) {
var views []*View
s.viewMu.Lock()
@@ -145,26 +147,32 @@ func (s *Session) Shutdown(ctx context.Context) {
s.viewMap = nil
s.viewMu.Unlock()
for _, view := range views {
- view.shutdown(ctx)
+ view.shutdown()
}
event.Log(ctx, "Shutdown session", KeyShutdownSession.Of(s))
}
-func (s *Session) Cache() interface{} {
+// Cache returns the cache that created this session, for debugging only.
+func (s *Session) Cache() *Cache {
return s.cache
}
-func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (source.View, source.Snapshot, func(), error) {
+// NewView creates a new View, returning it and its first snapshot. On
+// success it also returns a release function that must be called when the
+// Snapshot is no longer needed.
+func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (*View, source.Snapshot, func(), error) {
s.viewMu.Lock()
defer s.viewMu.Unlock()
for _, view := range s.views {
- if span.CompareURI(view.folder, folder) == 0 {
+ if span.SameExistingFile(view.folder, folder) {
return nil, nil, nil, source.ErrViewExists
}
}
view, snapshot, release, err := s.createView(ctx, name, folder, options, 0)
if err != nil {
- return nil, nil, func() {}, err
+ return nil, nil, nil, err
}
s.views = append(s.views, view)
// we always need to drop the view map
@@ -172,28 +180,39 @@ func (s *Session) NewView(ctx context.Context, name string, folder span.URI, opt
return view, snapshot, release, nil
}
-func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, snapshotID uint64) (*View, *snapshot, func(), error) {
+func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, seqID uint64) (*View, *snapshot, func(), error) {
index := atomic.AddInt64(&viewIndex, 1)
- if s.cache.options != nil {
- s.cache.options(options)
+ // Get immutable workspace configuration.
+ //
+ // TODO(rfindley): this info isn't actually immutable. For example, GOWORK
+ // could be changed, or a user's environment could be modified.
+ // We need a mechanism to invalidate it.
+ wsInfo, err := s.getWorkspaceInformation(ctx, folder, options)
+ if err != nil {
+ return nil, nil, func() {}, err
}
- // Set the module-specific information.
- ws, err := s.getWorkspaceInformation(ctx, folder, options)
+ root := folder
+ // filterFunc is the path filter function for this workspace folder. Notably,
+ // it is relative to folder (which is specified by the user), not root.
+ filterFunc := pathExcludedByFilterFunc(folder.Filename(), wsInfo.gomodcache, options)
+ rootSrc, err := findWorkspaceModuleSource(ctx, root, s, filterFunc, options.ExperimentalWorkspaceModule)
if err != nil {
return nil, nil, func() {}, err
}
- root := folder
- if options.ExpandWorkspaceToModule {
- root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), options.ExperimentalWorkspaceModule)
- if err != nil {
- return nil, nil, func() {}, err
- }
+ if options.ExpandWorkspaceToModule && rootSrc != "" {
+ root = span.Dir(rootSrc)
+ }
+
+ explicitGowork := os.Getenv("GOWORK")
+ if v, ok := options.Env["GOWORK"]; ok {
+ explicitGowork = v
}
+ goworkURI := span.URIFromPath(explicitGowork)
// Build the gopls workspace, collecting active modules in the view.
- workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule)
+ workspace, err := newWorkspace(ctx, root, goworkURI, s, filterFunc, wsInfo.effectiveGO111MODULE() == off, options.ExperimentalWorkspaceModule)
if err != nil {
return nil, nil, func() {}, err
}
@@ -204,64 +223,92 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI,
backgroundCtx, cancel := context.WithCancel(baseCtx)
v := &View{
- session: s,
+ id: strconv.FormatInt(index, 10),
+ cache: s.cache,
+ gocmdRunner: s.gocmdRunner,
initialWorkspaceLoad: make(chan struct{}),
initializationSema: make(chan struct{}, 1),
- id: strconv.FormatInt(index, 10),
options: options,
baseCtx: baseCtx,
name: name,
folder: folder,
- moduleUpgrades: map[string]string{},
- filesByURI: map[span.URI]*fileBase{},
- filesByBase: map[string][]*fileBase{},
+ moduleUpgrades: map[span.URI]map[string]string{},
+ vulns: map[span.URI]*govulncheck.Result{},
+ filesByURI: make(map[span.URI]span.URI),
+ filesByBase: make(map[string][]canonicalURI),
rootURI: root,
- workspaceInformation: *ws,
+ rootSrc: rootSrc,
+ explicitGowork: goworkURI,
+ workspaceInformation: *wsInfo,
}
v.importsState = &importsState{
ctx: backgroundCtx,
processEnv: &imports.ProcessEnv{
GocmdRunner: s.gocmdRunner,
+ SkipPathInScan: func(dir string) bool {
+ prefix := strings.TrimSuffix(string(v.folder), "/") + "/"
+ uri := strings.TrimSuffix(string(span.URIFromPath(dir)), "/")
+ if !strings.HasPrefix(uri+"/", prefix) {
+ return false
+ }
+ filterer := source.NewFilterer(options.DirectoryFilters)
+ rel := strings.TrimPrefix(uri, prefix)
+ disallow := filterer.Disallow(rel)
+ return disallow
+ },
},
}
v.snapshot = &snapshot{
- id: snapshotID,
- view: v,
- backgroundCtx: backgroundCtx,
- cancel: cancel,
- initializeOnce: &sync.Once{},
- generation: s.cache.store.Generation(generationName(v, 0)),
- packages: make(map[packageKey]*packageHandle),
- meta: NewMetadataGraph(),
- files: make(map[span.URI]source.VersionedFileHandle),
- goFiles: make(map[parseKey]*parseGoHandle),
- symbols: make(map[span.URI]*symbolHandle),
- actions: make(map[actionKey]*actionHandle),
- workspacePackages: make(map[PackageID]PackagePath),
- unloadableFiles: make(map[span.URI]struct{}),
- parseModHandles: make(map[span.URI]*parseModHandle),
- parseWorkHandles: make(map[span.URI]*parseWorkHandle),
- modTidyHandles: make(map[span.URI]*modTidyHandle),
- modWhyHandles: make(map[span.URI]*modWhyHandle),
- workspace: workspace,
- }
+ sequenceID: seqID,
+ globalID: nextSnapshotID(),
+ view: v,
+ backgroundCtx: backgroundCtx,
+ cancel: cancel,
+ store: s.cache.store,
+ packages: persistent.NewMap(packageKeyLessInterface),
+ meta: &metadataGraph{},
+ files: newFilesMap(),
+ isActivePackageCache: newIsActivePackageCacheMap(),
+ parsedGoFiles: persistent.NewMap(parseKeyLessInterface),
+ parseKeysByURI: newParseKeysByURIMap(),
+ symbolizeHandles: persistent.NewMap(uriLessInterface),
+ analyses: persistent.NewMap(analysisKeyLessInterface),
+ workspacePackages: make(map[PackageID]PackagePath),
+ unloadableFiles: make(map[span.URI]struct{}),
+ parseModHandles: persistent.NewMap(uriLessInterface),
+ parseWorkHandles: persistent.NewMap(uriLessInterface),
+ modTidyHandles: persistent.NewMap(uriLessInterface),
+ modVulnHandles: persistent.NewMap(uriLessInterface),
+ modWhyHandles: persistent.NewMap(uriLessInterface),
+ knownSubdirs: newKnownDirsSet(),
+ workspace: workspace,
+ }
+ // Save one reference in the view.
+ v.releaseSnapshot = v.snapshot.Acquire()
+
+ // Record the environment of the newly created view in the log.
+ event.Log(ctx, viewEnv(v))
// Initialize the view without blocking.
initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx))
v.initCancelFirstAttempt = initCancel
snapshot := v.snapshot
- release := snapshot.generation.Acquire()
+
+ // Pass a second reference to the background goroutine.
+ bgRelease := snapshot.Acquire()
go func() {
- defer release()
+ defer bgRelease()
snapshot.initialize(initCtx, true)
}()
- return v, snapshot, snapshot.generation.Acquire(), nil
+
+ // Return a third reference to the caller.
+ return v, snapshot, snapshot.Acquire(), nil
}
-// View returns the view by name.
-func (s *Session) View(name string) source.View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
+// View returns a view with a matching name, if the session has one.
+func (s *Session) View(name string) *View {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
for _, view := range s.views {
if view.Name() == name {
return view
@@ -272,13 +319,14 @@ func (s *Session) View(name string) source.View {
// ViewOf returns a view corresponding to the given URI.
// If the file is not already associated with a view, pick one using some heuristics.
-func (s *Session) ViewOf(uri span.URI) (source.View, error) {
- return s.viewOf(uri)
+func (s *Session) ViewOf(uri span.URI) (*View, error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ return s.viewOfLocked(uri)
}
-func (s *Session) viewOf(uri span.URI) (*View, error) {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
+// Precondition: caller holds s.viewMu lock.
+func (s *Session) viewOfLocked(uri span.URI) (*View, error) {
// Check if we already know this file.
if v, found := s.viewMap[uri]; found {
return v, nil
@@ -291,26 +339,11 @@ func (s *Session) viewOf(uri span.URI) (*View, error) {
return s.viewMap[uri], nil
}
-func (s *Session) viewsOf(uri span.URI) []*View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
-
- var views []*View
- for _, view := range s.views {
- if source.InDir(view.folder.Filename(), uri.Filename()) {
- views = append(views, view)
- }
- }
- return views
-}
-
-func (s *Session) Views() []source.View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- result := make([]source.View, len(s.views))
- for i, v := range s.views {
- result[i] = v
- }
+func (s *Session) Views() []*View {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ result := make([]*View, len(s.views))
+ copy(result, s.views)
return result
}
@@ -323,6 +356,8 @@ func bestViewForURI(uri span.URI, views []*View) *View {
if longest != nil && len(longest.Folder()) > len(view.Folder()) {
continue
}
+ // TODO(rfindley): this should consider the workspace layout (i.e.
+ // go.work).
if view.contains(uri) {
longest = view
}
@@ -340,49 +375,53 @@ func bestViewForURI(uri span.URI, views []*View) *View {
return views[0]
}
-func (s *Session) removeView(ctx context.Context, view *View) error {
+// RemoveView removes the given view from the session and shuts it down.
+func (s *Session) RemoveView(view *View) {
s.viewMu.Lock()
defer s.viewMu.Unlock()
- i, err := s.dropView(ctx, view)
- if err != nil {
- return err
+ i := s.dropView(view)
+ if i == -1 { // error reported elsewhere
+ return
}
// delete this view... we don't care about order but we do want to make
// sure we can garbage collect the view
- s.views[i] = s.views[len(s.views)-1]
- s.views[len(s.views)-1] = nil
- s.views = s.views[:len(s.views)-1]
- return nil
+ s.views = removeElement(s.views, i)
}
+// updateView recreates the view with the given options.
+//
+// If the resulting error is non-nil, the view may or may not have already been
+// dropped from the session.
func (s *Session) updateView(ctx context.Context, view *View, options *source.Options) (*View, error) {
s.viewMu.Lock()
defer s.viewMu.Unlock()
+ return s.updateViewLocked(ctx, view, options)
+}
+
+func (s *Session) updateViewLocked(ctx context.Context, view *View, options *source.Options) (*View, error) {
// Preserve the snapshot ID if we are recreating the view.
view.snapshotMu.Lock()
if view.snapshot == nil {
view.snapshotMu.Unlock()
panic("updateView called after View was already shut down")
}
- snapshotID := view.snapshot.id
+ seqID := view.snapshot.sequenceID // Preserve sequence IDs when updating a view in place.
view.snapshotMu.Unlock()
- i, err := s.dropView(ctx, view)
- if err != nil {
- return nil, err
+ i := s.dropView(view)
+ if i == -1 {
+ return nil, fmt.Errorf("view %q not found", view.id)
}
- v, _, release, err := s.createView(ctx, view.name, view.folder, options, snapshotID)
+ v, _, release, err := s.createView(ctx, view.name, view.folder, options, seqID)
release()
if err != nil {
// we have dropped the old view, but could not create the new one
// this should not happen and is very bad, but we still need to clean
// up the view array if it happens
- s.views[i] = s.views[len(s.views)-1]
- s.views[len(s.views)-1] = nil
- s.views = s.views[:len(s.views)-1]
+ s.views = removeElement(s.views, i)
return nil, err
}
// substitute the new view into the array where the old view was
@@ -390,28 +429,43 @@ func (s *Session) updateView(ctx context.Context, view *View, options *source.Op
return v, nil
}
-func (s *Session) dropView(ctx context.Context, v *View) (int, error) {
+// removeElement removes the ith element from the slice replacing it with the last element.
+// TODO(adonovan): generics, someday.
+func removeElement(slice []*View, index int) []*View {
+ last := len(slice) - 1
+ slice[index] = slice[last]
+ slice[last] = nil // aid GC
+ return slice[:last]
+}
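As a usage note, the swap-with-last idiom above removes an element in constant time at the cost of ordering. A small standalone sketch using plain strings rather than the actual *View type:

```go
package main

import "fmt"

// removeElement removes the ith element by swapping in the last one and
// truncating. Order is not preserved.
func removeElement(slice []string, index int) []string {
	last := len(slice) - 1
	slice[index] = slice[last]
	slice[last] = "" // clear the vacated slot (aids GC for pointer elements)
	return slice[:last]
}

func main() {
	views := []string{"a", "b", "c", "d"}
	views = removeElement(views, 1)
	fmt.Println(views) // [a d c]: length shrinks, order changes
}
```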
+
+// dropView removes v from the set of views for the receiver s and calls
+// v.shutdown, returning the index of v in s.views (if found), or -1 if v was
+// not found. s.viewMu must be held while calling this function.
+func (s *Session) dropView(v *View) int {
// we always need to drop the view map
s.viewMap = make(map[span.URI]*View)
for i := range s.views {
if v == s.views[i] {
// we found the view, drop it and return the index it was found at
s.views[i] = nil
- v.shutdown(ctx)
- return i, nil
+ v.shutdown()
+ return i
}
}
- return -1, fmt.Errorf("view %s for %v not found", v.Name(), v.Folder())
+ // TODO(rfindley): it looks wrong that we don't shutdown v in this codepath.
+ // We should never get here.
+ bug.Reportf("tried to drop nonexistent view %q", v.id)
+ return -1
}
func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error {
- _, releases, err := s.DidModifyFiles(ctx, changes)
- for _, release := range releases {
- release()
- }
+ _, release, err := s.DidModifyFiles(ctx, changes)
+ release()
return err
}
+// TODO(rfindley): fileChange seems redundant with source.FileModification.
+// De-dupe into a common representation for changes.
type fileChange struct {
content []byte
exists bool
@@ -423,22 +477,92 @@ type fileChange struct {
isUnchanged bool
}
-func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- views := make(map[*View]map[span.URI]*fileChange)
- affectedViews := map[span.URI][]*View{}
+// DidModifyFiles reports a file modification to the session. It returns
+// the new snapshots after the modifications have been applied, paired with
+// the affected file URIs for those snapshots. On success, it also returns a
+// release function that must be called when the snapshots are no longer
+// needed.
+//
+// TODO(rfindley): what happens if this function fails? It must leave us in a
+// broken state, which we should surface to the user, probably as a request to
+// restart gopls.
+func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, func(), error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ // Update overlays.
+ //
+ // TODO(rfindley): I think we do this while holding viewMu to prevent views
+ // from seeing the updated file content before they have processed
+ // invalidations, which could lead to a partial view of the changes (i.e.
+ // spurious diagnostics). However, any such view would immediately be
+ // invalidated here, so it is possible that we could update overlays before
+ // acquiring viewMu.
overlays, err := s.updateOverlays(ctx, changes)
if err != nil {
return nil, nil, err
}
- var forceReloadMetadata bool
+
+ // Re-create views whose root may have changed.
+ //
+ // checkRoots controls whether to re-evaluate view definitions when
+ // collecting views below. Any change to a go.mod or go.work file may have
+ // affected the definition of the view.
+ checkRoots := false
+ for _, c := range changes {
+ if isGoMod(c.URI) || isGoWork(c.URI) {
+ checkRoots = true
+ break
+ }
+ }
+
+ if checkRoots {
+ for _, view := range s.views {
+ // Check whether the view must be recreated. This logic looks hacky,
+ // as it uses the existing view gomodcache and options to re-evaluate
+ // the workspace source, then expects view creation to compute the same
+ // root source after first re-evaluating gomodcache and options.
+ //
+ // Well, it *is* a bit hacky, but in practice we will get the same
+ // gomodcache and options, as any environment change affecting these
+ // should have already invalidated the view (c.f. minorOptionsChange).
+ //
+ // TODO(rfindley): clean this up.
+ filterFunc := pathExcludedByFilterFunc(view.folder.Filename(), view.gomodcache, view.Options())
+ src, err := findWorkspaceModuleSource(ctx, view.folder, s, filterFunc, view.Options().ExperimentalWorkspaceModule)
+ if err != nil {
+ return nil, nil, err
+ }
+ if src != view.rootSrc {
+ _, err := s.updateViewLocked(ctx, view, view.Options())
+ if err != nil {
+ // Catastrophic failure, equivalent to a failure of session
+ // initialization and therefore should almost never happen. One
+ // scenario where this failure mode could occur is if some file
+ // permissions have changed preventing us from reading go.mod
+ // files.
+ //
+ // The view may or may not still exist. The best we can do is log
+ // and move on.
+ //
+ // TODO(rfindley): consider surfacing this error more loudly. We
+ // could report a bug, but it's not really a bug.
+ event.Error(ctx, "recreating view", err)
+ }
+ }
+ }
+ }
+
+ // Collect information about views affected by these changes.
+ views := make(map[*View]map[span.URI]*fileChange)
+ affectedViews := map[span.URI][]*View{}
+ // forceReloadMetadata records whether any change is the magic
+ // source.InvalidateMetadata action.
+ forceReloadMetadata := false
for _, c := range changes {
if c.Action == source.InvalidateMetadata {
forceReloadMetadata = true
}
-
// Build the list of affected views.
var changedViews []*View
for _, view := range s.views {
@@ -455,7 +579,7 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif
if c.OnDisk {
continue
}
- bestView, err := s.viewOf(c.URI)
+ bestView, err := s.viewOfLocked(c.URI)
if err != nil {
return nil, nil, err
}
@@ -467,8 +591,8 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif
// Apply the changes to all affected views.
for _, view := range changedViews {
- // Make sure that the file is added to the view.
- _ = view.getFile(c.URI)
+ // Make sure that the file is added to the view's knownFiles set.
+ view.canonicalURI(c.URI, true) // ignore result
if _, ok := views[view]; !ok {
views[view] = make(map[span.URI]*fileChange)
}
@@ -504,6 +628,14 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif
viewToSnapshot[view] = snapshot
}
+ // The release function is called when the
+	// The release function is called when the returned URIs no longer
+	// need to be valid.
+ for _, release := range releases {
+ release()
+ }
+ }
+
// We only want to diagnose each changed file once, in the view to which
// it "most" belongs. We do this by picking the best view for each URI,
// and then aggregating the set of snapshots and their URIs (to avoid
@@ -521,22 +653,29 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif
}
snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI)
}
- return snapshotURIs, releases, nil
+
+ return snapshotURIs, release, nil
}
+// ExpandModificationsToDirectories returns the set of changes with each
+// directory change replaced by changes for all of the files in that
+// directory.
func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
var snapshots []*snapshot
+ s.viewMu.Lock()
for _, v := range s.views {
snapshot, release := v.getSnapshot()
defer release()
snapshots = append(snapshots, snapshot)
}
+ s.viewMu.Unlock()
+
knownDirs := knownDirectories(ctx, snapshots)
+ defer knownDirs.Destroy()
+
var result []source.FileModification
for _, c := range changes {
- if _, ok := knownDirs[c.URI]; !ok {
+ if !knownDirs.Contains(c.URI) {
result = append(result, c)
continue
}
@@ -558,16 +697,17 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes
// knownDirectories returns all of the directories known to the given
// snapshots, including workspace directories and their subdirectories.
-func knownDirectories(ctx context.Context, snapshots []*snapshot) map[span.URI]struct{} {
- result := map[span.URI]struct{}{}
+// It is the responsibility of the caller to destroy the returned set.
+func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet {
+ result := newKnownDirsSet()
for _, snapshot := range snapshots {
dirs := snapshot.workspace.dirs(ctx, snapshot)
for _, dir := range dirs {
- result[dir] = struct{}{}
- }
- for _, dir := range snapshot.getKnownSubdirs(dirs) {
- result[dir] = struct{}{}
+ result.Insert(dir)
}
+ knownSubdirs := snapshot.getKnownSubdirs(dirs)
+ result.SetAll(knownSubdirs)
+ knownSubdirs.Destroy()
}
return result
}
@@ -585,6 +725,7 @@ func knownFilesInDir(ctx context.Context, snapshots []*snapshot, dir span.URI) m
return files
}
+// Precondition: caller holds s.viewMu lock.
func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModification) (map[span.URI]*overlay, error) {
s.overlayMu.Lock()
defer s.overlayMu.Unlock()
@@ -637,7 +778,7 @@ func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModif
if c.OnDisk || c.Action == source.Save {
version = o.version
}
- hash := hashContents(text)
+ hash := source.HashOf(text)
var sameContentOnDisk bool
switch c.Action {
case source.Delete:
@@ -671,7 +812,7 @@ func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModif
// When opening files, ensure that we actually have a well-defined view and file kind.
if c.Action == source.Open {
- view, err := s.ViewOf(o.uri)
+ view, err := s.viewOfLocked(o.uri)
if err != nil {
return nil, fmt.Errorf("updateOverlays: finding view for %s: %v", o.uri, err)
}
@@ -694,6 +835,7 @@ func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModif
return overlays, nil
}
+// GetFile returns a handle for the specified file.
func (s *Session) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
if overlay := s.readOverlay(uri); overlay != nil {
return overlay, nil
@@ -712,6 +854,7 @@ func (s *Session) readOverlay(uri span.URI) *overlay {
return nil
}
+// Overlays returns a slice of file overlays for the session.
func (s *Session) Overlays() []source.Overlay {
s.overlayMu.Lock()
defer s.overlayMu.Unlock()
@@ -723,9 +866,12 @@ func (s *Session) Overlays() []source.Overlay {
return overlays
}
+// FileWatchingGlobPatterns returns glob patterns to watch every directory
+// known to the session's views. For views within a module, this includes the
+// module root, any directory in the module root, and any replace targets.
func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
patterns := map[string]struct{}{}
for _, view := range s.views {
snapshot, release := view.getSnapshot()
diff --git a/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
similarity index 57%
rename from internal/lsp/cache/snapshot.go
rename to gopls/internal/lsp/cache/snapshot.go
index a219935aa66..b7bdaddc184 100644
--- a/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -14,49 +14,53 @@ import (
"go/types"
"io"
"io/ioutil"
+ "log"
"os"
"path/filepath"
"regexp"
+ "runtime"
"sort"
"strconv"
"strings"
"sync"
+ "sync/atomic"
+ "unsafe"
"golang.org/x/mod/modfile"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
- "golang.org/x/tools/go/analysis"
+ "golang.org/x/sync/errgroup"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/bug"
- "golang.org/x/tools/internal/lsp/debug/log"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/persistent"
"golang.org/x/tools/internal/typesinternal"
)
type snapshot struct {
- memoize.Arg // allow as a memoize.Function arg
-
- id uint64
- view *View
+ sequenceID uint64
+ globalID source.GlobalSnapshotID
+ view *View
cancel func()
backgroundCtx context.Context
- // the cache generation that contains the data for this snapshot.
- generation *memoize.Generation
+ store *memoize.Store // cache of handles shared by all snapshots
- // The snapshot's initialization state is controlled by the fields below.
- //
- // initializeOnce guards snapshot initialization. Each snapshot is
- // initialized at most once: reinitialization is triggered on later snapshots
- // by invalidating this field.
- initializeOnce *sync.Once
+ refcount sync.WaitGroup // number of references
+ destroyedBy *string // atomically set to non-nil in Destroy once refcount = 0
+
+	// initialized reports whether the snapshot has been initialized. Each
+	// snapshot is initialized at most once: concurrent initialization is
+	// guarded by view.initializationSema.
+ initialized bool
// initializedErr holds the last error resulting from initialization. If
 	// initialization fails, we only retry when the workspace modules change,
// to avoid too many go/packages calls.
@@ -69,70 +73,181 @@ type snapshot struct {
builtin span.URI
// meta holds loaded metadata.
+ //
+ // meta is guarded by mu, but the metadataGraph itself is immutable.
+ // TODO(rfindley): in many places we hold mu while operating on meta, even
+ // though we only need to hold mu while reading the pointer.
meta *metadataGraph
// files maps file URIs to their corresponding FileHandles.
// It may invalidated when a file's content changes.
- files map[span.URI]source.VersionedFileHandle
+ files filesMap
+
+ // parsedGoFiles maps a parseKey to the handle of the future result of parsing it.
+ parsedGoFiles *persistent.Map // from parseKey to *memoize.Promise[parseGoResult]
- // goFiles maps a parseKey to its parseGoHandle.
- goFiles map[parseKey]*parseGoHandle
+ // parseKeysByURI records the set of keys of parsedGoFiles that
+ // need to be invalidated for each URI.
+ // TODO(adonovan): opt: parseKey = ParseMode + URI, so this could
+ // be just a set of ParseModes, or we could loop over AllParseModes.
+ parseKeysByURI parseKeysByURIMap
- // TODO(rfindley): consider merging this with files to reduce burden on clone.
- symbols map[span.URI]*symbolHandle
+ // symbolizeHandles maps each file URI to a handle for the future
+ // result of computing the symbols declared in that file.
+ symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult]
- // packages maps a packageKey to a set of packageHandles to which that file belongs.
+ // packages maps a packageKey to a *packageHandle.
// It may be invalidated when a file's content changes.
- packages map[packageKey]*packageHandle
+ //
+ // Invariants to preserve:
+ // - packages.Get(id).meta == meta.metadata[id] for all ids
+ // - if a package is in packages, then all of its dependencies should also
+ // be in packages, unless there is a missing import
+ packages *persistent.Map // from packageKey to *packageHandle
- // actions maps an actionkey to its actionHandle.
- actions map[actionKey]*actionHandle
+ // isActivePackageCache maps package ID to the cached value if it is active or not.
+ // It may be invalidated when metadata changes or a new file is opened or closed.
+ isActivePackageCache isActivePackageCacheMap
+
+ // analyses maps an analysisKey (which identifies a package
+ // and a set of analyzers) to the handle for the future result
+ // of loading the package and analyzing it.
+ analyses *persistent.Map // from analysisKey to analysisHandle
// workspacePackages contains the workspace's packages, which are loaded
// when the view is created.
workspacePackages map[PackageID]PackagePath
+ // shouldLoad tracks packages that need to be reloaded, mapping a PackageID
+ // to the package paths that should be used to reload it
+ //
+ // When we try to load a package, we clear it from the shouldLoad map
+ // regardless of whether the load succeeded, to prevent endless loads.
+ shouldLoad map[PackageID][]PackagePath
+
// unloadableFiles keeps track of files that we've failed to load.
unloadableFiles map[span.URI]struct{}
// parseModHandles keeps track of any parseModHandles for the snapshot.
// The handles need not refer to only the view's go.mod file.
- parseModHandles map[span.URI]*parseModHandle
+ parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult]
// parseWorkHandles keeps track of any parseWorkHandles for the snapshot.
// The handles need not refer to only the view's go.work file.
- parseWorkHandles map[span.URI]*parseWorkHandle
+ parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult]
// Preserve go.mod-related handles to avoid garbage-collecting the results
// of various calls to the go command. The handles need not refer to only
// the view's go.mod file.
- modTidyHandles map[span.URI]*modTidyHandle
- modWhyHandles map[span.URI]*modWhyHandle
+ modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult]
+ modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult]
+ modVulnHandles *persistent.Map // from span.URI to *memoize.Promise[modVulnResult]
- workspace *workspace
- workspaceDirHandle *memoize.Handle
+ workspace *workspace // (not guarded by mu)
+
+ // The cached result of makeWorkspaceDir, created on demand and deleted by Snapshot.Destroy.
+ workspaceDir string
+ workspaceDirErr error
// knownSubdirs is the set of subdirectories in the workspace, used to
// create glob patterns for file watching.
- knownSubdirs map[span.URI]struct{}
+ knownSubdirs knownDirsSet
+ knownSubdirsPatternCache string
// unprocessedSubdirChanges are any changes that might affect the set of
// subdirectories in the workspace. They are not reflected to knownSubdirs
// during the snapshot cloning step as it can slow down cloning.
unprocessedSubdirChanges []*fileChange
}
-type packageKey struct {
- mode source.ParseMode
- id PackageID
+var globalSnapshotID uint64
+
+func nextSnapshotID() source.GlobalSnapshotID {
+ return source.GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1))
+}
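The global ID above comes from a process-wide atomic counter, so IDs are unique across all views and sessions without any locking. A minimal sketch of the same pattern (names here are illustrative, not the gopls API):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var globalID uint64

// nextID returns a process-wide unique, monotonically increasing ID.
// It is safe to call from multiple goroutines.
func nextID() uint64 {
	return atomic.AddUint64(&globalID, 1)
}

func main() {
	var wg sync.WaitGroup
	ids := make([]uint64, 8)
	for i := range ids {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			ids[i] = nextID()
		}()
	}
	wg.Wait()
	fmt.Println(ids) // eight distinct values from 1..8, in some order
}
```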
+
+var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
+
+// Acquire prevents the snapshot from being destroyed until the returned function is called.
+//
+// (s.Acquire().release() could instead be expressed as a pair of
+// method calls s.IncRef(); s.DecRef(). The latter has the advantage
+// that the DecRefs are fungible and don't require holding anything in
+// addition to the refcounted object s, but paradoxically that is also
+// an advantage of the current approach, which forces the caller to
+// consider the release function at every stage, making a reference
+// leak more obvious.)
+func (s *snapshot) Acquire() func() {
+ type uP = unsafe.Pointer
+ if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil {
+ log.Panicf("%d: acquire() after Destroy(%q)", s.globalID, *(*string)(destroyedBy))
+ }
+ s.refcount.Add(1)
+ return s.refcount.Done
+}
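Acquire is built on a sync.WaitGroup: each lease is an Add(1), the returned release function is the matching Done, and destruction simply Waits for the count to reach zero. A minimal sketch of that lifecycle, using a hypothetical resource type rather than the real snapshot:

```go
package main

import (
	"fmt"
	"sync"
)

// resource is reference-counted: every Acquire must be paired with a call
// to the release function it returns; destroy blocks until all leases end.
type resource struct {
	refcount sync.WaitGroup
	name     string
}

func (r *resource) Acquire() func() {
	r.refcount.Add(1)
	return r.refcount.Done
}

func (r *resource) destroy() {
	r.refcount.Wait() // wait for all outstanding leases
	fmt.Println("destroyed", r.name)
}

func main() {
	r := &resource{name: "snapshot"}
	release := r.Acquire() // reference held by the caller

	done := make(chan struct{})
	go func() {
		defer close(done)
		bg := r.Acquire() // second reference for a background task
		defer bg()
		fmt.Println("background work using", r.name)
	}()

	<-done
	release()   // last lease released...
	r.destroy() // ...so destroy returns immediately
}
```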
+
+func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) {
+ return p.Get(ctx, s)
+}
+
+// destroy waits for all leases on the snapshot to expire, then releases
+// any resources (reference counts and files) associated with it.
+// Snapshots being destroyed can be awaited using v.snapshotWG.
+//
+// TODO(adonovan): move this logic into the release function returned
+// by Acquire when the reference count becomes zero. (This would cost
+// us the destroyedBy debug info, unless we add it to the signature of
+// memoize.RefCounted.Acquire.)
+//
+// The destroyedBy argument is used for debugging.
+//
+// v.snapshotMu must be held while calling this function, in order to preserve
+// the invariants described by the docstring for v.snapshot.
+func (v *View) destroy(s *snapshot, destroyedBy string) {
+ v.snapshotWG.Add(1)
+ go func() {
+ defer v.snapshotWG.Done()
+ s.destroy(destroyedBy)
+ }()
+}
+
+func (s *snapshot) destroy(destroyedBy string) {
+ // Wait for all leases to end before commencing destruction.
+ s.refcount.Wait()
+
+ // Report bad state as a debugging aid.
+ // Not foolproof: another thread could acquire() at this moment.
+ type uP = unsafe.Pointer // looking forward to generics...
+ if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil {
+ log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.globalID, destroyedBy, *(*string)(old))
+ }
+
+ s.packages.Destroy()
+ s.isActivePackageCache.Destroy()
+ s.analyses.Destroy()
+ s.files.Destroy()
+ s.parsedGoFiles.Destroy()
+ s.parseKeysByURI.Destroy()
+ s.knownSubdirs.Destroy()
+ s.symbolizeHandles.Destroy()
+ s.parseModHandles.Destroy()
+ s.parseWorkHandles.Destroy()
+ s.modTidyHandles.Destroy()
+ s.modVulnHandles.Destroy()
+ s.modWhyHandles.Destroy()
+
+ if s.workspaceDir != "" {
+ if err := os.RemoveAll(s.workspaceDir); err != nil {
+ event.Error(context.Background(), "cleaning workspace dir", err)
+ }
+ }
}
-type actionKey struct {
- pkg packageKey
- analyzer *analysis.Analyzer
+func (s *snapshot) SequenceID() uint64 {
+ return s.sequenceID
}
-func (s *snapshot) ID() uint64 {
- return s.id
+func (s *snapshot) GlobalID() source.GlobalSnapshotID {
+ return s.globalID
}
func (s *snapshot) View() source.View {
@@ -144,12 +259,12 @@ func (s *snapshot) BackgroundContext() context.Context {
}
func (s *snapshot) FileSet() *token.FileSet {
- return s.view.session.cache.fset
+ return s.view.cache.fset
}
func (s *snapshot) ModFiles() []span.URI {
var uris []span.URI
- for modURI := range s.workspace.getActiveModFiles() {
+ for modURI := range s.workspace.ActiveModFiles() {
uris = append(uris, modURI)
}
return uris
@@ -164,16 +279,34 @@ func (s *snapshot) Templates() map[span.URI]source.VersionedFileHandle {
defer s.mu.Unlock()
tmpls := map[span.URI]source.VersionedFileHandle{}
- for k, fh := range s.files {
+ s.files.Range(func(k span.URI, fh source.VersionedFileHandle) {
if s.view.FileKind(fh) == source.Tmpl {
tmpls[k] = fh
}
- }
+ })
return tmpls
}
func (s *snapshot) ValidBuildConfiguration() bool {
- return validBuildConfiguration(s.view.rootURI, &s.view.workspaceInformation, s.workspace.getActiveModFiles())
+ // Since we only really understand the `go` command, if the user has a
+ // different GOPACKAGESDRIVER, assume that their configuration is valid.
+ if s.view.hasGopackagesDriver {
+ return true
+ }
+ // Check if the user is working within a module or if we have found
+ // multiple modules in the workspace.
+ if len(s.workspace.ActiveModFiles()) > 0 {
+ return true
+ }
+	// The user may have multiple directories in their GOPATH.
+	// Check if the workspace is within any of them.
+	// TODO(rfindley): this should probably be subject to "if GO111MODULE == off {...}".
+ for _, gp := range filepath.SplitList(s.view.gopath) {
+ if source.InDir(filepath.Join(gp, "src"), s.view.rootURI.Filename()) {
+ return true
+ }
+ }
+ return false
}
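The GOPATH fallback above splits GOPATH on the OS path-list separator and asks whether the workspace root lies under any $GOPATH/src directory. A rough standalone sketch of that check (the GOPATH entries and root are made up, and inDir is a simplified stand-in for the gopls helper):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// inDir reports whether path is dir or lies below it. The real helper in
// gopls also handles case-insensitivity and other filesystem subtleties.
func inDir(dir, path string) bool {
	rel, err := filepath.Rel(dir, path)
	return err == nil && rel != ".." &&
		!strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

// underAnyGopathSrc reports whether root is inside src/ of any GOPATH entry.
func underAnyGopathSrc(gopath, root string) bool {
	for _, gp := range filepath.SplitList(gopath) {
		if inDir(filepath.Join(gp, "src"), root) {
			return true
		}
	}
	return false
}

func main() {
	gopath := "/home/user/go" + string(filepath.ListSeparator) + "/opt/go-extra"
	fmt.Println(underAnyGopathSrc(gopath, "/opt/go-extra/src/example.com/m")) // true
	fmt.Println(underAnyGopathSrc(gopath, "/tmp/elsewhere"))                  // false
}
```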
// workspaceMode describes the way in which the snapshot's workspace should
@@ -190,7 +323,7 @@ func (s *snapshot) workspaceMode() workspaceMode {
// If the view is not in a module and contains no modules, but still has a
// valid workspace configuration, do not create the workspace module.
// It could be using GOPATH or a different build system entirely.
- if len(s.workspace.getActiveModFiles()) == 0 && validBuildConfiguration {
+ if len(s.workspace.ActiveModFiles()) == 0 && validBuildConfiguration {
return mode
}
mode |= moduleMode
@@ -227,7 +360,7 @@ func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packa
packages.NeedModule |
packages.LoadMode(packagesinternal.DepsErrors) |
packages.LoadMode(packagesinternal.ForTest),
- Fset: s.FileSet(),
+ Fset: nil, // we do our own parsing
Overlay: s.buildOverlay(),
ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
panic("go/packages must not be used to parse files")
@@ -245,7 +378,7 @@ func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packa
if typesinternal.SetUsesCgo(&types.Config{}) {
cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo)
}
- packagesinternal.SetGoCmdRunner(cfg, s.view.session.gocmdRunner)
+ packagesinternal.SetGoCmdRunner(cfg, s.view.gocmdRunner)
return cfg
}
@@ -256,7 +389,7 @@ func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.Invocatio
}
defer cleanup()
- return s.view.session.gocmdRunner.Run(ctx, *inv)
+ return s.view.gocmdRunner.Run(ctx, *inv)
}
func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
@@ -265,7 +398,7 @@ func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.Invocation
return err
}
defer cleanup()
- return s.view.session.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr)
+ return s.view.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr)
}
func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) {
@@ -286,7 +419,7 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri
invoke := func(args ...string) (*bytes.Buffer, error) {
inv.Verb = args[0]
inv.Args = args[1:]
- return s.view.session.gocmdRunner.Run(ctx, *inv)
+ return s.view.gocmdRunner.Run(ctx, *inv)
}
if err := run(invoke); err != nil {
return false, nil, nil, err
@@ -306,11 +439,25 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri
return true, modBytes, sumBytes, nil
}
+// goCommandInvocation populates inv with configuration for running go commands on the snapshot.
+//
+// TODO(rfindley): refactor this function to compose the required configuration
+// explicitly, rather than implicitly deriving it from flags and inv.
+//
+// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but
+// it used only after call to tempModFile. Clarify that it is only
+// non-nil on success.
func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
s.view.optionsMu.Lock()
allowModfileModificationOption := s.view.options.AllowModfileModifications
allowNetworkOption := s.view.options.AllowImplicitNetworkAccess
- inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.effectiveGo111Module)
+
+ // TODO(rfindley): this is very hard to follow, and may not even be doing the
+ // right thing: should inv.Env really trample view.options? Do we ever invoke
+ // this with a non-empty inv.Env?
+ //
+ // We should refactor to make it clearer that the correct env is being used.
+ inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.GO111MODULE())
inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...)
s.view.optionsMu.Unlock()
cleanup = func() {} // fallback
@@ -334,7 +481,6 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat
// - the working directory.
// - the -mod flag
// - the -modfile flag
- // - the -workfile flag
//
// These are dependent on a number of factors: whether we need to run in a
// synthetic workspace, whether flags are supported at the current go
@@ -349,7 +495,7 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat
if mode == source.LoadWorkspace {
switch s.workspace.moduleSource {
case legacyWorkspace:
- for m := range s.workspace.getActiveModFiles() { // range to access the only element
+ for m := range s.workspace.ActiveModFiles() { // range to access the only element
modURI = m
}
case goWorkWorkspace:
@@ -385,6 +531,9 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat
}
}
+ // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall
+ // back on the default behavior of vendorEnabled with an empty modURI. Figure
+ // out what is correct here and implement it explicitly.
vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent)
if err != nil {
return "", nil, cleanup, err
@@ -420,13 +569,15 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat
return "", nil, cleanup, source.ErrTmpModfileUnsupported
}
- // We should use -workfile if:
- // 1. We're not actively trying to mutate a modfile.
- // 2. We have an active go.work file.
- // 3. We're using at least Go 1.18.
+ // We should use -modfile if:
+ // - the workspace mode supports it
+ // - we're using a go.work file on go1.18+, or we need a temp mod file (for
+ // example, if running go mod tidy in a go.work workspace)
+ //
+ // TODO(rfindley): this is very hard to follow. Refactor.
useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18
if useWorkFile {
- // TODO(#51215): build a temp workfile and set GOWORK in the environment.
+ // Since we're running in the workspace root, the go command will resolve GOWORK automatically.
} else if useTempMod {
if modURI == "" {
return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir)
@@ -447,46 +598,65 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat
return tmpURI, inv, cleanup, nil
}
+// usesWorkspaceDir reports whether the snapshot should use a synthetic
+// workspace directory for running workspace go commands such as go list.
+//
+// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up
+// the latter, and deduplicate.
+func (s *snapshot) usesWorkspaceDir() bool {
+ switch s.workspace.moduleSource {
+ case legacyWorkspace:
+ return false
+ case goWorkWorkspace:
+ if s.view.goversion >= 18 {
+ return false
+ }
+ // Before go 1.18, the Go command did not natively support go.work files,
+ // so we 'fake' them with a workspace module.
+ }
+ return true
+}
+
func (s *snapshot) buildOverlay() map[string][]byte {
s.mu.Lock()
defer s.mu.Unlock()
overlays := make(map[string][]byte)
- for uri, fh := range s.files {
+ s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
overlay, ok := fh.(*overlay)
if !ok {
- continue
+ return
}
if overlay.saved {
- continue
+ return
}
// TODO(rstambler): Make sure not to send overlays outside of the current view.
overlays[uri.Filename()] = overlay.text
- }
+ })
return overlays
}
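buildOverlay collects the text of unsaved editor buffers into a filename-to-content map, which is the same shape go/packages accepts in its Overlay field. A hedged sketch of feeding such an overlay to a load; the directory and file contents are invented, and running it requires a working go command:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Overlay maps absolute file paths to in-memory (unsaved) contents,
	// letting the loader see editor buffers instead of what is on disk.
	overlay := map[string][]byte{
		"/home/user/proj/main.go": []byte("package main\n\nfunc main() {}\n"),
	}

	cfg := &packages.Config{
		Mode:    packages.NeedName | packages.NeedFiles,
		Dir:     "/home/user/proj",
		Overlay: overlay,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, p.GoFiles)
	}
}
```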
-func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string {
- var unsaved []string
- for uri, fh := range files {
- if overlay, ok := fh.(*overlay); ok && !overlay.saved {
- unsaved = append(unsaved, uri.Filename())
+// TypeCheck type-checks the specified packages in the given mode.
+func (s *snapshot) TypeCheck(ctx context.Context, mode source.TypecheckMode, ids ...PackageID) ([]source.Package, error) {
+ // Build all the handles...
+ var phs []*packageHandle
+ for _, id := range ids {
+ parseMode := source.ParseFull
+ if mode == source.TypecheckWorkspace {
+ parseMode = s.workspaceParseMode(id)
}
- }
- sort.Strings(unsaved)
- return hashContents([]byte(strings.Join(unsaved, "")))
-}
-
-func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) {
- ctx = event.Label(ctx, tag.URI.Of(uri))
- phs, err := s.packageHandlesForFile(ctx, uri, mode, includeTestVariants)
- if err != nil {
- return nil, err
+ ph, err := s.buildPackageHandle(ctx, id, parseMode)
+ if err != nil {
+ return nil, err
+ }
+ phs = append(phs, ph)
}
+
+ // ...then await them all.
var pkgs []source.Package
for _, ph := range phs {
- pkg, err := ph.check(ctx, s)
+ pkg, err := ph.await(ctx, s)
if err != nil {
return nil, err
}
@@ -495,291 +665,127 @@ func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode sourc
return pkgs, nil
}
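TypeCheck above is deliberately two-phase: it first builds all package handles (kicking off the work) and only then awaits each result, so independent packages can be checked concurrently. A generic sketch of that start-then-await shape using a toy promise; the work function is a stand-in, not the gopls type checker:

```go
package main

import (
	"fmt"
	"time"
)

// promise is a minimal future: start the work now, read the result later.
type promise struct{ ch chan string }

func start(id string) *promise {
	p := &promise{ch: make(chan string, 1)}
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for type checking
		p.ch <- "checked " + id
	}()
	return p
}

func (p *promise) await() string { return <-p.ch }

func main() {
	ids := []string{"a", "b", "c"}

	// Phase 1: build all the handles, starting work for each package...
	var ps []*promise
	for _, id := range ids {
		ps = append(ps, start(id))
	}

	// Phase 2: ...then await them all.
	for _, p := range ps {
		fmt.Println(p.await())
	}
}
```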
-func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, pkgPolicy source.PackageFilter) (source.Package, error) {
- ctx = event.Label(ctx, tag.URI.Of(uri))
-
- phs, err := s.packageHandlesForFile(ctx, uri, mode, false)
- if err != nil {
- return nil, err
- }
+func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source.Metadata, error) {
+ s.mu.Lock()
- if len(phs) < 1 {
- return nil, fmt.Errorf("no packages")
- }
+ // Start with the set of package associations derived from the last load.
+ ids := s.meta.ids[uri]
- ph := phs[0]
- for _, handle := range phs[1:] {
- switch pkgPolicy {
- case source.WidestPackage:
- if ph == nil || len(handle.CompiledGoFiles()) > len(ph.CompiledGoFiles()) {
- ph = handle
- }
- case source.NarrowestPackage:
- if ph == nil || len(handle.CompiledGoFiles()) < len(ph.CompiledGoFiles()) {
- ph = handle
- }
+ shouldLoad := false // whether any packages containing uri are marked 'shouldLoad'
+ for _, id := range ids {
+ if len(s.shouldLoad[id]) > 0 {
+ shouldLoad = true
}
}
- if ph == nil {
- return nil, fmt.Errorf("no packages in input")
- }
- return ph.check(ctx, s)
-}
+ // Check if uri is known to be unloadable.
+ _, unloadable := s.unloadableFiles[uri]
-func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) {
- // Check if we should reload metadata for the file. We don't invalidate IDs
- // (though we should), so the IDs will be a better source of truth than the
- // metadata. If there are no IDs for the file, then we should also reload.
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- if kind := s.view.FileKind(fh); kind != source.Go {
- return nil, fmt.Errorf("no packages for non-Go file %s (%v)", uri, kind)
- }
- knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
- if err != nil {
- return nil, err
- }
+ s.mu.Unlock()
- var phs []*packageHandle
- for _, id := range knownIDs {
- // Filter out any intermediate test variants. We typically aren't
- // interested in these packages for file= style queries.
- if m := s.getMetadata(id); m != nil && m.IsIntermediateTestVariant && !includeTestVariants {
- continue
- }
- var parseModes []source.ParseMode
- switch mode {
- case source.TypecheckAll:
- if s.workspaceParseMode(id) == source.ParseFull {
- parseModes = []source.ParseMode{source.ParseFull}
- } else {
- parseModes = []source.ParseMode{source.ParseExported, source.ParseFull}
- }
- case source.TypecheckFull:
- parseModes = []source.ParseMode{source.ParseFull}
- case source.TypecheckWorkspace:
- parseModes = []source.ParseMode{s.workspaceParseMode(id)}
- }
+ // Reload if loading is likely to improve the package associations for uri:
+ // - uri is not contained in any valid packages
+ // - ...or one of the packages containing uri is marked 'shouldLoad'
+ // - ...but uri is not unloadable
+ if (shouldLoad || len(ids) == 0) && !unloadable {
+ scope := fileLoadScope(uri)
+ err := s.load(ctx, false, scope)
- for _, parseMode := range parseModes {
- ph, err := s.buildPackageHandle(ctx, id, parseMode)
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
+ // Guard against failed loads due to context cancellation.
+ //
+ // Return the context error here as the current operation is no longer
+ // valid.
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return nil, ctxErr
}
- }
- return phs, nil
-}
-func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) {
- knownIDs := s.getIDsForURI(uri)
- reload := len(knownIDs) == 0
- for _, id := range knownIDs {
- // Reload package metadata if any of the metadata has missing
- // dependencies, in case something has changed since the last time we
- // reloaded it.
- if s.noValidMetadataForID(id) {
- reload = true
- break
- }
- // TODO(golang/go#36918): Previously, we would reload any package with
- // missing dependencies. This is expensive and results in too many
- // calls to packages.Load. Determine what we should do instead.
- }
- if reload {
- err := s.load(ctx, false, fileURI(uri))
+ // We must clear scopes after loading.
+ //
+ // TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded
+ // packages as loaded. We could do this from snapshot.load and avoid
+ // raciness.
+ s.clearShouldLoad(scope)
- if !s.useInvalidMetadata() && err != nil {
- return nil, err
- }
- // We've tried to reload and there are still no known IDs for the URI.
- // Return the load error, if there was one.
- knownIDs = s.getIDsForURI(uri)
- if len(knownIDs) == 0 {
- return nil, err
+ // Don't return an error here, as we may still return stale IDs.
+ // Furthermore, the result of MetadataForFile should be consistent upon
+ // subsequent calls, even if the file is marked as unloadable.
+ if err != nil && !errors.Is(err, errNoPackages) {
+ event.Error(ctx, "MetadataForFile", err)
}
}
- return knownIDs, nil
-}
-
-// Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has
-// issues with overlays that will cause confusing error messages if we reuse
-// old metadata.
-func (s *snapshot) useInvalidMetadata() bool {
- return s.view.goversion >= 13 && s.view.Options().ExperimentalUseInvalidMetadata
-}
-
-func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
- ids := make(map[PackageID]struct{})
- s.transitiveReverseDependencies(PackageID(id), ids)
-
- // Make sure to delete the original package ID from the map.
- delete(ids, PackageID(id))
- var pkgs []source.Package
- for id := range ids {
- pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
- if err != nil {
- return nil, err
+ // Retrieve the metadata.
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ ids = s.meta.ids[uri]
+ metas := make([]*source.Metadata, len(ids))
+ for i, id := range ids {
+ metas[i] = s.meta.metadata[id]
+ if metas[i] == nil {
+ panic("nil metadata")
}
- pkgs = append(pkgs, pkg)
}
- return pkgs, nil
-}
-
-func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source.ParseMode) (*pkg, error) {
- ph, err := s.buildPackageHandle(ctx, id, mode)
- if err != nil {
- return nil, err
+ // Metadata is only ever added by loading,
+ // so if we get here and still have
+ // no IDs, uri is unloadable.
+ if !unloadable && len(ids) == 0 {
+ s.unloadableFiles[uri] = struct{}{}
}
- return ph.check(ctx, s)
-}
-// transitiveReverseDependencies populates the ids map with package IDs
-// belonging to the provided package and its transitive reverse dependencies.
-func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID]struct{}) {
- if _, ok := ids[id]; ok {
- return
- }
- m := s.getMetadata(id)
- // Only use invalid metadata if we support it.
- if m == nil || !(m.Valid || s.useInvalidMetadata()) {
- return
- }
- ids[id] = struct{}{}
- importedBy := s.getImportedBy(id)
- for _, parentID := range importedBy {
- s.transitiveReverseDependencies(parentID, ids)
- }
-}
+ // Sort packages "narrowest" to "widest" (in practice: non-tests before tests).
+ sort.Slice(metas, func(i, j int) bool {
+ return len(metas[i].CompiledGoFiles) < len(metas[j].CompiledGoFiles)
+ })
-func (s *snapshot) getGoFile(key parseKey) *parseGoHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.goFiles[key]
+ return metas, nil
}
-func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- if existing, ok := s.goFiles[key]; ok {
- return existing
+func (s *snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
}
- s.goFiles[key] = pgh
- return pgh
-}
-
-func (s *snapshot) getParseModHandle(uri span.URI) *parseModHandle {
s.mu.Lock()
- defer s.mu.Unlock()
- return s.parseModHandles[uri]
-}
-
-func (s *snapshot) getParseWorkHandle(uri span.URI) *parseWorkHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.parseWorkHandles[uri]
-}
-
-func (s *snapshot) getModWhyHandle(uri span.URI) *modWhyHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.modWhyHandles[uri]
-}
-
-func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.modTidyHandles[uri]
-}
-
-func (s *snapshot) getImportedBy(id PackageID) []PackageID {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.getImportedByLocked(id)
-}
-
-func (s *snapshot) getImportedByLocked(id PackageID) []PackageID {
- // If we haven't rebuilt the import graph since creating the snapshot.
- if len(s.meta.importedBy) == 0 {
- s.rebuildImportGraph()
- }
- return s.meta.importedBy[id]
-}
+ meta := s.meta
+ s.mu.Unlock()
-func (s *snapshot) clearAndRebuildImportGraph() {
- s.mu.Lock()
- defer s.mu.Unlock()
+ var rdeps map[PackageID]*source.Metadata
+ if transitive {
+ rdeps = meta.reverseReflexiveTransitiveClosure(id)
- // Completely invalidate the original map.
- s.meta.importedBy = make(map[PackageID][]PackageID)
- s.rebuildImportGraph()
-}
+ // Remove the original package ID from the map.
+ // (Callers all want irreflexivity but it's easier
+ // to compute reflexively then subtract.)
+ delete(rdeps, id)
-func (s *snapshot) rebuildImportGraph() {
- for id, m := range s.meta.metadata {
- for _, importID := range m.Deps {
- s.meta.importedBy[importID] = append(s.meta.importedBy[importID], id)
+ } else {
+ // direct reverse dependencies
+ rdeps = make(map[PackageID]*source.Metadata)
+ for _, rdepID := range meta.importedBy[id] {
+ if rdep := meta.metadata[rdepID]; rdep != nil {
+ rdeps[rdepID] = rdep
+ }
}
}
-}
-
-func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- // If the package handle has already been cached,
- // return the cached handle instead of overriding it.
- if ph, ok := s.packages[ph.packageKey()]; ok {
- return ph
- }
- s.packages[ph.packageKey()] = ph
- return ph
-}
-
-func (s *snapshot) workspacePackageIDs() (ids []PackageID) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for id := range s.workspacePackages {
- ids = append(ids, id)
- }
- return ids
+ return rdeps, nil
}
-func (s *snapshot) activePackageIDs() (ids []PackageID) {
- if s.view.Options().MemoryMode == source.ModeNormal {
- return s.workspacePackageIDs()
- }
-
+func (s *snapshot) workspaceMetadata() (meta []*source.Metadata) {
s.mu.Lock()
defer s.mu.Unlock()
- seen := make(map[PackageID]bool)
for id := range s.workspacePackages {
- if s.isActiveLocked(id, seen) {
- ids = append(ids, id)
- }
+ meta = append(meta, s.meta.metadata[id])
}
- return ids
+ return meta
}
-func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) {
- if seen == nil {
- seen = make(map[PackageID]bool)
- }
- if seen, ok := seen[id]; ok {
+func (s *snapshot) isActiveLocked(id PackageID) (active bool) {
+ if seen, ok := s.isActivePackageCache.Get(id); ok {
return seen
}
defer func() {
- seen[id] = active
+ s.isActivePackageCache.Set(id, active)
}()
m, ok := s.meta.metadata[id]
if !ok {
@@ -790,14 +796,21 @@ func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active
return true
}
}
- for _, dep := range m.Deps {
- if s.isActiveLocked(dep, seen) {
+ // TODO(rfindley): it looks incorrect that we don't also check GoFiles here.
+ // If a CGo file is open, we want to consider the package active.
+ for _, dep := range m.DepsByPkgPath {
+ if s.isActiveLocked(dep) {
return true
}
}
return false
}
+func (s *snapshot) resetIsActivePackageLocked() {
+ s.isActivePackageCache.Destroy()
+ s.isActivePackageCache = newIsActivePackageCacheMap()
+}
+
const fileExtensions = "go,mod,sum,work"
func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
@@ -811,12 +824,18 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru
patterns := map[string]struct{}{
fmt.Sprintf("**/*.{%s}", extensions): {},
}
+
+ if s.view.explicitGowork != "" {
+ patterns[s.view.explicitGowork.Filename()] = struct{}{}
+ }
+
+ // Add a pattern for each Go module in the workspace that is not within the view.
dirs := s.workspace.dirs(ctx, s)
for _, dir := range dirs {
dirName := dir.Filename()
// If the directory is within the view's folder, we're already watching
- // it with the pattern above.
+ // it with the first pattern above.
if source.InDir(s.view.folder.Filename(), dirName) {
continue
}
@@ -830,17 +849,42 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru
// contain Go code (golang/go#42348). To handle this, explicitly watch all
// of the directories in the workspace. We find them by adding the
// directories of every file in the snapshot's workspace directories.
- var dirNames []string
- for _, uri := range s.getKnownSubdirs(dirs) {
- dirNames = append(dirNames, uri.Filename())
- }
- sort.Strings(dirNames)
- if len(dirNames) > 0 {
- patterns[fmt.Sprintf("{%s}", strings.Join(dirNames, ","))] = struct{}{}
+ // There may be thousands.
+ if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" {
+ patterns[pattern] = struct{}{}
}
+
return patterns
}
+func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // First, process any pending changes and update the set of known
+ // subdirectories.
+	// This may change the list of known subdirs and therefore invalidate the cache.
+ s.applyKnownSubdirsChangesLocked(wsDirs)
+
+ if s.knownSubdirsPatternCache == "" {
+ var builder strings.Builder
+ s.knownSubdirs.Range(func(uri span.URI) {
+ if builder.Len() == 0 {
+ builder.WriteString("{")
+ } else {
+ builder.WriteString(",")
+ }
+ builder.WriteString(uri.Filename())
+ })
+ if builder.Len() > 0 {
+ builder.WriteString("}")
+ s.knownSubdirsPatternCache = builder.String()
+ }
+ }
+
+ return s.knownSubdirsPatternCache
+}
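getKnownSubdirsPattern joins every known subdirectory into a single `{dir1,dir2,...}` glob, the form used for file-watching registrations, and caches the result until the set changes. A small sketch of building that pattern (the directories are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// braceGlob joins directories into a single "{a,b,c}" pattern, or returns
// "" when there is nothing to watch.
func braceGlob(dirs []string) string {
	var b strings.Builder
	for _, d := range dirs {
		if b.Len() == 0 {
			b.WriteString("{")
		} else {
			b.WriteString(",")
		}
		b.WriteString(d)
	}
	if b.Len() == 0 {
		return ""
	}
	b.WriteString("}")
	return b.String()
}

func main() {
	dirs := []string{"/proj/a", "/proj/b", "/proj/vendor/x"}
	fmt.Println(braceGlob(dirs)) // {/proj/a,/proj/b,/proj/vendor/x}
	fmt.Println(braceGlob(nil))  // "" (no pattern)
}
```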
+
// collectAllKnownSubdirs collects all of the subdirectories within the
// snapshot's workspace directories. None of the workspace directories are
// included.
@@ -850,18 +894,26 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
s.mu.Lock()
defer s.mu.Unlock()
- s.knownSubdirs = map[span.URI]struct{}{}
- for uri := range s.files {
+ s.knownSubdirs.Destroy()
+ s.knownSubdirs = newKnownDirsSet()
+ s.knownSubdirsPatternCache = ""
+ s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
s.addKnownSubdirLocked(uri, dirs)
- }
+ })
}
-func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI {
+func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet {
s.mu.Lock()
defer s.mu.Unlock()
// First, process any pending changes and update the set of known
// subdirectories.
+ s.applyKnownSubdirsChangesLocked(wsDirs)
+
+ return s.knownSubdirs.Clone()
+}
+
+func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) {
for _, c := range s.unprocessedSubdirChanges {
if c.isUnchanged {
continue
@@ -873,19 +925,13 @@ func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI {
}
}
s.unprocessedSubdirChanges = nil
-
- var result []span.URI
- for uri := range s.knownSubdirs {
- result = append(result, uri)
- }
- return result
}
func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
dir := filepath.Dir(uri.Filename())
// First check if the directory is already known, because then we can
// return early.
- if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok {
+ if s.knownSubdirs.Contains(span.URIFromPath(dir)) {
return
}
var matched span.URI
@@ -904,11 +950,12 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
break
}
uri := span.URIFromPath(dir)
- if _, ok := s.knownSubdirs[uri]; ok {
+ if s.knownSubdirs.Contains(uri) {
break
}
- s.knownSubdirs[uri] = struct{}{}
+ s.knownSubdirs.Insert(uri)
dir = filepath.Dir(dir)
+ s.knownSubdirsPatternCache = ""
}
}
@@ -916,11 +963,12 @@ func (s *snapshot) removeKnownSubdirLocked(uri span.URI) {
dir := filepath.Dir(uri.Filename())
for dir != "" {
uri := span.URIFromPath(dir)
- if _, ok := s.knownSubdirs[uri]; !ok {
+ if !s.knownSubdirs.Contains(uri) {
break
}
if info, _ := os.Stat(dir); info == nil {
- delete(s.knownSubdirs, uri)
+ s.knownSubdirs.Remove(uri)
+ s.knownSubdirsPatternCache = ""
}
dir = filepath.Dir(dir)
}
@@ -933,312 +981,172 @@ func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI
s.mu.Lock()
defer s.mu.Unlock()
- for uri := range s.files {
+ s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
if source.InDir(dir.Filename(), uri.Filename()) {
files = append(files, uri)
}
- }
+ })
return files
}
-func (s *snapshot) workspacePackageHandles(ctx context.Context) ([]*packageHandle, error) {
+func (s *snapshot) ActiveMetadata(ctx context.Context) ([]*source.Metadata, error) {
if err := s.awaitLoaded(ctx); err != nil {
return nil, err
}
- var phs []*packageHandle
- for _, pkgID := range s.workspacePackageIDs() {
- ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
- }
- return phs, nil
-}
-func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) {
- phs, err := s.activePackageHandles(ctx)
- if err != nil {
- return nil, err
- }
- var pkgs []source.Package
- for _, ph := range phs {
- pkg, err := ph.check(ctx, s)
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
- var phs []*packageHandle
- for _, pkgID := range s.activePackageIDs() {
- ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
- }
- return phs, nil
-}
-
-func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) {
- result := make(map[span.URI][]source.Symbol)
-
- // Keep going on errors, but log the first failure. Partial symbol results
- // are better than no symbol results.
- var firstErr error
- for uri, f := range s.files {
- sh := s.buildSymbolHandle(ctx, f)
- v, err := sh.handle.Get(ctx, s.generation, s)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
- data := v.(*symbolData)
- result[uri] = data.symbols
- }
- if firstErr != nil {
- event.Error(ctx, "getting snapshot symbols", firstErr)
+ if s.view.Options().MemoryMode == source.ModeNormal {
+ return s.workspaceMetadata(), nil
}
- return result, nil
-}
-func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]source.Metadata, error) {
- knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
- if err != nil {
- return nil, err
- }
- var mds []source.Metadata
- for _, id := range knownIDs {
- md := s.getMetadata(id)
- // TODO(rfindley): knownIDs and metadata should be in sync, but existing
- // code is defensive of nil metadata.
- if md != nil {
- mds = append(mds, md)
+ // ModeDegradeClosed
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ var active []*source.Metadata
+ for id := range s.workspacePackages {
+ if s.isActiveLocked(id) {
+ active = append(active, s.Metadata(id))
}
}
- return mds, nil
+ return active, nil
}
-func (s *snapshot) KnownPackages(ctx context.Context) ([]source.Package, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
-
- // The WorkspaceSymbols implementation relies on this function returning
- // workspace packages first.
- ids := s.workspacePackageIDs()
+// Symbols extracts and returns the symbols for each Go file in the snapshot.
+func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol {
+ // Read the set of Go files out of the snapshot.
+ var goFiles []source.VersionedFileHandle
s.mu.Lock()
- for id := range s.meta.metadata {
- if _, ok := s.workspacePackages[id]; ok {
- continue
+ s.files.Range(func(uri span.URI, f source.VersionedFileHandle) {
+ if s.View().FileKind(f) == source.Go {
+ goFiles = append(goFiles, f)
}
- ids = append(ids, id)
- }
+ })
s.mu.Unlock()
- var pkgs []source.Package
- for _, id := range ids {
- pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) {
- // Don't reload workspace package metadata.
- // This function is meant to only return currently cached information.
- s.AwaitInitialized(ctx)
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- results := map[string]source.Package{}
- for _, ph := range s.packages {
- cachedPkg, err := ph.cached(s.generation)
- if err != nil {
- continue
- }
- for importPath, newPkg := range cachedPkg.imports {
- if oldPkg, ok := results[string(importPath)]; ok {
- // Using the same trick as NarrowestPackage, prefer non-variants.
- if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
- results[string(importPath)] = newPkg
- }
- } else {
- results[string(importPath)] = newPkg
+ // Symbolize them in parallel.
+ var (
+ group errgroup.Group
+ nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU
+ resultMu sync.Mutex
+ result = make(map[span.URI][]source.Symbol)
+ )
+ group.SetLimit(nprocs)
+ for _, f := range goFiles {
+ f := f
+ group.Go(func() error {
+ symbols, err := s.symbolize(ctx, f)
+ if err != nil {
+ return err
}
- }
- }
- return results, nil
-}
-
-func (s *snapshot) GoModForFile(uri span.URI) span.URI {
- return moduleForURI(s.workspace.activeModFiles, uri)
-}
-
-func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI {
- var match span.URI
- for modURI := range modFiles {
- if !source.InDir(dirURI(modURI).Filename(), uri.Filename()) {
- continue
- }
- if len(modURI) > len(match) {
- match = modURI
- }
- }
- return match
-}
-
-func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := packageKey{
- id: id,
- mode: mode,
- }
- return s.packages[key]
-}
-
-func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.symbols[uri]
-}
-
-func (s *snapshot) addSymbolHandle(sh *symbolHandle) *symbolHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- uri := sh.fh.URI()
- // If the package handle has already been cached,
- // return the cached handle instead of overriding it.
- if sh, ok := s.symbols[uri]; ok {
- return sh
- }
- s.symbols[uri] = sh
- return sh
-}
-
-func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := actionKey{
- pkg: packageKey{
- id: id,
- mode: m,
- },
- analyzer: a,
- }
- return s.actions[key]
-}
-
-func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := actionKey{
- analyzer: ah.analyzer,
- pkg: packageKey{
- id: ah.pkg.m.ID,
- mode: ah.pkg.mode,
- },
+ resultMu.Lock()
+ result[f.URI()] = symbols
+ resultMu.Unlock()
+ return nil
+ })
}
- if ah, ok := s.actions[key]; ok {
- return ah
+ // Keep going on errors, but log the first failure.
+ // Partial results are better than no symbol results.
+ if err := group.Wait(); err != nil {
+ event.Error(ctx, "getting snapshot symbols", err)
}
- s.actions[key] = ah
- return ah
+ return result
}
-func (s *snapshot) getIDsForURI(uri span.URI) []PackageID {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.meta.ids[uri]
-}
+func (s *snapshot) AllMetadata(ctx context.Context) ([]*source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
-func (s *snapshot) getMetadata(id PackageID) *KnownMetadata {
s.mu.Lock()
- defer s.mu.Unlock()
+ g := s.meta
+ s.mu.Unlock()
- return s.meta.metadata[id]
+ meta := make([]*source.Metadata, 0, len(g.metadata))
+ for _, m := range g.metadata {
+ meta = append(meta, m)
+ }
+ return meta, nil
}
-func (s *snapshot) shouldLoad(scope interface{}) bool {
+func (s *snapshot) CachedImportPaths(ctx context.Context) (map[PackagePath]source.Package, error) {
+ // Don't reload workspace package metadata.
+ // This function is meant to only return currently cached information.
+ s.AwaitInitialized(ctx)
+
s.mu.Lock()
defer s.mu.Unlock()
- switch scope := scope.(type) {
- case PackagePath:
- var meta *KnownMetadata
- for _, m := range s.meta.metadata {
- if m.PkgPath != scope {
- continue
- }
- meta = m
+ results := map[PackagePath]source.Package{}
+ s.packages.Range(func(_, v interface{}) {
+ cachedPkg, err := v.(*packageHandle).cached()
+ if err != nil {
+ return
}
- if meta == nil || meta.ShouldLoad {
- return true
+ for _, newPkg := range cachedPkg.deps {
+ pkgPath := newPkg.PkgPath()
+ if oldPkg, ok := results[pkgPath]; ok {
+ // Using the same trick as NarrowestPackage, prefer non-variants.
+ if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
+ results[pkgPath] = newPkg
+ }
+ } else {
+ results[pkgPath] = newPkg
+ }
}
- return false
- case fileURI:
- uri := span.URI(scope)
- ids := s.meta.ids[uri]
- if len(ids) == 0 {
- return true
+ })
+ return results, nil
+}
+
+// TODO(rfindley): clarify that this is only active modules. Or update to just
+// use findRootPattern.
+func (s *snapshot) GoModForFile(uri span.URI) span.URI {
+ return moduleForURI(s.workspace.activeModFiles, uri)
+}
+
+func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI {
+ var match span.URI
+ for modURI := range modFiles {
+ if !source.InDir(span.Dir(modURI).Filename(), uri.Filename()) {
+ continue
}
- for _, id := range ids {
- m, ok := s.meta.metadata[id]
- if !ok || m.ShouldLoad {
- return true
- }
+ if len(modURI) > len(match) {
+ match = modURI
}
- return false
- default:
- return true
}
+ return match
+}
+
+func (s *snapshot) Metadata(id PackageID) *source.Metadata {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.meta.metadata[id]
}
-func (s *snapshot) clearShouldLoad(scope interface{}) {
+// clearShouldLoad clears package IDs that no longer need to be reloaded after
+// the given scopes have been loaded.
+func (s *snapshot) clearShouldLoad(scopes ...loadScope) {
s.mu.Lock()
defer s.mu.Unlock()
- switch scope := scope.(type) {
- case PackagePath:
- var meta *KnownMetadata
- for _, m := range s.meta.metadata {
- if m.PkgPath == scope {
- meta = m
+ for _, scope := range scopes {
+ switch scope := scope.(type) {
+ case packageLoadScope:
+ scopePath := PackagePath(scope)
+ var toDelete []PackageID
+ for id, pkgPaths := range s.shouldLoad {
+ for _, pkgPath := range pkgPaths {
+ if pkgPath == scopePath {
+ toDelete = append(toDelete, id)
+ }
+ }
}
- }
- if meta == nil {
- return
- }
- meta.ShouldLoad = false
- case fileURI:
- uri := span.URI(scope)
- ids := s.meta.ids[uri]
- if len(ids) == 0 {
- return
- }
- for _, id := range ids {
- if m, ok := s.meta.metadata[id]; ok {
- m.ShouldLoad = false
+ for _, id := range toDelete {
+ delete(s.shouldLoad, id)
+ }
+ case fileLoadScope:
+ uri := span.URI(scope)
+ ids := s.meta.ids[uri]
+ for _, id := range ids {
+ delete(s.shouldLoad, id)
}
}
}
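
Aside (not part of the patch): the new Symbols method above fans per-file symbolization out over a bounded worker pool via errgroup's SetLimit, collecting results under a mutex. A self-contained sketch of the same pattern, using placeholder per-file work instead of gopls's symbolize:

package main

import (
	"fmt"
	"runtime"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	files := []string{"a.go", "b.go", "c.go"} // placeholder inputs

	var (
		group    errgroup.Group
		resultMu sync.Mutex
		result   = make(map[string]int)
	)
	// The work is a mix of I/O and CPU, so allow more workers than CPUs.
	group.SetLimit(2 * runtime.GOMAXPROCS(0))

	for _, f := range files {
		f := f // capture the loop variable for the closure
		group.Go(func() error {
			n := len(f) // stands in for the real per-file symbolization
			resultMu.Lock()
			result[f] = n
			resultMu.Unlock()
			return nil
		})
	}
	// As in the patch, partial results are kept and the first error is logged.
	if err := group.Wait(); err != nil {
		fmt.Println("symbolize error:", err)
	}
	fmt.Println(result)
}
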
@@ -1247,65 +1155,14 @@ func (s *snapshot) clearShouldLoad(scope interface{}) {
// noValidMetadataForURILocked reports whether there is no valid metadata for
// the given URI.
func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool {
- ids, ok := s.meta.ids[uri]
- if !ok {
- return true
- }
- for _, id := range ids {
- if m, ok := s.meta.metadata[id]; ok && m.Valid {
+ for _, id := range s.meta.ids[uri] {
+ if _, ok := s.meta.metadata[id]; ok {
return false
}
}
return true
}
-// noValidMetadataForID reports whether there is no valid metadata for the
-// given ID.
-func (s *snapshot) noValidMetadataForID(id PackageID) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.noValidMetadataForIDLocked(id)
-}
-
-func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool {
- m := s.meta.metadata[id]
- return m == nil || !m.Valid
-}
-
-// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI.
-// Any existing invalid IDs are removed from the set of known IDs. IDs that are
-// not "command-line-arguments" are preferred, so if a new ID comes in for a
-// URI that previously only had "command-line-arguments", the new ID will
-// replace the "command-line-arguments" ID.
-func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) {
- for uri := range uris {
- // Collect the new set of IDs, preserving any valid existing IDs.
- newIDs := []PackageID{id}
- for _, existingID := range s.meta.ids[uri] {
- // Don't set duplicates of the same ID.
- if existingID == id {
- continue
- }
- // If the package previously only had a command-line-arguments ID,
- // delete the command-line-arguments workspace package.
- if source.IsCommandLineArguments(string(existingID)) {
- delete(s.workspacePackages, existingID)
- continue
- }
- // If the metadata for an existing ID is invalid, and we are
- // setting metadata for a new, valid ID--don't preserve the old ID.
- if m, ok := s.meta.metadata[existingID]; !ok || !m.Valid {
- continue
- }
- newIDs = append(newIDs, existingID)
- }
- sort.Slice(newIDs, func(i, j int) bool {
- return newIDs[i] < newIDs[j]
- })
- s.meta.ids[uri] = newIDs
- }
-}
-
func (s *snapshot) isWorkspacePackage(id PackageID) bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1315,12 +1172,13 @@ func (s *snapshot) isWorkspacePackage(id PackageID) bool {
}
func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle {
- f := s.view.getFile(uri)
+ uri, _ = s.view.canonicalURI(uri, true)
s.mu.Lock()
defer s.mu.Unlock()
- return s.files[f.URI()]
+ result, _ := s.files.Get(uri)
+ return result
}
// GetVersionedFile returns a File for the given URI. If the file is unknown it
@@ -1329,32 +1187,29 @@ func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle {
// GetVersionedFile succeeds even if the file does not exist. A non-nil error return
// indicates some type of internal error, for example if ctx is cancelled.
func (s *snapshot) GetVersionedFile(ctx context.Context, uri span.URI) (source.VersionedFileHandle, error) {
- f := s.view.getFile(uri)
+ uri, _ = s.view.canonicalURI(uri, true)
s.mu.Lock()
defer s.mu.Unlock()
- return s.getFileLocked(ctx, f)
-}
-// GetFile implements the fileSource interface by wrapping GetVersionedFile.
-func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- return s.GetVersionedFile(ctx, uri)
-}
-
-func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) {
- if fh, ok := s.files[f.URI()]; ok {
+ if fh, ok := s.files.Get(uri); ok {
return fh, nil
}
- fh, err := s.view.session.cache.getFile(ctx, f.URI())
+ fh, err := s.view.cache.getFile(ctx, uri) // read the file
if err != nil {
return nil, err
}
closed := &closedFile{fh}
- s.files[f.URI()] = closed
+ s.files.Set(uri, closed)
return closed, nil
}
+// GetFile implements the fileSource interface by wrapping GetVersionedFile.
+func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ return s.GetVersionedFile(ctx, uri)
+}
+
func (s *snapshot) IsOpen(uri span.URI) bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1367,35 +1222,29 @@ func (s *snapshot) openFiles() []source.VersionedFileHandle {
defer s.mu.Unlock()
var open []source.VersionedFileHandle
- for _, fh := range s.files {
- if s.isOpenLocked(fh.URI()) {
+ s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
+ if isFileOpen(fh) {
open = append(open, fh)
}
- }
+ })
return open
}
func (s *snapshot) isOpenLocked(uri span.URI) bool {
- _, open := s.files[uri].(*overlay)
+ fh, _ := s.files.Get(uri)
+ return isFileOpen(fh)
+}
+
+func isFileOpen(fh source.VersionedFileHandle) bool {
+ _, open := fh.(*overlay)
return open
}
func (s *snapshot) awaitLoaded(ctx context.Context) error {
loadErr := s.awaitLoadedAllErrors(ctx)
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // If we still have absolutely no metadata, check if the view failed to
- // initialize and return any errors.
- if s.useInvalidMetadata() && len(s.meta.metadata) > 0 {
- return nil
- }
- for _, m := range s.meta.metadata {
- if m.Valid {
- return nil
- }
- }
+ // TODO(rfindley): eliminate this function as part of simplifying
+ // CriticalErrors.
if loadErr != nil {
return loadErr.MainError
}
@@ -1403,6 +1252,10 @@ func (s *snapshot) awaitLoaded(ctx context.Context) error {
}
func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
+ if wsErr := s.workspace.criticalError(ctx, s); wsErr != nil {
+ return wsErr
+ }
+
loadErr := s.awaitLoadedAllErrors(ctx)
if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) {
return nil
@@ -1411,8 +1264,8 @@ func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
// Even if packages didn't fail to load, we still may want to show
// additional warnings.
if loadErr == nil {
- wsPkgs, _ := s.ActivePackages(ctx)
- if msg := shouldShowAdHocPackagesWarning(s, wsPkgs); msg != "" {
+ active, _ := s.ActiveMetadata(ctx)
+ if msg := shouldShowAdHocPackagesWarning(s, active); msg != "" {
return &source.CriticalError{
MainError: errors.New(msg),
}
@@ -1421,37 +1274,65 @@ func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
// with the user's workspace layout. Workspace packages that only have the
// ID "command-line-arguments" are usually a symptom of a bad workspace
// configuration.
- if containsCommandLineArguments(wsPkgs) {
- return s.workspaceLayoutError(ctx)
+ //
+ // This heuristic is path-dependent: we only get command-line-arguments
+ // packages when we've loaded using file scopes, which only occurs
+ // on-demand or via orphaned file reloading.
+ //
+ // TODO(rfindley): re-evaluate this heuristic.
+ if containsCommandLineArguments(active) {
+ err, diags := s.workspaceLayoutError(ctx)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil // see the API documentation for source.Snapshot
+ }
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
}
return nil
}
if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") {
- return s.workspaceLayoutError(ctx)
+ err, diags := s.workspaceLayoutError(ctx)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil // see the API documentation for source.Snapshot
+ }
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
}
return loadErr
}
+// A portion of this text is expected by TestBrokenWorkspace_OutsideModule.
const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src.
If you are using modules, please open your editor to a directory in your module.
If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.`
-func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, pkgs []source.Package) string {
- if snapshot.ValidBuildConfiguration() {
- return ""
- }
- for _, pkg := range pkgs {
- if len(pkg.MissingDependencies()) > 0 {
- return adHocPackagesWarning
+func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, active []*source.Metadata) string {
+ if !snapshot.ValidBuildConfiguration() {
+ for _, m := range active {
+ // A blank entry in DepsByImpPath
+ // indicates a missing dependency.
+ for _, importID := range m.DepsByImpPath {
+ if importID == "" {
+ return adHocPackagesWarning
+ }
+ }
}
}
return ""
}
-func containsCommandLineArguments(pkgs []source.Package) bool {
- for _, pkg := range pkgs {
- if source.IsCommandLineArguments(pkg.ID()) {
+func containsCommandLineArguments(metas []*source.Metadata) bool {
+ for _, m := range metas {
+ if source.IsCommandLineArguments(m.ID) {
return true
}
}
@@ -1462,38 +1343,47 @@ func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalErr
// Do not return results until the snapshot's view has been initialized.
s.AwaitInitialized(ctx)
- // TODO(rstambler): Should we be more careful about returning the
+ // TODO(rfindley): Should we be more careful about returning the
// initialization error? Is it possible for the initialization error to be
// corrected without a successful reinitialization?
- s.mu.Lock()
- initializedErr := s.initializedErr
- s.mu.Unlock()
- if initializedErr != nil {
- return initializedErr
+ if err := s.getInitializationError(); err != nil {
+ return err
}
+ // TODO(rfindley): revisit this handling. Calling reloadWorkspace with a
+ // cancelled context should have the same effect, so this preemptive handling
+ // should not be necessary.
+ //
+ // Also: GetCriticalError ignores context cancellation errors. Should we be
+ // returning nil here?
if ctx.Err() != nil {
return &source.CriticalError{MainError: ctx.Err()}
}
+ // TODO(rfindley): reloading is not idempotent: if we try to reload or load
+ // orphaned files below and fail, we won't try again. For that reason, we
+ // could get different results from subsequent calls to this function, which
+ // may cause critical errors to be suppressed.
+
if err := s.reloadWorkspace(ctx); err != nil {
diags := s.extractGoCommandErrors(ctx, err)
return &source.CriticalError{
- MainError: err,
- DiagList: diags,
+ MainError: err,
+ Diagnostics: diags,
}
}
- if err := s.reloadOrphanedFiles(ctx); err != nil {
+
+ if err := s.reloadOrphanedOpenFiles(ctx); err != nil {
diags := s.extractGoCommandErrors(ctx, err)
return &source.CriticalError{
- MainError: err,
- DiagList: diags,
+ MainError: err,
+ Diagnostics: diags,
}
}
return nil
}
-func (s *snapshot) getInitializationError(ctx context.Context) *source.CriticalError {
+func (s *snapshot) getInitializationError() *source.CriticalError {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1513,50 +1403,53 @@ func (s *snapshot) AwaitInitialized(ctx context.Context) {
// reloadWorkspace reloads the metadata for all invalidated workspace packages.
func (s *snapshot) reloadWorkspace(ctx context.Context) error {
- // See which of the workspace packages are missing metadata.
+ var scopes []loadScope
+ var seen map[PackagePath]bool
s.mu.Lock()
- missingMetadata := len(s.workspacePackages) == 0 || len(s.meta.metadata) == 0
- pkgPathSet := map[PackagePath]struct{}{}
- for id, pkgPath := range s.workspacePackages {
- if m, ok := s.meta.metadata[id]; ok && m.Valid {
- continue
- }
- missingMetadata = true
-
- // Don't try to reload "command-line-arguments" directly.
- if source.IsCommandLineArguments(string(pkgPath)) {
- continue
+ for _, pkgPaths := range s.shouldLoad {
+ for _, pkgPath := range pkgPaths {
+ if seen == nil {
+ seen = make(map[PackagePath]bool)
+ }
+ if seen[pkgPath] {
+ continue
+ }
+ seen[pkgPath] = true
+ scopes = append(scopes, packageLoadScope(pkgPath))
}
- pkgPathSet[pkgPath] = struct{}{}
}
s.mu.Unlock()
+ if len(scopes) == 0 {
+ return nil
+ }
+
// If the view's build configuration is invalid, we cannot reload by
// package path. Just reload the directory instead.
- if missingMetadata && !s.ValidBuildConfiguration() {
- return s.load(ctx, false, viewLoadScope("LOAD_INVALID_VIEW"))
+ if !s.ValidBuildConfiguration() {
+ scopes = []loadScope{viewLoadScope("LOAD_INVALID_VIEW")}
}
- if len(pkgPathSet) == 0 {
- return nil
- }
+ err := s.load(ctx, false, scopes...)
- var pkgPaths []interface{}
- for pkgPath := range pkgPathSet {
- pkgPaths = append(pkgPaths, pkgPath)
+ // Unless the context was canceled, clear the shouldLoad entries for all
+ // of the metadata we attempted to load.
+ if !errors.Is(err, context.Canceled) {
+ s.clearShouldLoad(scopes...)
}
- return s.load(ctx, false, pkgPaths...)
+
+ return err
}
-func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
+func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error {
// When we load ./... or a package path directly, we may not get packages
// that exist only in overlays. As a workaround, we search all of the files
// available in the snapshot and reload their metadata individually using a
// file= query if the metadata is unavailable.
- files := s.orphanedFiles()
+ files := s.orphanedOpenFiles()
// Files without a valid package declaration can't be loaded. Don't try.
- var scopes []interface{}
+ var scopes []loadScope
for _, file := range files {
pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
if err != nil {
@@ -1565,7 +1458,8 @@ func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
if !pgf.File.Package.IsValid() {
continue
}
- scopes = append(scopes, fileURI(file.URI()))
+
+ scopes = append(scopes, fileLoadScope(file.URI()))
}
if len(scopes) == 0 {
@@ -1589,7 +1483,7 @@ func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
s.mu.Lock()
for _, scope := range scopes {
- uri := span.URI(scope.(fileURI))
+ uri := span.URI(scope.(fileLoadScope))
if s.noValidMetadataForURILocked(uri) {
s.unloadableFiles[uri] = struct{}{}
}
@@ -1599,84 +1493,56 @@ func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
return nil
}
-func (s *snapshot) orphanedFiles() []source.VersionedFileHandle {
+func (s *snapshot) orphanedOpenFiles() []source.VersionedFileHandle {
s.mu.Lock()
defer s.mu.Unlock()
var files []source.VersionedFileHandle
- for uri, fh := range s.files {
+ s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
+ // Only consider open files, which will be represented as overlays.
+ if _, isOverlay := fh.(*overlay); !isOverlay {
+ return
+ }
// Don't try to reload metadata for go.mod files.
if s.view.FileKind(fh) != source.Go {
- continue
+ return
}
// If the URI doesn't belong to this view, then it's not in a workspace
// package and should not be reloaded directly.
- if !contains(s.view.session.viewsOf(uri), s.view) {
- continue
- }
- // If the file is not open and is in a vendor directory, don't treat it
- // like a workspace package.
- if _, ok := fh.(*overlay); !ok && inVendor(uri) {
- continue
+ if !source.InDir(s.view.folder.Filename(), uri.Filename()) {
+ return
}
// Don't reload metadata for files we've already deemed unloadable.
if _, ok := s.unloadableFiles[uri]; ok {
- continue
+ return
}
if s.noValidMetadataForURILocked(uri) {
files = append(files, fh)
}
- }
+ })
return files
}
-func contains(views []*View, view *View) bool {
- for _, v := range views {
- if v == view {
- return true
- }
- }
- return false
-}
-
+// TODO(golang/go#53756): this function needs to consider more than just the
+// absolute URI, for example:
+// - the position of /vendor/ with respect to the relevant module root
+// - whether or not go.work is in use (as vendoring isn't supported in workspace mode)
+//
+// Most likely, each call site of inVendor needs to be reconsidered to
+// understand and correctly implement the desired behavior.
func inVendor(uri span.URI) bool {
- if !strings.Contains(string(uri), "/vendor/") {
- return false
- }
- // Only packages in _subdirectories_ of /vendor/ are considered vendored
+ _, after, found := cut(string(uri), "/vendor/")
+ // Only subdirectories of /vendor/ are considered vendored
// (/vendor/a/foo.go is vendored, /vendor/foo.go is not).
- split := strings.Split(string(uri), "/vendor/")
- if len(split) < 2 {
- return false
- }
- return strings.Contains(split[1], "/")
-}
-
-func generationName(v *View, snapshotID uint64) string {
- return fmt.Sprintf("v%v/%v", v.id, snapshotID)
+ return found && strings.Contains(after, "/")
}
-// checkSnapshotLocked verifies that some invariants are preserved on the
-// snapshot.
-func checkSnapshotLocked(ctx context.Context, s *snapshot) {
- // Check that every go file for a workspace package is identified as
- // belonging to that workspace package.
- for wsID := range s.workspacePackages {
- if m, ok := s.meta.metadata[wsID]; ok {
- for _, uri := range m.GoFiles {
- found := false
- for _, id := range s.meta.ids[uri] {
- if id == wsID {
- found = true
- break
- }
- }
- if !found {
- log.Error.Logf(ctx, "workspace package %v not associated with %v", wsID, uri)
- }
- }
- }
+// TODO(adonovan): replace with strings.Cut when we can assume go1.18.
+func cut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
}
+ return s, "", false
}
// unappliedChanges is a file source that handles an uncloned snapshot.
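
Aside (not part of the patch): the cut helper above is a stand-in for strings.Cut, which requires Go 1.18. Under that assumption, the vendored-path check reduces to the following sketch (sample paths are invented):

package main

import (
	"fmt"
	"strings"
)

// inVendor reports whether path points into a subdirectory of a /vendor/
// tree: /vendor/a/foo.go is vendored, /vendor/foo.go is not.
func inVendor(path string) bool {
	_, after, found := strings.Cut(path, "/vendor/")
	return found && strings.Contains(after, "/")
}

func main() {
	fmt.Println(inVendor("file:///m/vendor/example.com/a/foo.go")) // true
	fmt.Println(inVendor("file:///m/vendor/modules.txt"))          // false
}
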
@@ -1692,9 +1558,11 @@ func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.F
return ac.originalSnapshot.GetFile(ctx, uri)
}
-func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot {
- var vendorChanged bool
- newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{
+func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
+ ctx, done := event.Start(ctx, "snapshot.clone")
+ defer done()
+
+ newWorkspace, reinit := s.workspace.Clone(ctx, changes, &unappliedChanges{
originalSnapshot: s,
changes: changes,
})
@@ -1702,123 +1570,134 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
s.mu.Lock()
defer s.mu.Unlock()
- checkSnapshotLocked(ctx, s)
+ // Changes to the vendor tree may require reinitialization,
+ // either because of an initialization error
+ // (e.g. "inconsistent vendoring detected"), or because
+ // one or more modules may have moved into or out of the
+ // vendor tree after 'go mod vendor' or 'rm -fr vendor/'.
+ for uri := range changes {
+ if inVendor(uri) && s.initializedErr != nil ||
+ strings.HasSuffix(string(uri), "/vendor/modules.txt") {
+ reinit = true
+ break
+ }
+ }
- newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1))
bgCtx, cancel := context.WithCancel(bgCtx)
result := &snapshot{
- id: s.id + 1,
- generation: newGen,
- view: s.view,
- backgroundCtx: bgCtx,
- cancel: cancel,
- builtin: s.builtin,
- initializeOnce: s.initializeOnce,
- initializedErr: s.initializedErr,
- meta: NewMetadataGraph(),
- packages: make(map[packageKey]*packageHandle, len(s.packages)),
- actions: make(map[actionKey]*actionHandle, len(s.actions)),
- files: make(map[span.URI]source.VersionedFileHandle, len(s.files)),
- goFiles: make(map[parseKey]*parseGoHandle, len(s.goFiles)),
- symbols: make(map[span.URI]*symbolHandle, len(s.symbols)),
- workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
- unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
- parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)),
- parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)),
- modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)),
- modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)),
- knownSubdirs: make(map[span.URI]struct{}, len(s.knownSubdirs)),
- workspace: newWorkspace,
- }
-
- if !workspaceChanged && s.workspaceDirHandle != nil {
- result.workspaceDirHandle = s.workspaceDirHandle
- newGen.Inherit(s.workspaceDirHandle)
- }
-
- // Copy all of the FileHandles.
- for k, v := range s.files {
- result.files[k] = v
- }
- for k, v := range s.symbols {
- if change, ok := changes[k]; ok {
- if change.exists {
- result.symbols[k] = result.buildSymbolHandle(ctx, change.fileHandle)
- }
- continue
- }
- newGen.Inherit(v.handle)
- result.symbols[k] = v
- }
+ sequenceID: s.sequenceID + 1,
+ globalID: nextSnapshotID(),
+ store: s.store,
+ view: s.view,
+ backgroundCtx: bgCtx,
+ cancel: cancel,
+ builtin: s.builtin,
+ initialized: s.initialized,
+ initializedErr: s.initializedErr,
+ packages: s.packages.Clone(),
+ isActivePackageCache: s.isActivePackageCache.Clone(),
+ analyses: s.analyses.Clone(),
+ files: s.files.Clone(),
+ parsedGoFiles: s.parsedGoFiles.Clone(),
+ parseKeysByURI: s.parseKeysByURI.Clone(),
+ symbolizeHandles: s.symbolizeHandles.Clone(),
+ workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
+ unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
+ parseModHandles: s.parseModHandles.Clone(),
+ parseWorkHandles: s.parseWorkHandles.Clone(),
+ modTidyHandles: s.modTidyHandles.Clone(),
+ modWhyHandles: s.modWhyHandles.Clone(),
+ modVulnHandles: s.modVulnHandles.Clone(),
+ knownSubdirs: s.knownSubdirs.Clone(),
+ workspace: newWorkspace,
+ }
+
+ // The snapshot should be initialized if either s was uninitialized, or we've
+ // detected a change that triggers reinitialization.
+ if reinit {
+ result.initialized = false
+ }
+
+ // Create a lease on the new snapshot.
+ // (Best to do this early in case the code below hides an
+ // incref/decref operation that might destroy it prematurely.)
+ release := result.Acquire()
// Copy the set of unloadable files.
+ //
+ // TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on
+ // changes to environment or workspace layout, or more generally on any
+ // metadata change?
+ //
+ // Maybe not, as major configuration changes cause a new view.
for k, v := range s.unloadableFiles {
result.unloadableFiles[k] = v
}
- // Copy all of the modHandles.
- for k, v := range s.parseModHandles {
- result.parseModHandles[k] = v
- }
- // Copy all of the parseWorkHandles.
- for k, v := range s.parseWorkHandles {
- result.parseWorkHandles[k] = v
- }
- for k, v := range s.goFiles {
- if _, ok := changes[k.file.URI]; ok {
- continue
+ // TODO(adonovan): merge loops over "changes".
+ for uri, change := range changes {
+ // Optimization: if the content did not change, we don't need to evict the
+ // parsed file. This is not the case for e.g. the files map, which may
+ // switch from on-disk state to overlay. Parsed files depend only on
+ // content and parse mode (which is captured in the parse key).
+ //
+ // NOTE: This also makes it less likely that we re-parse a file due to a
+ // cache-miss but get a cache-hit for the corresponding package. In the
+ // past, there was code that relied on ParseGo returning the type-checked
+ // syntax tree. That code was wrong, but avoiding invalidation here limits
+ // the blast radius of these types of bugs.
+ if !change.isUnchanged {
+ keys, ok := result.parseKeysByURI.Get(uri)
+ if ok {
+ for _, key := range keys {
+ result.parsedGoFiles.Delete(key)
+ }
+ result.parseKeysByURI.Delete(uri)
+ }
}
- newGen.Inherit(v.handle)
- result.goFiles[k] = v
- }
- // Copy all of the go.mod-related handles. They may be invalidated later,
- // so we inherit them at the end of the function.
- for k, v := range s.modTidyHandles {
- if _, ok := changes[k]; ok {
- continue
- }
- result.modTidyHandles[k] = v
- }
- for k, v := range s.modWhyHandles {
- if _, ok := changes[k]; ok {
- continue
- }
- result.modWhyHandles[k] = v
+ // Invalidate go.mod-related handles.
+ result.modTidyHandles.Delete(uri)
+ result.modWhyHandles.Delete(uri)
+ result.modVulnHandles.Delete(uri)
+
+ // Invalidate handles for cached symbols.
+ result.symbolizeHandles.Delete(uri)
}
// Add all of the known subdirectories, but don't update them for the
// changed files. We need to rebuild the workspace module to know the
// true set of known subdirectories, but we don't want to do that in clone.
- for k, v := range s.knownSubdirs {
- result.knownSubdirs[k] = v
- }
+ result.knownSubdirs = s.knownSubdirs.Clone()
+ result.knownSubdirsPatternCache = s.knownSubdirsPatternCache
for _, c := range changes {
result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
}
// directIDs keeps track of package IDs that have directly changed.
- // It maps id->invalidateMetadata.
+ // Note: this is not a set, it's a map from id to invalidateMetadata.
directIDs := map[PackageID]bool{}
// Invalidate all package metadata if the workspace module has changed.
- if workspaceReload {
+ if reinit {
for k := range s.meta.metadata {
directIDs[k] = true
}
}
- changedPkgFiles := map[PackageID]struct{}{} // packages whose file set may have changed
- anyImportDeleted := false
- for uri, change := range changes {
- // Maybe reinitialize the view if we see a change in the vendor
- // directory.
- if inVendor(uri) {
- vendorChanged = true
- }
+ // Compute invalidations based on file changes.
+ anyImportDeleted := false // import deletions can resolve cycles
+ anyFileOpenedOrClosed := false // opened files affect workspace packages
+ anyFileAdded := false // adding a file can resolve missing dependencies
+ for uri, change := range changes {
// The original FileHandle for this URI is cached on the snapshot.
- originalFH := s.files[uri]
+ originalFH, _ := s.files.Get(uri)
+ var originalOpen, newOpen bool
+ _, originalOpen = originalFH.(*overlay)
+ _, newOpen = change.fileHandle.(*overlay)
+ anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen)
+ anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil)
// If uri is a Go file, check if it has changed in a way that would
// invalidate metadata. Note that we can't use s.view.FileKind here,
@@ -1829,39 +1708,33 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle)
}
- invalidateMetadata = invalidateMetadata || forceReloadMetadata || workspaceReload
+ invalidateMetadata = invalidateMetadata || forceReloadMetadata || reinit
anyImportDeleted = anyImportDeleted || importDeleted
// Mark all of the package IDs containing the given file.
filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged)
- if pkgFileChanged {
- for id := range filePackageIDs {
- changedPkgFiles[id] = struct{}{}
- }
- }
for id := range filePackageIDs {
- directIDs[id] = directIDs[id] || invalidateMetadata
+ directIDs[id] = directIDs[id] || invalidateMetadata // may insert 'false'
}
// Invalidate the previous modTidyHandle if any of the files have been
// saved or if any of the metadata has been invalidated.
if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
- // TODO(rstambler): Only delete mod handles for which the
- // withoutURI is relevant.
- for k := range s.modTidyHandles {
- delete(result.modTidyHandles, k)
- }
- for k := range s.modWhyHandles {
- delete(result.modWhyHandles, k)
- }
+ // TODO(maybe): Only delete mod handles for
+ // which the withoutURI is relevant.
+ // Requires reverse-engineering the go command. (!)
+ result.modTidyHandles.Clear()
+ result.modWhyHandles.Clear()
+ result.modVulnHandles.Clear()
}
- delete(result.parseModHandles, uri)
- delete(result.parseWorkHandles, uri)
+
+ result.parseModHandles.Delete(uri)
+ result.parseWorkHandles.Delete(uri)
// Handle the invalidated file; it may have new contents or not exist.
if !change.exists {
- delete(result.files, uri)
+ result.files.Delete(uri)
} else {
- result.files[uri] = change.fileHandle
+ result.files.Set(uri, change.fileHandle)
}
// Make sure to remove the changed file from the unloadable set.
@@ -1889,8 +1762,23 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
}
}
+ // Adding a file can resolve missing dependencies from existing packages.
+ //
+ // We could be smart here and try to guess which packages may have been
+ // fixed, but until that proves necessary, just invalidate metadata for any
+ // package with missing dependencies.
+ if anyFileAdded {
+ for id, metadata := range s.meta.metadata {
+ for _, impID := range metadata.DepsByImpPath {
+ if impID == "" { // missing import
+ directIDs[id] = true
+ break
+ }
+ }
+ }
+ }
+
// Invalidate reverse dependencies too.
- // TODO(heschi): figure out the locking model and use transitiveReverseDeps?
// idsToInvalidate keeps track of transitive reverse dependencies.
// If an ID is present in the map, invalidate its types.
// If an ID's value is true, invalidate its metadata too.
@@ -1906,7 +1794,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
return
}
idsToInvalidate[id] = newInvalidateMetadata
- for _, rid := range s.getImportedByLocked(id) {
+ for _, rid := range s.meta.importedBy[id] {
addRevDeps(rid, invalidateMetadata)
}
}
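
Aside (not part of the patch): addRevDeps above walks the importedBy graph, upgrading an already-visited entry from types-only to metadata invalidation when needed and stopping once nothing new would be recorded. A standalone sketch of that traversal over a toy graph:

package main

import "fmt"

func main() {
	// importedBy maps a package ID to the IDs that import it directly.
	importedBy := map[string][]string{
		"a": {"b", "c"},
		"b": {"c"},
	}
	directIDs := map[string]bool{"a": true} // id -> invalidate metadata too?

	// Presence in idsToInvalidate means "invalidate types"; a true value
	// additionally means "invalidate metadata".
	idsToInvalidate := map[string]bool{}
	var addRevDeps func(id string, invalidateMetadata bool)
	addRevDeps = func(id string, invalidateMetadata bool) {
		current, seen := idsToInvalidate[id]
		newInvalidateMetadata := current || invalidateMetadata
		if seen && current == newInvalidateMetadata {
			return // nothing new to record; prune the walk
		}
		idsToInvalidate[id] = newInvalidateMetadata
		for _, rid := range importedBy[id] {
			addRevDeps(rid, invalidateMetadata)
		}
	}
	for id, invalidateMetadata := range directIDs {
		addRevDeps(id, invalidateMetadata)
	}
	fmt.Println(idsToInvalidate) // map[a:true b:true c:true]
}
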
@@ -1914,28 +1802,32 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
addRevDeps(id, invalidateMetadata)
}
- // Copy the package type information.
- for k, v := range s.packages {
- if _, ok := idsToInvalidate[k.id]; ok {
- continue
+ // Delete invalidated package type information.
+ for id := range idsToInvalidate {
+ for _, mode := range source.AllParseModes {
+ key := packageKey{mode, id}
+ result.packages.Delete(key)
}
- newGen.Inherit(v.handle)
- result.packages[k] = v
}
- // Copy the package analysis information.
- for k, v := range s.actions {
- if _, ok := idsToInvalidate[k.pkg.id]; ok {
- continue
+
+ // Delete invalidated analysis actions.
+ var actionsToDelete []analysisKey
+ result.analyses.Range(func(k, _ interface{}) {
+ key := k.(analysisKey)
+ if _, ok := idsToInvalidate[key.pkgid]; ok {
+ actionsToDelete = append(actionsToDelete, key)
}
- newGen.Inherit(v.handle)
- result.actions[k] = v
+ })
+ for _, key := range actionsToDelete {
+ result.analyses.Delete(key)
}
- // If the workspace mode has changed, we must delete all metadata, as it
- // is unusable and may produce confusing or incorrect diagnostics.
- // If a file has been deleted, we must delete metadata all packages
+ // If a file has been deleted, we must delete metadata for all packages
// containing that file.
- workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
+ //
+ // TODO(rfindley): why not keep invalid metadata in this case? If we
+ // otherwise allow operating on invalid metadata, why not continue to do so,
+ // skipping the missing file?
skipID := map[PackageID]bool{}
for _, c := range changes {
if c.exists {
@@ -1949,138 +1841,73 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
}
}
- // Collect all of the IDs that are reachable from the workspace packages.
- // Any unreachable IDs will have their metadata deleted outright.
- reachableID := map[PackageID]bool{}
- var addForwardDeps func(PackageID)
- addForwardDeps = func(id PackageID) {
- if reachableID[id] {
- return
- }
- reachableID[id] = true
- m, ok := s.meta.metadata[id]
- if !ok {
- return
- }
- for _, depID := range m.Deps {
- addForwardDeps(depID)
- }
- }
- for id := range s.workspacePackages {
- addForwardDeps(id)
- }
-
- // Copy the URI to package ID mappings, skipping only those URIs whose
- // metadata will be reloaded in future calls to load.
- deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
- idsInSnapshot := map[PackageID]bool{} // track all known IDs
- for uri, ids := range s.meta.ids {
- var resultIDs []PackageID
- for _, id := range ids {
- if skipID[id] || deleteInvalidMetadata && idsToInvalidate[id] {
- continue
- }
- // The ID is not reachable from any workspace package, so it should
- // be deleted.
- if !reachableID[id] {
- continue
- }
- idsInSnapshot[id] = true
- resultIDs = append(resultIDs, id)
+ // Any packages that need loading in s still need loading in the new
+ // snapshot.
+ for k, v := range s.shouldLoad {
+ if result.shouldLoad == nil {
+ result.shouldLoad = make(map[PackageID][]PackagePath)
}
- result.meta.ids[uri] = resultIDs
+ result.shouldLoad[k] = v
}
- // Copy the package metadata. We only need to invalidate packages directly
- // containing the affected file, and only if it changed in a relevant way.
+ // Compute which metadata updates are required. We only need to invalidate
+ // packages directly containing the affected file, and only if it changed in
+ // a relevant way.
+ metadataUpdates := make(map[PackageID]*source.Metadata)
for k, v := range s.meta.metadata {
- if !idsInSnapshot[k] {
- // Delete metadata for IDs that are no longer reachable from files
- // in the snapshot.
- continue
- }
invalidateMetadata := idsToInvalidate[k]
- // Mark invalidated metadata rather than deleting it outright.
- result.meta.metadata[k] = &KnownMetadata{
- Metadata: v.Metadata,
- Valid: v.Valid && !invalidateMetadata,
- ShouldLoad: v.ShouldLoad || invalidateMetadata,
- }
- }
-
- // Copy the set of initially loaded packages.
- for id, pkgPath := range s.workspacePackages {
- // Packages with the id "command-line-arguments" are generated by the
- // go command when the user is outside of GOPATH and outside of a
- // module. Do not cache them as workspace packages for longer than
- // necessary.
- if source.IsCommandLineArguments(string(id)) {
- if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok {
- continue
- }
- }
- // If all the files we know about in a package have been deleted,
- // the package is gone and we should no longer try to load it.
- if m := s.meta.metadata[id]; m != nil {
- hasFiles := false
- for _, uri := range s.meta.metadata[id].GoFiles {
- // For internal tests, we need _test files, not just the normal
- // ones. External tests only have _test files, but we can check
- // them anyway.
- if m.ForTest != "" && !strings.HasSuffix(string(uri), "_test.go") {
- continue
- }
- if _, ok := result.files[uri]; ok {
- hasFiles = true
- break
- }
+ // For metadata that has been newly invalidated, capture package paths
+ // requiring reloading in the shouldLoad map.
+ if invalidateMetadata && !source.IsCommandLineArguments(v.ID) {
+ if result.shouldLoad == nil {
+ result.shouldLoad = make(map[PackageID][]PackagePath)
}
- if !hasFiles {
- continue
+ needsReload := []PackagePath{v.PkgPath}
+ if v.ForTest != "" && v.ForTest != v.PkgPath {
+ // When reloading test variants, always reload their ForTest package as
+ // well. Otherwise, we may miss test variants in the resulting load.
+ //
+ // TODO(rfindley): is this actually sufficient? Is it possible that
+ // other test variants may be invalidated? Either way, we should
+ // determine exactly what needs to be reloaded here.
+ needsReload = append(needsReload, v.ForTest)
}
+ result.shouldLoad[k] = needsReload
}
- // If the package name of a file in the package has changed, it's
- // possible that the package ID may no longer exist. Delete it from
- // the set of workspace packages, on the assumption that we will add it
- // back when the relevant files are reloaded.
- if _, ok := changedPkgFiles[id]; ok {
+ // Check whether the metadata should be deleted.
+ if skipID[k] || invalidateMetadata {
+ metadataUpdates[k] = nil
continue
}
-
- result.workspacePackages[id] = pkgPath
}
- // Inherit all of the go.mod-related handles.
- for _, v := range result.modTidyHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.modWhyHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.parseModHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.parseWorkHandles {
- newGen.Inherit(v.handle)
+ // Update metadata, if necessary.
+ result.meta = s.meta.Clone(metadataUpdates)
+
+ // Update workspace and active packages, if necessary.
+ if result.meta != s.meta || anyFileOpenedOrClosed {
+ result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta)
+ result.resetIsActivePackageLocked()
+ } else {
+ result.workspacePackages = s.workspacePackages
}
+
// Don't bother copying the importedBy graph,
// as it changes each time we update metadata.
+ // TODO(rfindley): consolidate this workspace mode detection with
+ // workspace invalidation.
+ workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
+
// If the snapshot's workspace mode has changed, the packages loaded using
// the previous mode are no longer relevant, so clear them out.
if workspaceModeChanged {
result.workspacePackages = map[PackageID]PackagePath{}
}
-
- // The snapshot may need to be reinitialized.
- if workspaceReload || vendorChanged {
- if workspaceChanged || result.initializedErr != nil {
- result.initializeOnce = &sync.Once{}
- }
- }
- return result
+ result.dumpWorkspace("clone")
+ return result, release
}
// invalidatedPackageIDs returns all packages invalidated by a change to uri.
@@ -2250,28 +2077,20 @@ func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH
return invalidate, pkgFileChanged, importDeleted
}
-// peekOrParse returns the cached ParsedGoFile if it exists, otherwise parses
-// without caching.
+// peekOrParse returns the cached ParsedGoFile if it exists,
+// otherwise parses without populating the cache.
//
// It returns an error if the file could not be read (note that parsing errors
// are stored in ParsedGoFile.ParseErr).
//
// lockedSnapshot must be locked.
func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
- key := parseKey{file: fh.FileIdentity(), mode: mode}
- if pgh := lockedSnapshot.goFiles[key]; pgh != nil {
- cached := pgh.handle.Cached(lockedSnapshot.generation)
- if cached != nil {
- cached := cached.(*parseGoData)
- if cached.parsed != nil {
- return cached.parsed, nil
- }
- }
+ // Peek in the cache without populating it.
+ // We do this to reduce retained heap, not work.
+ if parsed, _ := lockedSnapshot.peekParseGoLocked(fh, mode); parsed != nil {
+ return parsed, nil // cache hit
}
-
- fset := token.NewFileSet()
- data := parseGo(ctx, fset, fh, mode)
- return data.parsed, data.err
+ return parseGoImpl(ctx, token.NewFileSet(), fh, mode)
}
func magicCommentsChanged(original *ast.File, current *ast.File) bool {
@@ -2371,7 +2190,7 @@ func (s *snapshot) BuildGoplsMod(ctx context.Context) (*modfile.File, error) {
return buildWorkspaceModFile(ctx, allModules, s)
}
-// TODO(rfindley): move this to workspacemodule.go
+// TODO(rfindley): move this to workspace.go
func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) (*modfile.File, error) {
file := &modfile.File{}
file.AddModuleStmt("gopls-workspace")
@@ -2409,8 +2228,8 @@ func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{},
goVersion = parsed.Go.Version
}
path := parsed.Module.Mod.Path
- if _, ok := paths[path]; ok {
- return nil, fmt.Errorf("found module %q twice in the workspace", path)
+ if seen, ok := paths[path]; ok {
+ return nil, fmt.Errorf("found module %q multiple times in the workspace, at:\n\t%q\n\t%q", path, seen, modURI)
}
paths[path] = modURI
// If the module's path includes a major version, we expect it to have
@@ -2421,7 +2240,7 @@ func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{},
}
majorVersion = strings.TrimLeft(majorVersion, "/.") // handle gopkg.in versions
file.AddNewRequire(path, source.WorkspaceModuleVersion(majorVersion), false)
- if err := file.AddReplace(path, "", dirURI(modURI).Filename(), ""); err != nil {
+ if err := file.AddReplace(path, "", span.Dir(modURI).Filename(), ""); err != nil {
return nil, err
}
for _, exclude := range parsed.Exclude {
@@ -2459,11 +2278,11 @@ func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{},
// If a replace points to a module in the workspace, make sure we
// direct it to version of the module in the workspace.
if m, ok := paths[rep.New.Path]; ok {
- newPath = dirURI(m).Filename()
+ newPath = span.Dir(m).Filename()
newVersion = ""
} else if rep.New.Version == "" && !filepath.IsAbs(rep.New.Path) {
// Make any relative paths absolute.
- newPath = filepath.Join(dirURI(modURI).Filename(), rep.New.Path)
+ newPath = filepath.Join(span.Dir(modURI).Filename(), rep.New.Path)
}
if err := file.AddReplace(rep.Old.Path, rep.Old.Version, newPath, newVersion); err != nil {
return nil, err
diff --git a/gopls/internal/lsp/cache/standalone_go115.go b/gopls/internal/lsp/cache/standalone_go115.go
new file mode 100644
index 00000000000..79569ae10ec
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go115.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+// +build !go1.16
+
+package cache
+
+// isStandaloneFile returns false, as the 'standaloneTags' setting is
+// unsupported on Go 1.15 and earlier.
+func isStandaloneFile(src []byte, standaloneTags []string) bool {
+ return false
+}
diff --git a/gopls/internal/lsp/cache/standalone_go116.go b/gopls/internal/lsp/cache/standalone_go116.go
new file mode 100644
index 00000000000..2f72d5f5495
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go116.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package cache
+
+import (
+ "go/build/constraint"
+ "go/parser"
+ "go/token"
+)
+
+// isStandaloneFile reports whether a file with the given contents should be
+// considered a 'standalone main file', meaning a package that consists of only
+// a single file.
+func isStandaloneFile(src []byte, standaloneTags []string) bool {
+ f, err := parser.ParseFile(token.NewFileSet(), "", src, parser.PackageClauseOnly|parser.ParseComments)
+ if err != nil {
+ return false
+ }
+
+ if f.Name == nil || f.Name.Name != "main" {
+ return false
+ }
+
+ for _, cg := range f.Comments {
+ // Even with PackageClauseOnly the parser consumes the semicolon following
+ // the package clause, so we must guard against comments that come after
+ // the package name.
+ if cg.Pos() > f.Name.Pos() {
+ continue
+ }
+ for _, comment := range cg.List {
+ if c, err := constraint.Parse(comment.Text); err == nil {
+ if tag, ok := c.(*constraint.TagExpr); ok {
+ for _, t := range standaloneTags {
+ if t == tag.Tag {
+ return true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return false
+}
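
Aside (not part of the patch): isStandaloneFile accepts only a bare *constraint.TagExpr, so an expression such as //go:build ignore || darwin does not qualify (see the "combined with other expressions" case in the tests below). For contrast, a sketch of the more general evaluation go/build/constraint supports:

package main

import (
	"fmt"
	"go/build/constraint"
)

func main() {
	expr, err := constraint.Parse("//go:build ignore || darwin")
	if err != nil {
		panic(err)
	}
	// Eval asks the callback whether each tag is satisfied and combines the
	// answers according to the expression.
	tags := map[string]bool{"ignore": true}
	fmt.Println(expr.Eval(func(tag string) bool { return tags[tag] })) // true
}
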
diff --git a/gopls/internal/lsp/cache/standalone_go116_test.go b/gopls/internal/lsp/cache/standalone_go116_test.go
new file mode 100644
index 00000000000..9adf01e6cea
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go116_test.go
@@ -0,0 +1,96 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package cache
+
+import (
+ "testing"
+)
+
+func TestIsStandaloneFile(t *testing.T) {
+ tests := []struct {
+ desc string
+ contents string
+ standaloneTags []string
+ want bool
+ }{
+ {
+ "new syntax",
+ "//go:build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "legacy syntax",
+ "// +build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "multiple tags",
+ "//go:build ignore\n\npackage main\n",
+ []string{"exclude", "ignore"},
+ true,
+ },
+ {
+ "invalid tag",
+ "// +build ignore\n\npackage main\n",
+ []string{"script"},
+ false,
+ },
+ {
+ "non-main package",
+ "//go:build ignore\n\npackage p\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "alternate tag",
+ "// +build script\n\npackage main\n",
+ []string{"script"},
+ true,
+ },
+ {
+ "both syntax",
+ "//go:build ignore\n// +build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "after comments",
+ "// A non-directive comment\n//go:build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "after package decl",
+ "package main //go:build ignore\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "on line after package decl",
+ "package main\n\n//go:build ignore\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "combined with other expressions",
+ "\n\n//go:build ignore || darwin\n\npackage main\n",
+ []string{"ignore"},
+ false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ if got := isStandaloneFile([]byte(test.contents), test.standaloneTags); got != test.want {
+ t.Errorf("isStandaloneFile(%q, %v) = %t, want %t", test.contents, test.standaloneTags, got, test.want)
+ }
+ })
+ }
+}
diff --git a/internal/lsp/cache/symbols.go b/gopls/internal/lsp/cache/symbols.go
similarity index 73%
rename from internal/lsp/cache/symbols.go
rename to gopls/internal/lsp/cache/symbols.go
index db68912015e..69b2b044273 100644
--- a/internal/lsp/cache/symbols.go
+++ b/gopls/internal/lsp/cache/symbols.go
@@ -12,52 +12,54 @@ import (
"go/types"
"strings"
- "golang.org/x/tools/internal/lsp/lsppos"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/lsppos"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
)
-type symbolHandle struct {
- handle *memoize.Handle
+// symbolize returns the result of symbolizing the file identified by fh, using a cache.
+func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]source.Symbol, error) {
+ uri := fh.URI()
- fh source.FileHandle
+ s.mu.Lock()
+ entry, hit := s.symbolizeHandles.Get(uri)
+ s.mu.Unlock()
- // key is the hashed key for the package.
- key symbolHandleKey
-}
+ type symbolizeResult struct {
+ symbols []source.Symbol
+ err error
+ }
-// symbolData contains the data produced by extracting symbols from a file.
-type symbolData struct {
- symbols []source.Symbol
- err error
-}
+ // Cache miss?
+ if !hit {
+ type symbolHandleKey source.Hash
+ key := symbolHandleKey(fh.FileIdentity().Hash)
+ promise, release := s.store.Promise(key, func(_ context.Context, arg interface{}) interface{} {
+ symbols, err := symbolizeImpl(arg.(*snapshot), fh)
+ return symbolizeResult{symbols, err}
+ })
-type symbolHandleKey string
+ entry = promise
-func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle {
- if h := s.getSymbolHandle(fh.URI()); h != nil {
- return h
+ s.mu.Lock()
+ s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
}
- key := symbolHandleKey(fh.FileIdentity().Hash)
- h := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
- data := &symbolData{}
- data.symbols, data.err = symbolize(snapshot, fh)
- return data
- }, nil)
- sh := &symbolHandle{
- handle: h,
- fh: fh,
- key: key,
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
}
- return s.addSymbolHandle(sh)
+ res := v.(symbolizeResult)
+ return res.symbols, res.err
}
-// symbolize extracts symbols from a file. It uses a parsed file already
-// present in the cache but otherwise does not populate the cache.
-func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
+// symbolizeImpl reads and parses a file and extracts symbols from it.
+// It may use a parsed file already present in the cache but
+// otherwise does not populate the cache.
+func symbolizeImpl(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
src, err := fh.Read()
if err != nil {
return nil, err
@@ -68,9 +70,13 @@ func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error
fileDesc *token.File
)
- // If the file has already been fully parsed through the cache, we can just
- // use the result.
- if pgf := snapshot.cachedPGF(fh, source.ParseFull); pgf != nil {
+ // If the file has already been fully parsed through the
+ // cache, we can just use the result. But we don't want to
+ // populate the cache after a miss.
+ snapshot.mu.Lock()
+ pgf, _ := snapshot.peekParseGoLocked(fh, source.ParseFull)
+ snapshot.mu.Unlock()
+ if pgf != nil {
file = pgf.File
fileDesc = pgf.Tok
}
diff --git a/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go
similarity index 52%
rename from internal/lsp/cache/view.go
rename to gopls/internal/lsp/cache/view.go
index b0390a3fbde..14536e1cc6f 100644
--- a/internal/lsp/cache/view.go
+++ b/gopls/internal/lsp/cache/view.go
@@ -6,10 +6,10 @@
package cache
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
- "io"
"io/ioutil"
"os"
"path"
@@ -19,60 +19,87 @@ import (
"sort"
"strings"
"sync"
+ "time"
"golang.org/x/mod/modfile"
"golang.org/x/mod/semver"
exec "golang.org/x/sys/execabs"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
)
type View struct {
- session *Session
- id string
+ id string
- optionsMu sync.Mutex
- options *source.Options
-
- // mu protects most mutable state of the view.
- mu sync.Mutex
+ cache *Cache // shared cache
+ gocmdRunner *gocommand.Runner // limits go command concurrency
// baseCtx is the context handed to NewView. This is the parent of all
// background contexts created for this view.
baseCtx context.Context
- // cancel is called when all action being performed by the current view
- // should be stopped.
- cancel context.CancelFunc
-
- // name is the user visible name of this view.
+ // name is the user-specified name of this view.
name string
- // folder is the folder with which this view was constructed.
- folder span.URI
+ optionsMu sync.Mutex
+ options *source.Options
+
+ // Workspace information. The fields below are immutable, and together with
+ // options define the build list. Any change to these fields results in a new
+ // View.
+ //
+ // TODO(rfindley): consolidate and/or eliminate redundancy in these fields,
+ // which have evolved from different sources over time.
+ folder span.URI // user-specified workspace folder
+ rootURI span.URI // either folder or dir(rootSrc) (TODO: deprecate, in favor of folder+rootSrc)
+ rootSrc span.URI // file providing module information (go.mod or go.work); may be empty
+	explicitGowork span.URI // if non-empty, a user-specified go.work location (TODO: deprecate)
+ workspaceInformation // grab-bag of Go environment information (TODO: cleanup)
importsState *importsState
- // moduleUpgrades tracks known upgrades for module paths.
- moduleUpgrades map[string]string
+ // moduleUpgrades tracks known upgrades for module paths in each modfile.
+ // Each modfile has a map of module name to upgrade version.
+ moduleUpgradesMu sync.Mutex
+ moduleUpgrades map[span.URI]map[string]string
- // keep track of files by uri and by basename, a single file may be mapped
- // to multiple uris, and the same basename may map to multiple files
- filesByURI map[span.URI]*fileBase
- filesByBase map[string][]*fileBase
+ // vulns maps each go.mod file's URI to its known vulnerabilities.
+ vulnsMu sync.Mutex
+ vulns map[span.URI]*govulncheck.Result
+
+ // filesByURI maps URIs to the canonical URI for the file it denotes.
+ // We also keep a set of candidates for a given basename
+ // to reduce the set of pairs that need to be tested for sameness.
+ //
+ // TODO(rfindley): move this file tracking to the session.
+ filesByMu sync.Mutex
+ filesByURI map[span.URI]span.URI // key is noncanonical URI (alias)
+ filesByBase map[string][]canonicalURI // key is basename
// initCancelFirstAttempt can be used to terminate the view's first
// attempt at initialization.
initCancelFirstAttempt context.CancelFunc
- snapshotMu sync.Mutex
- snapshot *snapshot // nil after shutdown has been called
+ // Track the latest snapshot via the snapshot field, guarded by snapshotMu.
+ //
+ // Invariant: whenever the snapshot field is overwritten, destroy(snapshot)
+ // is called on the previous (overwritten) snapshot while snapshotMu is held,
+ // incrementing snapshotWG. During shutdown the final snapshot is
+ // overwritten with nil and destroyed, guaranteeing that all observed
+ // snapshots have been destroyed via the destroy method, and snapshotWG may
+ // be waited upon to let these destroy operations complete.
+ snapshotMu sync.Mutex
+ snapshot *snapshot // latest snapshot; nil after shutdown has been called
+ releaseSnapshot func() // called when snapshot is no longer needed
+ snapshotWG sync.WaitGroup // refcount for pending destroy operations
// initialWorkspaceLoad is closed when the first workspace initialization has
// completed. If we failed to load, we only retry if the go.mod file changes,
@@ -82,40 +109,70 @@ type View struct {
	// initializationSema is used to limit concurrent initialization of snapshots in
// the view. We use a channel instead of a mutex to avoid blocking when a
// context is canceled.
+ //
+ // This field (along with snapshot.initialized) guards against duplicate
+ // initialization of snapshots. Do not change it without adjusting snapshot
+ // accordingly.
initializationSema chan struct{}
-
- // rootURI is the rootURI directory of this view. If we are in GOPATH mode, this
- // is just the folder. If we are in module mode, this is the module rootURI.
- rootURI span.URI
-
- // workspaceInformation tracks various details about this view's
- // environment variables, go version, and use of modules.
- workspaceInformation
}
type workspaceInformation struct {
// The Go version in use: X in Go 1.X.
goversion int
+ // The complete output of the go version command.
+ // (Call gocommand.ParseGoVersionOutput to extract a version
+ // substring such as go1.19.1 or go1.20-rc.1, go1.21-abcdef01.)
+ goversionOutput string
+
// hasGopackagesDriver is true if the user has a value set for the
// GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on
// their machine.
hasGopackagesDriver bool
// `go env` variables that need to be tracked by gopls.
+ //
+ // TODO(rfindley): eliminate this in favor of goEnv, or vice-versa.
environmentVariables
- // userGo111Module is the user's value of GO111MODULE.
- userGo111Module go111module
-
- // The value of GO111MODULE we want to run with.
- effectiveGo111Module string
-
// goEnv is the `go env` output collected when a view is created.
// It includes the values of the environment variables above.
goEnv map[string]string
}
+// effectiveGO111MODULE reports the value of GO111MODULE effective in the go
+// command at this go version, accounting for default values at different go
+// versions.
+func (w workspaceInformation) effectiveGO111MODULE() go111module {
+ // Off by default until Go 1.12.
+ go111module := w.GO111MODULE()
+ if go111module == "off" || (w.goversion < 12 && go111module == "") {
+ return off
+ }
+ // On by default as of Go 1.16.
+ if go111module == "on" || (w.goversion >= 16 && go111module == "") {
+ return on
+ }
+ return auto
+}
+
+// GO111MODULE returns the value of GO111MODULE to use for running the go
+// command. It differs from the user's environment in order to allow for the
+// more forgiving default value "auto" when using recent go versions.
+//
+// TODO(rfindley): it is probably not worthwhile diverging from the go command
+// here. The extra forgiveness may be nice, but breaks the invariant that
+// running the go command from the command line produces the same build list.
+//
+// Put differently: we shouldn't go out of our way to make GOPATH work, when
+// the go command does not.
+func (w workspaceInformation) GO111MODULE() string {
+ if w.goversion >= 16 && w.go111module == "" {
+ return "auto"
+ }
+ return w.go111module
+}
+
type go111module int
const (
@@ -124,10 +181,22 @@ const (
on
)
+// environmentVariables holds important environment variables captured by a
+// call to `go env`.
type environmentVariables struct {
- gocache, gopath, goroot, goprivate, gomodcache, go111module string
+ gocache, gopath, goroot, goprivate, gomodcache string
+
+ // Don't use go111module directly, because we choose to use a different
+ // default (auto) on Go 1.16 and later, to avoid spurious errors. Use
+ // the workspaceInformation.GO111MODULE method instead.
+ go111module string
}
+// workspaceMode holds various flags defining how the gopls workspace should
+// behave. They may be derived from the environment, user configuration, or
+// depend on the Go version.
+//
+// TODO(rfindley): remove workspace mode, in favor of explicit checks.
type workspaceMode int
const (
@@ -137,35 +206,14 @@ const (
tempModfile
)
-// fileBase holds the common functionality for all files.
-// It is intended to be embedded in the file implementations
-type fileBase struct {
- uris []span.URI
- fname string
-
- view *View
-}
-
-func (f *fileBase) URI() span.URI {
- return f.uris[0]
-}
-
-func (f *fileBase) filename() string {
- return f.fname
-}
-
-func (f *fileBase) addURI(uri span.URI) int {
- f.uris = append(f.uris, uri)
- return len(f.uris)
-}
-
func (v *View) ID() string { return v.id }
-// tempModFile creates a temporary go.mod file based on the contents of the
-// given go.mod file. It is the caller's responsibility to clean up the files
-// when they are done using them.
+// tempModFile creates a temporary go.mod file based on the contents
+// of the given go.mod file. On success, it is the caller's
+// responsibility to call the cleanup function when the file is no
+// longer needed.
func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) {
- filenameHash := hashContents([]byte(modFh.URI().Filename()))
+ filenameHash := source.Hashf("%s", modFh.URI().Filename())
tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash))
if err != nil {
return "", nil, err
@@ -184,7 +232,9 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu
return "", nil, err
}
- cleanup = func() {
+ // We use a distinct name here to avoid subtlety around the fact
+ // that both 'return' and 'defer' update the "cleanup" variable.
+ doCleanup := func() {
_ = os.Remove(tmpSumName)
_ = os.Remove(tmpURI.Filename())
}
@@ -192,7 +242,7 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu
// Be careful to clean up if we return an error from this function.
defer func() {
if err != nil {
- cleanup()
+ doCleanup()
cleanup = nil
}
}()
@@ -200,11 +250,11 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu
// Create an analogous go.sum, if one exists.
if gosum != nil {
if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil {
- return "", cleanup, err
+ return "", nil, err
}
}
- return tmpURI, cleanup, nil
+ return tmpURI, doCleanup, nil
}
// Name returns the user visible name of this view.
@@ -224,11 +274,15 @@ func (v *View) Options() *source.Options {
}
func (v *View) FileKind(fh source.FileHandle) source.FileKind {
+ // The kind of an unsaved buffer comes from the
+ // TextDocumentItem.LanguageID field in the didChange event,
+ // not from the file name. They may differ.
if o, ok := fh.(source.Overlay); ok {
if o.Kind() != source.UnknownKind {
return o.Kind()
}
}
+
fext := filepath.Ext(fh.URI().Filename())
switch fext {
case ".go":
@@ -258,6 +312,9 @@ func minorOptionsChange(a, b *source.Options) bool {
if !reflect.DeepEqual(a.DirectoryFilters, b.DirectoryFilters) {
return false
}
+ if !reflect.DeepEqual(a.StandaloneTags, b.StandaloneTags) {
+ return false
+ }
if a.MemoryMode != b.MemoryMode {
return false
}
@@ -271,7 +328,11 @@ func minorOptionsChange(a, b *source.Options) bool {
return reflect.DeepEqual(aBuildFlags, bBuildFlags)
}
-func (v *View) SetOptions(ctx context.Context, options *source.Options) (source.View, error) {
+// SetViewOptions sets the options of the given view to new values. Calling
+// this may cause the view to be invalidated and a replacement view added to
+// the session. If so, the new view will be returned; otherwise the original one
+// will be returned.
+func (s *Session) SetViewOptions(ctx context.Context, v *View, options *source.Options) (*View, error) {
// no need to rebuild the view if the options were not materially changed
v.optionsMu.Lock()
if minorOptionsChange(v.options, options) {
@@ -280,27 +341,32 @@ func (v *View) SetOptions(ctx context.Context, options *source.Options) (source.
return v, nil
}
v.optionsMu.Unlock()
- newView, err := v.session.updateView(ctx, v, options)
+ newView, err := s.updateView(ctx, v, options)
return newView, err
}
-func (v *View) Rebuild(ctx context.Context) (source.Snapshot, func(), error) {
- newView, err := v.session.updateView(ctx, v, v.Options())
- if err != nil {
- return nil, func() {}, err
- }
- snapshot, release := newView.Snapshot(ctx)
- return snapshot, release, nil
-}
+// viewEnv returns a string describing the environment of a newly created view.
+func viewEnv(v *View) string {
+ v.optionsMu.Lock()
+ env := v.options.EnvSlice()
+ buildFlags := append([]string{}, v.options.BuildFlags...)
+ v.optionsMu.Unlock()
-func (s *snapshot) WriteEnv(ctx context.Context, w io.Writer) error {
- s.view.optionsMu.Lock()
- env := s.view.options.EnvSlice()
- buildFlags := append([]string{}, s.view.options.BuildFlags...)
- s.view.optionsMu.Unlock()
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, `go env for %v
+(root %s)
+(go version %s)
+(valid build configuration = %v)
+(build flags: %v)
+`,
+ v.folder.Filename(),
+ v.rootURI.Filename(),
+ strings.TrimRight(v.workspaceInformation.goversionOutput, "\n"),
+ v.snapshot.ValidBuildConfiguration(),
+ buildFlags)
fullEnv := make(map[string]string)
- for k, v := range s.view.goEnv {
+ for k, v := range v.goEnv {
fullEnv[k] = v
}
for _, v := range env {
@@ -312,29 +378,11 @@ func (s *snapshot) WriteEnv(ctx context.Context, w io.Writer) error {
fullEnv[s[0]] = s[1]
}
}
- goVersion, err := s.view.session.gocmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "version",
- Env: env,
- WorkingDir: s.view.rootURI.Filename(),
- })
- if err != nil {
- return err
- }
- fmt.Fprintf(w, `go env for %v
-(root %s)
-(go version %s)
-(valid build configuration = %v)
-(build flags: %v)
-`,
- s.view.folder.Filename(),
- s.view.rootURI.Filename(),
- strings.TrimRight(goVersion.String(), "\n"),
- s.ValidBuildConfiguration(),
- buildFlags)
for k, v := range fullEnv {
- fmt.Fprintf(w, "%s=%s\n", k, v)
+ fmt.Fprintf(&buf, "%s=%s\n", k, v)
}
- return nil
+
+ return buf.String()
}
func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error {
@@ -371,13 +419,14 @@ func (s *snapshot) locateTemplateFiles(ctx context.Context) {
relativeTo := s.view.folder.Filename()
searched := 0
+ filterer := buildFilterer(dir, s.view.gomodcache, s.view.Options())
// Change to WalkDir when we move up to 1.16
err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
relpath := strings.TrimPrefix(path, relativeTo)
- excluded := pathExcludedByFilter(relpath, dir, s.view.gomodcache, s.view.options)
+ excluded := pathExcludedByFilter(relpath, filterer)
if fileHasExtension(path, suffixes) && !excluded && !fi.IsDir() {
k := span.URIFromPath(path)
_, err := s.GetVersionedFile(ctx, k)
@@ -397,28 +446,30 @@ func (s *snapshot) locateTemplateFiles(ctx context.Context) {
}
func (v *View) contains(uri span.URI) bool {
+ // TODO(rfindley): should we ignore the root here? It is not provided by the
+ // user, and is undefined when go.work is outside the workspace. It would be
+ // better to explicitly consider the set of active modules wherever relevant.
inRoot := source.InDir(v.rootURI.Filename(), uri.Filename())
inFolder := source.InDir(v.folder.Filename(), uri.Filename())
+
if !inRoot && !inFolder {
return false
}
- // Filters are applied relative to the workspace folder.
- if inFolder {
- return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options())
- }
- return true
-}
-func (v *View) mapFile(uri span.URI, f *fileBase) {
- v.filesByURI[uri] = f
- if f.addURI(uri) == 1 {
- basename := basename(f.filename())
- v.filesByBase[basename] = append(v.filesByBase[basename], f)
- }
+ return !v.filterFunc()(uri)
}
-func basename(filename string) string {
- return strings.ToLower(filepath.Base(filename))
+// filterFunc returns a func that reports whether uri is filtered by the currently configured
+// directoryFilters.
+func (v *View) filterFunc() func(span.URI) bool {
+ filterer := buildFilterer(v.rootURI.Filename(), v.gomodcache, v.Options())
+ return func(uri span.URI) bool {
+ // Only filter relative to the configured root directory.
+ if source.InDir(v.folder.Filename(), uri.Filename()) {
+ return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), filterer)
+ }
+ return false
+ }
}
func (v *View) relevantChange(c source.FileModification) bool {
@@ -433,119 +484,119 @@ func (v *View) relevantChange(c source.FileModification) bool {
// TODO(rstambler): Make sure the go.work/gopls.mod files are always known
// to the view.
for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- if c.URI == uriForSource(v.rootURI, src) {
+ if c.URI == uriForSource(v.rootURI, v.explicitGowork, src) {
return true
}
}
- // If the file is not known to the view, and the change is only on-disk,
- // we should not invalidate the snapshot. This is necessary because Emacs
- // sends didChangeWatchedFiles events for temp files.
- if c.OnDisk && (c.Action == source.Change || c.Action == source.Delete) {
- return false
- }
+
+ // Note: CL 219202 filtered out on-disk changes here that were not known to
+ // the view, but this introduces a race when changes arrive before the view
+ // is initialized (and therefore, before it knows about files). Since that CL
+ // had neither test nor associated issue, and cited only emacs behavior, this
+ // logic was deleted.
+
return v.contains(c.URI)
}
+// knownFile reports whether the specified valid URI (or an alias) is known to the view.
func (v *View) knownFile(uri span.URI) bool {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- f, err := v.findFile(uri)
- return f != nil && err == nil
+ _, known := v.canonicalURI(uri, false)
+ return known
}
-// getFile returns a file for the given URI.
-func (v *View) getFile(uri span.URI) *fileBase {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- f, _ := v.findFile(uri)
- if f != nil {
- return f
- }
- f = &fileBase{
- view: v,
- fname: uri.Filename(),
- }
- v.mapFile(uri, f)
- return f
+// TODO(adonovan): opt: eliminate 'filename' optimization. I doubt the
+// cost of allocation is significant relative to the
+// stat/open/fstat/close operations that follow on Windows.
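+// canonicalURI pairs a canonical file URI with its file name.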
+type canonicalURI struct {
+ uri span.URI
+ filename string // = uri.Filename(), an optimization (on Windows)
}
-// findFile checks the cache for any file matching the given uri.
+// canonicalURI returns the canonical URI that denotes the same file
+// as uri, which may differ due to case insensitivity, unclean paths,
+// soft or hard links, and so on. If no previous alias was found, or
+// the file is missing, insert determines whether to make uri the
+// canonical representative of the file or to return false.
//
-// An error is only returned for an irreparable failure, for example, if the
-// filename in question does not exist.
-func (v *View) findFile(uri span.URI) (*fileBase, error) {
- if f := v.filesByURI[uri]; f != nil {
- // a perfect match
- return f, nil
- }
- // no exact match stored, time to do some real work
- // check for any files with the same basename
- fname := uri.Filename()
- basename := basename(fname)
+// The cache grows indefinitely without invalidation: file system
+// operations may cause two URIs that used to denote the same file to
+// no longer do so. Also, the basename cache grows without bound.
+// TODO(adonovan): fix both bugs.
+func (v *View) canonicalURI(uri span.URI, insert bool) (span.URI, bool) {
+ v.filesByMu.Lock()
+ defer v.filesByMu.Unlock()
+
+ // Have we seen this exact URI before?
+ if canonical, ok := v.filesByURI[uri]; ok {
+ return canonical, true
+ }
+
+ // Inspect all candidates with the same lowercase basename.
+ // This heuristic is easily defeated by symbolic links to files.
+ // Files with some basenames (e.g. doc.go) are very numerous.
+ //
+ // The set of candidates grows without bound, and incurs a
+ // linear sequence of SameFile queries to the file system.
+ //
+ // It is tempting to fetch the device/inode pair that
+ // uniquely identifies a file once, and then compare those
+ // pairs, but that would cause us to cache stale file system
+ // state (in addition to the filesByURI staleness).
+ filename := uri.Filename()
+ basename := strings.ToLower(filepath.Base(filename))
if candidates := v.filesByBase[basename]; candidates != nil {
- pathStat, err := os.Stat(fname)
- if os.IsNotExist(err) {
- return nil, err
- }
- if err != nil {
- return nil, nil // the file may exist, return without an error
- }
- for _, c := range candidates {
- if cStat, err := os.Stat(c.filename()); err == nil {
- if os.SameFile(pathStat, cStat) {
- // same file, map it
- v.mapFile(uri, c)
- return c, nil
+ if pathStat, _ := os.Stat(filename); pathStat != nil {
+ for _, c := range candidates {
+ if cStat, _ := os.Stat(c.filename); cStat != nil {
+ // On Windows, SameFile is more expensive as it must
+ // open the file and use the equivalent of fstat(2).
+ if os.SameFile(pathStat, cStat) {
+ v.filesByURI[uri] = c.uri
+ return c.uri, true
+ }
}
}
}
}
- // no file with a matching name was found, it wasn't in our cache
- return nil, nil
-}
-func (v *View) Shutdown(ctx context.Context) {
- v.session.removeView(ctx, v)
+ // No candidates, stat failed, or no candidate matched.
+ if insert {
+ v.filesByURI[uri] = uri
+ v.filesByBase[basename] = append(v.filesByBase[basename], canonicalURI{uri, filename})
+ }
+ return uri, insert
}
-// TODO(rFindley): probably some of this should also be one in View.Shutdown
-// above?
-func (v *View) shutdown(ctx context.Context) {
+// shutdown releases resources associated with the view, and waits for ongoing
+// work to complete.
+func (v *View) shutdown() {
// Cancel the initial workspace load if it is still running.
v.initCancelFirstAttempt()
- v.mu.Lock()
- if v.cancel != nil {
- v.cancel()
- v.cancel = nil
- }
- v.mu.Unlock()
v.snapshotMu.Lock()
if v.snapshot != nil {
- go v.snapshot.generation.Destroy("View.shutdown")
+ v.releaseSnapshot()
+ v.destroy(v.snapshot, "View.shutdown")
v.snapshot = nil
+ v.releaseSnapshot = nil
}
v.snapshotMu.Unlock()
- v.importsState.destroy()
-}
-func (v *View) Session() *Session {
- return v.session
+ v.importsState.destroy()
+ v.snapshotWG.Wait()
}
func (s *snapshot) IgnoredFile(uri span.URI) bool {
filename := uri.Filename()
var prefixes []string
- if len(s.workspace.getActiveModFiles()) == 0 {
+ if len(s.workspace.ActiveModFiles()) == 0 {
for _, entry := range filepath.SplitList(s.view.gopath) {
prefixes = append(prefixes, filepath.Join(entry, "src"))
}
} else {
prefixes = append(prefixes, s.view.gomodcache)
- for m := range s.workspace.getActiveModFiles() {
- prefixes = append(prefixes, dirURI(m).Filename())
+ for m := range s.workspace.ActiveModFiles() {
+ prefixes = append(prefixes, span.Dir(m).Filename())
}
}
for _, prefix := range prefixes {
@@ -583,7 +634,7 @@ func (v *View) getSnapshot() (*snapshot, func()) {
if v.snapshot == nil {
panic("getSnapshot called after shutdown")
}
- return v.snapshot, v.snapshot.generation.Acquire()
+ return v.snapshot, v.snapshot.Acquire()
}
func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) {
@@ -597,26 +648,36 @@ func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) {
<-s.view.initializationSema
}()
- if s.initializeOnce == nil {
+ s.mu.Lock()
+ initialized := s.initialized
+ s.mu.Unlock()
+
+ if initialized {
return
}
- s.initializeOnce.Do(func() {
- s.loadWorkspace(ctx, firstAttempt)
- s.collectAllKnownSubdirs(ctx)
- })
+
+ s.loadWorkspace(ctx, firstAttempt)
+ s.collectAllKnownSubdirs(ctx)
}
func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
defer func() {
- s.initializeOnce = nil
+ s.mu.Lock()
+ s.initialized = true
+ s.mu.Unlock()
if firstAttempt {
close(s.view.initialWorkspaceLoad)
}
}()
- // If we have multiple modules, we need to load them by paths.
- var scopes []interface{}
- var modDiagnostics []*source.Diagnostic
+ // TODO(rFindley): we should only locate template files on the first attempt,
+ // or guard it via a different mechanism.
+ s.locateTemplateFiles(ctx)
+
+ // Collect module paths to load by parsing go.mod files. If a module fails to
+ // parse, capture the parsing failure as a critical diagnostic.
+ var scopes []loadScope // scopes to load
+ var modDiagnostics []*source.Diagnostic // diagnostics for broken go.mod files
addError := func(uri span.URI, err error) {
modDiagnostics = append(modDiagnostics, &source.Diagnostic{
URI: uri,
@@ -625,17 +686,23 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
Message: err.Error(),
})
}
- s.locateTemplateFiles(ctx)
- if len(s.workspace.getActiveModFiles()) > 0 {
- for modURI := range s.workspace.getActiveModFiles() {
+
+ if len(s.workspace.ActiveModFiles()) > 0 {
+ for modURI := range s.workspace.ActiveModFiles() {
+ // Be careful not to add context cancellation errors as critical module
+ // errors.
fh, err := s.GetFile(ctx, modURI)
if err != nil {
- addError(modURI, err)
+ if ctx.Err() == nil {
+ addError(modURI, err)
+ }
continue
}
parsed, err := s.ParseMod(ctx, fh)
if err != nil {
- addError(modURI, err)
+ if ctx.Err() == nil {
+ addError(modURI, err)
+ }
continue
}
if parsed.File == nil || parsed.File.Module == nil {
@@ -649,12 +716,13 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
scopes = append(scopes, viewLoadScope("LOAD_VIEW"))
}
- // If we're loading anything, ensure we also load builtin.
- // TODO(rstambler): explain the rationale for this.
+ // If we're loading anything, ensure we also load builtin,
+ // since it provides fake definitions (and documentation)
+ // for types like int that are used everywhere.
if len(scopes) > 0 {
- scopes = append(scopes, PackagePath("builtin"))
+ scopes = append(scopes, packageLoadScope("builtin"))
}
- err := s.load(ctx, firstAttempt, scopes...)
+ err := s.load(ctx, true, scopes...)
// If the context is canceled on the first attempt, loading has failed
// because the go command has timed out--that should be a critical error.
@@ -673,18 +741,18 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
event.Error(ctx, "initial workspace load failed", err)
extractedDiags := s.extractGoCommandErrors(ctx, err)
criticalErr = &source.CriticalError{
- MainError: err,
- DiagList: append(modDiagnostics, extractedDiags...),
+ MainError: err,
+ Diagnostics: append(modDiagnostics, extractedDiags...),
}
case len(modDiagnostics) == 1:
criticalErr = &source.CriticalError{
- MainError: fmt.Errorf(modDiagnostics[0].Message),
- DiagList: modDiagnostics,
+ MainError: fmt.Errorf(modDiagnostics[0].Message),
+ Diagnostics: modDiagnostics,
}
case len(modDiagnostics) > 1:
criticalErr = &source.CriticalError{
- MainError: fmt.Errorf("error loading module names"),
- DiagList: modDiagnostics,
+ MainError: fmt.Errorf("error loading module names"),
+ Diagnostics: modDiagnostics,
}
}
@@ -707,23 +775,27 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
- if v.snapshot == nil {
+ prevSnapshot, prevReleaseSnapshot := v.snapshot, v.releaseSnapshot
+
+ if prevSnapshot == nil {
panic("invalidateContent called after shutdown")
}
// Cancel all still-running previous requests, since they would be
// operating on stale data.
- v.snapshot.cancel()
+ prevSnapshot.cancel()
// Do not clone a snapshot until its view has finished initializing.
- v.snapshot.AwaitInitialized(ctx)
+ prevSnapshot.AwaitInitialized(ctx)
- oldSnapshot := v.snapshot
+ // Save one lease of the cloned snapshot in the view.
+ v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
- v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
- go oldSnapshot.generation.Destroy("View.invalidateContent")
+ prevReleaseSnapshot()
+ v.destroy(prevSnapshot, "View.invalidateContent")
- return v.snapshot, v.snapshot.generation.Acquire()
+ // Return a second lease to the caller.
+ return v.snapshot, v.snapshot.Acquire()
}
func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (*workspaceInformation, error) {
@@ -739,26 +811,25 @@ func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI,
if err != nil {
return nil, err
}
-
- go111module := os.Getenv("GO111MODULE")
- if v, ok := options.Env["GO111MODULE"]; ok {
- go111module = v
+ goversionOutput, err := gocommand.GoVersionOutput(ctx, inv, s.gocmdRunner)
+ if err != nil {
+ return nil, err
}
+
// Make sure to get the `go env` before continuing with initialization.
- envVars, env, err := s.getGoEnv(ctx, folder.Filename(), goversion, go111module, options.EnvSlice())
+ envVars, env, err := s.getGoEnv(ctx, folder.Filename(), goversion, options.EnvSlice())
if err != nil {
return nil, err
}
- // If using 1.16, change the default back to auto. The primary effect of
- // GO111MODULE=on is to break GOPATH, which we aren't too interested in.
- if goversion >= 16 && go111module == "" {
- go111module = "auto"
- }
// The value of GOPACKAGESDRIVER is not returned through the go command.
gopackagesdriver := os.Getenv("GOPACKAGESDRIVER")
+ // TODO(rfindley): this looks wrong, or at least overly defensive. If the
+ // value of GOPACKAGESDRIVER is not returned from the go command... why do we
+ // look it up here?
for _, s := range env {
split := strings.SplitN(s, "=", 2)
if split[0] == "GOPACKAGESDRIVER" {
+ bug.Reportf("found GOPACKAGESDRIVER from the go command") // see note above
gopackagesdriver = split[1]
}
}
@@ -770,64 +841,52 @@ func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI,
return &workspaceInformation{
hasGopackagesDriver: hasGopackagesDriver,
- effectiveGo111Module: go111module,
- userGo111Module: go111moduleForVersion(go111module, goversion),
goversion: goversion,
+ goversionOutput: goversionOutput,
environmentVariables: envVars,
goEnv: env,
}, nil
}
-func go111moduleForVersion(go111module string, goversion int) go111module {
- // Off by default until Go 1.12.
- if go111module == "off" || (goversion < 12 && go111module == "") {
- return off
- }
- // On by default as of Go 1.16.
- if go111module == "on" || (goversion >= 16 && go111module == "") {
- return on
- }
- return auto
-}
-
-// findWorkspaceRoot searches for the best workspace root according to the
-// following heuristics:
-// - First, look for a parent directory containing a gopls.mod file
-// (experimental only).
-// - Then, a parent directory containing a go.mod file.
-// - Then, a child directory containing a go.mod file, if there is exactly
-// one (non-experimental only).
+// findWorkspaceModuleSource searches for a "module source" relative to the
+// given folder URI. A module source is the go.work or go.mod file that
+// provides module information.
+//
+// As a special case, this function returns a module source in a nested
+// directory if it finds no other module source, and exactly one nested module.
//
-// Otherwise, it returns folder.
-// TODO (rFindley): move this to workspace.go
-// TODO (rFindley): simplify this once workspace modules are enabled by default.
-func findWorkspaceRoot(ctx context.Context, folder span.URI, fs source.FileSource, excludePath func(string) bool, experimental bool) (span.URI, error) {
+// If no module source is found, it returns "".
+func findWorkspaceModuleSource(ctx context.Context, folderURI span.URI, fs source.FileSource, excludePath func(string) bool, experimental bool) (span.URI, error) {
patterns := []string{"go.work", "go.mod"}
if experimental {
patterns = []string{"go.work", "gopls.mod", "go.mod"}
}
+ folder := folderURI.Filename()
for _, basename := range patterns {
- dir, err := findRootPattern(ctx, folder, basename, fs)
+ match, err := findRootPattern(ctx, folder, basename, fs)
if err != nil {
- return "", fmt.Errorf("finding %s: %w", basename, err)
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return "", ctxErr
+ }
+ return "", err
}
- if dir != "" {
- return dir, nil
+ if match != "" {
+ return span.URIFromPath(match), nil
}
}
// The experimental workspace can handle nested modules at this point...
if experimental {
- return folder, nil
+ return "", nil
}
// ...else we should check if there's exactly one nested module.
- all, err := findModules(folder, excludePath, 2)
+ all, err := findModules(folderURI, excludePath, 2)
if err == errExhausted {
// Fall-back behavior: if we don't find any modules after searching 10000
// files, assume there are none.
event.Log(ctx, fmt.Sprintf("stopped searching for modules after %d files", fileLimit))
- return folder, nil
+ return "", nil
}
if err != nil {
return "", err
@@ -835,22 +894,27 @@ func findWorkspaceRoot(ctx context.Context, folder span.URI, fs source.FileSourc
if len(all) == 1 {
// range to access first element.
for uri := range all {
- return dirURI(uri), nil
+ return uri, nil
}
}
- return folder, nil
+ return "", nil
}
-func findRootPattern(ctx context.Context, folder span.URI, basename string, fs source.FileSource) (span.URI, error) {
- dir := folder.Filename()
+// findRootPattern looks for files with the given basename in dir or any parent
+// directory of dir, using the provided FileSource. It returns the first match,
+// starting from dir and searching parents.
+//
+// The resulting string is either the file path of a matching file with the
+// given basename, or "" if none was found.
+func findRootPattern(ctx context.Context, dir, basename string, fs source.FileSource) (string, error) {
for dir != "" {
target := filepath.Join(dir, basename)
exists, err := fileExists(ctx, span.URIFromPath(target), fs)
if err != nil {
- return "", err
+ return "", err // not readable or context cancelled
}
if exists {
- return span.URIFromPath(dir), nil
+ return target, nil
}
// Trailing separators must be trimmed, otherwise filepath.Split is a noop.
next, _ := filepath.Split(strings.TrimRight(dir, string(filepath.Separator)))
@@ -869,29 +933,8 @@ func defaultCheckPathCase(path string) error {
return nil
}
-func validBuildConfiguration(folder span.URI, ws *workspaceInformation, modFiles map[span.URI]struct{}) bool {
- // Since we only really understand the `go` command, if the user has a
- // different GOPACKAGESDRIVER, assume that their configuration is valid.
- if ws.hasGopackagesDriver {
- return true
- }
- // Check if the user is working within a module or if we have found
- // multiple modules in the workspace.
- if len(modFiles) > 0 {
- return true
- }
- // The user may have a multiple directories in their GOPATH.
- // Check if the workspace is within any of them.
- for _, gp := range filepath.SplitList(ws.gopath) {
- if source.InDir(filepath.Join(gp, "src"), folder.Filename()) {
- return true
- }
- }
- return false
-}
-
// getGoEnv gets the view's various GO* values.
-func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, go111module string, configEnv []string) (environmentVariables, map[string]string, error) {
+func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, configEnv []string) (environmentVariables, map[string]string, error) {
envVars := environmentVariables{}
vars := map[string]*string{
"GOCACHE": &envVars.gocache,
@@ -907,6 +950,12 @@ func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, go
for k := range vars {
args = append(args, k)
}
+ // TODO(rfindley): GOWORK is not a property of the session. It may change
+ // when a workfile is added or removed.
+ //
+ // We need to distinguish between GOWORK values that are set by the GOWORK
+ // environment variable, and GOWORK values that are computed based on the
+ // location of a go.work file in the directory hierarchy.
args = append(args, "GOWORK")
inv := gocommand.Invocation{
@@ -931,13 +980,12 @@ func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, go
}
// Old versions of Go don't have GOMODCACHE, so emulate it.
+ //
+ // TODO(rfindley): consistent with the treatment of go111module, we should
+ // provide a wrapper method rather than mutating this value.
if envVars.gomodcache == "" && envVars.gopath != "" {
envVars.gomodcache = filepath.Join(filepath.SplitList(envVars.gopath)[0], "pkg/mod")
}
- // GO111MODULE does not appear in `go env` output until Go 1.13.
- if goversion < 13 {
- envVars.go111module = go111module
- }
return envVars, env, err
}
@@ -945,24 +993,81 @@ func (v *View) IsGoPrivatePath(target string) bool {
return globsMatchPath(v.goprivate, target)
}
-func (v *View) ModuleUpgrades() map[string]string {
- v.mu.Lock()
- defer v.mu.Unlock()
+func (v *View) ModuleUpgrades(modfile span.URI) map[string]string {
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
upgrades := map[string]string{}
- for mod, ver := range v.moduleUpgrades {
+ for mod, ver := range v.moduleUpgrades[modfile] {
upgrades[mod] = ver
}
return upgrades
}
-func (v *View) RegisterModuleUpgrades(upgrades map[string]string) {
- v.mu.Lock()
- defer v.mu.Unlock()
+func (v *View) RegisterModuleUpgrades(modfile span.URI, upgrades map[string]string) {
+ // Return early if there are no upgrades.
+ if len(upgrades) == 0 {
+ return
+ }
+
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
+ m := v.moduleUpgrades[modfile]
+ if m == nil {
+ m = make(map[string]string)
+ v.moduleUpgrades[modfile] = m
+ }
for mod, ver := range upgrades {
- v.moduleUpgrades[mod] = ver
+ m[mod] = ver
+ }
+}
+
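+// ClearModuleUpgrades removes all recorded upgrades for modules in the given modfile.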
+func (v *View) ClearModuleUpgrades(modfile span.URI) {
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
+
+ delete(v.moduleUpgrades, modfile)
+}
+
+const maxGovulncheckResultAge = 1 * time.Hour // Invalidate results older than this limit.
+var timeNow = time.Now // for testing
+
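+// Vulnerabilities returns the known govulncheck results for the given modfiles,
+// or for all known modfiles if none are specified. Results older than
+// maxGovulncheckResultAge are discarded.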
+func (v *View) Vulnerabilities(modfiles ...span.URI) map[span.URI]*govulncheck.Result {
+ m := make(map[span.URI]*govulncheck.Result)
+ now := timeNow()
+ v.vulnsMu.Lock()
+ defer v.vulnsMu.Unlock()
+
+ if len(modfiles) == 0 { // empty means all modfiles
+ for modfile := range v.vulns {
+ modfiles = append(modfiles, modfile)
+ }
}
+ for _, modfile := range modfiles {
+ vuln := v.vulns[modfile]
+ if vuln != nil && now.Sub(vuln.AsOf) > maxGovulncheckResultAge {
+ v.vulns[modfile] = nil // same as SetVulnerabilities(modfile, nil)
+ vuln = nil
+ }
+ m[modfile] = vuln
+ }
+ return m
+}
+
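+// SetVulnerabilities records the govulncheck result for the given modfile.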
+func (v *View) SetVulnerabilities(modfile span.URI, vulns *govulncheck.Result) {
+ v.vulnsMu.Lock()
+ defer v.vulnsMu.Unlock()
+
+ v.vulns[modfile] = vulns
+}
+
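+// GoVersion returns the minor version of the Go command in use (the X in go 1.X).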
+func (v *View) GoVersion() int {
+ return v.workspaceInformation.goversion
+}
+
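+// GoVersionString returns the version string reported by the go command,
+// such as "go1.19.1".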
+func (v *View) GoVersionString() string {
+ return gocommand.ParseGoVersionOutput(v.workspaceInformation.goversionOutput)
}
// Copied from
@@ -1014,41 +1119,46 @@ var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
// FileHandle from the cache for temporary files is problematic, since we
// cannot delete it.
func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modContent []byte) (bool, error) {
+ // Legacy GOPATH workspace?
if s.workspaceMode()&moduleMode == 0 {
return false, nil
}
+
+ // Explicit -mod flag?
matches := modFlagRegexp.FindStringSubmatch(s.view.goEnv["GOFLAGS"])
- var modFlag string
if len(matches) != 0 {
- modFlag = matches[1]
- }
- if modFlag != "" {
- // Don't override an explicit '-mod=vendor' argument.
- // We do want to override '-mod=readonly': it would break various module code lenses,
- // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway.
- return modFlag == "vendor", nil
+ modFlag := matches[1]
+ if modFlag != "" {
+ // Don't override an explicit '-mod=vendor' argument.
+ // We do want to override '-mod=readonly': it would break various module code lenses,
+ // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway.
+ return modFlag == "vendor", nil
+ }
}
modFile, err := modfile.Parse(modURI.Filename(), modContent, nil)
if err != nil {
return false, err
}
+
+ // No vendor directory?
if fi, err := os.Stat(filepath.Join(s.view.rootURI.Filename(), "vendor")); err != nil || !fi.IsDir() {
return false, nil
}
+
+ // Vendoring enabled by default by go declaration in go.mod?
vendorEnabled := modFile.Go != nil && modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0
return vendorEnabled, nil
}
-func (v *View) allFilesExcluded(pkg *packages.Package) bool {
- opts := v.Options()
+func (v *View) allFilesExcluded(pkg *packages.Package, filterer *source.Filterer) bool {
folder := filepath.ToSlash(v.folder.Filename())
for _, f := range pkg.GoFiles {
f = filepath.ToSlash(f)
if !strings.HasPrefix(f, folder) {
return false
}
- if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) {
+ if !pathExcludedByFilter(strings.TrimPrefix(f, folder), filterer) {
return false
}
}
@@ -1056,8 +1166,9 @@ func (v *View) allFilesExcluded(pkg *packages.Package) bool {
}
func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool {
+ filterer := buildFilterer(root, gomodcache, opts)
return func(path string) bool {
- return pathExcludedByFilter(path, root, gomodcache, opts)
+ return pathExcludedByFilter(path, filterer)
}
}
@@ -1067,12 +1178,18 @@ func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) fun
// TODO(rfindley): passing root and gomodcache here makes it confusing whether
// path should be absolute or relative, and has already caused at least one
// bug.
-func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool {
+func pathExcludedByFilter(path string, filterer *source.Filterer) bool {
path = strings.TrimPrefix(filepath.ToSlash(path), "/")
+ return filterer.Disallow(path)
+}
+
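+// buildFilterer constructs a Filterer from the configured directory filters,
+// additionally excluding the module cache.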
+func buildFilterer(root, gomodcache string, opts *source.Options) *source.Filterer {
+ // TODO(rfindley): this looks wrong. If gomodcache isn't actually nested
+ // under root, this will do the wrong thing.
gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/")
filters := opts.DirectoryFilters
if gomodcache != "" {
filters = append(filters, "-"+gomodcache)
}
- return source.FiltersDisallow(path, filters)
+ return source.NewFilterer(filters)
}
diff --git a/internal/lsp/cache/view_test.go b/gopls/internal/lsp/cache/view_test.go
similarity index 55%
rename from internal/lsp/cache/view_test.go
rename to gopls/internal/lsp/cache/view_test.go
index d76dcda8ed4..617dc31a0e7 100644
--- a/internal/lsp/cache/view_test.go
+++ b/gopls/internal/lsp/cache/view_test.go
@@ -5,14 +5,18 @@ package cache
import (
"context"
+ "encoding/json"
"io/ioutil"
"os"
"path/filepath"
"testing"
+ "time"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
)
func TestCaseInsensitiveFilesystem(t *testing.T) {
@@ -49,7 +53,7 @@ func TestCaseInsensitiveFilesystem(t *testing.T) {
}
}
-func TestFindWorkspaceRoot(t *testing.T) {
+func TestFindWorkspaceModuleSource(t *testing.T) {
workspace := `
-- a/go.mod --
module a
@@ -67,6 +71,10 @@ module d-goplsworkspace
module de
-- f/g/go.mod --
module fg
+-- h/go.work --
+go 1.18
+-- h/i/go.mod --
+module hi
`
dir, err := fake.Tempdir(fake.UnpackTxt(workspace))
if err != nil {
@@ -79,16 +87,18 @@ module fg
experimental bool
}{
{"", "", false}, // no module at root, and more than one nested module
- {"a", "a", false},
- {"a/x", "a", false},
- {"a/x/y", "a", false},
- {"b/c", "b/c", false},
- {"d", "d/e", false},
- {"d", "d", true},
- {"d/e", "d/e", false},
- {"d/e", "d", true},
- {"f", "f/g", false},
- {"f", "f", true},
+ {"a", "a/go.mod", false},
+ {"a/x", "a/go.mod", false},
+ {"a/x/y", "a/go.mod", false},
+ {"b/c", "b/c/go.mod", false},
+ {"d", "d/e/go.mod", false},
+ {"d", "d/gopls.mod", true},
+ {"d/e", "d/e/go.mod", false},
+ {"d/e", "d/gopls.mod", true},
+ {"f", "f/g/go.mod", false},
+ {"f", "", true},
+ {"h", "h/go.work", false},
+ {"h/i", "h/go.work", false},
}
for _, test := range tests {
@@ -96,12 +106,16 @@ module fg
rel := fake.RelativeTo(dir)
folderURI := span.URIFromPath(rel.AbsPath(test.folder))
excludeNothing := func(string) bool { return false }
- got, err := findWorkspaceRoot(ctx, folderURI, &osFileSource{}, excludeNothing, test.experimental)
+ got, err := findWorkspaceModuleSource(ctx, folderURI, &osFileSource{}, excludeNothing, test.experimental)
if err != nil {
t.Fatal(err)
}
- if gotf, wantf := filepath.Clean(got.Filename()), rel.AbsPath(test.want); gotf != wantf {
- t.Errorf("findWorkspaceRoot(%q, %t) = %q, want %q", test.folder, test.experimental, gotf, wantf)
+ want := span.URI("")
+ if test.want != "" {
+ want = span.URIFromPath(rel.AbsPath(test.want))
+ }
+ if got != want {
+ t.Errorf("findWorkspaceModuleSource(%q, %t) = %q, want %q", test.folder, test.experimental, got, want)
}
}
}
@@ -111,18 +125,11 @@ func TestInVendor(t *testing.T) {
path string
inVendor bool
}{
- {
- path: "foo/vendor/x.go",
- inVendor: false,
- },
- {
- path: "foo/vendor/x/x.go",
- inVendor: true,
- },
- {
- path: "foo/x.go",
- inVendor: false,
- },
+ {"foo/vendor/x.go", false},
+ {"foo/vendor/x/x.go", true},
+ {"foo/x.go", false},
+ {"foo/vendor/foo.txt", false},
+ {"foo/vendor/modules.txt", false},
} {
if got := inVendor(span.URIFromPath(tt.path)); got != tt.inVendor {
t.Errorf("expected %s inVendor %v, got %v", tt.path, tt.inVendor, got)
@@ -161,15 +168,14 @@ func TestFilters(t *testing.T) {
}
for _, tt := range tests {
- opts := &source.Options{}
- opts.DirectoryFilters = tt.filters
+ filterer := source.NewFilterer(tt.filters)
for _, inc := range tt.included {
- if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) {
+ if pathExcludedByFilter(inc, filterer) {
t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)
}
}
for _, exc := range tt.excluded {
- if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) {
+ if !pathExcludedByFilter(exc, filterer) {
t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc)
}
}
@@ -216,3 +222,67 @@ func TestSuffixes(t *testing.T) {
}
}
}
+
+func TestView_Vulnerabilities(t *testing.T) {
+ // TODO(hyangah): use t.Cleanup when we get rid of go1.13 legacy CI.
+ defer func() { timeNow = time.Now }()
+
+ now := time.Now()
+
+ view := &View{
+ vulns: make(map[span.URI]*govulncheck.Result),
+ }
+ file1, file2 := span.URIFromPath("f1/go.mod"), span.URIFromPath("f2/go.mod")
+
+ vuln1 := &govulncheck.Result{AsOf: now.Add(-(maxGovulncheckResultAge * 3) / 4)} // already ~3/4*maxGovulncheckResultAge old
+ view.SetVulnerabilities(file1, vuln1)
+
+ vuln2 := &govulncheck.Result{AsOf: now} // fresh.
+ view.SetVulnerabilities(file2, vuln2)
+
+ t.Run("fresh", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: vuln1,
+ file2: vuln2,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+
+ // maxGovulncheckResultAge/2 later
+ timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge / 2) }
+ t.Run("after30min", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: nil, // expired.
+ file2: vuln2,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+
+ // maxGovulncheckResultAge later
+ timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge + time.Minute) }
+
+ t.Run("after1hr", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: nil,
+ file2: nil,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+}
+
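+// toJSON marshals x to indented JSON for stable comparisons in tests.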
+func toJSON(x interface{}) string {
+ b, _ := json.MarshalIndent(x, "", " ")
+ return string(b)
+}
diff --git a/internal/lsp/cache/workspace.go b/gopls/internal/lsp/cache/workspace.go
similarity index 69%
rename from internal/lsp/cache/workspace.go
rename to gopls/internal/lsp/cache/workspace.go
index 669ce9290c9..da2abdb68b4 100644
--- a/internal/lsp/cache/workspace.go
+++ b/gopls/internal/lsp/cache/workspace.go
@@ -15,9 +15,9 @@ import (
"sync"
"golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
)
@@ -28,7 +28,7 @@ const (
legacyWorkspace = iota // non-module or single module mode
goplsModWorkspace // modules provided by a gopls.mod file
goWorkWorkspace // modules provided by a go.work file
- fileSystemWorkspace // modules scanned from the filesystem
+ fileSystemWorkspace // modules found by walking the filesystem
)
func (s workspaceSource) String() string {
@@ -46,6 +46,19 @@ func (s workspaceSource) String() string {
}
}
+// workspaceCommon holds immutable information about the workspace setup.
+//
+// TODO(rfindley): there is some redundancy here with workspaceInformation.
+// Reconcile these two types.
+type workspaceCommon struct {
+ root span.URI
+ excludePath func(string) bool
+
+ // explicitGowork is, if non-empty, the URI for the explicit go.work file
+ // provided via the user's environment.
+ explicitGowork span.URI
+}
+
// workspace tracks go.mod files in the workspace, along with the
// gopls.mod file, to provide support for multi-module workspaces.
//
@@ -58,8 +71,9 @@ func (s workspaceSource) String() string {
// This type is immutable (or rather, idempotent), so that it may be shared
// across multiple snapshots.
type workspace struct {
- root span.URI
- excludePath func(string) bool
+ workspaceCommon
+
+ // The source of modules in this workspace.
moduleSource workspaceSource
// activeModFiles holds the active go.mod files.
@@ -95,15 +109,24 @@ type workspace struct {
//
// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
// scans the filesystem to find modules.
-func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff bool, useWsModule bool) (*workspace, error) {
+//
+// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
+// the criticalError method to surface problems in the workspace.
+func newWorkspace(ctx context.Context, root, explicitGowork span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
ws := &workspace{
- root: root,
- excludePath: excludePath,
+ workspaceCommon: workspaceCommon{
+ root: root,
+ explicitGowork: explicitGowork,
+ excludePath: excludePath,
+ },
}
// The user may have a gopls.mod or go.work file that defines their
// workspace.
- if err := loadExplicitWorkspaceFile(ctx, ws, fs); err == nil {
+ //
+ // TODO(rfindley): if GO111MODULE=off, this looks wrong, though there are
+ // probably other problems.
+ if err := ws.loadExplicitWorkspaceFile(ctx, fs); err == nil {
return ws, nil
}
@@ -135,15 +158,15 @@ func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excl
// loadExplicitWorkspaceFile loads workspace information from go.work or
// gopls.mod files, setting the active modules, mod file, and module source
// accordingly.
-func loadExplicitWorkspaceFile(ctx context.Context, ws *workspace, fs source.FileSource) error {
+func (ws *workspace) loadExplicitWorkspaceFile(ctx context.Context, fs source.FileSource) error {
for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- fh, err := fs.GetFile(ctx, uriForSource(ws.root, src))
+ fh, err := fs.GetFile(ctx, uriForSource(ws.root, ws.explicitGowork, src))
if err != nil {
return err
}
contents, err := fh.Read()
if err != nil {
- continue
+ continue // TODO(rfindley): is it correct to proceed here?
}
var file *modfile.File
var activeModFiles map[span.URI]struct{}
@@ -170,14 +193,38 @@ func loadExplicitWorkspaceFile(ctx context.Context, ws *workspace, fs source.Fil
var noHardcodedWorkspace = errors.New("no hardcoded workspace")
+// TODO(rfindley): eliminate getKnownModFiles.
func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
return w.knownModFiles
}
-func (w *workspace) getActiveModFiles() map[span.URI]struct{} {
+// ActiveModFiles returns the set of active mod files for the current workspace.
+func (w *workspace) ActiveModFiles() map[span.URI]struct{} {
return w.activeModFiles
}
+// criticalError returns a critical error related to the workspace setup.
+func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
+ // For now, we narrowly report errors related to `go.work` files.
+ //
+ // TODO(rfindley): investigate whether other workspace validation errors
+ // can be consolidated here.
+ if w.moduleSource == goWorkWorkspace {
+ // We should have already built the modfile, but build here to be
+ // consistent about accessing w.mod after w.build.
+ //
+ // TODO(rfindley): build eagerly. Building lazily is a premature
+ // optimization that poses a significant burden on the code.
+ w.build(ctx, fs)
+ if w.buildErr != nil {
+ return &source.CriticalError{
+ MainError: w.buildErr,
+ }
+ }
+ }
+ return nil
+}
+
// modFile gets the workspace modfile associated with this workspace,
// computing it if it doesn't exist.
//
@@ -207,9 +254,10 @@ func (w *workspace) build(ctx context.Context, fs source.FileSource) {
// would not be obvious to the user how to recover.
ctx = xcontext.Detach(ctx)
- // If our module source is not gopls.mod, try to build the workspace module
- // from modules. Fall back on the pre-existing mod file if parsing fails.
- if w.moduleSource != goplsModWorkspace {
+ // If the module source is from the filesystem, try to build the workspace
+ // module from active modules discovered by scanning the filesystem. Fall
+ // back on the pre-existing mod file if parsing fails.
+ if w.moduleSource == fileSystemWorkspace {
file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
switch {
case err == nil:
@@ -222,6 +270,7 @@ func (w *workspace) build(ctx context.Context, fs source.FileSource) {
w.buildErr = err
}
}
+
if w.mod != nil {
w.wsDirs = map[span.URI]struct{}{
w.root: {},
@@ -235,18 +284,21 @@ func (w *workspace) build(ctx context.Context, fs source.FileSource) {
w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
}
}
+
// Ensure that there is always at least the root dir.
if len(w.wsDirs) == 0 {
w.wsDirs = map[span.URI]struct{}{
w.root: {},
}
}
+
sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
if err == nil {
w.sum = sum
} else {
event.Error(ctx, "building workspace sum file", err)
}
+
w.built = true
}
@@ -257,36 +309,36 @@ func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
for d := range w.wsDirs {
dirs = append(dirs, d)
}
- sort.Slice(dirs, func(i, j int) bool {
- return source.CompareURI(dirs[i], dirs[j]) < 0
- })
+ sort.Slice(dirs, func(i, j int) bool { return dirs[i] < dirs[j] })
return dirs
}
-// invalidate returns a (possibly) new workspace after invalidating the changed
+// Clone returns a (possibly) new workspace after invalidating the changed
// files. If w is still valid in the presence of changedURIs, it returns itself
// unmodified.
//
-// The returned changed and reload flags control the level of invalidation.
-// Some workspace changes may affect workspace contents without requiring a
-// reload of metadata (for example, unsaved changes to a go.mod or go.sum
-// file).
-func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, changed, reload bool) {
+// The returned needReinit flag indicates to the caller that the workspace
+// needs to be reinitialized (because a relevant go.mod or go.work file has
+// been changed).
+//
+// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
+// should determine whether to re-initialize.
+func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
w.buildMu.Lock()
defer w.buildMu.Unlock()
// Clone the workspace. This may be discarded if nothing changed.
+ changed := false
result := &workspace{
- root: w.root,
- moduleSource: w.moduleSource,
- knownModFiles: make(map[span.URI]struct{}),
- activeModFiles: make(map[span.URI]struct{}),
- workFile: w.workFile,
- mod: w.mod,
- sum: w.sum,
- wsDirs: w.wsDirs,
- excludePath: w.excludePath,
+ workspaceCommon: w.workspaceCommon,
+ moduleSource: w.moduleSource,
+ knownModFiles: make(map[span.URI]struct{}),
+ activeModFiles: make(map[span.URI]struct{}),
+ workFile: w.workFile,
+ mod: w.mod,
+ sum: w.sum,
+ wsDirs: w.wsDirs,
}
for k, v := range w.knownModFiles {
result.knownModFiles[k] = v
@@ -295,12 +347,19 @@ func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileCh
result.activeModFiles[k] = v
}
+ equalURI := func(a, b span.URI) (r bool) {
+ // This query is a strange mix of syntax and file system state:
+ // deletion of a file causes a false result if the name doesn't change.
+ // Our tests exercise only the first clause.
+ return a == b || span.SameExistingFile(a, b)
+ }
+
// First handle changes to the go.work or gopls.mod file. This must be
// considered before any changes to go.mod or go.sum files, as these files
// determine which modules we care about. If go.work/gopls.mod has changed
// we need to either re-read it if it exists or walk the filesystem if it
// has been deleted. go.work should override the gopls.mod if both exist.
- changed, reload = handleWorkspaceFileChanges(ctx, result, changes, fs)
+ changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
// Next, handle go.mod changes that could affect our workspace.
for uri, change := range changes {
// Otherwise, we only care about go.mod files in the workspace directory.
@@ -308,8 +367,8 @@ func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileCh
continue
}
changed = true
- active := result.moduleSource != legacyWorkspace || source.CompareURI(modURI(w.root), uri) == 0
- reload = reload || (active && change.fileHandle.Saved())
+ active := result.moduleSource != legacyWorkspace || equalURI(modURI(w.root), uri)
+ needReinit = needReinit || (active && change.fileHandle.Saved())
// Don't mess with the list of mod files if using go.work or gopls.mod.
if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
continue
@@ -339,93 +398,78 @@ func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileCh
// Only changes to active go.sum files actually cause the workspace to
// change.
changed = true
- reload = reload || change.fileHandle.Saved()
+ needReinit = needReinit || change.fileHandle.Saved()
}
if !changed {
- return w, false, false
+ return w, false
}
- return result, changed, reload
+ return result, needReinit
}
// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
// file, updating ws accordingly. ws.root must be set.
func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
- // If go.work/gopls.mod has changed we need to either re-read it if it
- // exists or walk the filesystem if it has been deleted.
- // go.work should override the gopls.mod if both exist.
- for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- uri := uriForSource(ws.root, src)
- // File opens/closes are just no-ops.
- change, ok := changes[uri]
- if !ok {
- continue
- }
- if change.isUnchanged {
- break
+ if ws.moduleSource != goWorkWorkspace && ws.moduleSource != goplsModWorkspace {
+ return false, false
+ }
+
+ uri := uriForSource(ws.root, ws.explicitGowork, ws.moduleSource)
+ // File opens/closes are just no-ops.
+ change, ok := changes[uri]
+ if !ok || change.isUnchanged {
+ return false, false
+ }
+ if change.exists {
+		// Only invalidate if the file actually parses.
+ // Otherwise, stick with the current file.
+ var parsedFile *modfile.File
+ var parsedModules map[span.URI]struct{}
+ var err error
+ switch ws.moduleSource {
+ case goWorkWorkspace:
+ parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
+ case goplsModWorkspace:
+ parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
}
- if change.exists {
- // Only invalidate if the file if it actually parses.
- // Otherwise, stick with the current file.
- var parsedFile *modfile.File
- var parsedModules map[span.URI]struct{}
- var err error
- switch src {
- case goWorkWorkspace:
- parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
- case goplsModWorkspace:
- parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
- }
- if err != nil {
- // An unparseable file should not invalidate the workspace:
- // nothing good could come from changing the workspace in
- // this case.
- event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
- } else {
- // only update the modfile if it parsed.
- changed = true
- reload = change.fileHandle.Saved()
- ws.mod = parsedFile
- ws.moduleSource = src
- ws.knownModFiles = parsedModules
- ws.activeModFiles = make(map[span.URI]struct{})
- for k, v := range parsedModules {
- ws.activeModFiles[k] = v
- }
- }
- break // We've found an explicit workspace file, so can stop looking.
+ if err != nil {
+ // An unparseable file should not invalidate the workspace:
+ // nothing good could come from changing the workspace in
+ // this case.
+ //
+ // TODO(rfindley): well actually, it could potentially lead to a better
+ // critical error. Evaluate whether we can unify this case with the
+ // error returned by newWorkspace, without needlessly invalidating
+ // metadata.
+ event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
} else {
- // go.work/gopls.mod is deleted. search for modules again.
+			// Only update the modfile if it parsed.
changed = true
- reload = true
- ws.moduleSource = fileSystemWorkspace
- // The parsed file is no longer valid.
- ws.mod = nil
- knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
- if err != nil {
- ws.knownModFiles = nil
- ws.activeModFiles = nil
- event.Error(ctx, "finding file system modules", err)
- } else {
- ws.knownModFiles = knownModFiles
- ws.activeModFiles = make(map[span.URI]struct{})
- for k, v := range ws.knownModFiles {
- ws.activeModFiles[k] = v
- }
+ reload = change.fileHandle.Saved()
+ ws.mod = parsedFile
+ ws.knownModFiles = parsedModules
+ ws.activeModFiles = make(map[span.URI]struct{})
+ for k, v := range parsedModules {
+ ws.activeModFiles[k] = v
}
}
+ return changed, reload
}
- return changed, reload
+	// go.work/gopls.mod is deleted. We should never see this, as the view should have been recreated.
+ panic(fmt.Sprintf("internal error: workspace file %q deleted without reinitialization", uri))
}
// goplsModURI returns the URI for the gopls.mod file contained in root.
-func uriForSource(root span.URI, src workspaceSource) span.URI {
+func uriForSource(root, explicitGowork span.URI, src workspaceSource) span.URI {
var basename string
switch src {
case goplsModWorkspace:
basename = "gopls.mod"
case goWorkWorkspace:
+ if explicitGowork != "" {
+ return explicitGowork
+ }
basename = "go.work"
default:
return ""
@@ -443,6 +487,12 @@ func isGoMod(uri span.URI) bool {
return filepath.Base(uri.Filename()) == "go.mod"
}
+// isGoWork reports whether uri is a go.work file.
+func isGoWork(uri span.URI) bool {
+ return filepath.Base(uri.Filename()) == "go.work"
+}
+
+// isGoSum reports whether uri is a go.sum or go.work.sum file.
func isGoSum(uri span.URI) bool {
return filepath.Base(uri.Filename()) == "go.sum" || filepath.Base(uri.Filename()) == "go.work.sum"
}
@@ -468,12 +518,6 @@ func fileHandleExists(fh source.FileHandle) (bool, error) {
return false, err
}
-// TODO(rFindley): replace this (and similar) with a uripath package analogous
-// to filepath.
-func dirURI(uri span.URI) span.URI {
- return span.URIFromPath(filepath.Dir(uri.Filename()))
-}
-
// getLegacyModules returns a module set containing at most the root module.
func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
@@ -501,6 +545,10 @@ func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs so
modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
modFiles[modURI] = struct{}{}
}
+
+ // TODO(rfindley): we should either not build the workspace modfile here, or
+ // not fail so hard. A failure in building the workspace modfile should not
+ // invalidate the active module paths extracted above.
modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
if err != nil {
return nil, nil, err
diff --git a/internal/lsp/cache/workspace_test.go b/gopls/internal/lsp/cache/workspace_test.go
similarity index 81%
rename from internal/lsp/cache/workspace_test.go
rename to gopls/internal/lsp/cache/workspace_test.go
index b809ad196a6..45ae0cc3432 100644
--- a/internal/lsp/cache/workspace_test.go
+++ b/gopls/internal/lsp/cache/workspace_test.go
@@ -12,9 +12,9 @@ import (
"testing"
"golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
)
// osFileSource is a fileSource that just reads from the operating system.
@@ -143,38 +143,6 @@ module moda.com`,
dirs: []string{".", "a"},
},
},
- {
- desc: "removing module",
- initial: `
--- a/go.mod --
-module moda.com
--- a/go.sum --
-golang.org/x/mod v0.3.0 h1:deadbeef
--- b/go.mod --
-module modb.com
--- b/go.sum --
-golang.org/x/mod v0.3.0 h1:beefdead`,
- initialState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a", "b"},
- sum: "golang.org/x/mod v0.3.0 h1:beefdead\ngolang.org/x/mod v0.3.0 h1:deadbeef\n",
- },
- updates: map[string]wsChange{
- "gopls.mod": {`module gopls-workspace
-
-require moda.com v0.0.0-goplsworkspace
-replace moda.com => $SANDBOX_WORKDIR/a`, true},
- },
- wantChanged: true,
- wantReload: true,
- finalState: wsState{
- modules: []string{"a/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a"},
- sum: "golang.org/x/mod v0.3.0 h1:deadbeef\n",
- },
- },
{
desc: "adding module",
initial: `
@@ -207,34 +175,6 @@ replace modb.com => $SANDBOX_WORKDIR/b`, true},
dirs: []string{".", "a", "b"},
},
},
- {
- desc: "deleting gopls.mod",
- initial: `
--- gopls.mod --
-module gopls-workspace
-
-require moda.com v0.0.0-goplsworkspace
-replace moda.com => $SANDBOX_WORKDIR/a
--- a/go.mod --
-module moda.com
--- b/go.mod --
-module modb.com`,
- initialState: wsState{
- modules: []string{"a/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a"},
- },
- updates: map[string]wsChange{
- "gopls.mod": {"", true},
- },
- wantChanged: true,
- wantReload: true,
- finalState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a", "b"},
- },
- },
{
desc: "broken module parsing",
initial: `
@@ -280,7 +220,7 @@ replace gopls.test => ../../gopls.test2`, false},
fs := &osFileSource{}
excludeNothing := func(string) bool { return false }
- w, err := newWorkspace(ctx, root, fs, excludeNothing, false, !test.legacyMode)
+ w, err := newWorkspace(ctx, root, "", fs, excludeNothing, false, !test.legacyMode)
if err != nil {
t.Fatal(err)
}
@@ -298,12 +238,13 @@ replace gopls.test => ../../gopls.test2`, false},
t.Fatal(err)
}
}
- got, gotChanged, gotReload := w.invalidate(ctx, changes, fs)
+ got, gotReinit := w.Clone(ctx, changes, fs)
+ gotChanged := got != w
if gotChanged != test.wantChanged {
t.Errorf("w.invalidate(): got changed %t, want %t", gotChanged, test.wantChanged)
}
- if gotReload != test.wantReload {
- t.Errorf("w.invalidate(): got reload %t, want %t", gotReload, test.wantReload)
+ if gotReinit != test.wantReload {
+		t.Errorf("w.Clone(): got reinit %t, want %t", gotReinit, test.wantReload)
}
checkState(ctx, t, fs, rel, got, test.finalState)
}
@@ -324,7 +265,7 @@ func workspaceFromTxtar(t *testing.T, files string) (*workspace, func(), error)
fs := &osFileSource{}
excludeNothing := func(string) bool { return false }
- workspace, err := newWorkspace(ctx, root, fs, excludeNothing, false, false)
+ workspace, err := newWorkspace(ctx, root, "", fs, excludeNothing, false, false)
return workspace, cleanup, err
}
@@ -385,7 +326,7 @@ func checkState(ctx context.Context, t *testing.T, fs source.FileSource, rel fak
t.Errorf("module source = %v, want %v", got.moduleSource, want.source)
}
modules := make(map[span.URI]struct{})
- for k := range got.getActiveModFiles() {
+ for k := range got.ActiveModFiles() {
modules[k] = struct{}{}
}
for _, modPath := range want.modules {
diff --git a/internal/lsp/call_hierarchy.go b/gopls/internal/lsp/call_hierarchy.go
similarity index 92%
rename from internal/lsp/call_hierarchy.go
rename to gopls/internal/lsp/call_hierarchy.go
index 43c4ea8d5b7..79eeb25cc15 100644
--- a/internal/lsp/call_hierarchy.go
+++ b/gopls/internal/lsp/call_hierarchy.go
@@ -7,8 +7,8 @@ package lsp
import (
"context"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
func (s *Server) prepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) {
diff --git a/internal/lsp/cmd/call_hierarchy.go b/gopls/internal/lsp/cmd/call_hierarchy.go
similarity index 94%
rename from internal/lsp/cmd/call_hierarchy.go
rename to gopls/internal/lsp/cmd/call_hierarchy.go
index c9f9e73e0e2..295dea8b0d4 100644
--- a/internal/lsp/cmd/call_hierarchy.go
+++ b/gopls/internal/lsp/cmd/call_hierarchy.go
@@ -10,8 +10,8 @@ import (
"fmt"
"strings"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -47,7 +47,7 @@ func (c *callHierarchy) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -114,7 +114,7 @@ func (c *callHierarchy) Run(ctx context.Context, args ...string) error {
// callItemPrintString returns a protocol.CallHierarchyItem object represented as a string.
// item and call ranges (protocol.Range) are converted to user friendly spans (1-indexed).
func callItemPrintString(ctx context.Context, conn *connection, item protocol.CallHierarchyItem, callsURI protocol.DocumentURI, calls []protocol.Range) (string, error) {
- itemFile := conn.AddFile(ctx, item.URI.SpanURI())
+ itemFile := conn.openFile(ctx, item.URI.SpanURI())
if itemFile.err != nil {
return "", itemFile.err
}
@@ -123,7 +123,7 @@ func callItemPrintString(ctx context.Context, conn *connection, item protocol.Ca
return "", err
}
- callsFile := conn.AddFile(ctx, callsURI.SpanURI())
+ callsFile := conn.openFile(ctx, callsURI.SpanURI())
if callsURI != "" && callsFile.err != nil {
return "", callsFile.err
}
diff --git a/internal/lsp/cmd/capabilities_test.go b/gopls/internal/lsp/cmd/capabilities_test.go
similarity index 95%
rename from internal/lsp/cmd/capabilities_test.go
rename to gopls/internal/lsp/cmd/capabilities_test.go
index 1d01b4bd0d7..4b38db751a4 100644
--- a/internal/lsp/cmd/capabilities_test.go
+++ b/gopls/internal/lsp/cmd/capabilities_test.go
@@ -12,9 +12,9 @@ import (
"path/filepath"
"testing"
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
// TestCapabilities does some minimal validation of the server's adherence to the LSP.
@@ -43,7 +43,7 @@ func TestCapabilities(t *testing.T) {
params.Capabilities.Workspace.Configuration = true
// Send an initialize request to the server.
- c.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), c.Client)
+ c.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil, nil), app.options), c.Client)
result, err := c.Server.Initialize(ctx, params)
if err != nil {
t.Fatal(err)
diff --git a/internal/lsp/cmd/check.go b/gopls/internal/lsp/cmd/check.go
similarity index 95%
rename from internal/lsp/cmd/check.go
rename to gopls/internal/lsp/cmd/check.go
index 9a136699270..cf081ca2615 100644
--- a/internal/lsp/cmd/check.go
+++ b/gopls/internal/lsp/cmd/check.go
@@ -9,7 +9,7 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
// check implements the check verb for gopls.
@@ -48,7 +48,7 @@ func (c *check) Run(ctx context.Context, args ...string) error {
for _, arg := range args {
uri := span.URIFromPath(arg)
uris = append(uris, uri)
- file := conn.AddFile(ctx, uri)
+ file := conn.openFile(ctx, uri)
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go
similarity index 94%
rename from internal/lsp/cmd/cmd.go
rename to gopls/internal/lsp/cmd/cmd.go
index a81eb839535..3aa74d067ae 100644
--- a/internal/lsp/cmd/cmd.go
+++ b/gopls/internal/lsp/cmd/cmd.go
@@ -22,14 +22,14 @@ import (
"text/tabwriter"
"time"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/tool"
"golang.org/x/tools/internal/xcontext"
)
@@ -286,7 +286,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
switch {
case app.Remote == "":
connection := newConnection(app)
- connection.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), connection.Client)
+ connection.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil, nil), app.options), connection.Client)
ctx = protocol.WithClient(ctx, connection.Client)
return connection, connection.initialize(ctx, app.options)
case strings.HasPrefix(app.Remote, "internal@"):
@@ -399,7 +399,7 @@ type cmdFile struct {
uri span.URI
mapper *protocol.ColumnMapper
err error
- added bool
+ open bool
diagnostics []protocol.Diagnostic
}
@@ -422,6 +422,10 @@ func fileURI(uri protocol.DocumentURI) span.URI {
return sURI
}
+func (c *cmdClient) CodeLensRefresh(context.Context) error { return nil }
+
+func (c *cmdClient) LogTrace(context.Context, *protocol.LogTraceParams) error { return nil }
+
func (c *cmdClient) ShowMessage(ctx context.Context, p *protocol.ShowMessageParams) error { return nil }
func (c *cmdClient) ShowMessageRequest(ctx context.Context, p *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
@@ -554,22 +558,24 @@ func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile {
return file
}
-func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile {
- c.Client.filesMu.Lock()
- defer c.Client.filesMu.Unlock()
+func (c *cmdClient) openFile(ctx context.Context, uri span.URI) *cmdFile {
+ c.filesMu.Lock()
+ defer c.filesMu.Unlock()
- file := c.Client.getFile(ctx, uri)
- // This should never happen.
- if file == nil {
- return &cmdFile{
- uri: uri,
- err: fmt.Errorf("no file found for %s", uri),
- }
+ file := c.getFile(ctx, uri)
+ if file.err != nil || file.open {
+ return file
}
- if file.err != nil || file.added {
+ file.open = true
+ return file
+}
+
+func (c *connection) openFile(ctx context.Context, uri span.URI) *cmdFile {
+ file := c.Client.openFile(ctx, uri)
+ if file.err != nil {
return file
}
- file.added = true
+
p := &protocol.DidOpenTextDocumentParams{
TextDocument: protocol.TextDocumentItem{
URI: protocol.URIFromSpanURI(uri),
diff --git a/internal/lsp/cmd/definition.go b/gopls/internal/lsp/cmd/definition.go
similarity index 94%
rename from internal/lsp/cmd/definition.go
rename to gopls/internal/lsp/cmd/definition.go
index 44e6fc8c717..edfd7392902 100644
--- a/internal/lsp/cmd/definition.go
+++ b/gopls/internal/lsp/cmd/definition.go
@@ -12,9 +12,9 @@ import (
"os"
"strings"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -80,7 +80,7 @@ func (d *definition) Run(ctx context.Context, args ...string) error {
}
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -113,7 +113,7 @@ func (d *definition) Run(ctx context.Context, args ...string) error {
if hover == nil {
return fmt.Errorf("%v: not an identifier", from)
}
- file = conn.AddFile(ctx, fileURI(locs[0].URI))
+ file = conn.openFile(ctx, fileURI(locs[0].URI))
if file.err != nil {
return fmt.Errorf("%v: %v", from, file.err)
}
diff --git a/internal/lsp/cmd/folding_range.go b/gopls/internal/lsp/cmd/folding_range.go
similarity index 92%
rename from internal/lsp/cmd/folding_range.go
rename to gopls/internal/lsp/cmd/folding_range.go
index 513c9bdd227..7a9cbf9e8fb 100644
--- a/internal/lsp/cmd/folding_range.go
+++ b/gopls/internal/lsp/cmd/folding_range.go
@@ -9,8 +9,8 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -44,7 +44,7 @@ func (r *foldingRanges) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/format.go b/gopls/internal/lsp/cmd/format.go
similarity index 84%
rename from internal/lsp/cmd/format.go
rename to gopls/internal/lsp/cmd/format.go
index 5e17ed4a570..2b8109c670a 100644
--- a/internal/lsp/cmd/format.go
+++ b/gopls/internal/lsp/cmd/format.go
@@ -10,10 +10,10 @@ import (
"fmt"
"io/ioutil"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
)
// format implements the format verb for gopls.
@@ -57,7 +57,7 @@ func (c *format) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
for _, arg := range args {
spn := span.Parse(arg)
- file := conn.AddFile(ctx, spn.URI())
+ file := conn.openFile(ctx, spn.URI())
if file.err != nil {
return file.err
}
@@ -76,11 +76,10 @@ func (c *format) Run(ctx context.Context, args ...string) error {
if err != nil {
return fmt.Errorf("%v: %v", spn, err)
}
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
+ formatted, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
if err != nil {
return fmt.Errorf("%v: %v", spn, err)
}
- formatted := diff.ApplyEdits(string(file.mapper.Content), sedits)
printIt := true
if c.List {
printIt = false
@@ -96,8 +95,11 @@ func (c *format) Run(ctx context.Context, args ...string) error {
}
if c.Diff {
printIt = false
- u := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
- fmt.Print(u)
+ unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
}
if printIt {
fmt.Print(formatted)
diff --git a/internal/lsp/cmd/help_test.go b/gopls/internal/lsp/cmd/help_test.go
similarity index 97%
rename from internal/lsp/cmd/help_test.go
rename to gopls/internal/lsp/cmd/help_test.go
index 536d19dc219..f8d9b0b75ca 100644
--- a/internal/lsp/cmd/help_test.go
+++ b/gopls/internal/lsp/cmd/help_test.go
@@ -12,7 +12,7 @@ import (
"path/filepath"
"testing"
- "golang.org/x/tools/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
"golang.org/x/tools/internal/testenv"
"golang.org/x/tools/internal/tool"
)
diff --git a/internal/lsp/cmd/highlight.go b/gopls/internal/lsp/cmd/highlight.go
similarity index 89%
rename from internal/lsp/cmd/highlight.go
rename to gopls/internal/lsp/cmd/highlight.go
index a325a2d53d9..d8b3d226e0b 100644
--- a/internal/lsp/cmd/highlight.go
+++ b/gopls/internal/lsp/cmd/highlight.go
@@ -8,10 +8,9 @@ import (
"context"
"flag"
"fmt"
- "sort"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -47,7 +46,7 @@ func (r *highlight) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -78,9 +77,7 @@ func (r *highlight) Run(ctx context.Context, args ...string) error {
results = append(results, s)
}
// Sort results to make tests deterministic since DocumentHighlight uses a map.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
+ span.SortSpans(results)
for _, s := range results {
fmt.Println(s)
diff --git a/internal/lsp/cmd/implementation.go b/gopls/internal/lsp/cmd/implementation.go
similarity index 91%
rename from internal/lsp/cmd/implementation.go
rename to gopls/internal/lsp/cmd/implementation.go
index 7b42d994303..dbc5fc3223b 100644
--- a/internal/lsp/cmd/implementation.go
+++ b/gopls/internal/lsp/cmd/implementation.go
@@ -10,8 +10,8 @@ import (
"fmt"
"sort"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -47,7 +47,7 @@ func (i *implementation) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -71,7 +71,7 @@ func (i *implementation) Run(ctx context.Context, args ...string) error {
var spans []string
for _, impl := range implementations {
- f := conn.AddFile(ctx, fileURI(impl.URI))
+ f := conn.openFile(ctx, fileURI(impl.URI))
span, err := f.mapper.Span(impl)
if err != nil {
return err
diff --git a/internal/lsp/cmd/imports.go b/gopls/internal/lsp/cmd/imports.go
similarity index 79%
rename from internal/lsp/cmd/imports.go
rename to gopls/internal/lsp/cmd/imports.go
index 49778603d23..fadc8466834 100644
--- a/internal/lsp/cmd/imports.go
+++ b/gopls/internal/lsp/cmd/imports.go
@@ -10,10 +10,10 @@ import (
"fmt"
"io/ioutil"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/tool"
)
@@ -56,7 +56,7 @@ func (t *imports) Run(ctx context.Context, args ...string) error {
from := span.Parse(args[0])
uri := from.URI()
- file := conn.AddFile(ctx, uri)
+ file := conn.openFile(ctx, uri)
if file.err != nil {
return file.err
}
@@ -74,17 +74,17 @@ func (t *imports) Run(ctx context.Context, args ...string) error {
continue
}
for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
}
}
}
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
+ newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
if err != nil {
return fmt.Errorf("%v: %v", edits, err)
}
- newContent := diff.ApplyEdits(string(file.mapper.Content), sedits)
-
filename := file.uri.Filename()
switch {
case t.Write:
@@ -92,8 +92,11 @@ func (t *imports) Run(ctx context.Context, args ...string) error {
ioutil.WriteFile(filename, []byte(newContent), 0644)
}
case t.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
- fmt.Print(diffs)
+ unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
default:
fmt.Print(string(newContent))
}
diff --git a/internal/lsp/cmd/info.go b/gopls/internal/lsp/cmd/info.go
similarity index 98%
rename from internal/lsp/cmd/info.go
rename to gopls/internal/lsp/cmd/info.go
index 8e581a37cb1..68ef40ffb29 100644
--- a/internal/lsp/cmd/info.go
+++ b/gopls/internal/lsp/cmd/info.go
@@ -14,9 +14,9 @@ import (
"os"
"strings"
- "golang.org/x/tools/internal/lsp/browser"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/browser"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/tool"
)
diff --git a/internal/lsp/cmd/links.go b/gopls/internal/lsp/cmd/links.go
similarity index 93%
rename from internal/lsp/cmd/links.go
rename to gopls/internal/lsp/cmd/links.go
index 1c48c8c50b9..b5413bba59f 100644
--- a/internal/lsp/cmd/links.go
+++ b/gopls/internal/lsp/cmd/links.go
@@ -11,8 +11,8 @@ import (
"fmt"
"os"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -53,7 +53,7 @@ func (l *links) Run(ctx context.Context, args ...string) error {
from := span.Parse(args[0])
uri := from.URI()
- file := conn.AddFile(ctx, uri)
+ file := conn.openFile(ctx, uri)
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/prepare_rename.go b/gopls/internal/lsp/cmd/prepare_rename.go
similarity index 94%
rename from internal/lsp/cmd/prepare_rename.go
rename to gopls/internal/lsp/cmd/prepare_rename.go
index 44a192b5be3..e61bd622fe0 100644
--- a/internal/lsp/cmd/prepare_rename.go
+++ b/gopls/internal/lsp/cmd/prepare_rename.go
@@ -10,8 +10,8 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -51,7 +51,7 @@ func (r *prepareRename) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/references.go b/gopls/internal/lsp/cmd/references.go
similarity index 92%
rename from internal/lsp/cmd/references.go
rename to gopls/internal/lsp/cmd/references.go
index 0697d2e11b7..2abbb919299 100644
--- a/internal/lsp/cmd/references.go
+++ b/gopls/internal/lsp/cmd/references.go
@@ -10,8 +10,8 @@ import (
"fmt"
"sort"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -51,7 +51,7 @@ func (r *references) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -74,7 +74,7 @@ func (r *references) Run(ctx context.Context, args ...string) error {
}
var spans []string
for _, l := range locations {
- f := conn.AddFile(ctx, fileURI(l.URI))
+ f := conn.openFile(ctx, fileURI(l.URI))
// convert location to span for user-friendly 1-indexed line
// and column numbers
span, err := f.mapper.Span(l)
diff --git a/internal/lsp/cmd/remote.go b/gopls/internal/lsp/cmd/remote.go
similarity index 97%
rename from internal/lsp/cmd/remote.go
rename to gopls/internal/lsp/cmd/remote.go
index 0f4c7216444..684981cfff8 100644
--- a/internal/lsp/cmd/remote.go
+++ b/gopls/internal/lsp/cmd/remote.go
@@ -13,8 +13,8 @@ import (
"log"
"os"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
)
type remote struct {
diff --git a/internal/lsp/cmd/rename.go b/gopls/internal/lsp/cmd/rename.go
similarity index 80%
rename from internal/lsp/cmd/rename.go
rename to gopls/internal/lsp/cmd/rename.go
index 9411275949f..2cbd260febb 100644
--- a/internal/lsp/cmd/rename.go
+++ b/gopls/internal/lsp/cmd/rename.go
@@ -13,10 +13,10 @@ import (
"path/filepath"
"sort"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/tool"
)
@@ -61,7 +61,7 @@ func (r *rename) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
@@ -81,24 +81,24 @@ func (r *rename) Run(ctx context.Context, args ...string) error {
var orderedURIs []string
edits := map[span.URI][]protocol.TextEdit{}
for _, c := range edit.DocumentChanges {
- uri := fileURI(c.TextDocument.URI)
- edits[uri] = append(edits[uri], c.Edits...)
- orderedURIs = append(orderedURIs, string(uri))
+ if c.TextDocumentEdit != nil {
+ uri := fileURI(c.TextDocumentEdit.TextDocument.URI)
+ edits[uri] = append(edits[uri], c.TextDocumentEdit.Edits...)
+ orderedURIs = append(orderedURIs, string(uri))
+ }
}
sort.Strings(orderedURIs)
changeCount := len(orderedURIs)
for _, u := range orderedURIs {
uri := span.URIFromURI(u)
- cmdFile := conn.AddFile(ctx, uri)
+ cmdFile := conn.openFile(ctx, uri)
filename := cmdFile.uri.Filename()
- // convert LSP-style edits to []diff.TextEdit cuz Spans are handy
- renameEdits, err := source.FromProtocolEdits(cmdFile.mapper, edits[uri])
+ newContent, renameEdits, err := source.ApplyProtocolEdits(cmdFile.mapper, edits[uri])
if err != nil {
return fmt.Errorf("%v: %v", edits, err)
}
- newContent := diff.ApplyEdits(string(cmdFile.mapper.Content), renameEdits)
switch {
case r.Write:
@@ -110,8 +110,11 @@ func (r *rename) Run(ctx context.Context, args ...string) error {
}
ioutil.WriteFile(filename, []byte(newContent), 0644)
case r.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits)
- fmt.Print(diffs)
+ unified, err := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
default:
if len(orderedURIs) > 1 {
fmt.Printf("%s:\n", filepath.Base(filename))
diff --git a/internal/lsp/cmd/semantictokens.go b/gopls/internal/lsp/cmd/semantictokens.go
similarity index 96%
rename from internal/lsp/cmd/semantictokens.go
rename to gopls/internal/lsp/cmd/semantictokens.go
index 7dbb7f93c61..3ed08d0248b 100644
--- a/internal/lsp/cmd/semantictokens.go
+++ b/gopls/internal/lsp/cmd/semantictokens.go
@@ -16,10 +16,10 @@ import (
"os"
"unicode/utf8"
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
)
// generate semantic tokens and interpolate them in the file
@@ -82,7 +82,7 @@ func (c *semtok) Run(ctx context.Context, args ...string) error {
}
defer conn.terminate(ctx)
uri := span.URIFromPath(args[0])
- file := conn.AddFile(ctx, uri)
+ file := conn.openFile(ctx, uri)
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/serve.go b/gopls/internal/lsp/cmd/serve.go
similarity index 94%
rename from internal/lsp/cmd/serve.go
rename to gopls/internal/lsp/cmd/serve.go
index 1c229a422b4..44d4b1d1d6b 100644
--- a/internal/lsp/cmd/serve.go
+++ b/gopls/internal/lsp/cmd/serve.go
@@ -14,12 +14,12 @@ import (
"os"
"time"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/internal/fakenet"
"golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/tool"
)
@@ -101,7 +101,7 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
return fmt.Errorf("creating forwarder: %w", err)
}
} else {
- ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon)
+ ss = lsprpc.NewStreamServer(cache.New(nil, nil), isDaemon, s.app.options)
}
var network, addr string
diff --git a/internal/lsp/cmd/signature.go b/gopls/internal/lsp/cmd/signature.go
similarity index 93%
rename from internal/lsp/cmd/signature.go
rename to gopls/internal/lsp/cmd/signature.go
index db948430183..77805628ad0 100644
--- a/internal/lsp/cmd/signature.go
+++ b/gopls/internal/lsp/cmd/signature.go
@@ -9,8 +9,8 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -46,7 +46,7 @@ func (r *signature) Run(ctx context.Context, args ...string) error {
defer conn.terminate(ctx)
from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
+ file := conn.openFile(ctx, from.URI())
if file.err != nil {
return file.err
}
diff --git a/internal/lsp/cmd/subcommands.go b/gopls/internal/lsp/cmd/subcommands.go
similarity index 100%
rename from internal/lsp/cmd/subcommands.go
rename to gopls/internal/lsp/cmd/subcommands.go
diff --git a/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go
similarity index 80%
rename from internal/lsp/cmd/suggested_fix.go
rename to gopls/internal/lsp/cmd/suggested_fix.go
index c6f26e2d685..78310b3b3b9 100644
--- a/internal/lsp/cmd/suggested_fix.go
+++ b/gopls/internal/lsp/cmd/suggested_fix.go
@@ -10,10 +10,10 @@ import (
"fmt"
"io/ioutil"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/tool"
)
@@ -56,7 +56,7 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
from := span.Parse(args[0])
uri := from.URI()
- file := conn.AddFile(ctx, uri)
+ file := conn.openFile(ctx, uri)
if file.err != nil {
return file.err
}
@@ -103,8 +103,10 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
}
if !from.HasPosition() {
for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
}
}
continue
@@ -118,8 +120,10 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
}
if span.ComparePoint(from.Start(), spn.Start()) == 0 {
for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
}
}
break
@@ -129,18 +133,19 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
// If suggested fix is not a diagnostic, still must collect edits.
if len(a.Diagnostics) == 0 {
for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
}
}
}
}
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
+ newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
if err != nil {
return fmt.Errorf("%v: %v", edits, err)
}
- newContent := diff.ApplyEdits(string(file.mapper.Content), sedits)
filename := file.uri.Filename()
switch {
@@ -149,7 +154,10 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
ioutil.WriteFile(filename, []byte(newContent), 0644)
}
case s.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ diffs, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
fmt.Print(diffs)
default:
fmt.Print(string(newContent))
diff --git a/internal/lsp/cmd/symbols.go b/gopls/internal/lsp/cmd/symbols.go
similarity index 96%
rename from internal/lsp/cmd/symbols.go
rename to gopls/internal/lsp/cmd/symbols.go
index b43a6dcd1f7..3ecdff8011c 100644
--- a/internal/lsp/cmd/symbols.go
+++ b/gopls/internal/lsp/cmd/symbols.go
@@ -11,8 +11,8 @@ import (
"fmt"
"sort"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/tool"
)
diff --git a/internal/lsp/cmd/test/call_hierarchy.go b/gopls/internal/lsp/cmd/test/call_hierarchy.go
similarity index 95%
rename from internal/lsp/cmd/test/call_hierarchy.go
rename to gopls/internal/lsp/cmd/test/call_hierarchy.go
index 38f8ed707a4..bb8d306224a 100644
--- a/internal/lsp/cmd/test/call_hierarchy.go
+++ b/gopls/internal/lsp/cmd/test/call_hierarchy.go
@@ -10,9 +10,9 @@ import (
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) {
diff --git a/gopls/internal/lsp/cmd/test/check.go b/gopls/internal/lsp/cmd/test/check.go
new file mode 100644
index 00000000000..35153c2700d
--- /dev/null
+++ b/gopls/internal/lsp/cmd/test/check.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmdtest
+
+import (
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// Diagnostics runs the "gopls check" command on a single file, parses
+// its diagnostics, and compares against the expectations defined by
+// markers in the source file.
+func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) {
+ out, _ := r.runGoplsCmd(t, "check", uri.Filename())
+
+ content, err := ioutil.ReadFile(uri.Filename())
+ if err != nil {
+ t.Fatal(err)
+ }
+ mapper := protocol.NewColumnMapper(uri, content)
+
+ // Parse command output into a set of diagnostics.
+ var got []*source.Diagnostic
+ for _, line := range strings.Split(out, "\n") {
+ if line == "" {
+ continue // skip blank
+ }
+ parts := strings.SplitN(line, ": ", 2) // "span: message"
+ if len(parts) != 2 {
+ t.Fatalf("output line not of form 'span: message': %q", line)
+ }
+ spn, message := span.Parse(parts[0]), parts[1]
+ rng, err := mapper.Range(spn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Set only the fields needed by DiffDiagnostics.
+ got = append(got, &source.Diagnostic{
+ URI: uri,
+ Range: rng,
+ Message: message,
+ })
+ }
+
+ // Don't expect fields that we can't populate from the command output.
+ for _, diag := range want {
+ if diag.Source == "no_diagnostics" {
+ continue // see DiffDiagnostics
+ }
+ diag.Source = ""
+ diag.Severity = 0
+ }
+
+ tests.CompareDiagnostics(t, uri, want, got)
+}
diff --git a/internal/lsp/cmd/test/cmdtest.go b/gopls/internal/lsp/cmd/test/cmdtest.go
similarity index 86%
rename from internal/lsp/cmd/test/cmdtest.go
rename to gopls/internal/lsp/cmd/test/cmdtest.go
index 312f7b8b435..16497093883 100644
--- a/internal/lsp/cmd/test/cmdtest.go
+++ b/gopls/internal/lsp/cmd/test/cmdtest.go
@@ -12,18 +12,19 @@ import (
"fmt"
"io"
"os"
+ "runtime"
"sync"
"testing"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/jsonrpc2/servertest"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/tool"
)
@@ -37,8 +38,8 @@ type runner struct {
func TestCommandLine(t *testing.T, testdata string, options func(*source.Options)) {
// On Android, the testdata directory is not copied to the runner.
- if stat, err := os.Stat(testdata); err != nil || !stat.IsDir() {
- t.Skip("testdata directory not present")
+ if runtime.GOOS == "android" {
+ t.Skip("testdata directory not present on android")
}
tests.RunTests(t, testdata, false, func(t *testing.T, datum *tests.Data) {
ctx := tests.Context(t)
@@ -50,8 +51,8 @@ func TestCommandLine(t *testing.T, testdata string, options func(*source.Options
func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer {
ctx = debug.WithInstance(ctx, "", "")
- cache := cache.New(options)
- ss := lsprpc.NewStreamServer(cache, false)
+ cache := cache.New(nil, nil)
+ ss := lsprpc.NewStreamServer(cache, false, options)
return servertest.NewTCPServer(ctx, ss, nil)
}
@@ -113,6 +114,12 @@ func (r *runner) Hover(t *testing.T, spn span.Span, info string) {
//TODO: hovering not supported on command line
}
+func (r *runner) InlayHints(t *testing.T, spn span.Span) {
+ // TODO: inlayHints not supported on command line
+}
+
+func (r *runner) SelectionRanges(t *testing.T, spn span.Span) {}
+
func (r *runner) runGoplsCmd(t testing.TB, args ...string) (string, string) {
rStdout, wStdout, err := os.Pipe()
if err != nil {
diff --git a/internal/lsp/cmd/test/definition.go b/gopls/internal/lsp/cmd/test/definition.go
similarity index 71%
rename from internal/lsp/cmd/test/definition.go
rename to gopls/internal/lsp/cmd/test/definition.go
index c82d9a6c1ae..ca84e80ebe2 100644
--- a/internal/lsp/cmd/test/definition.go
+++ b/gopls/internal/lsp/cmd/test/definition.go
@@ -10,10 +10,8 @@ import (
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
)
type godefMode int
@@ -47,15 +45,11 @@ func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) {
if mode&jsonGoDef != 0 && runtime.GOOS == "windows" {
got = strings.Replace(got, "file:///", "file://", -1)
}
- expect := strings.TrimSpace(string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
+ expect := strings.TrimSpace(string(r.data.Golden(t, tag, uri.Filename(), func() ([]byte, error) {
return []byte(got), nil
})))
if expect != "" && !strings.HasPrefix(got, expect) {
- d, err := myers.ComputeEdits("", expect, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("definition %v failed with %#v\n%s", tag, args, diff.ToUnified("expect", "got", expect, d))
+ tests.CheckSameMarkdown(t, got, expect)
}
}
}
diff --git a/internal/lsp/cmd/test/folding_range.go b/gopls/internal/lsp/cmd/test/folding_range.go
similarity index 81%
rename from internal/lsp/cmd/test/folding_range.go
rename to gopls/internal/lsp/cmd/test/folding_range.go
index 4478687b549..184c01a05bb 100644
--- a/internal/lsp/cmd/test/folding_range.go
+++ b/gopls/internal/lsp/cmd/test/folding_range.go
@@ -7,7 +7,7 @@ package cmdtest
import (
"testing"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
@@ -15,7 +15,7 @@ func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
uri := spn.URI()
filename := uri.Filename()
got, _ := r.NormalizeGoplsCmd(t, "folding_ranges", filename)
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
+ expect := string(r.data.Golden(t, goldenTag, filename, func() ([]byte, error) {
return []byte(got), nil
}))
diff --git a/internal/lsp/cmd/test/format.go b/gopls/internal/lsp/cmd/test/format.go
similarity index 95%
rename from internal/lsp/cmd/test/format.go
rename to gopls/internal/lsp/cmd/test/format.go
index 77eedd440e4..368d535b20a 100644
--- a/internal/lsp/cmd/test/format.go
+++ b/gopls/internal/lsp/cmd/test/format.go
@@ -6,14 +6,15 @@ package cmdtest
import (
"bytes"
- exec "golang.org/x/sys/execabs"
"io/ioutil"
"os"
"regexp"
"strings"
"testing"
- "golang.org/x/tools/internal/span"
+ exec "golang.org/x/sys/execabs"
+
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/testenv"
)
@@ -21,7 +22,7 @@ func (r *runner) Format(t *testing.T, spn span.Span) {
tag := "gofmt"
uri := spn.URI()
filename := uri.Filename()
- expect := string(r.data.Golden(tag, filename, func() ([]byte, error) {
+ expect := string(r.data.Golden(t, tag, filename, func() ([]byte, error) {
cmd := exec.Command("gofmt", filename)
contents, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files
contents = []byte(r.Normalize(fixFileHeader(string(contents))))
diff --git a/internal/lsp/cmd/test/highlight.go b/gopls/internal/lsp/cmd/test/highlight.go
similarity index 94%
rename from internal/lsp/cmd/test/highlight.go
rename to gopls/internal/lsp/cmd/test/highlight.go
index 99e8b2c3fc7..cd51b093c68 100644
--- a/internal/lsp/cmd/test/highlight.go
+++ b/gopls/internal/lsp/cmd/test/highlight.go
@@ -9,7 +9,7 @@ import (
"fmt"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Highlight(t *testing.T, spn span.Span, spans []span.Span) {
diff --git a/internal/lsp/cmd/test/implementation.go b/gopls/internal/lsp/cmd/test/implementation.go
similarity index 95%
rename from internal/lsp/cmd/test/implementation.go
rename to gopls/internal/lsp/cmd/test/implementation.go
index 189452466ce..e24584da99d 100644
--- a/internal/lsp/cmd/test/implementation.go
+++ b/gopls/internal/lsp/cmd/test/implementation.go
@@ -9,7 +9,7 @@ import (
"sort"
"testing"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Implementation(t *testing.T, spn span.Span, imps []span.Span) {
diff --git a/internal/lsp/cmd/test/imports.go b/gopls/internal/lsp/cmd/test/imports.go
similarity index 51%
rename from internal/lsp/cmd/test/imports.go
rename to gopls/internal/lsp/cmd/test/imports.go
index ce8aee55dfa..d26c88664e2 100644
--- a/internal/lsp/cmd/test/imports.go
+++ b/gopls/internal/lsp/cmd/test/imports.go
@@ -7,23 +7,19 @@ package cmdtest
import (
"testing"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
)
func (r *runner) Import(t *testing.T, spn span.Span) {
uri := spn.URI()
filename := uri.Filename()
got, _ := r.NormalizeGoplsCmd(t, "imports", filename)
- want := string(r.data.Golden("goimports", filename, func() ([]byte, error) {
+ want := string(r.data.Golden(t, "goimports", filename, func() ([]byte, error) {
return []byte(got), nil
}))
if want != got {
- d, err := myers.ComputeEdits(uri, want, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("imports failed for %s, expected:\n%s", filename, diff.ToUnified("want", "got", want, d))
+ unified := diff.Unified("want", "got", want, got)
+ t.Errorf("imports failed for %s, expected:\n%s", filename, unified)
}
}
diff --git a/internal/lsp/cmd/test/links.go b/gopls/internal/lsp/cmd/test/links.go
similarity index 81%
rename from internal/lsp/cmd/test/links.go
rename to gopls/internal/lsp/cmd/test/links.go
index 88df768323a..a9616ee48a9 100644
--- a/internal/lsp/cmd/test/links.go
+++ b/gopls/internal/lsp/cmd/test/links.go
@@ -8,9 +8,9 @@ import (
"encoding/json"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {
diff --git a/internal/lsp/cmd/test/prepare_rename.go b/gopls/internal/lsp/cmd/test/prepare_rename.go
similarity index 85%
rename from internal/lsp/cmd/test/prepare_rename.go
rename to gopls/internal/lsp/cmd/test/prepare_rename.go
index b5359e57b42..c818c0197da 100644
--- a/internal/lsp/cmd/test/prepare_rename.go
+++ b/gopls/internal/lsp/cmd/test/prepare_rename.go
@@ -8,10 +8,10 @@ import (
"fmt"
"testing"
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) {
diff --git a/internal/lsp/cmd/test/references.go b/gopls/internal/lsp/cmd/test/references.go
similarity index 97%
rename from internal/lsp/cmd/test/references.go
rename to gopls/internal/lsp/cmd/test/references.go
index 66d0d066286..85c9bc84a62 100644
--- a/internal/lsp/cmd/test/references.go
+++ b/gopls/internal/lsp/cmd/test/references.go
@@ -9,7 +9,7 @@ import (
"sort"
"testing"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) References(t *testing.T, spn span.Span, itemList []span.Span) {
diff --git a/internal/lsp/cmd/test/rename.go b/gopls/internal/lsp/cmd/test/rename.go
similarity index 63%
rename from internal/lsp/cmd/test/rename.go
rename to gopls/internal/lsp/cmd/test/rename.go
index 0fe2d1e1825..a9eb31e3877 100644
--- a/internal/lsp/cmd/test/rename.go
+++ b/gopls/internal/lsp/cmd/test/rename.go
@@ -8,7 +8,8 @@ import (
"fmt"
"testing"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
@@ -17,13 +18,13 @@ func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
loc := fmt.Sprintf("%v", spn)
got, err := r.NormalizeGoplsCmd(t, "rename", loc, newText)
got += err
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
+ want := string(r.data.Golden(t, goldenTag, filename, func() ([]byte, error) {
return []byte(got), nil
}))
- if expect != got {
- t.Errorf("rename failed with %v %v\nexpected:\n%s\ngot:\n%s", loc, newText, expect, got)
+ if diff := compare.Text(want, got); diff != "" {
+ t.Errorf("rename failed with %v %v (-want +got):\n%s", loc, newText, diff)
}
// now check we can build a valid unified diff
unified, _ := r.NormalizeGoplsCmd(t, "rename", "-d", loc, newText)
- checkUnified(t, filename, expect, unified)
+ checkUnified(t, filename, want, unified)
}
diff --git a/internal/lsp/cmd/test/semanticdriver.go b/gopls/internal/lsp/cmd/test/semanticdriver.go
similarity index 88%
rename from internal/lsp/cmd/test/semanticdriver.go
rename to gopls/internal/lsp/cmd/test/semanticdriver.go
index 247f755bf20..069dd64f6e6 100644
--- a/internal/lsp/cmd/test/semanticdriver.go
+++ b/gopls/internal/lsp/cmd/test/semanticdriver.go
@@ -8,7 +8,7 @@ import (
"strings"
"testing"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
@@ -18,7 +18,7 @@ func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
if stderr != "" {
t.Fatalf("%s: %q", filename, stderr)
}
- want := string(r.data.Golden("semantic", filename, func() ([]byte, error) {
+ want := string(r.data.Golden(t, "semantic", filename, func() ([]byte, error) {
return []byte(got), nil
}))
if want != got {
diff --git a/internal/lsp/cmd/test/signature.go b/gopls/internal/lsp/cmd/test/signature.go
similarity index 78%
rename from internal/lsp/cmd/test/signature.go
rename to gopls/internal/lsp/cmd/test/signature.go
index f6bdaebf312..40669e8d223 100644
--- a/internal/lsp/cmd/test/signature.go
+++ b/gopls/internal/lsp/cmd/test/signature.go
@@ -8,9 +8,9 @@ import (
"fmt"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) {
@@ -25,7 +25,7 @@ func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.Signa
return
}
goldenTag := want.Signatures[0].Label + "-signature"
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
+ expect := string(r.data.Golden(t, goldenTag, filename, func() ([]byte, error) {
return []byte(got), nil
}))
if tests.NormalizeAny(expect) != tests.NormalizeAny(got) {
diff --git a/internal/lsp/cmd/test/suggested_fix.go b/gopls/internal/lsp/cmd/test/suggested_fix.go
similarity index 52%
rename from internal/lsp/cmd/test/suggested_fix.go
rename to gopls/internal/lsp/cmd/test/suggested_fix.go
index c819e051735..1e61fe9bcd5 100644
--- a/internal/lsp/cmd/test/suggested_fix.go
+++ b/gopls/internal/lsp/cmd/test/suggested_fix.go
@@ -8,28 +8,31 @@ import (
"fmt"
"testing"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
)
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
+func (r *runner) SuggestedFix(t *testing.T, spn span.Span, suggestedFixes []tests.SuggestedFix, expectedActions int) {
uri := spn.URI()
filename := uri.Filename()
args := []string{"fix", "-a", fmt.Sprintf("%s", spn)}
- for _, kind := range actionKinds {
- if kind == "refactor.rewrite" {
+ var actionKinds []string
+ for _, sf := range suggestedFixes {
+ if sf.ActionKind == "refactor.rewrite" {
t.Skip("refactor.rewrite is not yet supported on the command line")
}
+ actionKinds = append(actionKinds, sf.ActionKind)
}
args = append(args, actionKinds...)
got, stderr := r.NormalizeGoplsCmd(t, args...)
if stderr == "ExecuteCommand is not yet supported on the command line" {
return // don't skip to keep the summary counts correct
}
- want := string(r.data.Golden("suggestedfix_"+tests.SpanName(spn), filename, func() ([]byte, error) {
+ want := string(r.data.Golden(t, "suggestedfix_"+tests.SpanName(spn), filename, func() ([]byte, error) {
return []byte(got), nil
}))
if want != got {
- t.Errorf("suggested fixes failed for %s:\n%s", filename, tests.Diff(t, want, got))
+ t.Errorf("suggested fixes failed for %s:\n%s", filename, compare.Text(want, got))
}
}
diff --git a/internal/lsp/cmd/test/symbols.go b/gopls/internal/lsp/cmd/test/symbols.go
similarity index 55%
rename from internal/lsp/cmd/test/symbols.go
rename to gopls/internal/lsp/cmd/test/symbols.go
index 055be030829..aaf3725d9c0 100644
--- a/internal/lsp/cmd/test/symbols.go
+++ b/gopls/internal/lsp/cmd/test/symbols.go
@@ -7,17 +7,18 @@ package cmdtest
import (
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) {
filename := uri.Filename()
got, _ := r.NormalizeGoplsCmd(t, "symbols", filename)
- expect := string(r.data.Golden("symbols", filename, func() ([]byte, error) {
+ expect := string(r.data.Golden(t, "symbols", filename, func() ([]byte, error) {
return []byte(got), nil
}))
- if expect != got {
- t.Errorf("symbols failed for %s expected:\n%s\ngot:\n%s", filename, expect, got)
+ if diff := compare.Text(expect, got); diff != "" {
+ t.Errorf("symbols differ from expected:\n%s", diff)
}
}
diff --git a/internal/lsp/cmd/test/workspace_symbol.go b/gopls/internal/lsp/cmd/test/workspace_symbol.go
similarity index 72%
rename from internal/lsp/cmd/test/workspace_symbol.go
rename to gopls/internal/lsp/cmd/test/workspace_symbol.go
index ce965f03a31..40c2c65d019 100644
--- a/internal/lsp/cmd/test/workspace_symbol.go
+++ b/gopls/internal/lsp/cmd/test/workspace_symbol.go
@@ -11,9 +11,10 @@ import (
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
@@ -43,11 +44,11 @@ func (r *runner) runWorkspaceSymbols(t *testing.T, uri span.URI, matcher, query
sort.Strings(filtered)
got := r.Normalize(strings.Join(filtered, "\n") + "\n")
- expect := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
+ expect := string(r.data.Golden(t, fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
return []byte(got), nil
}))
if expect != got {
- t.Errorf("workspace_symbol failed for %s:\n%s", query, tests.Diff(t, expect, got))
+ t.Errorf("workspace_symbol failed for %s:\n%s", query, compare.Text(expect, got))
}
}
diff --git a/internal/lsp/cmd/usage/api-json.hlp b/gopls/internal/lsp/cmd/usage/api-json.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/api-json.hlp
rename to gopls/internal/lsp/cmd/usage/api-json.hlp
diff --git a/internal/lsp/cmd/usage/bug.hlp b/gopls/internal/lsp/cmd/usage/bug.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/bug.hlp
rename to gopls/internal/lsp/cmd/usage/bug.hlp
diff --git a/internal/lsp/cmd/usage/call_hierarchy.hlp b/gopls/internal/lsp/cmd/usage/call_hierarchy.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/call_hierarchy.hlp
rename to gopls/internal/lsp/cmd/usage/call_hierarchy.hlp
diff --git a/internal/lsp/cmd/usage/check.hlp b/gopls/internal/lsp/cmd/usage/check.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/check.hlp
rename to gopls/internal/lsp/cmd/usage/check.hlp
diff --git a/internal/lsp/cmd/usage/definition.hlp b/gopls/internal/lsp/cmd/usage/definition.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/definition.hlp
rename to gopls/internal/lsp/cmd/usage/definition.hlp
diff --git a/internal/lsp/cmd/usage/fix.hlp b/gopls/internal/lsp/cmd/usage/fix.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/fix.hlp
rename to gopls/internal/lsp/cmd/usage/fix.hlp
diff --git a/internal/lsp/cmd/usage/folding_ranges.hlp b/gopls/internal/lsp/cmd/usage/folding_ranges.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/folding_ranges.hlp
rename to gopls/internal/lsp/cmd/usage/folding_ranges.hlp
diff --git a/internal/lsp/cmd/usage/format.hlp b/gopls/internal/lsp/cmd/usage/format.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/format.hlp
rename to gopls/internal/lsp/cmd/usage/format.hlp
diff --git a/internal/lsp/cmd/usage/help.hlp b/gopls/internal/lsp/cmd/usage/help.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/help.hlp
rename to gopls/internal/lsp/cmd/usage/help.hlp
diff --git a/internal/lsp/cmd/usage/highlight.hlp b/gopls/internal/lsp/cmd/usage/highlight.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/highlight.hlp
rename to gopls/internal/lsp/cmd/usage/highlight.hlp
diff --git a/internal/lsp/cmd/usage/implementation.hlp b/gopls/internal/lsp/cmd/usage/implementation.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/implementation.hlp
rename to gopls/internal/lsp/cmd/usage/implementation.hlp
diff --git a/internal/lsp/cmd/usage/imports.hlp b/gopls/internal/lsp/cmd/usage/imports.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/imports.hlp
rename to gopls/internal/lsp/cmd/usage/imports.hlp
diff --git a/internal/lsp/cmd/usage/inspect.hlp b/gopls/internal/lsp/cmd/usage/inspect.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/inspect.hlp
rename to gopls/internal/lsp/cmd/usage/inspect.hlp
diff --git a/internal/lsp/cmd/usage/licenses.hlp b/gopls/internal/lsp/cmd/usage/licenses.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/licenses.hlp
rename to gopls/internal/lsp/cmd/usage/licenses.hlp
diff --git a/internal/lsp/cmd/usage/links.hlp b/gopls/internal/lsp/cmd/usage/links.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/links.hlp
rename to gopls/internal/lsp/cmd/usage/links.hlp
diff --git a/internal/lsp/cmd/usage/prepare_rename.hlp b/gopls/internal/lsp/cmd/usage/prepare_rename.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/prepare_rename.hlp
rename to gopls/internal/lsp/cmd/usage/prepare_rename.hlp
diff --git a/internal/lsp/cmd/usage/references.hlp b/gopls/internal/lsp/cmd/usage/references.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/references.hlp
rename to gopls/internal/lsp/cmd/usage/references.hlp
diff --git a/internal/lsp/cmd/usage/remote.hlp b/gopls/internal/lsp/cmd/usage/remote.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/remote.hlp
rename to gopls/internal/lsp/cmd/usage/remote.hlp
diff --git a/internal/lsp/cmd/usage/rename.hlp b/gopls/internal/lsp/cmd/usage/rename.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/rename.hlp
rename to gopls/internal/lsp/cmd/usage/rename.hlp
diff --git a/internal/lsp/cmd/usage/semtok.hlp b/gopls/internal/lsp/cmd/usage/semtok.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/semtok.hlp
rename to gopls/internal/lsp/cmd/usage/semtok.hlp
diff --git a/internal/lsp/cmd/usage/serve.hlp b/gopls/internal/lsp/cmd/usage/serve.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/serve.hlp
rename to gopls/internal/lsp/cmd/usage/serve.hlp
diff --git a/internal/lsp/cmd/usage/signature.hlp b/gopls/internal/lsp/cmd/usage/signature.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/signature.hlp
rename to gopls/internal/lsp/cmd/usage/signature.hlp
diff --git a/internal/lsp/cmd/usage/symbols.hlp b/gopls/internal/lsp/cmd/usage/symbols.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/symbols.hlp
rename to gopls/internal/lsp/cmd/usage/symbols.hlp
diff --git a/internal/lsp/cmd/usage/usage.hlp b/gopls/internal/lsp/cmd/usage/usage.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/usage.hlp
rename to gopls/internal/lsp/cmd/usage/usage.hlp
diff --git a/internal/lsp/cmd/usage/version.hlp b/gopls/internal/lsp/cmd/usage/version.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/version.hlp
rename to gopls/internal/lsp/cmd/usage/version.hlp
diff --git a/internal/lsp/cmd/usage/vulncheck.hlp b/gopls/internal/lsp/cmd/usage/vulncheck.hlp
similarity index 70%
rename from internal/lsp/cmd/usage/vulncheck.hlp
rename to gopls/internal/lsp/cmd/usage/vulncheck.hlp
index 19a674b2ea7..4fbe573e22a 100644
--- a/internal/lsp/cmd/usage/vulncheck.hlp
+++ b/gopls/internal/lsp/cmd/usage/vulncheck.hlp
@@ -6,10 +6,12 @@ Usage:
WARNING: this command is experimental.
By default, the command outputs a JSON-encoded
- golang.org/x/tools/internal/lsp/command.VulncheckResult
+ golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult
message.
Example:
$ gopls vulncheck
-config
If true, the command reads a JSON-encoded package load configuration from stdin
+ -summary
+ 	If true, outputs a JSON-encoded govulnchecklib.Summary
diff --git a/internal/lsp/cmd/usage/workspace.hlp b/gopls/internal/lsp/cmd/usage/workspace.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/workspace.hlp
rename to gopls/internal/lsp/cmd/usage/workspace.hlp
diff --git a/internal/lsp/cmd/usage/workspace_symbol.hlp b/gopls/internal/lsp/cmd/usage/workspace_symbol.hlp
similarity index 100%
rename from internal/lsp/cmd/usage/workspace_symbol.hlp
rename to gopls/internal/lsp/cmd/usage/workspace_symbol.hlp
diff --git a/internal/lsp/cmd/vulncheck.go b/gopls/internal/lsp/cmd/vulncheck.go
similarity index 56%
rename from internal/lsp/cmd/vulncheck.go
rename to gopls/internal/lsp/cmd/vulncheck.go
index 4d245cecb60..5c851b66e78 100644
--- a/internal/lsp/cmd/vulncheck.go
+++ b/gopls/internal/lsp/cmd/vulncheck.go
@@ -12,16 +12,15 @@ import (
"os"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ vulnchecklib "golang.org/x/tools/gopls/internal/vulncheck"
"golang.org/x/tools/internal/tool"
)
// vulncheck implements the vulncheck command.
type vulncheck struct {
- Config bool `flag:"config" help:"If true, the command reads a JSON-encoded package load configuration from stdin"`
- app *Application
+ Config bool `flag:"config" help:"If true, the command reads a JSON-encoded package load configuration from stdin"`
+ AsSummary bool `flag:"summary" help:"If true, outputs a JSON-encoded govulnchecklib.Summary JSON"`
+ app *Application
}
type pkgLoadConfig struct {
@@ -29,10 +28,6 @@ type pkgLoadConfig struct {
// the build system's query tool.
BuildFlags []string
- // Env is the environment to use when invoking the build system's query tool.
- // If Env is nil, the current environment is used.
- Env []string
-
// If Tests is set, the loader includes related test packages.
Tests bool
}
@@ -50,7 +45,7 @@ func (v *vulncheck) DetailedHelp(f *flag.FlagSet) {
WARNING: this command is experimental.
By default, the command outputs a JSON-encoded
- golang.org/x/tools/internal/lsp/command.VulncheckResult
+ golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult
message.
Example:
$ gopls vulncheck
@@ -60,17 +55,13 @@ func (v *vulncheck) DetailedHelp(f *flag.FlagSet) {
}
func (v *vulncheck) Run(ctx context.Context, args ...string) error {
- if len(args) > 1 {
- return tool.CommandLineErrorf("vulncheck accepts at most one package pattern")
- }
- pattern := "."
- if len(args) == 1 {
- pattern = args[0]
+ if vulnchecklib.Main == nil {
+ return fmt.Errorf("vulncheck command is available only in gopls compiled with go1.18 or newer")
}
- cwd, err := os.Getwd()
- if err != nil {
- return tool.CommandLineErrorf("failed to get current directory: %v", err)
+ // TODO(hyangah): what's wrong with allowing multiple targets?
+ if len(args) > 1 {
+ return tool.CommandLineErrorf("vulncheck accepts at most one package pattern")
}
var cfg pkgLoadConfig
if v.Config {
@@ -78,31 +69,16 @@ func (v *vulncheck) Run(ctx context.Context, args ...string) error {
return tool.CommandLineErrorf("failed to parse cfg: %v", err)
}
}
-
- opts := source.DefaultOptions().Clone()
- v.app.options(opts) // register hook
- if opts == nil || opts.Hooks.Govulncheck == nil {
- return tool.CommandLineErrorf("vulncheck feature is not available")
- }
-
- loadCfg := &packages.Config{
+ loadCfg := packages.Config{
Context: ctx,
Tests: cfg.Tests,
BuildFlags: cfg.BuildFlags,
- Env: cfg.Env,
+ // inherit the current process's cwd and env.
}
- res, err := opts.Hooks.Govulncheck(ctx, loadCfg, command.VulncheckArgs{
- Dir: protocol.URIFromPath(cwd),
- Pattern: pattern,
- })
- if err != nil {
- return tool.CommandLineErrorf("govulncheck failed: %v", err)
- }
- data, err := json.MarshalIndent(res, " ", " ")
- if err != nil {
- return tool.CommandLineErrorf("failed to decode results: %v", err)
+ if err := vulnchecklib.Main(loadCfg, args...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
}
- fmt.Printf("%s", data)
return nil
}
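As the rewritten Run method above shows, `gopls vulncheck -config` now expects a JSON-encoded package load configuration on stdin and inherits the current process's working directory and environment. A minimal sketch of consuming that configuration follows; the struct mirrors the pkgLoadConfig fields in the hunk, while the program itself is illustrative rather than the gopls code path.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// pkgLoadConfig mirrors the fields decoded from stdin when -config is set:
// extra build flags for the underlying go command, and whether related test
// packages should be included in the load.
type pkgLoadConfig struct {
	BuildFlags []string
	Tests      bool
}

func main() {
	var cfg pkgLoadConfig
	if err := json.NewDecoder(os.Stdin).Decode(&cfg); err != nil {
		fmt.Fprintf(os.Stderr, "failed to parse cfg: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("build flags: %v, include tests: %v\n", cfg.BuildFlags, cfg.Tests)
}
```

Fed with `{"BuildFlags":["-tags=integration"],"Tests":true}` on stdin, the decoder fills both fields; an empty object leaves the zero values, which matches running without -config.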
diff --git a/internal/lsp/cmd/workspace.go b/gopls/internal/lsp/cmd/workspace.go
similarity index 93%
rename from internal/lsp/cmd/workspace.go
rename to gopls/internal/lsp/cmd/workspace.go
index c0ddd9eb46e..2038d276348 100644
--- a/internal/lsp/cmd/workspace.go
+++ b/gopls/internal/lsp/cmd/workspace.go
@@ -9,9 +9,9 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
// workspace is a top-level command for working with the gopls workspace. This
diff --git a/internal/lsp/cmd/workspace_symbol.go b/gopls/internal/lsp/cmd/workspace_symbol.go
similarity index 93%
rename from internal/lsp/cmd/workspace_symbol.go
rename to gopls/internal/lsp/cmd/workspace_symbol.go
index 38fe5decf7f..be1e24ef324 100644
--- a/internal/lsp/cmd/workspace_symbol.go
+++ b/gopls/internal/lsp/cmd/workspace_symbol.go
@@ -9,8 +9,8 @@ import (
"flag"
"fmt"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/tool"
)
@@ -73,7 +73,7 @@ func (r *workspaceSymbol) Run(ctx context.Context, args ...string) error {
return err
}
for _, s := range symbols {
- f := conn.AddFile(ctx, fileURI(s.Location.URI))
+ f := conn.openFile(ctx, fileURI(s.Location.URI))
span, err := f.mapper.Span(s.Location)
if err != nil {
return err
diff --git a/internal/lsp/code_action.go b/gopls/internal/lsp/code_action.go
similarity index 81%
rename from internal/lsp/code_action.go
rename to gopls/internal/lsp/code_action.go
index 9d78e3c9ac9..0767d439b4c 100644
--- a/internal/lsp/code_action.go
+++ b/gopls/internal/lsp/code_action.go
@@ -10,14 +10,14 @@ import (
"sort"
"strings"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
)
func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) {
@@ -70,18 +70,41 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara
switch kind {
case source.Mod:
if diagnostics := params.Context.Diagnostics; len(diagnostics) > 0 {
- diags, err := mod.DiagnosticsForMod(ctx, snapshot, fh)
+ diags, err := mod.ModDiagnostics(ctx, snapshot, fh)
if source.IsNonFatalGoModError(err) {
return nil, nil
}
if err != nil {
return nil, err
}
- quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, diags)
+ udiags, err := mod.ModUpgradeDiagnostics(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, append(diags, udiags...))
if err != nil {
return nil, err
}
codeActions = append(codeActions, quickFixes...)
+
+ vdiags, err := mod.ModVulnerabilityDiagnostics(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ // Group vulnerabilities by location and then limit which code actions we return
+ // for each location.
+ m := make(map[protocol.Range][]*source.Diagnostic)
+ for _, v := range vdiags {
+ m[v.Range] = append(m[v.Range], v)
+ }
+ for _, sdiags := range m {
+ quickFixes, err = codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, sdiags)
+ if err != nil {
+ return nil, err
+ }
+ quickFixes = mod.SelectUpgradeCodeActions(quickFixes)
+ codeActions = append(codeActions, quickFixes...)
+ }
}
case source.Go:
// Don't suggest fixes for generated files, since they are generally
@@ -132,20 +155,19 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara
if ctx.Err() != nil {
return nil, ctx.Err()
}
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckFull, source.WidestPackage)
- if err != nil {
- return nil, err
- }
- pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
+ // Type-check the package and also run analysis,
+ // then combine their diagnostics.
+ pkg, _, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.TypecheckFull, source.WidestPackage)
if err != nil {
return nil, err
}
- analysisDiags, err := source.Analyze(ctx, snapshot, pkg, true)
+ analysisDiags, err := source.Analyze(ctx, snapshot, pkg.ID(), true)
if err != nil {
return nil, err
}
- fileDiags := append(pkgDiagnostics[uri], analysisDiags[uri]...)
+ var fileDiags []*source.Diagnostic
+ source.CombineDiagnostics(pkg, fh.URI(), analysisDiags, &fileDiags, &fileDiags)
// Split diagnostics into fixes, which must match incoming diagnostics,
// and non-fixes, which must match the requested range. Build actions
@@ -189,7 +211,7 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara
}
if wanted[protocol.RefactorExtract] {
- fixes, err := extractionFixes(ctx, snapshot, pkg, uri, params.Range)
+ fixes, err := extractionFixes(ctx, snapshot, uri, params.Range)
if err != nil {
return nil, err
}
@@ -257,6 +279,12 @@ func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic
if ident == fix.IdentName {
results = append(results, diagnostic)
}
+ // "undefined: X" may be an unresolved import at Go 1.20+.
+ case strings.HasPrefix(diagnostic.Message, "undefined: "):
+ ident := strings.TrimPrefix(diagnostic.Message, "undefined: ")
+ if ident == fix.IdentName {
+ results = append(results, diagnostic)
+ }
// "could not import: X" may be an invalid import.
case strings.HasPrefix(diagnostic.Message, "could not import: "):
ident := strings.TrimPrefix(diagnostic.Message, "could not import: ")
@@ -276,7 +304,7 @@ func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic
return results
}
-func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.Package, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
+func extractionFixes(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
if rng.Start == rng.End {
return nil, nil
}
@@ -284,7 +312,7 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P
if err != nil {
return nil, err
}
- _, pgf, err := source.GetParsedFile(ctx, snapshot, fh, source.NarrowestPackage)
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
if err != nil {
return nil, fmt.Errorf("getting file for Identifier: %w", err)
}
@@ -294,7 +322,7 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P
}
puri := protocol.URIFromSpanURI(uri)
var commands []protocol.Command
- if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok {
+ if _, ok, methodOk, _ := source.CanExtractFunction(pgf.Tok, srng, pgf.Src, pgf.File); ok {
cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{
URI: puri,
Fix: source.ExtractFunction,
@@ -338,16 +366,18 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P
return actions, nil
}
-func documentChanges(fh source.VersionedFileHandle, edits []protocol.TextEdit) []protocol.TextDocumentEdit {
- return []protocol.TextDocumentEdit{
+func documentChanges(fh source.VersionedFileHandle, edits []protocol.TextEdit) []protocol.DocumentChanges {
+ return []protocol.DocumentChanges{
{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(fh.URI()),
+ TextDocumentEdit: &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ },
},
+ Edits: edits,
},
- Edits: edits,
},
}
}
@@ -378,20 +408,22 @@ func codeActionsMatchingDiagnostics(ctx context.Context, snapshot source.Snapsho
func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd *source.Diagnostic, pd *protocol.Diagnostic) ([]protocol.CodeAction, error) {
var actions []protocol.CodeAction
for _, fix := range sd.SuggestedFixes {
- var changes []protocol.TextDocumentEdit
+ var changes []protocol.DocumentChanges
for uri, edits := range fix.Edits {
fh, err := snapshot.GetVersionedFile(ctx, uri)
if err != nil {
return nil, err
}
- changes = append(changes, protocol.TextDocumentEdit{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
+ changes = append(changes, protocol.DocumentChanges{
+ TextDocumentEdit: &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ },
},
+ Edits: edits,
},
- Edits: edits,
})
}
action := protocol.CodeAction{
@@ -411,7 +443,8 @@ func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd
}
func sameDiagnostic(pd protocol.Diagnostic, sd *source.Diagnostic) bool {
- return pd.Message == sd.Message && protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source)
+ return pd.Message == strings.TrimSpace(sd.Message) && // extra space may have been trimmed when converting to protocol.Diagnostic
+ protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source)
}
func goTest(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
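Several hunks in this file (and in command.go below) adapt to protocol.DocumentChanges being a union type: a plain TextDocumentEdit can no longer be appended directly and has to be wrapped. The self-contained sketch below shows the wrapping idiom with simplified stand-in types; the field names echo the protocol types, but nothing here is the real gopls definition.

```go
package main

import "fmt"

// Simplified stand-ins for the LSP protocol types: documentChanges acts as a
// union, so exactly one of its pointer fields is set per element.
type textDocumentEdit struct {
	URI     string
	Version int32
	Edits   []string
}

type documentChanges struct {
	TextDocumentEdit *textDocumentEdit
	// CreateFile, RenameFile, DeleteFile would be the other union members.
}

// wrapEdits converts a slice of edits into the union form expected by a
// workspace edit. The per-iteration copy keeps each element's pointer distinct.
func wrapEdits(edits []textDocumentEdit) []documentChanges {
	var changes []documentChanges
	for i := range edits {
		edit := edits[i] // copy before taking its address
		changes = append(changes, documentChanges{TextDocumentEdit: &edit})
	}
	return changes
}

func main() {
	wrapped := wrapEdits([]textDocumentEdit{
		{URI: "file:///a.go", Version: 3, Edits: []string{"edit-1"}},
		{URI: "file:///b.go", Version: 1, Edits: []string{"edit-2"}},
	})
	for _, c := range wrapped {
		fmt.Printf("%s@%d: %v\n", c.TextDocumentEdit.URI, c.TextDocumentEdit.Version, c.TextDocumentEdit.Edits)
	}
}
```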
diff --git a/internal/lsp/code_lens.go b/gopls/internal/lsp/code_lens.go
similarity index 80%
rename from internal/lsp/code_lens.go
rename to gopls/internal/lsp/code_lens.go
index e1944583883..f554e798c3c 100644
--- a/internal/lsp/code_lens.go
+++ b/gopls/internal/lsp/code_lens.go
@@ -9,11 +9,11 @@ import (
"fmt"
"sort"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
)
func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) {
@@ -48,10 +48,10 @@ func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams)
}
sort.Slice(result, func(i, j int) bool {
a, b := result[i], result[j]
- if protocol.CompareRange(a.Range, b.Range) == 0 {
- return a.Command.Command < b.Command.Command
+ if cmp := protocol.CompareRange(a.Range, b.Range); cmp != 0 {
+ return cmp < 0
}
- return protocol.CompareRange(a.Range, b.Range) < 0
+ return a.Command.Command < b.Command.Command
})
return result, nil
}
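The codeLens sort above was reshaped so the range comparison is computed only once: compare the primary key, return when it differs, and only then fall back to the command name. The same multi-key comparator idiom, reduced to a self-contained example with a hypothetical lens type rather than the protocol types, looks like this:

```go
package main

import (
	"fmt"
	"sort"
)

// lens is a hypothetical stand-in for a code lens: the start line is the
// primary sort key and the command name breaks ties.
type lens struct {
	line    int
	command string
}

func main() {
	lenses := []lens{
		{line: 10, command: "run test"},
		{line: 3, command: "tidy"},
		{line: 10, command: "gc details"},
	}
	sort.Slice(lenses, func(i, j int) bool {
		a, b := lenses[i], lenses[j]
		if a.line != b.line { // primary key computed once, used directly
			return a.line < b.line
		}
		return a.command < b.command // tie-break on the secondary key
	})
	fmt.Println(lenses)
	// Output order: [{3 tidy} {10 gc details} {10 run test}]
}
```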
diff --git a/internal/lsp/command.go b/gopls/internal/lsp/command.go
similarity index 73%
rename from internal/lsp/command.go
rename to gopls/internal/lsp/command.go
index 862af6088ec..2022325a815 100644
--- a/internal/lsp/command.go
+++ b/gopls/internal/lsp/command.go
@@ -13,21 +13,24 @@ import (
"io"
"io/ioutil"
"os"
+ "os/exec"
"path/filepath"
"sort"
"strings"
+ "time"
"golang.org/x/mod/modfile"
"golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/progress"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/gopls/internal/vulncheck"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
)
@@ -74,6 +77,13 @@ type commandDeps struct {
type commandFunc func(context.Context, commandDeps) error
+// run performs command setup for command execution, and invokes the given run
+// function. If cfg.async is set, run executes the given func in a separate
+// goroutine, and returns as soon as setup is complete and the goroutine is
+// scheduled.
+//
+// Invariant: if the resulting error is nil, the given run func will
+// (eventually) be executed exactly once.
func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run commandFunc) (err error) {
if cfg.requireSave {
var unsaved []string
@@ -144,9 +154,15 @@ func (c *commandHandler) ApplyFix(ctx context.Context, args command.ApplyFixArgs
if err != nil {
return err
}
+ var changes []protocol.DocumentChanges
+ for _, edit := range edits {
+ changes = append(changes, protocol.DocumentChanges{
+ TextDocumentEdit: &edit,
+ })
+ }
r, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
Edit: protocol.WorkspaceEdit{
- DocumentChanges: edits,
+ DocumentChanges: changes,
},
})
if err != nil {
@@ -180,7 +196,7 @@ func (c *commandHandler) CheckUpgrades(ctx context.Context, args command.CheckUp
if err != nil {
return err
}
- deps.snapshot.View().RegisterModuleUpgrades(upgrades)
+ deps.snapshot.View().RegisterModuleUpgrades(args.URI.SpanURI(), upgrades)
// Re-diagnose the snapshot to publish the new module diagnostics.
c.s.diagnoseSnapshot(deps.snapshot, nil, false)
return nil
@@ -195,6 +211,29 @@ func (c *commandHandler) UpgradeDependency(ctx context.Context, args command.Dep
return c.GoGetModule(ctx, args)
}
+func (c *commandHandler) ResetGoModDiagnostics(ctx context.Context, args command.ResetGoModDiagnosticsArgs) error {
+ return c.run(ctx, commandConfig{
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ // Clear all diagnostics coming from the upgrade check source and vulncheck.
+ // This will clear the diagnostics in all go.mod files, but they
+ // will be re-calculated when the snapshot is diagnosed again.
+ if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.UpgradeNotification) {
+ deps.snapshot.View().ClearModuleUpgrades(args.URI.SpanURI())
+ c.s.clearDiagnosticSource(modCheckUpgradesSource)
+ }
+
+ if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.Vulncheck) {
+ deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), nil)
+ c.s.clearDiagnosticSource(modVulncheckSource)
+ }
+
+ // Re-diagnose the snapshot to remove the diagnostics.
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ return nil
+ })
+}
+
func (c *commandHandler) GoGetModule(ctx context.Context, args command.DependencyArgs) error {
return c.run(ctx, commandConfig{
progress: "Running go get",
@@ -322,15 +361,19 @@ func (c *commandHandler) RemoveDependency(ctx context.Context, args command.Remo
}
response, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
Edit: protocol.WorkspaceEdit{
- DocumentChanges: []protocol.TextDocumentEdit{{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: deps.fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(deps.fh.URI()),
+ DocumentChanges: []protocol.DocumentChanges{
+ {
+ TextDocumentEdit: &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: deps.fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(deps.fh.URI()),
+ },
+ },
+ Edits: edits,
},
},
- Edits: edits,
- }},
+ },
},
})
if err != nil {
@@ -361,10 +404,7 @@ func dropDependency(snapshot source.Snapshot, pm *source.ParsedModule, modulePat
return nil, err
}
// Calculate the edits to be made due to the change.
- diff, err := snapshot.View().Options().ComputeEdits(pm.URI, string(pm.Mapper.Content), string(newContent))
- if err != nil {
- return nil, err
- }
+ diff := snapshot.View().Options().ComputeEdits(string(pm.Mapper.Content), string(newContent))
return source.ToProtocolEdits(pm.Mapper, diff)
}
@@ -392,19 +432,20 @@ func (c *commandHandler) RunTests(ctx context.Context, args command.RunTestsArgs
func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error {
// TODO: fix the error reporting when this runs async.
- pkgs, err := snapshot.PackagesForFile(ctx, uri.SpanURI(), source.TypecheckWorkspace, false)
+ metas, err := snapshot.MetadataForFile(ctx, uri.SpanURI())
if err != nil {
return err
}
- if len(pkgs) == 0 {
+ metas = source.RemoveIntermediateTestVariants(metas)
+ if len(metas) == 0 {
return fmt.Errorf("package could not be found for file: %s", uri.SpanURI().Filename())
}
- pkgPath := pkgs[0].ForTest()
+ pkgPath := string(metas[0].ForTest)
// create output
buf := &bytes.Buffer{}
ew := progress.NewEventWriter(ctx, "test")
- out := io.MultiWriter(ew, progress.NewWorkDoneWriter(work), buf)
+ out := io.MultiWriter(ew, progress.NewWorkDoneWriter(ctx, work), buf)
// Run `go test -run Func` on each test.
var failedTests int
@@ -487,7 +528,7 @@ func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs
Args: []string{"-x", pattern},
WorkingDir: args.Dir.SpanURI().Filename(),
}
- stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(deps.work))
+ stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work))
if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil {
return err
}
@@ -544,9 +585,15 @@ func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Sna
if len(changes) == 0 {
return nil
}
+ var documentChanges []protocol.DocumentChanges
+ for _, change := range changes {
+ documentChanges = append(documentChanges, protocol.DocumentChanges{
+ TextDocumentEdit: &change,
+ })
+ }
response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
Edit: protocol.WorkspaceEdit{
- DocumentChanges: changes,
+ DocumentChanges: documentChanges,
},
})
if err != nil {
@@ -580,10 +627,7 @@ func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI,
}
m := protocol.NewColumnMapper(fh.URI(), oldContent)
- diff, err := snapshot.View().Options().ComputeEdits(uri, string(oldContent), string(newContent))
- if err != nil {
- return nil, err
- }
+ diff := snapshot.View().Options().ComputeEdits(string(oldContent), string(newContent))
edits, err := source.ToProtocolEdits(m, diff)
if err != nil {
return nil, err
@@ -653,16 +697,17 @@ func (c *commandHandler) ToggleGCDetails(ctx context.Context, args command.URIAr
progress: "Toggling GC Details",
forURI: args.URI,
}, func(ctx context.Context, deps commandDeps) error {
- pkg, err := deps.snapshot.PackageForFile(ctx, deps.fh.URI(), source.TypecheckWorkspace, source.NarrowestPackage)
+ metas, err := deps.snapshot.MetadataForFile(ctx, deps.fh.URI())
if err != nil {
return err
}
+ id := metas[0].ID // 0 => narrowest package
c.s.gcOptimizationDetailsMu.Lock()
- if _, ok := c.s.gcOptimizationDetails[pkg.ID()]; ok {
- delete(c.s.gcOptimizationDetails, pkg.ID())
+ if _, ok := c.s.gcOptimizationDetails[id]; ok {
+ delete(c.s.gcOptimizationDetails, id)
c.s.clearDiagnosticSource(gcDetailsSource)
} else {
- c.s.gcOptimizationDetails[pkg.ID()] = struct{}{}
+ c.s.gcOptimizationDetails[id] = struct{}{}
}
c.s.gcOptimizationDetailsMu.Unlock()
c.s.diagnoseSnapshot(deps.snapshot, nil, false)
@@ -691,7 +736,7 @@ func (c *commandHandler) GenerateGoplsMod(ctx context.Context, args command.URIA
if err != nil {
return fmt.Errorf("formatting mod file: %w", err)
}
- filename := filepath.Join(snapshot.View().Folder().Filename(), "gopls.mod")
+ filename := filepath.Join(v.Folder().Filename(), "gopls.mod")
if err := ioutil.WriteFile(filename, content, 0644); err != nil {
return fmt.Errorf("writing mod file: %w", err)
}
@@ -705,8 +750,10 @@ func (c *commandHandler) ListKnownPackages(ctx context.Context, args command.URI
progress: "Listing packages",
forURI: args.URI,
}, func(ctx context.Context, deps commandDeps) error {
- var err error
- result.Packages, err = source.KnownPackages(ctx, deps.snapshot, deps.fh)
+ pkgs, err := source.KnownPackagePaths(ctx, deps.snapshot, deps.fh)
+ for _, pkg := range pkgs {
+ result.Packages = append(result.Packages, string(pkg))
+ }
return err
})
return result, err
@@ -717,11 +764,11 @@ func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (
err := c.run(ctx, commandConfig{
forURI: args.URI,
}, func(ctx context.Context, deps commandDeps) error {
- pkg, err := deps.snapshot.PackageForFile(ctx, args.URI.SpanURI(), source.TypecheckWorkspace, source.NarrowestPackage)
+ fh, err := deps.snapshot.GetFile(ctx, args.URI.SpanURI())
if err != nil {
return err
}
- pgf, err := pkg.File(args.URI.SpanURI())
+ pgf, err := deps.snapshot.ParseGo(ctx, fh, source.ParseHeader)
if err != nil {
return err
}
@@ -735,15 +782,21 @@ func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (
name = imp.Name.Name
}
result.Imports = append(result.Imports, command.FileImport{
- Path: source.ImportPath(imp),
+ Path: string(source.UnquoteImportPath(imp)),
Name: name,
})
}
}
- for _, imp := range pkg.Imports() {
- result.PackageImports = append(result.PackageImports, command.PackageImport{
- Path: imp.PkgPath(), // This might be the vendored path under GOPATH vendoring, in which case it's a bug.
- })
+ metas, err := deps.snapshot.MetadataForFile(ctx, args.URI.SpanURI())
+ if err != nil {
+ return err // e.g. cancelled
+ }
+ if len(metas) == 0 {
+ return fmt.Errorf("no package containing %v", args.URI.SpanURI())
+ }
+ for pkgPath := range metas[0].DepsByPkgPath { // 0 => narrowest package
+ result.PackageImports = append(result.PackageImports,
+ command.PackageImport{Path: string(pkgPath)})
}
sort.Slice(result.PackageImports, func(i, j int) bool {
return result.PackageImports[i].Path < result.PackageImports[j].Path
@@ -790,34 +843,132 @@ func (c *commandHandler) StartDebugging(ctx context.Context, args command.Debugg
return result, nil
}
-func (c *commandHandler) RunVulncheckExp(ctx context.Context, args command.VulncheckArgs) (result command.VulncheckResult, _ error) {
+// Copy of pkgLoadConfig defined in gopls/internal/lsp/cmd/vulncheck.go
+// TODO(hyangah): decide where to define this.
+type pkgLoadConfig struct {
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // If Tests is set, the loader includes related test packages.
+ Tests bool
+}
+
+func (c *commandHandler) FetchVulncheckResult(ctx context.Context, arg command.URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error) {
+ ret := map[protocol.DocumentURI]*govulncheck.Result{}
+ err := c.run(ctx, commandConfig{forURI: arg.URI}, func(ctx context.Context, deps commandDeps) error {
+ if deps.snapshot.View().Options().Vulncheck == source.ModeVulncheckImports {
+ for _, modfile := range deps.snapshot.ModFiles() {
+ res, err := deps.snapshot.ModVuln(ctx, modfile)
+ if err != nil {
+ return err
+ }
+ ret[protocol.URIFromSpanURI(modfile)] = res
+ }
+ }
+ // Overwrite if there is any govulncheck-based result.
+ for modfile, result := range deps.snapshot.View().Vulnerabilities() {
+ ret[protocol.URIFromSpanURI(modfile)] = result
+ }
+ return nil
+ })
+ return ret, err
+}
+
+func (c *commandHandler) RunGovulncheck(ctx context.Context, args command.VulncheckArgs) (command.RunVulncheckResult, error) {
+ if args.URI == "" {
+ return command.RunVulncheckResult{}, errors.New("VulncheckArgs is missing URI field")
+ }
+
+ // Return the workdone token so that clients can identify when this
+ // vulncheck invocation is complete.
+ //
+ // Since the run function executes asynchronously, we use a channel to
+ // synchronize the start of the run and return the token.
+ tokenChan := make(chan protocol.ProgressToken, 1)
err := c.run(ctx, commandConfig{
- progress: "Running vulncheck",
+ async: true, // need to be async to be cancellable
+ progress: "govulncheck",
requireSave: true,
- forURI: args.Dir, // Will dir work?
+ forURI: args.URI,
}, func(ctx context.Context, deps commandDeps) error {
+ tokenChan <- deps.work.Token()
+
view := deps.snapshot.View()
opts := view.Options()
- if opts == nil || opts.Hooks.Govulncheck == nil {
+ // quickly test if gopls is compiled to support govulncheck
+ // by checking vulncheck.Main. Alternatively, we can continue and
+ // let the `gopls vulncheck` command fail. This is lighter-weight.
+ if vulncheck.Main == nil {
return errors.New("vulncheck feature is not available")
}
- buildFlags := opts.BuildFlags // XXX: is session.Options equivalent to view.Options?
+ cmd := exec.CommandContext(ctx, os.Args[0], "vulncheck", "-config", args.Pattern)
+ cmd.Dir = filepath.Dir(args.URI.SpanURI().Filename())
+
var viewEnv []string
if e := opts.EnvSlice(); e != nil {
viewEnv = append(os.Environ(), e...)
}
- cfg := &packages.Config{
- Context: ctx,
- Tests: true, // TODO(hyangah): add a field in args.
- BuildFlags: buildFlags,
- Env: viewEnv,
- Dir: args.Dir.SpanURI().Filename(),
- // TODO(hyangah): configure overlay
+ cmd.Env = viewEnv
+
+ // stdin: gopls vulncheck expects JSON-encoded configuration from STDIN when -config flag is set.
+ var stdin bytes.Buffer
+ cmd.Stdin = &stdin
+
+ if err := json.NewEncoder(&stdin).Encode(pkgLoadConfig{
+ BuildFlags: opts.BuildFlags,
+ // TODO(hyangah): add `tests` flag in command.VulncheckArgs
+ }); err != nil {
+ return fmt.Errorf("failed to pass package load config: %v", err)
}
- var err error
- result, err = opts.Hooks.Govulncheck(ctx, cfg, args)
- return err
+
+ // stderr: stream gopls vulncheck's STDERR as progress reports
+ er := progress.NewEventWriter(ctx, "vulncheck")
+ stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work))
+ cmd.Stderr = stderr
+ // TODO: can we stream stdout?
+ stdout, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("failed to run govulncheck: %v", err)
+ }
+
+ var result govulncheck.Result
+ if err := json.Unmarshal(stdout, &result); err != nil {
+ // TODO: for easy debugging, log the failed stdout somewhere?
+ return fmt.Errorf("failed to parse govulncheck output: %v", err)
+ }
+ result.Mode = govulncheck.ModeGovulncheck
+ result.AsOf = time.Now()
+ deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), &result)
+
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ vulns := result.Vulns
+ affecting := make([]string, 0, len(vulns))
+ for _, v := range vulns {
+ if v.IsCalled() {
+ affecting = append(affecting, v.OSV.ID)
+ }
+ }
+ if len(affecting) == 0 {
+ return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Info,
+ Message: "No vulnerabilities found",
+ })
+ }
+ sort.Strings(affecting)
+ return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Warning,
+ Message: fmt.Sprintf("Found %v", strings.Join(affecting, ", ")),
+ })
})
- return result, err
+ if err != nil {
+ return command.RunVulncheckResult{}, err
+ }
+ select {
+ case <-ctx.Done():
+ return command.RunVulncheckResult{}, ctx.Err()
+ case token := <-tokenChan:
+ return command.RunVulncheckResult{Token: token}, nil
+ }
}
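RunGovulncheck above starts the actual work asynchronously yet still needs to hand a progress token back to the caller; it does this with a one-element channel that the goroutine fills as soon as setup completes, plus a select against ctx.Done() so a cancelled caller is not left waiting. A reduced sketch of that handshake, using a plain string token and a hypothetical startJob wrapper instead of the gopls command plumbing:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// startJob runs work in its own goroutine and returns a token identifying the
// job. The buffered channel means the worker never blocks on the send, even if
// the caller has already returned via ctx.Done().
func startJob(ctx context.Context, work func()) (string, error) {
	tokenChan := make(chan string, 1)
	go func() {
		tokenChan <- fmt.Sprintf("job-%d", time.Now().UnixNano()) // hypothetical token
		work()
	}()
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case token := <-tokenChan:
		return token, nil
	}
}

func main() {
	token, err := startJob(context.Background(), func() {
		time.Sleep(10 * time.Millisecond) // stand-in for the govulncheck run
	})
	fmt.Println(token, err)
}
```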
diff --git a/internal/lsp/command/command_gen.go b/gopls/internal/lsp/command/command_gen.go
similarity index 81%
rename from internal/lsp/command/command_gen.go
rename to gopls/internal/lsp/command/command_gen.go
index 22cfeff5bad..35d37ed4d6c 100644
--- a/internal/lsp/command/command_gen.go
+++ b/gopls/internal/lsp/command/command_gen.go
@@ -15,32 +15,34 @@ import (
"context"
"fmt"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
const (
- AddDependency Command = "add_dependency"
- AddImport Command = "add_import"
- ApplyFix Command = "apply_fix"
- CheckUpgrades Command = "check_upgrades"
- EditGoDirective Command = "edit_go_directive"
- GCDetails Command = "gc_details"
- Generate Command = "generate"
- GenerateGoplsMod Command = "generate_gopls_mod"
- GoGetPackage Command = "go_get_package"
- ListImports Command = "list_imports"
- ListKnownPackages Command = "list_known_packages"
- RegenerateCgo Command = "regenerate_cgo"
- RemoveDependency Command = "remove_dependency"
- RunTests Command = "run_tests"
- RunVulncheckExp Command = "run_vulncheck_exp"
- StartDebugging Command = "start_debugging"
- Test Command = "test"
- Tidy Command = "tidy"
- ToggleGCDetails Command = "toggle_gc_details"
- UpdateGoSum Command = "update_go_sum"
- UpgradeDependency Command = "upgrade_dependency"
- Vendor Command = "vendor"
+ AddDependency Command = "add_dependency"
+ AddImport Command = "add_import"
+ ApplyFix Command = "apply_fix"
+ CheckUpgrades Command = "check_upgrades"
+ EditGoDirective Command = "edit_go_directive"
+ FetchVulncheckResult Command = "fetch_vulncheck_result"
+ GCDetails Command = "gc_details"
+ Generate Command = "generate"
+ GenerateGoplsMod Command = "generate_gopls_mod"
+ GoGetPackage Command = "go_get_package"
+ ListImports Command = "list_imports"
+ ListKnownPackages Command = "list_known_packages"
+ RegenerateCgo Command = "regenerate_cgo"
+ RemoveDependency Command = "remove_dependency"
+ ResetGoModDiagnostics Command = "reset_go_mod_diagnostics"
+ RunGovulncheck Command = "run_govulncheck"
+ RunTests Command = "run_tests"
+ StartDebugging Command = "start_debugging"
+ Test Command = "test"
+ Tidy Command = "tidy"
+ ToggleGCDetails Command = "toggle_gc_details"
+ UpdateGoSum Command = "update_go_sum"
+ UpgradeDependency Command = "upgrade_dependency"
+ Vendor Command = "vendor"
)
var Commands = []Command{
@@ -49,6 +51,7 @@ var Commands = []Command{
ApplyFix,
CheckUpgrades,
EditGoDirective,
+ FetchVulncheckResult,
GCDetails,
Generate,
GenerateGoplsMod,
@@ -57,8 +60,9 @@ var Commands = []Command{
ListKnownPackages,
RegenerateCgo,
RemoveDependency,
+ ResetGoModDiagnostics,
+ RunGovulncheck,
RunTests,
- RunVulncheckExp,
StartDebugging,
Test,
Tidy,
@@ -100,6 +104,12 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte
return nil, err
}
return nil, s.EditGoDirective(ctx, a0)
+ case "gopls.fetch_vulncheck_result":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.FetchVulncheckResult(ctx, a0)
case "gopls.gc_details":
var a0 protocol.DocumentURI
if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
@@ -148,18 +158,24 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte
return nil, err
}
return nil, s.RemoveDependency(ctx, a0)
- case "gopls.run_tests":
- var a0 RunTestsArgs
+ case "gopls.reset_go_mod_diagnostics":
+ var a0 ResetGoModDiagnosticsArgs
if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
return nil, err
}
- return nil, s.RunTests(ctx, a0)
- case "gopls.run_vulncheck_exp":
+ return nil, s.ResetGoModDiagnostics(ctx, a0)
+ case "gopls.run_govulncheck":
var a0 VulncheckArgs
if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
return nil, err
}
- return s.RunVulncheckExp(ctx, a0)
+ return s.RunGovulncheck(ctx, a0)
+ case "gopls.run_tests":
+ var a0 RunTestsArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.RunTests(ctx, a0)
case "gopls.start_debugging":
var a0 DebuggingArgs
if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
@@ -268,6 +284,18 @@ func NewEditGoDirectiveCommand(title string, a0 EditGoDirectiveArgs) (protocol.C
}, nil
}
+func NewFetchVulncheckResultCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.fetch_vulncheck_result",
+ Arguments: args,
+ }, nil
+}
+
func NewGCDetailsCommand(title string, a0 protocol.DocumentURI) (protocol.Command, error) {
args, err := MarshalArgs(a0)
if err != nil {
@@ -364,26 +392,38 @@ func NewRemoveDependencyCommand(title string, a0 RemoveDependencyArgs) (protocol
}, nil
}
-func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) {
+func NewResetGoModDiagnosticsCommand(title string, a0 ResetGoModDiagnosticsArgs) (protocol.Command, error) {
args, err := MarshalArgs(a0)
if err != nil {
return protocol.Command{}, err
}
return protocol.Command{
Title: title,
- Command: "gopls.run_tests",
+ Command: "gopls.reset_go_mod_diagnostics",
+ Arguments: args,
+ }, nil
+}
+
+func NewRunGovulncheckCommand(title string, a0 VulncheckArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.run_govulncheck",
Arguments: args,
}, nil
}
-func NewRunVulncheckExpCommand(title string, a0 VulncheckArgs) (protocol.Command, error) {
+func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) {
args, err := MarshalArgs(a0)
if err != nil {
return protocol.Command{}, err
}
return protocol.Command{
Title: title,
- Command: "gopls.run_vulncheck_exp",
+ Command: "gopls.run_tests",
Arguments: args,
}, nil
}
diff --git a/internal/lsp/command/commandmeta/meta.go b/gopls/internal/lsp/command/commandmeta/meta.go
similarity index 97%
rename from internal/lsp/command/commandmeta/meta.go
rename to gopls/internal/lsp/command/commandmeta/meta.go
index a3a357df4b0..bf85c4faa9b 100644
--- a/internal/lsp/command/commandmeta/meta.go
+++ b/gopls/internal/lsp/command/commandmeta/meta.go
@@ -17,7 +17,7 @@ import (
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/command"
)
type Command struct {
@@ -52,7 +52,7 @@ func Load() (*packages.Package, []*Command, error) {
Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps,
BuildFlags: []string{"-tags=generate"},
},
- "golang.org/x/tools/internal/lsp/command",
+ "golang.org/x/tools/gopls/internal/lsp/command",
)
if err != nil {
return nil, nil, fmt.Errorf("packages.Load: %v", err)
@@ -244,7 +244,7 @@ func findField(pkg *packages.Package, pos token.Pos) (*ast.Field, error) {
fset := pkg.Fset
var file *ast.File
for _, f := range pkg.Syntax {
- if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename {
+ if fset.File(f.Pos()).Name() == fset.File(pos).Name() {
file = f
break
}
diff --git a/internal/lsp/command/gen/gen.go b/gopls/internal/lsp/command/gen/gen.go
similarity index 94%
rename from internal/lsp/command/gen/gen.go
rename to gopls/internal/lsp/command/gen/gen.go
index 8f7a2d50313..29428699ee6 100644
--- a/internal/lsp/command/gen/gen.go
+++ b/gopls/internal/lsp/command/gen/gen.go
@@ -13,7 +13,7 @@ import (
"text/template"
"golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/command/commandmeta"
+ "golang.org/x/tools/gopls/internal/lsp/command/commandmeta"
)
const src = `// Copyright 2021 The Go Authors. All rights reserved.
@@ -109,10 +109,10 @@ func Generate() ([]byte, error) {
Imports: map[string]bool{
"context": true,
"fmt": true,
- "golang.org/x/tools/internal/lsp/protocol": true,
+ "golang.org/x/tools/gopls/internal/lsp/protocol": true,
},
}
- const thispkg = "golang.org/x/tools/internal/lsp/command"
+ const thispkg = "golang.org/x/tools/gopls/internal/lsp/command"
for _, c := range d.Commands {
for _, arg := range c.Args {
pth := pkgPath(arg.Type)
diff --git a/internal/lsp/command/generate.go b/gopls/internal/lsp/command/generate.go
similarity index 88%
rename from internal/lsp/command/generate.go
rename to gopls/internal/lsp/command/generate.go
index 14628c733b5..79ff49b0e33 100644
--- a/internal/lsp/command/generate.go
+++ b/gopls/internal/lsp/command/generate.go
@@ -12,7 +12,7 @@ import (
"io/ioutil"
"os"
- "golang.org/x/tools/internal/lsp/command/gen"
+ "golang.org/x/tools/gopls/internal/lsp/command/gen"
)
func main() {
diff --git a/internal/lsp/command/interface.go b/gopls/internal/lsp/command/interface.go
similarity index 88%
rename from internal/lsp/command/interface.go
rename to gopls/internal/lsp/command/interface.go
index 8e4b1056d32..ec910fc43d3 100644
--- a/internal/lsp/command/interface.go
+++ b/gopls/internal/lsp/command/interface.go
@@ -17,7 +17,8 @@ package command
import (
"context"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
// Interface defines the interface gopls exposes for the
@@ -98,6 +99,11 @@ type Interface interface {
// Removes a dependency from the go.mod file of a module.
RemoveDependency(context.Context, RemoveDependencyArgs) error
+ // ResetGoModDiagnostics: Reset go.mod diagnostics
+ //
+ // Reset diagnostics in the go.mod file of a module.
+ ResetGoModDiagnostics(context.Context, ResetGoModDiagnosticsArgs) error
+
// GoGetPackage: go get a package
//
// Runs `go get` to fetch a package.
@@ -144,10 +150,15 @@ type Interface interface {
// address.
StartDebugging(context.Context, DebuggingArgs) (DebuggingResult, error)
- // RunVulncheckExp: Run vulncheck (experimental)
+ // RunGovulncheck: Run govulncheck.
//
// Run vulnerability check (`govulncheck`).
- RunVulncheckExp(context.Context, VulncheckArgs) (VulncheckResult, error)
+ RunGovulncheck(context.Context, VulncheckArgs) (RunVulncheckResult, error)
+
+ // FetchVulncheckResult: Get known vulncheck result
+ //
+	// Fetch the result of the latest vulnerability check (`govulncheck`).
+ FetchVulncheckResult(context.Context, URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error)
}
type RunTestsArgs struct {
@@ -267,21 +278,6 @@ type PackageImport struct {
Path string
}
-type WorkspaceMetadataArgs struct {
-}
-
-type WorkspaceMetadataResult struct {
- // All workspaces for this session.
- Workspaces []Workspace
-}
-
-type Workspace struct {
- // The workspace name.
- Name string
- // The workspace module directory.
- ModuleDir string
-}
-
type DebuggingArgs struct {
// Optional: the address (including port) for the debug server to listen on.
// If not provided, the debug server will bind to "localhost:0", and the
@@ -313,15 +309,30 @@ type DebuggingResult struct {
URLs []string
}
+type ResetGoModDiagnosticsArgs struct {
+ URIArg
+
+ // Optional: source of the diagnostics to reset.
+ // If not set, all resettable go.mod diagnostics will be cleared.
+ DiagnosticSource string
+}
+
type VulncheckArgs struct {
- // Dir is the directory from which vulncheck will run from.
- Dir protocol.DocumentURI
+ // Any document in the directory from which govulncheck will run.
+ URI protocol.DocumentURI
// Package pattern. E.g. "", ".", "./...".
Pattern string
- // TODO: Flag []string (flags accepted by govulncheck, e.g., -tests)
- // TODO: Format string (json, text)
+ // TODO: -tests
+}
+
+// RunVulncheckResult holds the result of asynchronously starting the vulncheck
+// command.
+type RunVulncheckResult struct {
+ // Token holds the progress token for LSP workDone reporting of the vulncheck
+ // invocation.
+ Token protocol.ProgressToken
}
type VulncheckResult struct {
@@ -347,6 +358,7 @@ type StackEntry struct {
}
// Vuln models an osv.Entry and representative call stacks.
+// TODO: deprecate
type Vuln struct {
// ID is the vulnerability ID (osv.Entry.ID).
// https://ossf.github.io/osv-schema/#id-modified-fields
@@ -359,8 +371,10 @@ type Vuln struct {
Aliases []string `json:",omitempty"`
// Symbol is the name of the detected vulnerable function or method.
+ // Can be empty if the vulnerability exists in required modules, but no vulnerable symbols are used.
Symbol string `json:",omitempty"`
// PkgPath is the package path of the detected Symbol.
+ // Can be empty if the vulnerability exists in required modules, but no vulnerable packages are used.
PkgPath string `json:",omitempty"`
// ModPath is the module path corresponding to PkgPath.
// TODO: how do we specify standard library's vulnerability?
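The hunk above replaces the synchronous RunVulncheckExp call with an asynchronous pair: RunGovulncheck starts the scan and returns a workDone progress token, and FetchVulncheckResult later returns the cached per-go.mod results. A minimal sketch of how a caller holding a command.Interface might drive the new flow, using only the types shown in this hunk (the helper name, the `package example` wrapper, and the assumption that URIArg carries a single URI field are illustrative, not part of the patch):

```go
package example

import (
	"context"
	"fmt"

	"golang.org/x/tools/gopls/internal/lsp/command"
	"golang.org/x/tools/gopls/internal/lsp/protocol"
)

// scanWorkspace shows the two-step flow: start the scan, then read back
// the results once the progress stream reports completion.
func scanWorkspace(ctx context.Context, svc command.Interface, uri protocol.DocumentURI) error {
	// Start govulncheck; the returned token identifies the workDone progress stream.
	res, err := svc.RunGovulncheck(ctx, command.VulncheckArgs{
		URI:     uri,     // any document in the target directory
		Pattern: "./...", // package pattern passed to govulncheck
	})
	if err != nil {
		return err
	}
	_ = res.Token // a real client would watch progress notifications for this token

	// After the scan finishes, fetch the per-go.mod results.
	results, err := svc.FetchVulncheckResult(ctx, command.URIArg{URI: uri})
	if err != nil {
		return err
	}
	fmt.Printf("govulncheck results available for %d modules\n", len(results))
	return nil
}
```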
diff --git a/internal/lsp/command/interface_test.go b/gopls/internal/lsp/command/interface_test.go
similarity index 92%
rename from internal/lsp/command/interface_test.go
rename to gopls/internal/lsp/command/interface_test.go
index 9ea30b4463e..de3ce62737f 100644
--- a/internal/lsp/command/interface_test.go
+++ b/gopls/internal/lsp/command/interface_test.go
@@ -9,7 +9,7 @@ import (
"io/ioutil"
"testing"
- "golang.org/x/tools/internal/lsp/command/gen"
+ "golang.org/x/tools/gopls/internal/lsp/command/gen"
"golang.org/x/tools/internal/testenv"
)
diff --git a/internal/lsp/command/util.go b/gopls/internal/lsp/command/util.go
similarity index 100%
rename from internal/lsp/command/util.go
rename to gopls/internal/lsp/command/util.go
diff --git a/internal/lsp/completion.go b/gopls/internal/lsp/completion.go
similarity index 88%
rename from internal/lsp/completion.go
rename to gopls/internal/lsp/completion.go
index 06af1bdaec0..c967c1faa22 100644
--- a/internal/lsp/completion.go
+++ b/gopls/internal/lsp/completion.go
@@ -9,14 +9,14 @@ import (
"fmt"
"strings"
+ "golang.org/x/tools/gopls/internal/lsp/lsppos"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/completion"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/gopls/internal/lsp/work"
"golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/lsppos"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/source/completion"
- "golang.org/x/tools/internal/lsp/template"
- "golang.org/x/tools/internal/lsp/work"
+ "golang.org/x/tools/internal/event/tag"
)
func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
@@ -64,9 +64,9 @@ func (s *Server) completion(ctx context.Context, params *protocol.CompletionPara
if err != nil {
return nil, err
}
- tf := snapshot.FileSet().File(surrounding.Start())
- mapper := lsppos.NewTokenMapper(src, tf)
- rng, err := mapper.Range(surrounding.Start(), surrounding.End())
+ srng := surrounding.Range()
+ tf := snapshot.FileSet().File(srng.Start) // not same as srng.TokFile due to //line
+ rng, err := lsppos.NewTokenMapper(src, tf).Range(srng.Start, srng.End)
if err != nil {
return nil, err
}
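The mapping pattern introduced in the hunk above can be read on its own: given the file contents and the *token.File that actually contains the position range, lsppos.NewTokenMapper converts token.Pos values into a UTF-16 protocol.Range. A minimal sketch, assuming NewTokenMapper takes the file's bytes and its *token.File exactly as used above (the helper name is illustrative):

```go
package example

import (
	"go/token"

	"golang.org/x/tools/gopls/internal/lsp/lsppos"
	"golang.org/x/tools/gopls/internal/lsp/protocol"
)

// tokenRangeToLSP mirrors the conversion used in the completion handler:
// look up the *token.File holding the range, then map the token.Pos pair
// onto LSP (line, UTF-16 column) coordinates.
func tokenRangeToLSP(src []byte, fset *token.FileSet, start, end token.Pos) (protocol.Range, error) {
	tf := fset.File(start) // looked up by position, not taken from the range itself
	return lsppos.NewTokenMapper(src, tf).Range(start, end)
}
```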
diff --git a/internal/lsp/completion_test.go b/gopls/internal/lsp/completion_test.go
similarity index 93%
rename from internal/lsp/completion_test.go
rename to gopls/internal/lsp/completion_test.go
index d496a40a5cc..22578467dbf 100644
--- a/internal/lsp/completion_test.go
+++ b/gopls/internal/lsp/completion_test.go
@@ -8,10 +8,10 @@ import (
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
)
func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
@@ -129,12 +129,12 @@ func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*sourc
original := view.Options()
modified := view.Options().Clone()
options(modified)
- view, err = view.SetOptions(r.ctx, modified)
+ view, err = r.server.session.SetViewOptions(r.ctx, view, modified)
if err != nil {
t.Error(err)
return nil
}
- defer view.SetOptions(r.ctx, original)
+ defer r.server.session.SetViewOptions(r.ctx, view, original)
list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
diff --git a/internal/lsp/debounce.go b/gopls/internal/lsp/debounce.go
similarity index 100%
rename from internal/lsp/debounce.go
rename to gopls/internal/lsp/debounce.go
diff --git a/internal/lsp/debounce_test.go b/gopls/internal/lsp/debounce_test.go
similarity index 100%
rename from internal/lsp/debounce_test.go
rename to gopls/internal/lsp/debounce_test.go
diff --git a/internal/lsp/debug/buildinfo_go1.12.go b/gopls/internal/lsp/debug/buildinfo_go1.12.go
similarity index 100%
rename from internal/lsp/debug/buildinfo_go1.12.go
rename to gopls/internal/lsp/debug/buildinfo_go1.12.go
diff --git a/internal/lsp/debug/buildinfo_go1.18.go b/gopls/internal/lsp/debug/buildinfo_go1.18.go
similarity index 100%
rename from internal/lsp/debug/buildinfo_go1.18.go
rename to gopls/internal/lsp/debug/buildinfo_go1.18.go
diff --git a/internal/lsp/debug/info.go b/gopls/internal/lsp/debug/info.go
similarity index 95%
rename from internal/lsp/debug/info.go
rename to gopls/internal/lsp/debug/info.go
index bcc2f4f0605..00752e6f9a3 100644
--- a/internal/lsp/debug/info.go
+++ b/gopls/internal/lsp/debug/info.go
@@ -16,7 +16,7 @@ import (
"sort"
"strings"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
type PrintMode int
@@ -38,17 +38,6 @@ type ServerVersion struct {
Version string
}
-type Module struct {
- ModuleVersion
- Replace *ModuleVersion `json:"replace,omitempty"`
-}
-
-type ModuleVersion struct {
- Path string `json:"path,omitempty"`
- Version string `json:"version,omitempty"`
- Sum string `json:"sum,omitempty"`
-}
-
// VersionInfo returns the build info for the gopls process. If it was not
// built in module mode, we return a GOPATH-specific message with the
// hardcoded version.
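The deleted Module and ModuleVersion types duplicate information that the standard library's runtime/debug package already exposes, which is presumably why they are dropped here. For reference, a sketch of reading the same module and replace data directly from the standard library (not taken from the patch):

```go
package example

import (
	"fmt"
	"runtime/debug"
)

// printBuildInfo prints the main module, its dependencies, and any replace
// directives recorded in the binary's embedded build information.
func printBuildInfo() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary was not built in module mode")
		return
	}
	fmt.Printf("main module: %s %s\n", info.Main.Path, info.Main.Version)
	for _, dep := range info.Deps {
		if dep.Replace != nil {
			fmt.Printf("dep: %s %s (replaced by %s %s)\n",
				dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
			continue
		}
		fmt.Printf("dep: %s %s %s\n", dep.Path, dep.Version, dep.Sum)
	}
}
```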
diff --git a/internal/lsp/debug/info_test.go b/gopls/internal/lsp/debug/info_test.go
similarity index 100%
rename from internal/lsp/debug/info_test.go
rename to gopls/internal/lsp/debug/info_test.go
diff --git a/internal/lsp/debug/log/log.go b/gopls/internal/lsp/debug/log/log.go
similarity index 95%
rename from internal/lsp/debug/log/log.go
rename to gopls/internal/lsp/debug/log/log.go
index 44638f8a582..e3eaa106f7e 100644
--- a/internal/lsp/debug/log/log.go
+++ b/gopls/internal/lsp/debug/log/log.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
+ "golang.org/x/tools/internal/event/tag"
)
// Level parameterizes log severity.
diff --git a/internal/lsp/debug/metrics.go b/gopls/internal/lsp/debug/metrics.go
similarity index 97%
rename from internal/lsp/debug/metrics.go
rename to gopls/internal/lsp/debug/metrics.go
index 8efc1d495e0..c8da803d6b1 100644
--- a/internal/lsp/debug/metrics.go
+++ b/gopls/internal/lsp/debug/metrics.go
@@ -7,7 +7,7 @@ package debug
import (
"golang.org/x/tools/internal/event/export/metric"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
+ "golang.org/x/tools/internal/event/tag"
)
var (
diff --git a/internal/lsp/debug/rpc.go b/gopls/internal/lsp/debug/rpc.go
similarity index 99%
rename from internal/lsp/debug/rpc.go
rename to gopls/internal/lsp/debug/rpc.go
index 033ee3797fb..5610021479c 100644
--- a/internal/lsp/debug/rpc.go
+++ b/gopls/internal/lsp/debug/rpc.go
@@ -17,7 +17,7 @@ import (
"golang.org/x/tools/internal/event/core"
"golang.org/x/tools/internal/event/export"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
+ "golang.org/x/tools/internal/event/tag"
)
var RPCTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
diff --git a/internal/lsp/debug/serve.go b/gopls/internal/lsp/debug/serve.go
similarity index 97%
rename from internal/lsp/debug/serve.go
rename to gopls/internal/lsp/debug/serve.go
index 0bdee92c5e0..6934adf490f 100644
--- a/internal/lsp/debug/serve.go
+++ b/gopls/internal/lsp/debug/serve.go
@@ -26,6 +26,10 @@ import (
"sync"
"time"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug/log"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/core"
"golang.org/x/tools/internal/event/export"
@@ -34,12 +38,7 @@ import (
"golang.org/x/tools/internal/event/export/prometheus"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/bug"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug/log"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/internal/event/tag"
)
type contextKeyType int
@@ -88,10 +87,7 @@ func (st *State) Caches() []*cache.Cache {
var caches []*cache.Cache
seen := make(map[string]struct{})
for _, client := range st.Clients() {
- cache, ok := client.Session.Cache().(*cache.Cache)
- if !ok {
- continue
- }
+ cache := client.Session.Cache()
if _, found := seen[cache.ID()]; found {
continue
}
@@ -134,11 +130,7 @@ func (st *State) Session(id string) *cache.Session {
func (st *State) Views() []*cache.View {
var views []*cache.View
for _, s := range st.Sessions() {
- for _, v := range s.Views() {
- if cv, ok := v.(*cache.View); ok {
- views = append(views, cv)
- }
- }
+ views = append(views, s.Views()...)
}
return views
}
@@ -208,7 +200,7 @@ func (st *State) addClient(session *cache.Session) {
}
// DropClient removes a client from the set being served.
-func (st *State) dropClient(session source.Session) {
+func (st *State) dropClient(session *cache.Session) {
st.mu.Lock()
defer st.mu.Unlock()
for i, c := range st.clients {
@@ -320,7 +312,8 @@ func (i *Instance) getFile(r *http.Request) interface{} {
return nil
}
for _, o := range s.Overlays() {
- if o.FileIdentity().Hash == identifier {
+ // TODO(adonovan): understand and document this comparison.
+ if o.FileIdentity().Hash.String() == identifier {
return o
}
}
@@ -771,8 +764,6 @@ var MainTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{range .State.Caches}}
{{template "cachelink" .ID}}
{{end}}
Sessions
{{range .State.Sessions}}
{{template "sessionlink" .ID}} from {{template "cachelink" .Cache.ID}}
{{end}}
-
Views
-
{{range .State.Views}}
{{.Name}} is {{template "viewlink" .ID}} from {{template "sessionlink" .Session.ID}} in {{.Folder}}