From 0f22a0585cc3edc0a6535bc03029b176095c98c1 Mon Sep 17 00:00:00 2001 From: Grant Nelson Date: Wed, 2 Jul 2025 15:13:44 -0600 Subject: [PATCH] Add decl type initialization grouping and ordering --- compiler/compiler.go | 7 + compiler/compiler_test.go | 284 ++++++++++++++- compiler/decls.go | 12 + compiler/internal/grouper/grouper.go | 141 +++++++ compiler/internal/grouper/info.go | 173 +++++++++ compiler/internal/grouper/info_test.go | 330 +++++++++++++++++ compiler/internal/sequencer/README.md | 161 ++++++++ compiler/internal/sequencer/sequencer.go | 97 +++++ compiler/internal/sequencer/sequencer_test.go | 194 ++++++++++ compiler/internal/sequencer/squencerImp.go | 344 ++++++++++++++++++ compiler/internal/sequencer/vertex.go | 63 ++++ compiler/internal/sequencer/vertexSet.go | 55 +++ compiler/internal/sequencer/vertexStack.go | 26 ++ compiler/internal/typeparams/resolver.go | 11 + internal/srctesting/srctesting.go | 5 +- 15 files changed, 1898 insertions(+), 5 deletions(-) create mode 100644 compiler/internal/grouper/grouper.go create mode 100644 compiler/internal/grouper/info.go create mode 100644 compiler/internal/grouper/info_test.go create mode 100644 compiler/internal/sequencer/README.md create mode 100644 compiler/internal/sequencer/sequencer.go create mode 100644 compiler/internal/sequencer/sequencer_test.go create mode 100644 compiler/internal/sequencer/squencerImp.go create mode 100644 compiler/internal/sequencer/vertex.go create mode 100644 compiler/internal/sequencer/vertexSet.go create mode 100644 compiler/internal/sequencer/vertexStack.go diff --git a/compiler/compiler.go b/compiler/compiler.go index e8264c946..c209cae3d 100644 --- a/compiler/compiler.go +++ b/compiler/compiler.go @@ -18,6 +18,7 @@ import ( "time" "github.com/gopherjs/gopherjs/compiler/internal/dce" + "github.com/gopherjs/gopherjs/compiler/internal/grouper" "github.com/gopherjs/gopherjs/compiler/linkname" "github.com/gopherjs/gopherjs/compiler/prelude" 
"golang.org/x/tools/go/gcexportdata" @@ -118,6 +119,8 @@ func WriteProgramCode(pkgs []*Archive, w *SourceMapFilter, goVersion string) err gls.Add(pkg.GoLinknames) } + // Perform dead code elimination (DCE) on the declarations + // to get the selection of the declarations that are actually used. sel := &dce.Selector[*Decl]{} for _, pkg := range pkgs { for _, d := range pkg.Declarations { @@ -134,6 +137,10 @@ func WriteProgramCode(pkgs []*Archive, w *SourceMapFilter, goVersion string) err } dceSelection := sel.AliveDecls() + // Set the Decl.Grouper().Group values for each declaration. + // The group number is used to determine the type initialization order. + grouper.Group(dceSelection) + if _, err := w.Write([]byte("\"use strict\";\n(function() {\n\n")); err != nil { return err } diff --git a/compiler/compiler_test.go b/compiler/compiler_test.go index 3562b01d4..1a1e1193a 100644 --- a/compiler/compiler_test.go +++ b/compiler/compiler_test.go @@ -13,6 +13,7 @@ import ( "golang.org/x/tools/go/packages" "github.com/gopherjs/gopherjs/compiler/internal/dce" + "github.com/gopherjs/gopherjs/compiler/internal/grouper" "github.com/gopherjs/gopherjs/compiler/linkname" "github.com/gopherjs/gopherjs/compiler/sources" "github.com/gopherjs/gopherjs/internal/srctesting" @@ -995,6 +996,254 @@ func TestArchiveSelectionAfterSerialization(t *testing.T) { } } +func Test_OrderOfTypeInit_Simple(t *testing.T) { + src1 := ` + package main + import "github.com/gopherjs/gopherjs/compiler/collections" + import "github.com/gopherjs/gopherjs/compiler/cat" + import "github.com/gopherjs/gopherjs/compiler/box" + + func main() { + s := collections.NewStack[box.Unboxer[cat.Cat]]() + s.Push(box.Box(cat.Cat{Name: "Erwin"})) + s.Push(box.Box(cat.Cat{Name: "Dirac"})) + println(s.Pop().Unbox().Name) + }` + src2 := ` + package collections + type Stack[T any] struct { values []T } + func NewStack[T any]() *Stack[T] { + return &Stack[T]{} + } + func (s *Stack[T]) Count() int { + return len(s.values) + } + 
func (s *Stack[T]) Push(value T) { + s.values = append(s.values, value) + } + func (s *Stack[T]) Pop() (value T) { + if len(s.values) > 0 { + maxIndex := len(s.values) - 1 + s.values, value = s.values[:maxIndex], s.values[maxIndex] + } + return + }` + src3 := ` + package cat + type Cat struct { Name string }` + src4 := ` + package box + type Unboxer[T any] interface { Unbox() T } + type boxImp[T any] struct { whatsInTheBox T } + func Box[T any](value T) Unboxer[T] { + return &boxImp[T]{whatsInTheBox: value} + } + func (b *boxImp[T]) Unbox() T { return b.whatsInTheBox }` + + sel := declSelection(t, + []srctesting.Source{ + {Name: `main.go`, Contents: []byte(src1)}, + }, + []srctesting.Source{ + {Name: `collections/stack.go`, Contents: []byte(src2)}, + {Name: `cat/cat.go`, Contents: []byte(src3)}, + {Name: `box/box.go`, Contents: []byte(src4)}, + }) + + // Group 0 + // (imports, typeVars, funcVars, and init:main are defaulted into group 0) + // box + sel.InGroup(0, `typeVar:github.com/gopherjs/gopherjs/compiler/box.Unboxer`) // type box.Unboxer[T] + sel.InGroup(0, `typeVar:github.com/gopherjs/gopherjs/compiler/box.boxImp`) // type box.boxImp[T] + sel.InGroup(0, `funcVar:github.com/gopherjs/gopherjs/compiler/box.Box`) // func box.Box[T] + // cat + sel.InGroup(0, `typeVar:github.com/gopherjs/gopherjs/compiler/cat.Cat`) // type cat.Cat + sel.InGroup(0, `type:github.com/gopherjs/gopherjs/compiler/cat.Cat`) // type cat.Cat + // collections + sel.InGroup(0, `typeVar:github.com/gopherjs/gopherjs/compiler/collections.Stack`) // type collections.Stack[T] + sel.InGroup(0, `funcVar:github.com/gopherjs/gopherjs/compiler/collections.NewStack`) // func collections.NewStack[T] + // main + sel.InGroup(0, `init:main`) + sel.InGroup(0, `funcVar:command-line-arguments.main`) + sel.InGroup(0, `func:command-line-arguments.main`) + + // Group 1 + // box + sel.InGroup(1, `type:github.com/gopherjs/gopherjs/compiler/box.Unboxer`) // box.Unboxer[cat.Cat] + sel.InGroup(1, 
`type:github.com/gopherjs/gopherjs/compiler/box.boxImp`) // box.boxImp[cat.Cat] + sel.InGroup(1, `func:github.com/gopherjs/gopherjs/compiler/box.Box`) // box.Box[cat.Cat]() box.Unboxer[cat.Cat] + sel.InGroup(1, `func:github.com/gopherjs/gopherjs/compiler/box.(*boxImp).Unbox`) // box.boxImp[cat.Cat].Unbox + sel.InGroup(1, `anonType:github.com/gopherjs/gopherjs/compiler/box.ptrType`) // *boxImp[cat.Cat] + + // Group 2 + // collections + sel.InGroup(2, `anonType:github.com/gopherjs/gopherjs/compiler/collections.sliceType`) // []box.Unboxer[cat.Cat] + sel.InGroup(2, `type:github.com/gopherjs/gopherjs/compiler/collections.Stack`) // collections.Stack[box.Unboxer[cat.Cat]] + sel.InGroup(2, `anonType:github.com/gopherjs/gopherjs/compiler/collections.ptrType`) // *collections.Stack[box.Unboxer[cat.Cat]] + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.NewStack`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.(*Stack).Count`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.(*Stack).Push`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.(*Stack).Pop`) +} + +func Test_OrderOfTypeInit_PingPong(t *testing.T) { + src1 := ` + package main + import "github.com/gopherjs/gopherjs/compiler/collections" + import "github.com/gopherjs/gopherjs/compiler/cat" + + func main() { + s := collections.NewHashSet[cat.Cat[collections.BadHasher]]() + s.Add(cat.Cat[collections.BadHasher]{Name: "Fluffy"}) + s.Add(cat.Cat[collections.BadHasher]{Name: "Mittens"}) + s.Add(cat.Cat[collections.BadHasher]{Name: "Whiskers"}) + println(s.Count(), "elements") + }` + src2 := ` + package collections + // HashSet keeps a set of non-nil elements that have unique hashes. 
+ type HashSet[E Hashable] struct { data map[uint]E } + func NewHashSet[E Hashable]() *HashSet[E] { + return &HashSet[E]{ data: map[uint]E{} } + } + func (s *HashSet[E]) Add(e E) { + s.data[e.Hash()] = e + } + func (s *HashSet[E]) Count() int { + return len(s.data) + }` + src3 := ` + package collections + type Hasher interface { + Add(value uint) + Sum() uint + } + + type Hashable interface { + Hash() uint + } + + type BadHasher struct { value uint } + func (h BadHasher) Add(value uint) { h.value += value } + func (h BadHasher) Sum() uint { return h.value }` + src4 := ` + package cat + import "github.com/gopherjs/gopherjs/compiler/collections" + + type Cat[H collections.Hasher] struct { Name string } + func (c Cat[H]) Hash() uint { + var h H + for _, v := range []rune(c.Name) { + h.Add(uint(v)) + } + return h.Sum() + }` + + sel := declSelection(t, + []srctesting.Source{ + {Name: `main.go`, Contents: []byte(src1)}, + }, + []srctesting.Source{ + {Name: `collections/hashmap.go`, Contents: []byte(src2)}, + {Name: `collections/hashes.go`, Contents: []byte(src3)}, + {Name: `cat/cat.go`, Contents: []byte(src4)}, + }) + + // Group 0 + // imports, funcVars, typevars, and init:main are in group 0 by default. 
+ sel.InGroup(0, `func:command-line-arguments.main`) + sel.InGroup(0, `anonType:github.com/gopherjs/gopherjs/compiler/cat.sliceType`) // []rune + sel.InGroup(0, `type:github.com/gopherjs/gopherjs/compiler/collections.BadHasher`) + sel.InGroup(0, `func:github.com/gopherjs/gopherjs/compiler/collections.BadHasher.Add`) + sel.InGroup(0, `func:github.com/gopherjs/gopherjs/compiler/collections.BadHasher.Sum`) + + // Group 1 + sel.InGroup(1, `type:github.com/gopherjs/gopherjs/compiler/cat.Cat`) + sel.InGroup(1, `func:github.com/gopherjs/gopherjs/compiler/cat.Cat.Hash`) + + // Group 2 + sel.InGroup(2, `anonType:github.com/gopherjs/gopherjs/compiler/collections.mapType`) // map[uint]cat.Cat[collections.BadHasher] + sel.InGroup(2, `type:github.com/gopherjs/gopherjs/compiler/collections.HashSet`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.(*HashSet).Add`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.(*HashSet).Count`) + sel.InGroup(2, `anonType:github.com/gopherjs/gopherjs/compiler/collections.ptrType`) // *collections.HashSet[cat.Cat[collections.BadHasher]] + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/collections.NewHashSet`) +} + +func Test_OrderOfTypeInit_HiddenParamMissingInterface(t *testing.T) { + // If a type (typically an interface) is only used as a parameter or + // a result in top-level functions, it will not be a DCE dependency + // of any other declaration and therefore be considered dead. + // Because of how JS works, this will not cause a problem when calling + // the function. + // If a function pointer to a top-level function (like done when using + // reflections), the function pointer will define the parameters + // and results, so that type will be alive. + // + // This test checks that the dead and missing type parameter will + // not cause a problem with the type initialization ordering. 
+ src1 := ` + package main + import "github.com/gopherjs/gopherjs/compiler/dragon" + import "github.com/gopherjs/gopherjs/compiler/drawer" + + func main() { + t := dragon.Trogdor[drawer.Cottages]{} + t.Target = drawer.Cottages{} + drawer.Draw(t) + }` + src2 := ` + package drawer + import "github.com/gopherjs/gopherjs/compiler/dragon" + + type Cottages struct {} + func (c Cottages) String() string { + return "thatched-roof cottage" + } + + func Draw[D dragon.Dragon](d D) { + d.Burninate() + }` + src3 := ` + package dragon + type Target interface{ String() string } + type Dragon interface { Burninate() } + + type Trogdor[T Target] struct { Target T } + func (t Trogdor[T]) Burninate() { + println("burninating the " + t.Target.String()) + }` + + sel := declSelection(t, + []srctesting.Source{ + {Name: `main.go`, Contents: []byte(src1)}, + }, + []srctesting.Source{ + {Name: `drawer/drawer.go`, Contents: []byte(src2)}, + {Name: `dragon/dragon.go`, Contents: []byte(src3)}, + }) + + // command-line-arguments + sel.IsAlive(`func:command-line-arguments.main`) + sel.InGroup(0, `funcVar:command-line-arguments.main`) + + // drawer + sel.IsAlive(`type:github.com/gopherjs/gopherjs/compiler/drawer.Cottages`) + sel.InGroup(0, `type:github.com/gopherjs/gopherjs/compiler/drawer.Cottages`) + + sel.IsAlive(`funcVar:github.com/gopherjs/gopherjs/compiler/drawer.Draw`) + sel.IsAlive(`func:github.com/gopherjs/gopherjs/compiler/drawer.Draw`) + sel.InGroup(2, `func:github.com/gopherjs/gopherjs/compiler/drawer.Draw`) + + // dragon + sel.IsDead(`type:github.com/gopherjs/gopherjs/compiler/dragon.Target`) + sel.IsDead(`type:github.com/gopherjs/gopherjs/compiler/dragon.Dragon`) + + sel.IsAlive(`typeVar:github.com/gopherjs/gopherjs/compiler/dragon.Trogdor`) + sel.IsAlive(`type:github.com/gopherjs/gopherjs/compiler/dragon.Trogdor`) + sel.InGroup(1, `type:github.com/gopherjs/gopherjs/compiler/dragon.Trogdor`) +} + func TestNestedConcreteTypeInGenericFunc(t *testing.T) { // This is a test of a type 
defined inside a generic function // that uses the type parameter of the function as a field type. @@ -1165,6 +1414,10 @@ func compile(t *testing.T, sourceFiles []srctesting.Source, minify bool) string // compileProject compiles the given root package and all packages imported by the root. // This returns the compiled archives of all packages keyed by their import path. func compileProject(t *testing.T, root *packages.Package, minify bool) map[string]*Archive { + return compileProjectWithContext(t, root, types.NewContext(), minify) +} + +func compileProjectWithContext(t *testing.T, root *packages.Package, tContext *types.Context, minify bool) map[string]*Archive { t.Helper() pkgMap := map[string]*packages.Package{} packages.Visit([]*packages.Package{root}, nil, func(pkg *packages.Package) { @@ -1191,7 +1444,6 @@ func compileProject(t *testing.T, root *packages.Package, minify bool) map[strin return srcs, nil } - tContext := types.NewContext() sortedSources := make([]*sources.Sources, 0, len(allSrcs)) for _, srcs := range allSrcs { sortedSources = append(sortedSources, srcs) @@ -1308,7 +1560,8 @@ type selectionTester struct { func declSelection(t *testing.T, sourceFiles []srctesting.Source, auxFiles []srctesting.Source) *selectionTester { t.Helper() root := srctesting.ParseSources(t, sourceFiles, auxFiles) - archives := compileProject(t, root, false) + tc := types.NewContext() + archives := compileProjectWithContext(t, root, tc, false) mainPkg := archives[root.PkgPath] paths := make([]string, 0, len(archives)) @@ -1328,6 +1581,7 @@ func declSelection(t *testing.T, sourceFiles []srctesting.Source, auxFiles []src } } dceSelection := sel.AliveDecls() + grouper.Group(dceSelection) return &selectionTester{ t: t, @@ -1343,15 +1597,37 @@ func (st *selectionTester) PrintDeclStatus() { for _, pkg := range st.packages { st.t.Logf(`Package %s`, pkg.ImportPath) for _, decl := range pkg.Declarations { + group := decl.Grouper().Group if _, ok := st.dceSelection[decl]; ok { - 
st.t.Logf(` [Alive] %q`, decl.FullName) + st.t.Logf(` [Alive] [%d] %q`, group, decl.FullName) } else { - st.t.Logf(` [Dead] %q`, decl.FullName) + st.t.Logf(` [Dead] [%d] %q`, group, decl.FullName) } } } } +func (st *selectionTester) PrintOrderMermaid() { + st.t.Helper() + mermaid := grouper.ToMermaid(st.dceSelection, func(d *Decl) string { + text := d.FullName + text = strings.ReplaceAll(text, `github.com/gopherjs/gopherjs/compiler/`, ``) + text = strings.ReplaceAll(text, `<`, `[`) + text = strings.ReplaceAll(text, `>`, `]`) + return text + }) + st.t.Logf(`Mermaid:\n%s`, mermaid) +} + +func (st *selectionTester) InGroup(group int, declFullName string) { + st.t.Helper() + decl := st.FindDecl(declFullName) + got := decl.Grouper().Group + if got != group { + st.t.Errorf(`expected the decl %q to be in group %d, but it is in group %d`, declFullName, group, got) + } +} + func (st *selectionTester) IsAlive(declFullName string) { st.t.Helper() decl := st.FindDecl(declFullName) diff --git a/compiler/decls.go b/compiler/decls.go index eb5322130..b9b05a10c 100644 --- a/compiler/decls.go +++ b/compiler/decls.go @@ -14,6 +14,7 @@ import ( "github.com/gopherjs/gopherjs/compiler/internal/analysis" "github.com/gopherjs/gopherjs/compiler/internal/dce" + "github.com/gopherjs/gopherjs/compiler/internal/grouper" "github.com/gopherjs/gopherjs/compiler/internal/symbol" "github.com/gopherjs/gopherjs/compiler/internal/typeparams" "github.com/gopherjs/gopherjs/compiler/sources" @@ -58,6 +59,9 @@ type Decl struct { InitCode []byte // DCEInfo stores the information for dead-code elimination. DCEInfo dce.Info + // GroupInfo stores the information for grouping and ordering the + // initialization of this declaration relative to other declarations. + GroupInfo grouper.Info // Set to true if a function performs a blocking operation (I/O or // synchronization). 
The compiler will have to generate function code such // that it can be resumed after a blocking operation completes without @@ -80,6 +84,11 @@ func (d *Decl) Dce() *dce.Info { return &d.DCEInfo } +// Grouper gets the information for grouping and ordering for the type initialization. +func (d *Decl) Grouper() *grouper.Info { + return &d.GroupInfo +} + // topLevelObjects extracts package-level variables, functions and named types // from the package AST. func (fc *funcContext) topLevelObjects(srcs *sources.Sources) (vars []*types.Var, functions []*ast.FuncDecl, typeNames typesutil.TypeNames) { @@ -341,6 +350,7 @@ func (fc *funcContext) newFuncDecl(fun *ast.FuncDecl, inst typeparams.Instance) LinkingName: symbol.New(o), } d.Dce().SetName(o, inst.TNest, inst.TArgs) + d.Grouper().SetInstance(fc.pkgCtx.typesCtx, inst) if typesutil.IsMethod(o) { recv := typesutil.RecvType(o.Type().(*types.Signature)).Obj() @@ -481,6 +491,7 @@ func (fc *funcContext) newNamedTypeInstDecl(inst typeparams.Instance) (*Decl, er FullName: typeDeclFullName(inst), } d.Dce().SetName(inst.Object, inst.TNest, inst.TArgs) + d.Grouper().SetInstance(fc.pkgCtx.typesCtx, inst) fc.pkgCtx.CollectDCEDeps(d, func() { // Code that declares a JS type (i.e. prototype) for each Go type. 
d.DeclCode = fc.CatchOutput(0, func() { @@ -606,6 +617,7 @@ func (fc *funcContext) anonTypeDecls(anonTypes []*types.TypeName) []*Decl { Vars: []string{t.Name()}, } d.Dce().SetName(t, nil, nil) + d.Grouper().SetInstance(fc.pkgCtx.typesCtx, typeparams.Instance{Object: t}) fc.pkgCtx.CollectDCEDeps(d, func() { d.DeclCode = []byte(fmt.Sprintf("\t%s = $%sType(%s);\n", t.Name(), strings.ToLower(typeKind(t.Type())[5:]), fc.initArgs(t.Type()))) }) diff --git a/compiler/internal/grouper/grouper.go b/compiler/internal/grouper/grouper.go new file mode 100644 index 000000000..463b77482 --- /dev/null +++ b/compiler/internal/grouper/grouper.go @@ -0,0 +1,141 @@ +package grouper + +import ( + "fmt" + "go/types" + + "github.com/gopherjs/gopherjs/compiler/internal/sequencer" +) + +type Decl interface { + Grouper() *Info + comparable +} + +// Group groups the declarations by their dependencies and set the group number +// (i.e. the dependency depth) for each Info. +// +// This returns the group count, where each decl with the same +// group number is at the same depth and can be initialized together. +// All group numbers will be in the range [0, count). +// +// This assumes that the `Grouper() *Info` methods on the declarations will +// consistently return the same unique *Info for each declaration. +// +// This may panic with ErrCycleDetected if a cycle is detected in the dependency +// graph created by the declarations' types. (see [Sequencer] for more details) +// +// [Sequencer]: ../sequencer/sequencer.go +func Group[D Decl](decl map[D]struct{}) int { + g := prepareGrouper(decl) + for d := range decl { + g.assignGroup(d) + } + return g.count() +} + +// ToMermaid generates a mermaid diagram string for the given declarations. +// This is useful for visualizing the dependency graph of the declarations +// any any possible cycles while debugging the type initialization order. 
+// +// This will not panic if a cycle is detected in the dependency graph, +// instead it will indicate the declarations involved in the cycle with red +// but the depth groups may be incorrect. +// +// The `toString` function is used to convert the declaration to a string +// for the mermaid diagram. If `toString` is nil, then `%v` is used. +func ToMermaid[D Decl](decl map[D]struct{}, toString func(d D) string) string { + g := prepareGrouper(decl) + return g.toMermaid(decl, toString) +} + +func prepareGrouper[D Decl](decl map[D]struct{}) *grouper[D] { + g := &grouper[D]{ + typeMap: make(map[types.Type][]*Info, len(decl)), + seq: sequencer.New[*Info](), + } + for d := range decl { + g.addDecl(d) + } + for d := range decl { + g.addDeps(d) + } + return g +} + +type grouper[D Decl] struct { + typeMap map[types.Type][]*Info + seq sequencer.Sequencer[*Info] +} + +func (g *grouper[D]) addDecl(d D) { + info := d.Grouper() + if info == nil || (info.name == nil && len(info.dep) == 0) { + // If the decl has no name and no deps, then it was a type + // that doesn't needed to be ordered, so we can skip it. + info.Group = 0 + return + } + if info.name != nil { + g.typeMap[info.name] = append(g.typeMap[info.name], info) + } + g.seq.Add(info) +} + +func (g *grouper[D]) addDeps(d D) { + info := d.Grouper() + if !g.seq.Has(info) { + // If the sequencer doesn't have this decl, then it was a type + // that doesn't needed to be ordered, so we can skip it. + return + } + + for dep := range info.dep { + // If a type can not be found it doesn't exist so isn't initialized. + // So we can skip adding any dependencies for it. + if depInfos, ok := g.typeMap[dep]; ok { + g.seq.Add(info, depInfos...) + } + } +} + +func (g *grouper[D]) count() int { + return g.seq.DepthCount() +} + +func (g *grouper[D]) assignGroup(d D) { + info := d.Grouper() + // Calling `Depth` may perform sequencing if it hasn't been run before. 
+ // It may cause a panic if a cycle is detected, + // but the cycle might not involve the current declaration and the panic + // would have occurred with any other declaration too. + depth := g.seq.Depth(info) + // If the depth is negative, then decl was not in the sequencer + // and was already assigned to group 0. + if depth >= 0 { + info.Group = depth + } +} + +func (g *grouper[D]) toMermaid(decl map[D]struct{}, toString func(d D) string) string { + if toString == nil { + toString = func(d D) string { + return fmt.Sprintf("%v", d) + } + } + + infoMap := make(map[*Info]D, len(decl)) + for d := range decl { + if info := d.Grouper(); g.seq.Has(info) { + infoMap[info] = d + } + } + + return g.seq.ToMermaid(func(info *Info) string { + if decl, ok := infoMap[info]; ok { + return toString(decl) + } + // This shouldn't happen, but handle it gracefully anyway. + return `unknown decl` + }) +} diff --git a/compiler/internal/grouper/info.go b/compiler/internal/grouper/info.go new file mode 100644 index 000000000..06e7277e9 --- /dev/null +++ b/compiler/internal/grouper/info.go @@ -0,0 +1,173 @@ +package grouper + +import ( + "go/types" + + "github.com/gopherjs/gopherjs/compiler/internal/typeparams" + "github.com/gopherjs/gopherjs/compiler/typesutil" +) + +type Info struct { + // Group is the group number for initializing this declaration. + // The declarations in the same group should still be initialized in the + // same order as they were declared based on imports first. + Group int + + // name is the concrete named type that this declaration is associated with. + // This may be nil for declarations that do not have an associated + // concrete named type, such as functions and methods. + name *types.Named + + // dep is a set of named types from other packages that this declaration + // depends on. This may be empty if there are no dependencies. 
+ dep map[*types.Named]struct{} +} + +// SetInstance sets the types and dependencies used by the grouper to represent +// the declaration this grouper info is attached to. +func (i *Info) SetInstance(tc *types.Context, inst typeparams.Instance) { + i.setType(tc, inst) + + var pkg *types.Package + if inst.Object != nil { + pkg = inst.Object.Pkg() + } + + i.addAllDeps(tc, inst, pkg) +} + +func (i *Info) setType(tc *types.Context, inst typeparams.Instance) { + if inst.Object == nil { + return + } + switch inst.Object.Type().(type) { + // TODO(grantnelson-wf): Determine how to handle *types.Alias in go1.22 + case *types.Named: + i.name = inst.Resolve(tc).(*types.Named) + } +} + +func (i *Info) initPendingDeps(tc *types.Context, inst typeparams.Instance) []types.Type { + var pending []types.Type + pending = append(pending, inst.TNest...) + pending = append(pending, inst.TArgs...) + + if inst.Object == nil { + // shouldn't happen, but if it does, just check the type args. + return pending + } + + if i.name != nil { + // If `i.name`` is set then we know we have a named type + // that we have to dig into to find its dependencies. + // By using `i.name` we know that the type has been resolved. + tArgs := i.name.TypeArgs() + for j := tArgs.Len() - 1; j >= 0; j-- { + pending = append(pending, tArgs.At(j)) + } + + r := typeparams.NewResolver(tc, inst) + pending = append(pending, r.Substitute(i.name.Underlying())) + return pending + } + + if fn, ok := inst.Object.(*types.Func); ok { + sig := fn.Type().(*types.Signature) + if recv := typesutil.RecvType(sig); recv != nil { + // The instance is a method, resolve the receiver type + // and the signature of the method to find its dependencies. 
+ recvInst := typeparams.Instance{ + Object: recv.Obj(), + TNest: inst.TNest, + TArgs: inst.TArgs, + } + pending = append(pending, recvInst.Resolve(tc)) + + r := typeparams.NewResolver(tc, recvInst) + pending = append(pending, r.Substitute(sig)) + return pending + } + + // The instance is a function, resolve the signature. + pending = append(pending, inst.Resolve(tc)) + return pending + } + + // If `i.name` is not set and it isn't a method, we can add the type + // as a dependency directly without needing to resolve it further. + // This will take a type like `[]Cat` and add `Cat` as a dependency. + pending = append(pending, inst.Object.Type()) + return pending +} + +func (i *Info) addAllDeps(tc *types.Context, inst typeparams.Instance, pkg *types.Package) { + pending := i.initPendingDeps(tc, inst) + touched := make(map[types.Type]struct{}) + for len(pending) > 0 { + max := len(pending) - 1 + t := pending[max] + pending = pending[:max] + if _, ok := touched[t]; ok { + continue // already processed this type + } + touched[t] = struct{}{} + + switch t := t.(type) { + case *types.Basic: + // ignore basic types like int, string, unsafe.Pointer, etc. + + case *types.Named: + if t.Obj() == nil || t.Obj().Pkg() == nil { + continue // skip objects in universal scope, e.g. `error` + } + if typesutil.IsJsPackage(t.Obj().Pkg()) && t.Obj().Name() == "Object" { + continue // skip *js.Object + } + if pkg != nil && pkg == t.Obj().Pkg() { + // skip over named types from the same package, + // continue into them to depend on the same dependencies as they do. + // This prevents circular dependencies from being added. 
+ tArgs := t.TypeArgs() + for j := tArgs.Len() - 1; j >= 0; j-- { + pending = append(pending, tArgs.At(j)) + } + + inst2 := typeparams.Instance{Object: t.Obj()} + inst2.TArgs = make(typesutil.TypeList, t.TypeArgs().Len()) + for j := 0; j < t.TypeArgs().Len(); j++ { + inst2.TArgs[j] = t.TypeArgs().At(j) + } + r := typeparams.NewResolver(tc, inst2) + pending = append(pending, r.Substitute(t.Underlying())) + continue + } + + // add the dependency to the set + if i.dep == nil { + i.dep = make(map[*types.Named]struct{}) + } + i.dep[t] = struct{}{} + + case *types.Struct: + for j := t.NumFields() - 1; j >= 0; j-- { + pending = append(pending, t.Field(j).Type()) + } + + case *types.Signature: + for j := t.Params().Len() - 1; j >= 0; j-- { + pending = append(pending, t.Params().At(j).Type()) + } + for j := t.Results().Len() - 1; j >= 0; j-- { + pending = append(pending, t.Results().At(j).Type()) + } + + case *types.Map: + pending = append(pending, t.Key()) + pending = append(pending, t.Elem()) + + case interface{ Elem() types.Type }: + // Handles *types.Pointer, *types.Slice, *types.Array, and *types.Chan + pending = append(pending, t.Elem()) + } + } +} diff --git a/compiler/internal/grouper/info_test.go b/compiler/internal/grouper/info_test.go new file mode 100644 index 000000000..0fdfe4a49 --- /dev/null +++ b/compiler/internal/grouper/info_test.go @@ -0,0 +1,330 @@ +package grouper + +import ( + "go/ast" + "go/types" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/gopherjs/gopherjs/compiler/internal/typeparams" + "github.com/gopherjs/gopherjs/compiler/typesutil" + "github.com/gopherjs/gopherjs/internal/srctesting" +) + +func TestInstanceDecomposition(t *testing.T) { + type testData struct { + name string + context *types.Context + instance typeparams.Instance + expName *types.Named + expDeps map[*types.Named]struct{} + } + + tests := []testData{ + func() testData { + tg := readTypes(t, `type Foo[T, U, V any] struct {}`) + return testData{ + name: 
`do not depend on basic types`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo`), + TArgs: tg.TypeList(`int`, `string`, `bool`), + }, + expName: tg.Named(`Foo[int, string, bool]`), + expDeps: nil, + } + }(), + func() testData { + tg := readTypes(t, `type Foo[T, U any] struct {}`) + return testData{ + name: `do not depend on empty any or error`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo`), + TArgs: tg.TypeList(`any`, `error`), + }, + expName: tg.Named(`Foo[any, error]`), + expDeps: nil, + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo[T, U any] struct {} + type Baz[V any] struct {}`) + return testData{ + name: `depend on type parameters`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo`), + TArgs: tg.TypeList(`Baz[any]`, `Foo[int, bool]`), + }, + expName: tg.Named(`Foo[Baz[any], Foo[int, bool]]`), + expDeps: tg.NamedSet(`Baz[any]`, `Foo[int, bool]`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo struct {} + var f *Foo`) + return testData{ + name: `depend on pointer element`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`f`), + }, + expName: nil, // `*Foo` is not named so it can't be depended on by name + expDeps: tg.NamedSet(`Foo`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo struct {} + var s []Foo`) + return testData{ + name: `depend on slice element`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`s`), + }, + expName: nil, // `[]Foo` is not named + expDeps: tg.NamedSet(`Foo`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo struct {} + var c chan Foo`) + return testData{ + name: `depend on chan element`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`c`), + }, + expName: nil, // `chan Foo` is not named + expDeps: tg.NamedSet(`Foo`), + } + }(), + func() testData { + tg := 
readTypes(t, ` + type Foo struct {} + type Bar struct {} + var m map[Bar]Foo`) + return testData{ + name: `depend on map key and element`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`m`), + }, + expName: nil, // `map[Bar]Foo` is not named + expDeps: tg.NamedSet(`Bar`, `Foo`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo struct { X Bar[Baz] } + type Bar[T any] struct {} + type Baz struct {}`) + return testData{ + name: `depend on fields`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo`), + }, + expName: tg.Named(`Foo`), + expDeps: tg.NamedSet(`Bar[Baz]`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo struct {} + func (f Foo) Bar(p *Baz) []*Taz { return nil} + type Baz struct {} + type Taz struct {}`) + return testData{ + name: `depend on receiver, parameter, and result types`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo.Bar`), + }, + expName: nil, // methods are not named + expDeps: tg.NamedSet(`Foo`, `Baz`, `Taz`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo[T any] struct {} + func (f *Foo[T]) Bar(x int, y int) {}`) + return testData{ + name: `depend on complex receiver types`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo.Bar`), + TArgs: tg.TypeList(`int`), + }, + expName: nil, + expDeps: tg.NamedSet(`Foo[int]`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo[T any] struct {} + func Bar[T any](x []*Foo[T]) map[string]*Foo[T] { return nil } + type Baz struct {}`) + return testData{ + name: `depend on resolved parameters and results`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Bar`), + TArgs: tg.TypeList(`Baz`), + }, + expName: nil, + expDeps: tg.NamedSet(`Baz`, `Foo[Baz]`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo[T any] struct {} + type Bar struct {} + var Baz = Foo[Bar]{}`) + 
return testData{ + name: `variables depend on the named in their type`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Baz`), + }, + expName: tg.Named(`Foo[Bar]`), + expDeps: tg.NamedSet(`Bar`), + } + }(), + func() testData { + tg := readTypes(t, ` + type Foo []struct{ b Bar } + type Bar struct {} + type Baz Foo`) + return testData{ + name: `dependency on underlying types for aliased types`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Baz`), + }, + expName: tg.Named(`Baz`), + expDeps: tg.NamedSet(`Bar`), + } + }(), + func() testData { + tg := readTypes(t, ` + func Foo[T any]() any { + type Bar struct{ x T} + return Bar{} + } + type Baz struct{}`) + return testData{ + name: `depend on implicit nesting type arguments`, + context: tg.tf.Context, + instance: typeparams.Instance{ + Object: tg.Object(`Foo.Bar`), + TNest: tg.TypeList(`Baz`), + }, + expName: tg.Object(`Foo.Bar`).Type().(*types.Named), + expDeps: tg.NamedSet(`Baz`), + } + }(), + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + info := &Info{} + // Instead of calling SetInstance, we manually set the type and + // dependencies so that we can tell it to not skip the same package + // dependencies (via passing in a nil package to addAllDeps). + // This will make testing Info a lot easier. 
+ info.setType(test.context, test.instance) + info.addAllDeps(test.context, test.instance, nil) + + if info.name != test.expName { + t.Errorf("expected type %v, got %v", test.expName, info.name) + } + if diff := cmp.Diff(test.expDeps, info.dep); diff != "" { + t.Errorf("unexpected dependencies (-want +got):\n%s", diff) + } + }) + } +} + +type typeGetter struct { + tf *srctesting.Fixture + cache map[string]types.Type +} + +func readTypes(t *testing.T, src string) typeGetter { + t.Helper() + tf := srctesting.New(t) + tf.Check(`pkg/test`, tf.Parse(`test.go`, "package testcase\n"+src)) + return typeGetter{ + tf: tf, + cache: make(map[string]types.Type), + } +} + +func (tg typeGetter) Object(name string) types.Object { + tg.tf.T.Helper() + importPath := `pkg/test` + if path, remainder, found := strings.Cut(name, `.`); found { + if _, has := tg.tf.Packages[path]; has { + importPath, name = path, remainder + } + } + pkg := tg.tf.Packages[importPath] + if pkg == nil { + tg.tf.T.Fatalf(`missing package %q in fixture`, importPath) + } + return srctesting.LookupObj(pkg, name) +} + +func (tg typeGetter) Type(expr string) types.Type { + tg.tf.T.Helper() + if typ, ok := tg.cache[expr]; ok { + return typ + } + + f := tg.tf.Parse(`eval`, "package testcase\nvar _ "+expr) + config := &types.Config{ + Context: tg.tf.Context, + Sizes: &types.StdSizes{WordSize: 4, MaxAlign: 8}, + Importer: tg.tf, + } + pkg := tg.tf.Packages[`pkg/test`] + ck := types.NewChecker(config, tg.tf.FileSet, pkg, tg.tf.Info) + if err := ck.Files([]*ast.File{f}); err != nil { + tg.tf.T.Fatalf("failed to type check expression %q: %v", expr, err) + } + + node := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Type + typ := tg.tf.Info.Types[node].Type + tg.cache[expr] = typ + return typ +} + +func (tg typeGetter) TypeList(expr ...string) typesutil.TypeList { + tg.tf.T.Helper() + result := make([]types.Type, len(expr)) + for i, expr := range expr { + result[i] = tg.Type(expr) + } + return result +} + +func (tg 
typeGetter) Named(expr string) *types.Named { + tg.tf.T.Helper() + return tg.Type(expr).(*types.Named) +} + +func (tg typeGetter) NamedSet(exprs ...string) map[*types.Named]struct{} { + tg.tf.T.Helper() + result := make(map[*types.Named]struct{}, len(exprs)) + for _, expr := range exprs { + result[tg.Named(expr)] = struct{}{} + } + return result +} diff --git a/compiler/internal/sequencer/README.md b/compiler/internal/sequencer/README.md new file mode 100644 index 000000000..aa06fd9a3 --- /dev/null +++ b/compiler/internal/sequencer/README.md @@ -0,0 +1,161 @@ +# Sequencer + +- [Overview](#overview) +- [Limitations](#limitations) +- [Design](#design) +- [Ordering and Grouping](#ordering-and-grouping) + +## Overview + +The sequencer is a tool used to determine the order of steps to process +a set of items based on each items' dependencies. +This can group items together if they can all be processed in the same step. +This assumes there are no circular dependencies and will error if any +cycle is detected. + +The sequencer _could_ be used to solve several problems[^1], such as: + +- Ordering type initialization +- Ordering constant resolution +- Ordering packages for parallel parsing +- Explicit super-type determination to solidify implicit duck-typing +- Checking for cycles in dependencies + +[^1]: We don't use the sequencer for all of those problems. Some are solved +with other tools and some are solutions we don't currently support. Several +of them would need an additional value tagging system added. + +## Limitations + +> [!IMPORTANT] +> The sequencer can only sequence a collection of items that do _not_ have +> dependency cycles. + + +> [!WARNING] +> The sequencer can only sequence items that are +> [comparable](https://go.dev/ref/spec#Comparison_operators) +> +> Since the items are used a keys in the graph, the comparable parts +> of an item must not be modified after being added, +> since that could cause keying issues. 
+
+The sequencer does not:
+
+- sort items in the groups, each group is randomly ordered
+- provide any weighted or prioritized dependencies
+- provide a tagging system for data propagation needed for problems like DCE
+- allow removing items from the graph nor removing dependencies
+
+## Design
+
+The sequencer uses a type of
+[DAG](https://en.wikipedia.org/wiki/Directed_acyclic_graph)
+called a [polyforest](https://en.wikipedia.org/wiki/Polytree).
+A polyforest is an acyclic forest where branches may have more than one parent
+branch and those parents may be from the same or different trees.
+
+The polyforest, like any graph, is made up of vertices and edges.
+The vertices are the items being ordered and the directed edge starts from a
+vertex, the parent, and ends on a vertex, the child, that depends on that parent.
+Each vertex may have zero or more parents and zero or more children.
+Any vertex that has no children (other vertices depending on it) is a root.
+Any vertex that has no parents (dependencies) is a leaf.
+A vertex may be a leaf and a root at the same time.
+The graph flows from the root towards the leaves via child to parent.
+There may be zero or more paths from any root to any leaf.
+
+```mermaid
+flowchart TB
+    v1["1"] --> v7 & v4 & v5
+    v2["2"] --> v5
+    v3["3"]
+    v4["4"] --> v7
+    v5["5"] --> v7 & v8
+    v6["6"] --> v8
+    v7["7"]
+    v8["8"]
+```
+
+In the above example:
+
+- The vertices are $1$, $2$, $3$, $4$, $5$, $6$, $7$, and $8$
+- The edges ($child$ → {$parents$}) are $1$ → {$4, 5, 7$},
+  $2$ → {$5$}, $4$ → {$7$}, $5$ → {$7, 8$}, and $6$ → {$8$}
+- The leaves are $3$, $7$, and $8$
+- The roots are $1$, $2$, $3$, and $8$
+
+## Ordering and Grouping
+
+All the leaf vertices will receive a depth of zero value.
+All other vertices will receive the maximum value of their parents' depths plus one.
+All the vertices with the same depth value are in a group and may be processed
+together. The depth values provide the ordering of those groups.
+ +For example if there are three depth groups. +Depth 0 will contain all the leaf vertices with no children. +Depth 1 will contain all the vertices that only depend on vertices in depth 0. +Depth 2 will contain all vertices that depend on vertices in depth 0 and 1. +At each depth, the vertices must depend on at least one vertex in the prior +depth, otherwise that vertex would have been put into that prior depth itself. + +```mermaid +flowchart TB + v1["1"] --> v7 & v4 & v5 + v2["2"] --> v5 + v3["3"] + v4["4"] --> v7 + v5["5"] --> v7 & v8 + v6["6"] --> v8 + v7["7"] + v8["8"] + subgraph Depth 2 + v1 & v2 + end + subgraph Depth 1 + v4 & v5 & v6 + end + subgraph Depth 0 + v7 & v8 & v3 + end +``` + +In the above example: + +- the group for depth 0 is $\{3, 7, 8\}$ +- the group for depth 1 is $\{4, 5, 6\}$ +- the group for depth 2 is $\{1, 2\}$ + +There are several ways to perform the grouping and depth determination. +One way is to set all the vertices to zero (not just the leaves) then update +each vertex with the maximum of their parents plus one until no more changes +are made. That would take $n \cdot d$ amount of time, where $n$ is the number +of vertices and $d$ is the maximum depth. This can be improved by propagating +starting from each leaf heading towards the roots. Anytime a vertex depth +changes recalculate all the children vertices. +However, this is still slow because the same children depths will be +recalculated each time a parent is changed and it could still get stuck +in an infinite loop if there is a cycle. + +To keep from having to recalculate a child's depth, each vertex will +keep a count of parents it is waiting on. When a vertex has its depth +assigned, that vertex's children will have that parent count decremented. +When that parent count is zero, the vertex will be put into the set of +vertices that need to be calculated. +The set of vertices to be calculated is initially populated with +the leaves since they aren't waiting on any parents. 
+If all the set of vertices pending calculation is empty and there are no +more vertices waiting on parents, then the depth determination is done. + +However, if the set of vertices pending calculation is empty but there +are still vertices waiting on parents, then a cycle exists within those +vertices still waiting. Some of the vertices waiting may not participate +in the cycle but instead simply depend on vertices in the cycle. +There might also be multiple cycles. Cycles indicate the dependency +information given to the sequencer was bad so the sequencer will return +an error and provide information about any cycle. + +> [!NOTE] +> This assumes that the sequencing will be performed only once +> after all dependencies have been added. It doesn't have the partial +> sequencing capabilities, so it will always recalculate everything. diff --git a/compiler/internal/sequencer/sequencer.go b/compiler/internal/sequencer/sequencer.go new file mode 100644 index 000000000..098aaef93 --- /dev/null +++ b/compiler/internal/sequencer/sequencer.go @@ -0,0 +1,97 @@ +package sequencer + +import "errors" + +// ErrCycleDetected is panicked from a method performing sequencing +// (e.g. `Depth`, `DepthCount`, and `Group`) to indicate that a cycle +// was detected in the dependency graph. +var ErrCycleDetected = errors.New(`cycle detected in the dependency graph`) + +// Sequencer is a tool for determining the groups and ordering of the groups +// of items based on their dependencies. +type Sequencer[T comparable] interface { + + // Add adds a `child` item with a dependency on the given `parents`. + Add(child T, parents ...T) + + // Has checks if an item exists in the sequencer. + Has(item T) bool + + // Children returns the items that are dependent on the given item. + // If the given item doesn't exist then nil is returned. + // Each time this is called it creates a new slice. + // The items in the slice are in random order. 
+ Children(item T) []T + + // Parents returns the items that the given item depends on. + // If the given item doesn't exist then nil is returned. + // Each time this is called it creates a new slice. + // The items in the slice are in random order. + Parents(item T) []T + + // Depth returns the depth of the item in the dependency graph. + // Zero indicates the item is a leaf item with no dependencies. + // If the given item doesn't exist then -1 is returned. + // + // This may have to perform sequencing of the items, so + // this may panic with `ErrCycleDetected` if a cycle is detected. + Depth(item T) int + + // DepthCount returns the number of unique depths in the dependency graph. + // + // This may have to perform sequencing of the items, so + // this may panic with `ErrCycleDetected` if a cycle is detected. + DepthCount() int + + // Group returns all the items at the given depth. + // If the depth is out-of-bounds, it returns an empty slice. + // The depth is zero-based, so the depth 0 group is the leaf items. + // Each time this is called it creates a new slice. + // The items in the slice are in random order. + // + // This may have to perform sequencing of the items, so + // this may panic with `ErrCycleDetected` if a cycle is detected. + Group(depth int) []T + + // AllGroups returns all the items grouped by their depth. + // Each group is a slice of items at the same depth. + // The depth is zero-based, so the first group is the leaf items. + // Each time this is called it creates a new slices. + // The items in the slices are in random order. + // + // This may have to perform sequencing of the items, so + // this may panic with `ErrCycleDetected` if a cycle is detected. + AllGroups() [][]T + + // GetCycles returns the items that were unable to be sequenced + // due to a cycle in the dependency graph. + // The returned items may participate in one or more cycles or + // depends on an item in a cycle. + // Otherwise nil is returned if there are no cycles. 
+ // + // There is no need to call this method before calling other methods. + // If this returns a non-empty slice, other methods that perform sequencing + // (e.g. `Depth`, `DepthCount`, and `Group`) will panic with `ErrCycleDetected`. + // Obviously, this will not panic if a cycle is detected. + // + // This may have to perform sequencing of the items. + GetCycles() []T + + // ToMermaid returns a string representation of the dependency graph in + // Mermaid syntax. This is useful for visualizing the dependencies and + // debugging dependency issues. When a cycle is detected, the items + // participating in the cycle or depending on an item in a cycle + // will be marked with red and the groups may be incorrect. + // + // The `itemToString` function is used to convert the item to a string + // representation for the Mermaid graph. It should return a unique string. + // If nil, then `%v` will be used to convert the item to a string. + ToMermaid(itemToString func(item T) string) string +} + +// New creates a new sequencer for the given item type T. 
+func New[T comparable]() Sequencer[T] { + return &sequencerImp[T]{ + vertices: vertexSet[T]{}, + } +} diff --git a/compiler/internal/sequencer/sequencer_test.go b/compiler/internal/sequencer/sequencer_test.go new file mode 100644 index 000000000..69812b568 --- /dev/null +++ b/compiler/internal/sequencer/sequencer_test.go @@ -0,0 +1,194 @@ +package sequencer + +import ( + "errors" + "math/rand" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestBasicSequencing(t *testing.T) { + s := New[string]() + s.Add(`Rad`, `Bob`, `Chris`) + s.Add(`Stripe`, `Bob`, `Chris`) + s.Add(`Bandit`, `Bob`, `Chris`) + s.Add(`Brandy`, `Mort`) + s.Add(`Chili`, `Mort`) + s.Add(`Muffin`, `Stripe`, `Trixie`) + s.Add(`Socks`, `Stripe`, `Trixie`) + s.Add(`Bluey`, `Bandit`, `Chili`) + s.Add(`Bingo`, `Bandit`, `Chili`) + s.Add(`Frisky`) + + if !s.Has(`Bob`) { + t.Errorf(`expected to find Bob in sequencer, but did not`) + } + if s.Has(`Ted`) { + t.Errorf(`expected to not find Ted in sequencer, but did not`) + } + + gotC := s.Children(`Bandit`) + sort.Strings(gotC) + expC := []string{`Bingo`, `Bluey`} + if diff := cmp.Diff(gotC, expC); len(diff) > 0 { + t.Errorf("unexpected children (-got +exp):\n%s", diff) + } + if gotC := s.Children(`Ted`); len(gotC) != 0 { + t.Errorf("expected no children for an item not in the sequencer, got: %v", gotC) + } + + gotP := s.Parents(`Bandit`) + sort.Strings(gotP) + expP := []string{`Bob`, `Chris`} + if diff := cmp.Diff(gotP, expP); len(diff) > 0 { + t.Errorf("unexpected parents (-got +exp):\n%s", diff) + } + if gotP := s.Parents(`Ted`); len(gotP) != 0 { + t.Errorf("expected no parents for an item not in the sequencer, got: %v", gotP) + } + + if depth := s.Depth(`Bandit`); depth != 1 { + t.Errorf("expected depth of Bandit to be 1, got: %d", depth) + } + if depth := s.Depth(`Ted`); depth != -1 { + t.Errorf("expected depth of an item not in the sequencer to be -1, got: %d", depth) + } + + if mmd := s.ToMermaid(nil); len(mmd) == 0 { + 
t.Errorf(`expected non-empty mermaid output, but got empty`) + } + + // Check getting the groups individually. + count := s.DepthCount() + got := make([][]string, count) + for i := 0; i < s.DepthCount(); i++ { + group := s.Group(i) + sort.Strings(group) + got[i] = group + } + exp := [][]string{ + {`Bob`, `Chris`, `Frisky`, `Mort`, `Trixie`}, + {`Bandit`, `Brandy`, `Chili`, `Rad`, `Stripe`}, + {`Bingo`, `Bluey`, `Muffin`, `Socks`}, + } + if diff := cmp.Diff(got, exp); len(diff) > 0 { + t.Errorf("unexpected sequencing (-got +exp):\n%s", diff) + } + + // Using AllGroups should return the same result as reading the groups individually. + got = s.AllGroups() + for _, group := range got { + sort.Strings(group) + } + if diff := cmp.Diff(got, exp); len(diff) > 0 { + t.Errorf("unexpected sequencing (-got +exp):\n%s", diff) + } +} + +func TestDiamonds(t *testing.T) { + s := New[string]() + // This makes several diamonds in the graph to check that vertices + // are only processed once all the parents are processed. 
+ s.Add(`A`, `B`, `C`, `D`, `G`) + s.Add(`B`, `D`, `E`) + s.Add(`C`, `D`, `F`) + s.Add(`D`, `G`) + s.Add(`E`, `G`) + s.Add(`F`, `G`) + + if mmd := s.ToMermaid(nil); len(mmd) == 0 { + t.Errorf(`expected non-empty mermaid output, but got empty`) + } + + got := s.AllGroups() + for _, group := range got { + sort.Strings(group) + } + exp := [][]string{ + {`G`}, + {`D`, `E`, `F`}, + {`B`, `C`}, + {`A`}, + } + if diff := cmp.Diff(got, exp); len(diff) > 0 { + t.Errorf("unexpected sequencing (-got +exp):\n%s", diff) + } +} + +func TestCycleDetection(t *testing.T) { + s := New[string]() + s.Add(`A`, `B`, `D`) // D is a leaf not part of the cycle + s.Add(`B`, `C`, `D`) + s.Add(`C`, `A`) // This creates a cycle A-> B->C->A + s.Add(`E`, `A`) // E is a branch not part of the cycle + + t.Log(s.ToMermaid(nil)) // Should not panic + + // Add more to reset the sequencer state + s.Add(`F`, `E`) // F is a root via E not part of the cycle + + expectPanic := func(h func()) { + defer func() { + r := recover().(error) + if !errors.Is(r, ErrCycleDetected) { + t.Errorf(`expected panic due to cycle, but got: %v`, r) + } + }() + h() + s.DepthCount() + t.Errorf(`expected panic due to cycle, but did not panic`) + } + + expectPanic(func() { s.DepthCount() }) + expectPanic(func() { s.Depth(`A`) }) + expectPanic(func() { s.Group(2) }) + + if mmd := s.ToMermaid(nil); len(mmd) == 0 { // Should not panic + t.Errorf(`expected non-empty mermaid output, but got empty`) + } + + cycles := s.GetCycles() + sort.Strings(cycles) + exp := []string{`A`, `B`, `C`} + if diff := cmp.Diff(cycles, exp); len(diff) > 0 { + t.Errorf("unexpected cycles (-got +exp):\n%s", diff) + } +} + +func TestLargeGraph(t *testing.T) { + const itemCount = 1000 + const maxDeps = 10 + + items := make([]int, itemCount) + for i := 0; i < itemCount; i++ { + items[i] = i + } + + r := rand.New(rand.NewSource(0)) + r.Shuffle(itemCount, func(i, j int) { + items[i], items[j] = items[j], items[i] + }) + + s := New[int]() + for i := 0; i < 
maxDeps; i++ { + s.Add(items[i]) // Add leaf items with no dependencies + } + for i := maxDeps; i < itemCount; i++ { + s.Add(items[i]) + + // "Randomly" add dependencies to previous items, since only previous + // items are chosen from no cycles should occur. + // If the same item is chosen multiple times it should have no effect. + depCount := r.Intn(maxDeps) + for j := 0; j < depCount; j++ { + s.Add(items[i], items[r.Intn(i)]) + } + } + + s.DepthCount() // This should not panic and internal validation should pass. + if len(s.GetCycles()) > 0 { + t.Errorf(`expected no cycles in the large graph, but found some`) + } +} diff --git a/compiler/internal/sequencer/squencerImp.go b/compiler/internal/sequencer/squencerImp.go new file mode 100644 index 000000000..29d27334f --- /dev/null +++ b/compiler/internal/sequencer/squencerImp.go @@ -0,0 +1,344 @@ +package sequencer + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strings" +) + +// errSequencerLogic is panicked if an error internal to the sequencer logic +// or error in the parent/child pointers is detected. +// This error should never be panicked if the sequencer is working correctly. +var errSequencerLogic = errors.New(`error in sequencer logic or parent/child pointers`) + +type sequencerImp[T comparable] struct { + // vertices is a set of all vertices indexed by the item they represent. + vertices vertexSet[T] + + // needSequencing indicates that the sequencer needs to perform sequencing. + needSequencing bool + + // depthCount is the number of unique depths in the dependency graph. + // This may be invalid if sequencing needs to be performed. + depthCount int + + // groups is the map of groups indexed by their depth. + // This may contain invalid groups if sequencing needs to be performed. + groups map[int]vertexSet[T] + + // dependencyCycles is the list of items that are part of any cycle + // or depend on an item in a cycle. 
+ dependencyCycles vertexSet[T] +} + +func (s *sequencerImp[T]) Add(child T, parents ...T) { + c := s.getOrAdd(child) + for _, parent := range parents { + if !c.parents.has(parent) { + p := s.getOrAdd(parent) + c.addDependency(p) + } + } +} + +func (s *sequencerImp[T]) Has(item T) bool { + return s.vertices.has(item) +} + +func (s *sequencerImp[T]) Children(item T) []T { + if v, exists := s.vertices[item]; exists { + return v.children.toSlice() + } + return nil +} + +func (s *sequencerImp[T]) Parents(item T) []T { + if v, exists := s.vertices[item]; exists { + return v.parents.toSlice() + } + return nil +} + +func (s *sequencerImp[T]) Depth(item T) int { + s.performSequencing(true) + if v, exists := s.vertices[item]; exists { + return v.depth + } + return -1 +} + +func (s *sequencerImp[T]) DepthCount() int { + s.performSequencing(true) + return s.depthCount +} + +func (s *sequencerImp[T]) Group(depth int) []T { + s.performSequencing(true) + return s.groups[depth].toSlice() +} + +func (s *sequencerImp[T]) AllGroups() [][]T { + s.performSequencing(true) + groups := make([][]T, s.depthCount) + for depth := 0; depth < s.depthCount; depth++ { + groups[depth] = s.groups[depth].toSlice() + } + return groups +} + +func (s *sequencerImp[T]) GetCycles() []T { + s.performSequencing(false) + return s.dependencyCycles.toSlice() +} + +type sortByName[T comparable] struct { + vertices []*vertex[T] + names []string +} + +func (s *sortByName[T]) Len() int { + return len(s.vertices) +} + +func (s *sortByName[T]) Less(i, j int) bool { + return s.names[i] < s.names[j] +} + +func (s *sortByName[T]) Swap(i, j int) { + s.vertices[i], s.vertices[j] = s.vertices[j], s.vertices[i] + s.names[i], s.names[j] = s.names[j], s.names[i] +} + +func (s *sequencerImp[T]) ToMermaid(itemToString func(item T) string) string { + s.performSequencing(false) + + if itemToString == nil { + itemToString = func(item T) string { + return fmt.Sprintf("%v", item) + } + } + + buf := &bytes.Buffer{} + write := 
func(format string, args ...any) { + // Ignore the error since we are writing to a buffer. + _, _ = fmt.Fprintf(buf, format, args...) + } + + // Sort the output to make it easier to read and compare consecutive runs. + vertices := make([]*vertex[T], 0, len(s.vertices)) + names := make([]string, 0, len(vertices)) + for _, v := range s.vertices { + vertices = append(vertices, v) + names = append(names, itemToString(v.item)) + } + sort.Sort(&sortByName[T]{vertices: vertices, names: names}) + + ids := make(map[*vertex[T]]string, len(s.vertices)) + for i, v := range vertices { + ids[v] = fmt.Sprintf(`v%d`, i) + } + + toIds := func(vs vertexSet[T]) string { + rs := make([]string, 0, len(vs)) + for _, v := range vs { + rs = append(rs, ids[v]) + } + sort.Strings(rs) + return strings.Join(rs, ` & `) + } + + write("flowchart TB\n") + if len(s.dependencyCycles) > 0 { + write(" classDef partOfCycle stroke:#f00\n") + } + for i, v := range vertices { + write(` %s["%v"]`, ids[v], names[i]) + if s.dependencyCycles.has(v.item) { + write(`:::partOfCycle`) + } + if len(v.parents) > 0 { + write(` --> %s`, toIds(v.parents)) + } + write("\n") + } + for depth := s.depthCount - 1; depth >= 0; depth-- { + if group := s.groups[depth]; len(group) > 0 { + write(" subgraph Depth %d\n", depth) + write(" %s\n", toIds(group)) + write(" end\n") + } + } + return buf.String() +} + +func (s *sequencerImp[T]) getOrAdd(item T) *vertex[T] { + v, added := s.vertices.getOrAdd(item) + s.needSequencing = s.needSequencing || added + return v +} + +// performSequencing performs a full sequencing of the items in the +// dependency graph. It calculates the depth of each item and groups +// them by their depth. +// +// `panicOnCycle“ indicates whether to panic if a cycle is detected, +// or to exit gracefully. +// +// This assumes that the sequencing is not called often and is typically +// only called after all the items have been added. 
Because of this, +// it always performs a full sequencing of the items without using any +// previous solved information. Although this is slower for the few cases +// where sequencing happens often with only a few new items added at a time, +// it is much simpler to implement and maintain full sequencing than +// implementing both incremental and full sequencing. +func (s *sequencerImp[T]) performSequencing(panicOnCycle bool) { + if !s.needSequencing { + // If a sequencing was already performed and determined that there + // was a cycle, panic if `panicOnCycle` is true. + if len(s.dependencyCycles) > 0 && panicOnCycle { + panic(ErrCycleDetected) + } + return + } + s.needSequencing = false + + // Perform a full sequencing of the items. + s.clearGroups() + ready := newVertexStack[T](len(s.vertices)) + waitingCount := s.prepareWaitingAndReady(true, s.vertices, ready) + waitingCount = s.propagateDepth(true, waitingCount, ready) + if waitingCount <= 0 { + s.validateGroups() + return + } + + // If there are still waiting vertices, it means there is a cycle. + // Prune off any branches to roots that are not part of the cycles + // using the same logic that starts from the leaves except starting + // from the roots and working backwards. + // This will not be able to remove branches that go between two cycles + // even if vertices in that branch can not reach themselves via a cycle. + wv := s.vertices.getWaiting(waitingCount) + waitingCount = s.prepareWaitingAndReady(false, wv, ready) + waitingCount = s.propagateDepth(false, waitingCount, ready) + + // Sanity check that we have waiting vertices left and we didn't + // somehow prune away the vertices participating in the cycles. + if waitingCount <= 0 { + panic(fmt.Errorf(`%w: pruning cycles resulting in no items in the cycles`, errSequencerLogic)) + } + + // Anything still waiting is part of a cycle or depends on an item in a + // cycle that wasn't able to be pruned. 
+ s.dependencyCycles = s.vertices.getWaiting(waitingCount) + if panicOnCycle { + panic(ErrCycleDetected) + } +} + +// clearGroups resets the sequencer state, clearing the groups and depth count. +func (s *sequencerImp[T]) clearGroups() { + s.depthCount = 0 + s.groups = map[int]vertexSet[T]{} + s.dependencyCycles = nil +} + +// writeDepth updates the sequencer state with the depth of the given vertex. +func (s *sequencerImp[T]) writeDepth(v *vertex[T]) { + depth := v.parents.maxDepth() + 1 + v.depth = depth + if _, exists := s.groups[depth]; !exists { + s.groups[depth] = vertexSet[T]{} + if depth >= s.depthCount { + s.depthCount = depth + 1 + } + } + s.groups[depth].add(v) +} + +// prepareWaitingAndReady prepare the ready sets so that any leaf (or root) vertex +// is ready to be processed and any waiting vertex has its parent count. +// This returns the number of waiting vertices. +// +// If `forward` is true, it prepares the vertices for sequencing by starting with the leaves. +// If `forward` is false, it prepares the vertices for reducing to cycles by starting with the roots. +func (s *sequencerImp[T]) prepareWaitingAndReady(forward bool, vs vertexSet[T], ready *vertexStack[T]) int { + waitingCount := 0 + for _, v := range vs { + if forward { + v.waiting = len(v.parents) + } else { + // For reducing to cycles, count the number of children that are still waiting. + count := 0 + for _, c := range v.children { + if vs.has(c.item) { + count++ + } + } + v.waiting = count + } + + if v.isReady() { + s.writeDepth(v) + ready.push(v) + } else { + waitingCount++ + } + } + return waitingCount +} + +// propagateDepth processes the ready vertices, assigning them a depth and +// updating the waiting vertices. If a waiting vertex has all of its +// parents (or children) processed, then move it to the ready list. +// This continues until all ready vertices are processed. 
+func (s *sequencerImp[T]) propagateDepth(forward bool, waitingCount int, ready *vertexStack[T]) int { + for ready.hasMore() { + v := ready.pop() + s.writeDepth(v) + for _, c := range v.edges(forward) { + c.decWaiting() + if c.isReady() { + ready.push(c) + waitingCount-- + } + } + } + return waitingCount +} + +// validateGroups validates that the groups and depths are correctly formed. +// This is a sanity check to ensure that the sequencer logic appears correct. +func (s *sequencerImp[T]) validateGroups() { + if s.depthCount <= 0 { + panic(fmt.Errorf(`%w: depth count is invalid`, errSequencerLogic)) + } + count := 0 + for depth := 0; depth < s.depthCount; depth++ { + group := s.groups[depth] + if len(group) == 0 { + panic(fmt.Errorf(`%w: group %d is empty`, errSequencerLogic, depth)) + } + for _, v := range group { + if v.depth != depth { + panic(fmt.Errorf(`%w: vertex %v in group %d has depth %d`, errSequencerLogic, v.item, depth, v.depth)) + } + hasPrior := false + for _, p := range v.parents { + if p.depth >= v.depth { + panic(fmt.Errorf(`%w: vertex %v has parent %v with depth %d that is not less than its depth %d`, errSequencerLogic, v.item, p.item, p.depth, v.depth)) + } + hasPrior = hasPrior || p.depth == v.depth-1 + } + if depth > 0 && !hasPrior { + panic(fmt.Errorf(`%w: vertex %v in group %d has no parent with depth %d`, errSequencerLogic, v.item, depth, v.depth-1)) + } + } + count += len(group) + } + if count != len(s.vertices) { + panic(fmt.Errorf(`%w: vertices in groups, %d, does not match vertex count %d`, errSequencerLogic, count, len(s.vertices))) + } +} diff --git a/compiler/internal/sequencer/vertex.go b/compiler/internal/sequencer/vertex.go new file mode 100644 index 000000000..dc8e081c3 --- /dev/null +++ b/compiler/internal/sequencer/vertex.go @@ -0,0 +1,63 @@ +package sequencer + +// vertex represents a single item in the dependency graph. 
+type vertex[T comparable] struct { + item T + depth int + parents vertexSet[T] + children vertexSet[T] + + // waiting is used during sequencing. Typically it contains the number of + // parents that are not yet processed for this vertex. + // When it reaches 0, the vertex can be processed. + // It is used to avoid processing the same vertex multiple times. + // + // If a cycle is detected in the graph, this value can be used to + // reduce the number of vertices that are dependent on the cycle. + // In that case this value will be set to the number of children + // that are not yet processed for this vertex. + // + // When reducing to cycles this can be negative for any vertex that was + // ready during sequencing. We don't want those to be processed again, + // so we only say a vertex is ready when waiting is zero. + // + // Since this number is only used during sequencing it could have been + // stored in a map, however, it is faster to store it directly in the + // vertex and avoid the map lookup. + waiting int +} + +func newVertex[T comparable](item T) *vertex[T] { + return &vertex[T]{ + item: item, + depth: -1, + waiting: -1, + } +} + +func (v *vertex[T]) addDependency(p *vertex[T]) { + if p.children == nil { + p.children = vertexSet[T]{} + } + p.children.add(v) + + if v.parents == nil { + v.parents = vertexSet[T]{} + } + v.parents.add(p) +} + +func (v *vertex[T]) edges(forward bool) vertexSet[T] { + if forward { + return v.children + } + return v.parents +} + +func (v *vertex[T]) decWaiting() { + v.waiting-- +} + +func (v *vertex[T]) isReady() bool { + return v.waiting == 0 +} diff --git a/compiler/internal/sequencer/vertexSet.go b/compiler/internal/sequencer/vertexSet.go new file mode 100644 index 000000000..bcb6ada31 --- /dev/null +++ b/compiler/internal/sequencer/vertexSet.go @@ -0,0 +1,55 @@ +package sequencer + +// vertexSet is a set of vertices indexed by the item the represent. 
+// +// The values will be unique since vertices contain the item themselves, +// and no two vertices can represent the same item in the graph, +// meaning this map is bijective. +type vertexSet[T comparable] map[T]*vertex[T] + +func (vs vertexSet[T]) add(v *vertex[T]) { + vs[v.item] = v +} + +func (vs vertexSet[T]) getOrAdd(item T) (*vertex[T], bool) { + if v, exists := vs[item]; exists { + return v, false + } + + v := newVertex(item) + vs.add(v) + return v, true +} + +func (vs vertexSet[T]) has(item T) bool { + _, exists := vs[item] + return exists +} + +func (vs vertexSet[T]) maxDepth() int { + maxDepth := -1 + for _, v := range vs { + if v.depth > maxDepth { + maxDepth = v.depth + } + } + return maxDepth +} + +func (vs vertexSet[T]) getWaiting(capacity int) vertexSet[T] { + wvs := make(vertexSet[T], capacity) + for _, v := range vs { + if !v.isReady() { + wvs.add(v) + } + } + return wvs +} + +func (vs vertexSet[T]) toSlice() []T { + items := make([]T, 0, len(vs)) + for item := range vs { + items = append(items, item) + } + return items +} diff --git a/compiler/internal/sequencer/vertexStack.go b/compiler/internal/sequencer/vertexStack.go new file mode 100644 index 000000000..9e1fbfed5 --- /dev/null +++ b/compiler/internal/sequencer/vertexStack.go @@ -0,0 +1,26 @@ +package sequencer + +type vertexStack[T comparable] struct { + stack []*vertex[T] +} + +func newVertexStack[T comparable](capacity int) *vertexStack[T] { + return &vertexStack[T]{ + stack: make([]*vertex[T], 0, capacity), + } +} + +func (vs *vertexStack[T]) hasMore() bool { + return len(vs.stack) > 0 +} + +func (vs *vertexStack[T]) push(v *vertex[T]) { + vs.stack = append(vs.stack, v) +} + +func (vs *vertexStack[T]) pop() *vertex[T] { + maxIndex := len(vs.stack) - 1 + v := vs.stack[maxIndex] + vs.stack = vs.stack[:maxIndex] + return v +} diff --git a/compiler/internal/typeparams/resolver.go b/compiler/internal/typeparams/resolver.go index 5718c364d..aaffd9973 100644 --- 
a/compiler/internal/typeparams/resolver.go +++ b/compiler/internal/typeparams/resolver.go @@ -79,6 +79,17 @@ func NewResolver(tc *types.Context, root Instance) *Resolver { replacements[nestTParams.At(i)] = root.TNest[i] } + // If no type arguments are provided, check if the type already has + // type arguments. This is the case for instantiated objects in the instance. + if tParams.Len() > 0 && len(root.TArgs) == 0 { + if typ, ok := root.Object.Type().(interface{ TypeArgs() *types.TypeList }); ok { + root.TArgs = make(typesutil.TypeList, typ.TypeArgs().Len()) + for i := 0; i < typ.TypeArgs().Len(); i++ { + root.TArgs[i] = typ.TypeArgs().At(i) + } + } + } + // Check the root's type parameters and arguments match, // then add them to the replacements. if tParams.Len() != len(root.TArgs) { diff --git a/internal/srctesting/srctesting.go b/internal/srctesting/srctesting.go index e4242991c..9e4a4974b 100644 --- a/internal/srctesting/srctesting.go +++ b/internal/srctesting/srctesting.go @@ -20,6 +20,7 @@ import ( // Fixture provides utilities for parsing and type checking Go code in tests. type Fixture struct { T *testing.T + Context *types.Context FileSet *token.FileSet Info *types.Info Packages map[string]*types.Package @@ -41,6 +42,7 @@ func newInfo() *types.Info { func New(t *testing.T) *Fixture { return &Fixture{ T: t, + Context: types.NewContext(), FileSet: token.NewFileSet(), Info: newInfo(), Packages: map[string]*types.Package{}, @@ -65,6 +67,7 @@ func (f *Fixture) Parse(name, src string) *ast.File { func (f *Fixture) Check(importPath string, files ...*ast.File) (*types.Info, *types.Package) { f.T.Helper() config := &types.Config{ + Context: f.Context, Sizes: &types.StdSizes{WordSize: 4, MaxAlign: 8}, Importer: f, } @@ -148,7 +151,7 @@ func Format(t *testing.T, fset *token.FileSet, node any) string { return buf.String() } -// LookupObj returns a top-level object with the given name. +// LookupObj returns a top-level or nested object with the given name. 
// // Methods can be referred to as RecvTypeName.MethodName. func LookupObj(pkg *types.Package, name string) types.Object {