diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000000..139c590eb87892 --- /dev/null +++ b/VERSION @@ -0,0 +1,2 @@ +go1.23.4 +time 2024-11-27T20:27:20Z diff --git a/codereview.cfg b/codereview.cfg index 77a74f108eae36..3cf4bb2dd376de 100644 --- a/codereview.cfg +++ b/codereview.cfg @@ -1 +1,2 @@ -branch: master +branch: release-branch.go1.23 +parent-branch: master diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 6c23e59adf19eb..be93c4a24bb566 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -2579,6 +2579,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype) @@ -2595,9 +2600,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.PtrType: @@ -2826,6 +2830,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype) @@ -2842,9 +2851,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.VoidType: @@ -3110,10 +3118,11 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct } // Round off up to talign, assumed to be a power of 2. + origOff := off off = (off + talign - 1) &^ (talign - 1) if f.ByteOffset > off { - fld, sizes = c.pad(fld, sizes, f.ByteOffset-off) + fld, sizes = c.pad(fld, sizes, f.ByteOffset-origOff) off = f.ByteOffset } if f.ByteOffset < off { diff --git a/src/cmd/cgo/internal/test/cgo_test.go b/src/cmd/cgo/internal/test/cgo_test.go index 5e02888b3dddd9..5393552e07a4d1 100644 --- a/src/cmd/cgo/internal/test/cgo_test.go +++ b/src/cmd/cgo/internal/test/cgo_test.go @@ -70,6 +70,7 @@ func Test31891(t *testing.T) { test31891(t) } func Test42018(t *testing.T) { test42018(t) } func Test45451(t *testing.T) { test45451(t) } func Test49633(t *testing.T) { test49633(t) } +func Test69086(t *testing.T) { test69086(t) } func TestAlign(t *testing.T) { testAlign(t) } func TestAtol(t *testing.T) { testAtol(t) } func TestBlocking(t *testing.T) { testBlocking(t) } diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go index 374689631d77ab..362be79a737bee 100644 --- a/src/cmd/cgo/internal/test/test.go +++ b/src/cmd/cgo/internal/test/test.go @@ -940,6 +940,19 @@ typedef struct { } issue67517struct; static void issue67517(issue67517struct* p) {} +// Issue 69086. +// GCC added the __int128 type in GCC 4.6, released in 2011. 
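+// The struct mixes a 4-byte int, a 16-byte __int128 (a uint64_t stands in
+// when the compiler lacks __int128), and a trailing char, so cgo must get
+// both the field alignment and the trailing padding of the Go struct right.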
+typedef struct {
+	int a;
+#ifdef __SIZEOF_INT128__
+	unsigned __int128 b;
+#else
+	uint64_t b;
+#endif
+	unsigned char c;
+} issue69086struct;
+static int issue690861(issue69086struct* p) { p->b = 1234; return p->c; }
+static int issue690862(unsigned long ul1, unsigned long ul2, unsigned int u, issue69086struct s) { return (int)(s.b); }
 */
 import "C"
@@ -2349,3 +2362,24 @@ func issue67517() {
 		b: nil,
 	})
 }
+
+// Issue 69086.
+func test69086(t *testing.T) {
+	var s C.issue69086struct
+
+	typ := reflect.TypeOf(s)
+	for i := 0; i < typ.NumField(); i++ {
+		f := typ.Field(i)
+		t.Logf("field %d: name %s size %d align %d offset %d", i, f.Name, f.Type.Size(), f.Type.Align(), f.Offset)
+	}
+
+	s.c = 1
+	got := C.issue690861(&s)
+	if got != 1 {
+		t.Errorf("field: got %d, want 1", got)
+	}
+	got = C.issue690862(1, 2, 3, s)
+	if got != 1234 {
+		t.Errorf("call: got %d, want 1234", got)
+	}
+}
diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go
index a8eebead25dc9f..c263b82d5768f4 100644
--- a/src/cmd/cgo/internal/testcarchive/carchive_test.go
+++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go
@@ -33,7 +33,7 @@ import (
 	"unicode"
 )
 
-var globalSkip = func(t *testing.T) {}
+var globalSkip = func(t testing.TB) {}
 
 // Program to run.
 var bin []string
@@ -59,12 +59,12 @@ func TestMain(m *testing.M) {
 
 func testMain(m *testing.M) int {
 	if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
-		globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
+		globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
 		return m.Run()
 	}
 	if runtime.GOOS == "linux" {
 		if _, err := os.Stat("/etc/alpine-release"); err == nil {
-			globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
+			globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
 			return m.Run()
 		}
 	}
@@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) {
 	}
 }
 
-// Issue 59294. Test calling Go function from C after using some
-// stack space.
+// Issue 59294 and 68285. Test calling Go function from C after using
+// various amounts of stack space.
func TestDeepStack(t *testing.T) {
 	globalSkip(t)
 	testenv.MustHaveGoBuild(t)
@@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) {
 	}
 }
 
+func BenchmarkCgoCallbackMainThread(b *testing.B) {
+	// Benchmark for calling into Go from the C main thread.
+	// See issue #68587.
+	//
+	// It uses a subprocess, which is a C binary that calls
+	// Go on the main thread b.N times. There is some overhead
+	// for launching the subprocess. It is probably fine when
+	// b.N is large.
+
+	globalSkip(b)
+	testenv.MustHaveGoBuild(b)
+	testenv.MustHaveCGO(b)
+	testenv.MustHaveBuildMode(b, "c-archive")
+
+	if !testWork {
+		defer func() {
+			os.Remove("testp10" + exeSuffix)
+			os.Remove("libgo10.a")
+			os.Remove("libgo10.h")
+		}()
+	}
+
+	cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10")
+	out, err := cmd.CombinedOutput()
+	b.Logf("%v\n%s", cmd.Args, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a")
+	out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+	b.Logf("%v\n%s", ccArgs, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	argv := cmdToRun("./testp10")
+	argv = append(argv, fmt.Sprint(b.N))
+	cmd = exec.Command(argv[0], argv[1:]...)
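+	// The build steps above are not timed; the single subprocess run
+	// below performs all b.N callbacks.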
+
+	b.ResetTimer()
+	err = cmd.Run()
+	if err != nil {
+		b.Fatal(err)
+	}
+}
+
 func TestSharedObject(t *testing.T) {
 	// Test that we can put a Go c-archive into a C shared object.
 	globalSkip(t)
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
new file mode 100644
index 00000000000000..803a0fa5f1cb35
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "C"
+
+//export GoF
+func GoF() {}
+
+func main() {}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
index acb08d90ecd5bf..3528bef654ddb3 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
@@ -6,9 +6,29 @@ package main
 
 import "runtime"
 
+// extern void callGoWithVariousStack(int);
 import "C"
 
 func main() {}
 
 //export GoF
-func GoF() { runtime.GC() }
+func GoF(p int32) {
+	runtime.GC()
+	if p != 0 {
+		panic("panic")
+	}
+}
+
+//export callGoWithVariousStackAndGoFrame
+func callGoWithVariousStackAndGoFrame(p int32) {
+	if p != 0 {
+		defer func() {
+			e := recover()
+			if e == nil {
+				panic("did not panic")
+			}
+			runtime.GC()
+		}()
+	}
+	C.callGoWithVariousStack(C.int(p))
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main10.c b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
new file mode 100644
index 00000000000000..53c3c83a99e35c
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
@@ -0,0 +1,22 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "libgo10.h"
+
+int main(int argc, char **argv) {
+	int n, i;
+
+	if (argc != 2) {
+		perror("wrong arg");
+		return 2;
+	}
+	n = atoi(argv[1]);
+	for (i = 0; i < n; i++)
+		GoF();
+
+	return 0;
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main9.c b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
index 95ad4dea49fb1a..e641d8a8027a5f 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/main9.c
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
@@ -6,19 +6,27 @@ void use(int *x) { (*x)++; }
 
-void callGoFWithDeepStack() {
+void callGoFWithDeepStack(int p) {
 	int x[10000];
 
 	use(&x[0]);
 	use(&x[9999]);
 
-	GoF();
+	GoF(p);
 
 	use(&x[0]);
 	use(&x[9999]);
 }
 
+void callGoWithVariousStack(int p) {
+	GoF(0);                  // call GoF without using much stack
+	callGoFWithDeepStack(p); // call GoF with a deep stack
+	GoF(0);                  // again on a shallow stack
+}
+
 int main() {
-	GoF();                  // call GoF without using much stack
-	callGoFWithDeepStack(); // call GoF with a deep stack
+	callGoWithVariousStack(0);
+
+	callGoWithVariousStackAndGoFrame(0); // normal execution
+	callGoWithVariousStackAndGoFrame(1); // panic and recover
 }
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index 2675a16a241fe3..32f5a771a34a66 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool {
 		return false
 	}
 
-	// Closures within function Foo are named like "Foo.funcN..."
-	// TODO(mdempsky): Better way to recognize this.
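+	// Instead of matching on generated closure names, walk up the
+	// ClosureParent chain (recorded in NewClosureFunc) and look for f.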
-	fn := f.Sym().Name
-	cn := c.Sym().Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+	for p := c.ClosureParent; p != nil; p = p.ClosureParent {
+		if p == f {
+			return true
+		}
+	}
+	return false
 }
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 7fe4445dad7638..ffeddea0c9d588 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) {
 	compileAndImportPkg(t, "issue25596")
 }
 
+func TestIssue70394(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	// This package only handles gc export data.
+	if runtime.Compiler != "gc" {
+		t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+	}
+
+	pkg := compileAndImportPkg(t, "alias")
+	obj := lookupObj(t, pkg.Scope(), "A")
+
+	typ := obj.Type()
+	if _, ok := typ.(*types2.Alias); !ok {
+		t.Fatalf("type of %s is %s, wanted an alias", obj, typ)
+	}
+}
+
 func importPkg(t *testing.T, path, srcDir string) *types2.Package {
 	pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
 	if err != nil {
diff --git a/src/cmd/compile/internal/importer/testdata/alias.go b/src/cmd/compile/internal/importer/testdata/alias.go
new file mode 100644
index 00000000000000..51492fc943ea0c
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/alias.go
@@ -0,0 +1,7 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+type A = int32
diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go
index d3c7d4516f7ee8..9d267e6db411c0 100644
--- a/src/cmd/compile/internal/importer/ureader.go
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -9,6 +9,7 @@ import (
 	"cmd/compile/internal/syntax"
 	"cmd/compile/internal/types2"
 	"cmd/internal/src"
+	"internal/buildcfg"
 	"internal/pkgbits"
 )
 
@@ -28,11 +29,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
 	pr := pkgReader{
 		PkgDecoder: input,
 
-		ctxt:    ctxt,
-		imports: imports,
-		// Currently, the compiler panics when using Alias types.
-		// TODO(gri) set to true once this is fixed (issue #66873)
-		enableAlias: false,
+		ctxt:        ctxt,
+		imports:     imports,
+		enableAlias: true,
 
 		posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
 		pkgs:     make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
@@ -411,6 +410,14 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
 			panic("weird")
 
 		case pkgbits.ObjAlias:
+			if buildcfg.Experiment.AliasTypeParams && len(r.dict.bounds) > 0 {
+				// Temporary work-around for issue #68526: rather than panicking
+				// with a non-descriptive index-out-of-bounds panic when trying
+				// to access a missing type parameter, instead panic with a more
+				// descriptive error. Only needed for Go 1.23; Go 1.24 will have
+				// the correct implementation.
+ panic("importing generic type aliases is not supported in Go 1.23 (see issue #68526)") + } pos := r.pos() typ := r.typ() return newAliasTypeName(pr.enableAlias, pos, objPkg, objName, typ) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index d0c8ee359befff..4fa9055b4b2c0b 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -51,6 +51,8 @@ import ( // the generated ODCLFUNC, but there is no // pointer from the Func back to the OMETHVALUE. type Func struct { + // if you add or remove a field, don't forget to update sizeof_test.go + miniNode Body Nodes @@ -76,6 +78,9 @@ type Func struct { // Populated during walk. Closures []*Func + // Parent of a closure + ClosureParent *Func + // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th // scope's parent is stored at Parents[i-1]. @@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, fn.Nname.Defn = fn pkg.Funcs = append(pkg.Funcs, fn) + fn.ClosureParent = outerfn return fn } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 68d2865595b716..6331cceb4a59b4 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 176, 296}, + {Func{}, 180, 304}, {Name{}, 96, 168}, } diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go index 97ab254395332a..e488c3cf377cae 100644 --- a/src/cmd/compile/internal/rangefunc/rangefunc_test.go +++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go @@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) { t.Errorf("Expected y=3, got y=%d\n", y) } } + +func Bug70035(s1, s2, s3 []string) string { + var c1 string + for v1 := range slices.Values(s1) { + var c2 string + for v2 := range slices.Values(s2) { + var c3 string + for v3 := range slices.Values(s3) { + c3 = c3 + v3 + } + c2 = c2 + v2 + c3 + } + c1 = c1 + v1 + c2 + } + return c1 +} + +func Test70035(t *testing.T) { + got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"}) + want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC" + if got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go index 5148d5db034142..07f35b1854acaf 100644 --- a/src/cmd/compile/internal/types2/alias.go +++ b/src/cmd/compile/internal/types2/alias.go @@ -134,10 +134,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { // newAliasInstance creates a new alias instance for the given origin and type // arguments, recording pos as the position of its synthetic object (for error // reporting). 
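+// If expanding is non-nil, it is the Named instance being expanded that
+// triggered this instantiation; it is threaded through to subst.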
-func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 5126ac51116cd9..a6b105ace5cc33 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2898,22 +2898,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", "go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // 
(file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 91ad474e9df315..ada421ba939ed4 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -327,7 +327,6 @@ func (check *Checker) initFiles(files []*syntax.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -336,33 +335,18 @@ func (check *Checker) initFiles(files []*syntax.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. - // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). + if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of allowing //go:build lines + // to sometimes set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. 
+ // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. + v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -377,6 +361,13 @@ func (check *Checker) initFiles(files []*syntax.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) > 0 { + return a + } + return b +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 72227ab12256dd..308d1f550ad4fa 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/syntax" "errors" "fmt" + "internal/buildcfg" . "internal/types/errors" ) @@ -126,8 +127,9 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? - assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -138,7 +140,7 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 20e3f52facd9de..b339def7354e28 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -1121,3 +1121,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + conf := Config{EnableAlias: true} + pkg := mustTypecheck(src, &conf, nil) + T := pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go index 1859b27aa4edfb..02b5ecf1669ea5 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -282,7 +282,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go index 650ae846a61e85..7c4cd732501e43 100644 --- a/src/cmd/compile/internal/types2/subst.go +++ b/src/cmd/compile/internal/types2/subst.go @@ -115,7 +115,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. 
 	targs, updated := subst.typeList(t.TypeArgs().list())
 	if updated {
-		return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt)
+		return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt)
 	}
 
 case *Array:
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index 0457502e393942..a2d9e42c615ca4 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -131,8 +131,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool {
 	}
 	for _, t := range s.terms {
 		assert(t.typ != nil)
-		// x == under(x) for ~x terms
-		u := t.typ
+		// Unalias(x) == under(x) for ~x terms
+		u := Unalias(t.typ)
 		if !t.tilde {
 			u = under(u)
 		}
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index 49f02012d3103a..484fef03d10a16 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -9,7 +9,7 @@ require (
 	golang.org/x/mod v0.19.0
 	golang.org/x/sync v0.7.0
 	golang.org/x/sys v0.22.0
-	golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701
+	golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147
 	golang.org/x/term v0.20.0
 	golang.org/x/tools v0.22.1-0.20240618181713-f2d2ebe43e72
 )
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index ee671f95122344..919dbd2dc74c74 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -16,8 +16,8 @@ golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
 golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 h1:+bltxAtk8YFEQ61B/lcYQM8e+7XjLwSDbpspVaVYkz8=
-golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM=
+golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 h1:Lj8KbuZmoFUbI6pQ28G3Diz/5bRYD2UY5vfAmhrLZWo=
+golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM=
 golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
 golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
 golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index 0d8455d92e336b..bc484dedf6ed54 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int {
 	return int(sh.Size)
 }
 
-func addbuildinfo(val string) {
+func addbuildinfo(ctxt *Link) {
+	val := *flagHostBuildid
 	if val == "gobuildid" {
 		buildID := *flagBuildid
 		if buildID == "" {
 			Exitf("-B gobuildid requires a Go build ID supplied via -buildid")
 		}
 
+		if ctxt.IsDarwin() {
+			buildinfo = uuidFromGoBuildId(buildID)
+			return
+		}
+
 		hashedBuildID := notsha256.Sum256([]byte(buildID))
 		buildinfo = hashedBuildID[:20]
 
@@ -821,11 +827,13 @@
 	if !strings.HasPrefix(val, "0x") {
 		Exitf("-B argument must start with 0x: %s", val)
 	}
 	ov := val
 	val = val[2:]
-	const maxLen = 32
+	maxLen := 32
+	if ctxt.IsDarwin() {
+		maxLen = 16
+	}
 	if hex.DecodedLen(len(val)) > maxLen {
 		Exitf("-B option too long (max %d digits): %s", maxLen, ov)
 	}
diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go
index 34624c25a9f333..c5a85f0e75e7cf 100644
--- a/src/cmd/link/internal/ld/macho.go
+++ b/src/cmd/link/internal/ld/macho.go
@@ -297,6
+297,8 @@ func getMachoHdr() *MachoHdr { return &machohdr } +// Create a new Mach-O load command. ndata is the number of 32-bit words for +// the data (not including the load command header). func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { if arch.PtrSize == 8 && (ndata&1 != 0) { ndata++ @@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) { } } + if ctxt.IsInternal() && len(buildinfo) > 0 { + ml := newMachoLoad(ctxt.Arch, LC_UUID, 4) + // Mach-O UUID is 16 bytes + if len(buildinfo) < 16 { + buildinfo = append(buildinfo, make([]byte, 16)...) + } + // By default, buildinfo is already in UUIDv3 format + // (see uuidFromGoBuildId). + ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo) + ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:]) + ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:]) + ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:]) + } + if ctxt.IsInternal() && ctxt.NeedCodeSign() { ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) ml.data[0] = uint32(codesigOff) diff --git a/src/cmd/link/internal/ld/macho_update_uuid.go b/src/cmd/link/internal/ld/macho_update_uuid.go index de27e655d59bf4..40e0c11ed19d6e 100644 --- a/src/cmd/link/internal/ld/macho_update_uuid.go +++ b/src/cmd/link/internal/ld/macho_update_uuid.go @@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte { // to use this UUID flavor than any of the others. This is similar // to how other linkers handle this (for example this code in lld: // https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524). - rv[6] &= 0xcf + rv[6] &= 0x0f rv[6] |= 0x30 rv[8] &= 0x3f rv[8] |= 0xc0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 56e865d8a53287..12bc896c66c3d7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -95,6 +95,7 @@ var ( flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table + flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID") flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") @@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) { flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible") - objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) @@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) { *flagBuildid = "go-openbsd" } + if *flagHostBuildid != "" { + addbuildinfo(ctxt) + } + // enable benchmarking var bench *benchmark.Metrics if len(*benchmarkFlag) != 0 { diff --git a/src/cmd/trace/gstate.go b/src/cmd/trace/gstate.go index 638d492670a6e7..4b380db9f53cd7 100644 --- a/src/cmd/trace/gstate.go +++ b/src/cmd/trace/gstate.go @@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack 
trace.Stack, ctx *traceContext) { if gs.lastStopStack != trace.NoStack { stk = ctx.Stack(viewerFrames(gs.lastStopStack)) } + var endStk int + if stack != trace.NoStack { + endStk = ctx.Stack(viewerFrames(stack)) + } // Check invariants. if gs.startRunningTime == 0 { panic("silently broken trace or generator invariant (startRunningTime != 0) not held") @@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { Dur: ts.Sub(gs.startRunningTime), Resource: uint64(gs.executing), Stack: stk, + EndStack: endStk, }) // Flush completed ranges. diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go index a38f371d0f51b6..e60ab7e9fdd73e 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go @@ -16,6 +16,7 @@ import ( "os" "os/exec" "path/filepath" + "sync/atomic" "golang.org/x/telemetry/internal/telemetry" ) @@ -29,12 +30,22 @@ const ( // creation flag. var needNoConsole = func(cmd *exec.Cmd) {} +var downloads int64 + +// Downloads reports, for testing purposes, the number of times [Download] has +// been called. +func Downloads() int64 { + return atomic.LoadInt64(&downloads) +} + // Download fetches the requested telemetry UploadConfig using "go mod // download". If envOverlay is provided, it is appended to the environment used // for invoking the go command. // // The second result is the canonical version of the requested configuration. func Download(version string, envOverlay []string) (*telemetry.UploadConfig, string, error) { + atomic.AddInt64(&downloads, 1) + if version == "" { version = "latest" } diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go index f475f7eec2dfce..612f7563a74c9f 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go @@ -21,12 +21,12 @@ import ( "golang.org/x/telemetry/internal/counter" ) -// Supported reports whether the runtime supports [runtime.SetCrashOutput]. +// Supported reports whether the runtime supports [runtime/debug.SetCrashOutput]. // // TODO(adonovan): eliminate once go1.23+ is assured. func Supported() bool { return setCrashOutput != nil } -var setCrashOutput func(*os.File) error // = runtime.SetCrashOutput on go1.23+ +var setCrashOutput func(*os.File) error // = runtime/debug.SetCrashOutput on go1.23+ // Parent sets up the parent side of the crashmonitor. It requires // exclusive use of a writable pipe connected to the child process's stdin. diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go index eba13b1a573560..e9c8dc207126a1 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go @@ -112,9 +112,24 @@ func newUploader(rcfg RunConfig) (*uploader, error) { logger := log.New(logWriter, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) // Fetch the upload config, if it is not provided. 
- config, configVersion, err := configstore.Download("latest", rcfg.Env) - if err != nil { - return nil, err + var ( + config *telemetry.UploadConfig + configVersion string + ) + + if mode, _ := dir.Mode(); mode == "on" { + // golang/go#68946: only download the upload config if it will be used. + // + // TODO(rfindley): This is a narrow change aimed at minimally fixing the + // associated bug. In the future, we should read the mode only once during + // the upload process. + config, configVersion, err = configstore.Download("latest", rcfg.Env) + if err != nil { + return nil, err + } + } else { + config = &telemetry.UploadConfig{} + configVersion = "v0.0.0-0" } // Set the start time, if it is not provided. diff --git a/src/cmd/vendor/golang.org/x/telemetry/start.go b/src/cmd/vendor/golang.org/x/telemetry/start.go index 4b37a5c3945cd5..69ebcc71359405 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/start.go +++ b/src/cmd/vendor/golang.org/x/telemetry/start.go @@ -206,7 +206,8 @@ func startChild(reportCrashes, upload bool, result *StartResult) { fd, err := os.Stat(telemetry.Default.DebugDir()) if err != nil { if !os.IsNotExist(err) { - log.Fatalf("failed to stat debug directory: %v", err) + log.Printf("failed to stat debug directory: %v", err) + return } } else if fd.IsDir() { // local/debug exists and is a directory. Set stderr to a log file path @@ -214,23 +215,31 @@ func startChild(reportCrashes, upload bool, result *StartResult) { childLogPath := filepath.Join(telemetry.Default.DebugDir(), "sidecar.log") childLog, err := os.OpenFile(childLogPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) if err != nil { - log.Fatalf("opening sidecar log file for child: %v", err) + log.Printf("opening sidecar log file for child: %v", err) + return } defer childLog.Close() cmd.Stderr = childLog } + var crashOutputFile *os.File if reportCrashes { pipe, err := cmd.StdinPipe() if err != nil { - log.Fatalf("StdinPipe: %v", err) + log.Printf("StdinPipe: %v", err) + return } - crashmonitor.Parent(pipe.(*os.File)) // (this conversion is safe) + crashOutputFile = pipe.(*os.File) // (this conversion is safe) } if err := cmd.Start(); err != nil { - log.Fatalf("can't start telemetry child process: %v", err) + // The child couldn't be started. Log the failure. + log.Printf("can't start telemetry child process: %v", err) + return + } + if reportCrashes { + crashmonitor.Parent(crashOutputFile) } result.wg.Add(1) go func() { diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index bf9c1341b94f73..22d40b9e4c1385 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -45,7 +45,7 @@ golang.org/x/sync/semaphore golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 +# golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 ## explicit; go 1.20 golang.org/x/telemetry golang.org/x/telemetry/counter diff --git a/src/context/context.go b/src/context/context.go index 763d4f777ffb86..73739bc90ea79f 100644 --- a/src/context/context.go +++ b/src/context/context.go @@ -59,6 +59,7 @@ import ( "sync" "sync/atomic" "time" + _ "unsafe" // for go:linkname ) // A Context carries a deadline, a cancellation signal, and other values across @@ -361,6 +362,7 @@ type stopCtx struct { var goroutines atomic.Int32 // &cancelCtxKey is the key that a cancelCtx returns itself for. +//go:linkname cancelCtxKey var cancelCtxKey int // parentCancelCtx returns the underlying *cancelCtx for parent. 
@@ -477,17 +479,7 @@ func (c *cancelCtx) propagateCancel(parent Context, child canceler) {
 
 	if p, ok := parentCancelCtx(parent); ok {
 		// parent is a *cancelCtx, or derives from one.
-		p.mu.Lock()
-		if p.err != nil {
-			// parent has already been canceled
-			child.cancel(false, p.err, p.cause)
-		} else {
-			if p.children == nil {
-				p.children = make(map[canceler]struct{})
-			}
-			p.children[child] = struct{}{}
-		}
-		p.mu.Unlock()
+		p.addChild(child)
 		return
 	}
 
@@ -515,6 +507,22 @@ func (c *cancelCtx) propagateCancel(parent Context, child canceler) {
 	}()
 }
 
+// addChild adds child to the list of children.
+// NB: CockroachDB runtime patch.
+func (c *cancelCtx) addChild(child canceler) {
+	c.mu.Lock()
+	if c.err != nil {
+		// parent has already been canceled
+		child.cancel(false, c.err, c.cause)
+	} else {
+		if c.children == nil {
+			c.children = make(map[canceler]struct{})
+		}
+		c.children[child] = struct{}{}
+	}
+	c.mu.Unlock()
+}
+
 type stringer interface {
 	String() string
 }
@@ -790,3 +798,33 @@ func value(c Context, key any) any {
 		}
 	}
 }
+
+// CockroachDB runtime patch.
+// cancelerAdapter invokes f when the cancel context completes.
+type cancelerAdapter struct {
+	*cancelCtx
+	f func()
+}
+
+func (c *cancelerAdapter) cancel(removeFromParent bool, err, cause error) {
+	if removeFromParent {
+		removeChild(c.cancelCtx, c)
+	}
+	c.f()
+}
+
+// PropagateCancel arranges for f to be invoked when parent is done.
+// Parent must be one of the cancelable contexts.
+// Returns true if cancellation will be propagated, false if the parent
+// is not cancelable.
+// This is similar to AfterFunc(), but does not spin up a goroutine, and
+// instead invokes f on whatever goroutine completes the parent context.
+func PropagateCancel(parent Context, f func()) bool {
+	p, ok := parent.Value(&cancelCtxKey).(*cancelCtx)
+	if !ok {
+		return false
+	}
+	a := cancelerAdapter{cancelCtx: p, f: f}
+	p.addChild(&a)
+	return true
+}
diff --git a/src/crypto/md5/md5.go b/src/crypto/md5/md5.go
index 843678702bf93f..979b4533221858 100644
--- a/src/crypto/md5/md5.go
+++ b/src/crypto/md5/md5.go
@@ -27,6 +27,10 @@ const Size = 16
 
 // The blocksize of MD5 in bytes.
 const BlockSize = 64
 
+// The maximum number of bytes that can be passed to block.
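+// Larger writes are split into chunks of at most maxAsmSize bytes before
+// being handed to the assembly implementation.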
+const maxAsmIters = 1024 +const maxAsmSize = BlockSize * maxAsmIters // 64KiB + const ( init0 = 0x67452301 init1 = 0xEFCDAB89 @@ -130,6 +134,11 @@ func (d *digest) Write(p []byte) (nn int, err error) { if len(p) >= BlockSize { n := len(p) &^ (BlockSize - 1) if haveAsm { + for n > maxAsmSize { + block(d, p[:maxAsmSize]) + p = p[maxAsmSize:] + n -= maxAsmSize + } block(d, p[:n]) } else { blockGeneric(d, p[:n]) diff --git a/src/crypto/md5/md5_test.go b/src/crypto/md5/md5_test.go index a5b661126dd716..5285a13724d23d 100644 --- a/src/crypto/md5/md5_test.go +++ b/src/crypto/md5/md5_test.go @@ -121,10 +121,11 @@ func TestGoldenMarshal(t *testing.T) { func TestLarge(t *testing.T) { const N = 10000 + const offsets = 4 ok := "2bb571599a4180e1d542f76904adc3df" // md5sum of "0123456789" * 1000 - block := make([]byte, 10004) + block := make([]byte, N+offsets) c := New() - for offset := 0; offset < 4; offset++ { + for offset := 0; offset < offsets; offset++ { for i := 0; i < N; i++ { block[offset+i] = '0' + byte(i%10) } @@ -143,6 +144,32 @@ func TestLarge(t *testing.T) { } } +func TestExtraLarge(t *testing.T) { + const N = 100000 + const offsets = 4 + ok := "13572e9e296cff52b79c52148313c3a5" // md5sum of "0123456789" * 10000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("md5 TestExtraLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + + // Tests that blockGeneric (pure Go) and block (in assembly for amd64, 386, arm) match. func TestBlockGeneric(t *testing.T) { gen, asm := New().(*digest), New().(*digest) diff --git a/src/crypto/sha256/sha256.go b/src/crypto/sha256/sha256.go index 68244fd63b0c1e..a2f669fa9ccb96 100644 --- a/src/crypto/sha256/sha256.go +++ b/src/crypto/sha256/sha256.go @@ -28,6 +28,10 @@ const Size224 = 28 // The blocksize of SHA256 and SHA224 in bytes. const BlockSize = 64 +// The maximum number of bytes that can be passed to block. 
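+// Larger writes are split into chunks of at most maxAsmSize bytes before
+// being handed to the assembly implementation.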
+const maxAsmIters = 1024 +const maxAsmSize = BlockSize * maxAsmIters // 64KiB + const ( chunk = 64 init0 = 0x6A09E667 @@ -186,6 +190,11 @@ func (d *digest) Write(p []byte) (nn int, err error) { } if len(p) >= chunk { n := len(p) &^ (chunk - 1) + for n > maxAsmSize { + block(d, p[:maxAsmSize]) + p = p[maxAsmSize:] + n -= maxAsmSize + } block(d, p[:n]) p = p[n:] } diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go index d91f01e9ba3a5f..f5dd4025d25ecf 100644 --- a/src/crypto/sha256/sha256_test.go +++ b/src/crypto/sha256/sha256_test.go @@ -184,6 +184,58 @@ func TestGoldenMarshal(t *testing.T) { } } + + +func TestLarge(t *testing.T) { + const N = 10000 + const offsets = 4 + ok := "4c207598af7a20db0e3334dd044399a40e467cb81b37f7ba05a4f76dcbd8fd59" // sha256sum of "0123456789" * 1000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("sha256 TestLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + +func TestExtraLarge(t *testing.T) { + const N = 100000 + const offsets = 4 + ok := "aca9e593cc629cbaa94cd5a07dc029424aad93e5129e5d11f8dcd2f139c16cc0" // sha256sum of "0123456789" * 10000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("sha256 TestExtraLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + func TestMarshalTypeMismatch(t *testing.T) { h1 := New() h2 := New224() diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go index 501f9c6755f9e3..3c87916bcf0bb7 100644 --- a/src/crypto/tls/handshake_client_test.go +++ b/src/crypto/tls/handshake_client_test.go @@ -852,6 +852,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } issuer, err := x509.ParseCertificate(testRSACertificateIssuer) @@ -868,6 +869,7 @@ func testResumption(t *testing.T, version uint16) { ClientSessionCache: NewLRUClientSessionCache(32), RootCAs: rootCAs, ServerName: "example.golang", + Time: testTime, } testResumeState := func(test string, didResume bool) { @@ -914,7 +916,7 @@ func testResumption(t *testing.T, version uint16) { // An old session ticket is replaced with a ticket encrypted with a fresh key. ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("ResumeWithOldTicket", true) if bytes.Equal(ticket, getTicket()) { t.Fatal("old first ticket matches the fresh one") @@ -922,13 +924,13 @@ func testResumption(t *testing.T, version uint16) { // Once the session master secret is expired, a full handshake should occur. 
ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + time.Minute) } testResumeState("ResumeWithExpiredTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("expired first ticket matches the fresh one") } - serverConfig.Time = func() time.Time { return time.Now() } // reset the time back + serverConfig.Time = testTime // reset the time back key1 := randomKey() serverConfig.SetSessionTicketKeys([][32]byte{key1}) @@ -945,11 +947,11 @@ func testResumption(t *testing.T, version uint16) { testResumeState("KeyChangeFinish", true) // Age the session ticket a bit, but not yet expired. - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("OldSessionTicket", true) ticket = getTicket() // Expire the session ticket, which would force a full handshake. - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + 2*time.Minute) } testResumeState("ExpiredSessionTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("new ticket wasn't provided after old ticket expired") @@ -957,7 +959,7 @@ func testResumption(t *testing.T, version uint16) { // Age the session ticket a bit at a time, but don't expire it. d := 0 * time.Hour - serverConfig.Time = func() time.Time { return time.Now().Add(d) } + serverConfig.Time = func() time.Time { return testTime().Add(d) } deleteTicket() testResumeState("GetFreshSessionTicket", false) for i := 0; i < 13; i++ { @@ -968,7 +970,7 @@ func testResumption(t *testing.T, version uint16) { // handshake occurs for TLS 1.2. Resumption should still occur for // TLS 1.3 since the client should be using a fresh ticket sent over // by the server. 
- d += 12 * time.Hour + d += 12*time.Hour + time.Minute if version == VersionTLS13 { testResumeState("ExpiredSessionTicket", true) } else { @@ -984,6 +986,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } serverConfig.SetSessionTicketKeys([][32]byte{key2}) @@ -1009,6 +1012,7 @@ func testResumption(t *testing.T, version uint16) { CurvePreferences: []CurveID{CurveP521, CurveP384, CurveP256}, MaxVersion: version, Certificates: testConfig.Certificates, + Time: testTime, } testResumeState("InitialHandshake", false) testResumeState("WithHelloRetryRequest", true) @@ -1018,6 +1022,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } } @@ -1736,6 +1741,7 @@ func testVerifyConnection(t *testing.T, version uint16) { serverConfig := &Config{ MaxVersion: version, Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, ClientCAs: rootCAs, NextProtos: []string{"protocol1"}, } @@ -1749,6 +1755,7 @@ func testVerifyConnection(t *testing.T, version uint16) { RootCAs: rootCAs, ServerName: "example.golang", Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, NextProtos: []string{"protocol1"}, } test.configureClient(clientConfig, &clientCalled) @@ -1791,8 +1798,6 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - sentinelErr := errors.New("TestVerifyPeerCertificate") verifyPeerCertificateCallback := func(called *bool, rawCerts [][]byte, validatedChains [][]*x509.Certificate) error { @@ -2038,7 +2043,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config.ServerName = "example.golang" config.ClientAuth = RequireAndVerifyClientCert config.ClientCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version config.Certificates = make([]Certificate, 1) config.Certificates[0].Certificate = [][]byte{testRSACertificate} @@ -2055,7 +2060,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config := testConfig.Clone() config.ServerName = "example.golang" config.RootCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version test.configureClient(config, &clientCalled) clientErr := Client(c, config).Handshake() @@ -2368,7 +2373,7 @@ func testGetClientCertificate(t *testing.T, version uint16) { serverConfig.RootCAs = x509.NewCertPool() serverConfig.RootCAs.AddCert(issuer) serverConfig.ClientCAs = serverConfig.RootCAs - serverConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + serverConfig.Time = testTime serverConfig.MaxVersion = version clientConfig := testConfig.Clone() @@ -2539,6 +2544,7 @@ func testResumptionKeepsOCSPAndSCT(t *testing.T, ver uint16) { ClientSessionCache: NewLRUClientSessionCache(32), ServerName: "example.golang", RootCAs: roots, + Time: testTime, } serverConfig := testConfig.Clone() serverConfig.MaxVersion = ver diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 94d3d0f6dc87bc..bbfe44bd97daa2 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -501,6 +501,7 @@ func testCrossVersionResume(t *testing.T, 
version uint16) { serverConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } clientConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, @@ -508,6 +509,7 @@ func testCrossVersionResume(t *testing.T, version uint16) { ClientSessionCache: NewLRUClientSessionCache(1), ServerName: "servername", MinVersion: VersionTLS12, + Time: testTime, } // Establish a session at TLS 1.3. diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index bc3d23d5adc24e..803aa736578f8c 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -491,9 +491,10 @@ func testHandshake(t *testing.T, clientConfig, serverConfig *Config) (serverStat if got := string(buf); got != sentinel { t.Errorf("read %q from TLS connection, but expected %q", got, sentinel) } - if err := cli.Close(); err != nil { - t.Errorf("failed to call cli.Close: %v", err) - } + // We discard the error because after ReadAll returns the server must + // have already closed the connection. Sending data (the closeNotify + // alert) can cause a reset, that will make Close return an error. + cli.Close() }() server := Server(s, serverConfig) err = server.Handshake() @@ -518,6 +519,11 @@ func fromHex(s string) []byte { return b } +// testTime is 2016-10-20T17:32:09.000Z, which is within the validity period of +// [testRSACertificate], [testRSACertificateIssuer], [testRSA2048Certificate], +// [testRSA2048CertificateIssuer], and [testECDSACertificate]. +var testTime = func() time.Time { return time.Unix(1476984729, 0) } + var testRSACertificate = fromHex("3082024b308201b4a003020102020900e8f09d3fe25beaa6300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301a310b3009060355040a1302476f310b300906035504031302476f30819f300d06092a864886f70d010101050003818d0030818902818100db467d932e12270648bc062821ab7ec4b6a25dfe1e5245887a3647a5080d92425bc281c0be97799840fb4f6d14fd2b138bc2a52e67d8d4099ed62238b74a0b74732bc234f1d193e596d9747bf3589f6c613cc0b041d4d92b2b2423775b1c3bbd755dce2054cfa163871d1e24c4f31d1a508baab61443ed97a77562f414c852d70203010001a38193308190300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff0402300030190603551d0e041204109f91161f43433e49a6de6db680d79f60301b0603551d230414301280104813494d137e1631bba301d5acab6e7b30190603551d1104123010820e6578616d706c652e676f6c616e67300d06092a864886f70d01010b0500038181009d30cc402b5b50a061cbbae55358e1ed8328a9581aa938a495a1ac315a1a84663d43d32dd90bf297dfd320643892243a00bccf9c7db74020015faad3166109a276fd13c3cce10c5ceeb18782f16c04ed73bbb343778d0c1cf10fa1d8408361c94c722b9daedb4606064df4c1b33ec0d1bd42d4dbfe3d1360845c21d33be9fae7") var testRSACertificateIssuer = 
fromHex("3082021930820182a003020102020900ca5e4e811a965964300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f7430819f300d06092a864886f70d010101050003818d0030818902818100d667b378bb22f34143b6cd2008236abefaf2852adf3ab05e01329e2c14834f5105df3f3073f99dab5442d45ee5f8f57b0111c8cb682fbb719a86944eebfffef3406206d898b8c1b1887797c9c5006547bb8f00e694b7a063f10839f269f2c34fff7a1f4b21fbcd6bfdfb13ac792d1d11f277b5c5b48600992203059f2a8f8cc50203010001a35d305b300e0603551d0f0101ff040403020204301d0603551d250416301406082b0601050507030106082b06010505070302300f0603551d130101ff040530030101ff30190603551d0e041204104813494d137e1631bba301d5acab6e7b300d06092a864886f70d01010b050003818100c1154b4bab5266221f293766ae4138899bd4c5e36b13cee670ceeaa4cbdf4f6679017e2fe649765af545749fe4249418a56bd38a04b81e261f5ce86b8d5c65413156a50d12449554748c59a30c515bc36a59d38bddf51173e899820b282e40aa78c806526fd184fb6b4cf186ec728edffa585440d2b3225325f7ab580e87dd76") diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index fc5040635fbbf7..13c5ddced2cddb 100644 --- a/src/crypto/tls/tls_test.go +++ b/src/crypto/tls/tls_test.go @@ -1112,8 +1112,6 @@ func TestConnectionState(t *testing.T) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - const alpnProtocol = "golang" const serverName = "example.golang" var scts = [][]byte{[]byte("dummy sct 1"), []byte("dummy sct 2")} @@ -1129,7 +1127,7 @@ func TestConnectionState(t *testing.T) { } t.Run(name, func(t *testing.T) { config := &Config{ - Time: now, + Time: testTime, Rand: zeroSource{}, Certificates: make([]Certificate, 1), MaxVersion: v, @@ -1760,7 +1758,7 @@ func testVerifyCertificates(t *testing.T, version uint16) { var serverVerifyPeerCertificates, clientVerifyPeerCertificates bool clientConfig := testConfig.Clone() - clientConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + clientConfig.Time = testTime clientConfig.MaxVersion = version clientConfig.MinVersion = version clientConfig.RootCAs = rootCAs diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index de774a051093df..c247a9b506bfab 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -1368,8 +1368,8 @@ func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn db.waitDuration.Add(int64(time.Since(waitStart))) - // If we failed to delete it, that means something else - // grabbed it and is about to send on it. + // If we failed to delete it, that means either the DB was closed or + // something else grabbed it and is about to send on it. if !deleted { // TODO(bradfitz): rather than this best effort select, we // should probably start a goroutine to read from req. This best @@ -3594,6 +3594,7 @@ type connRequestAndIndex struct { // and clears the set. 
func (s *connRequestSet) CloseAndRemoveAll() { for _, v := range s.s { + *v.curIdx = -1 close(v.req) } s.s = nil diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index ff65e877a5af6b..110a2bae5bd247 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -4920,6 +4920,17 @@ func TestConnRequestSet(t *testing.T) { t.Error("wasn't random") } }) + t.Run("close-delete", func(t *testing.T) { + reset() + ch := make(chan connRequest) + dh := s.Add(ch) + wantLen(1) + s.CloseAndRemoveAll() + wantLen(0) + if s.Delete(dh) { + t.Error("unexpected delete after CloseAndRemoveAll") + } + }) } func BenchmarkConnRequestSet(b *testing.B) { diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go index d178b2b2fb6467..26b5f6d62b631e 100644 --- a/src/encoding/gob/decode.go +++ b/src/encoding/gob/decode.go @@ -911,8 +911,11 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg var maxIgnoreNestingDepth = 10000 // decIgnoreOpFor returns the decoding op for a field that has no destination. -func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp { - if depth > maxIgnoreNestingDepth { +func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp { + // Track how deep we've recursed trying to skip nested ignored fields. + dec.ignoreDepth++ + defer func() { dec.ignoreDepth-- }() + if dec.ignoreDepth > maxIgnoreNestingDepth { error_(errors.New("invalid nesting depth")) } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). @@ -938,7 +941,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, errorf("bad data: undefined type %s", wireId.string()) case wire.ArrayT != nil: elemId := wire.ArrayT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len) } @@ -946,15 +949,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, case wire.MapT != nil: keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1) - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + keyOp := dec.decIgnoreOpFor(keyId, inProgress) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreMap(state, *keyOp, *elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreSlice(state, *elemOp) } @@ -1115,7 +1118,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine { engine := new(decEngine) engine.instr = make([]decInstr, 1) // one item - op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp)) ovfl := overflow(dec.typeString(remoteId)) engine.instr[0] = decInstr{*op, 0, nil, ovfl} engine.numInstr = 1 @@ -1160,7 +1163,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn localField, present := srt.FieldByName(wireField.Name) // TODO(r): anonymous names if !present || 
!isExported(wireField.Name) { - op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp)) engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl} continue } diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go index c4b60880130787..eae307838e201e 100644 --- a/src/encoding/gob/decoder.go +++ b/src/encoding/gob/decoder.go @@ -35,6 +35,8 @@ type Decoder struct { freeList *decoderState // list of free decoderStates; avoids reallocation countBuf []byte // used for decoding integers while parsing messages err error + // ignoreDepth tracks the depth of recursively parsed ignored fields + ignoreDepth int } // NewDecoder returns a new decoder that reads from the [io.Reader]. diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go index ae806fc39a21fc..d30e622aa2cbe7 100644 --- a/src/encoding/gob/gobencdec_test.go +++ b/src/encoding/gob/gobencdec_test.go @@ -806,6 +806,8 @@ func TestIgnoreDepthLimit(t *testing.T) { defer func() { maxIgnoreNestingDepth = oldNestingDepth }() b := new(bytes.Buffer) enc := NewEncoder(b) + + // Nested slice typ := reflect.TypeFor[int]() nested := reflect.ArrayOf(1, typ) for i := 0; i < 100; i++ { @@ -819,4 +821,16 @@ func TestIgnoreDepthLimit(t *testing.T) { if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) } + + // Nested struct + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: typ}}) + for i := 0; i < 100; i++ { + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}) + } + badStruct = reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}})) + enc.Encode(badStruct.Interface()) + dec = NewDecoder(b) + if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { + t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) + } } diff --git a/src/go/build/constraint/expr.go b/src/go/build/constraint/expr.go index e59012361bef6d..0f05f8db6a48cb 100644 --- a/src/go/build/constraint/expr.go +++ b/src/go/build/constraint/expr.go @@ -16,6 +16,10 @@ import ( "unicode/utf8" ) +// maxSize is a limit used to control the complexity of expressions, in order +// to prevent stack exhaustion issues due to recursion. +const maxSize = 1000 + // An Expr is a build tag constraint expression. // The underlying concrete type is *[AndExpr], *[OrExpr], *[NotExpr], or *[TagExpr]. type Expr interface { @@ -151,7 +155,7 @@ func Parse(line string) (Expr, error) { return parseExpr(text) } if text, ok := splitPlusBuild(line); ok { - return parsePlusBuildExpr(text), nil + return parsePlusBuildExpr(text) } return nil, errNotConstraint } @@ -201,6 +205,8 @@ type exprParser struct { tok string // last token read isTag bool pos int // position (start) of last token + + size int } // parseExpr parses a boolean build tag expression. @@ -249,6 +255,10 @@ func (p *exprParser) and() Expr { // On entry, the next input token has not yet been lexed. // On exit, the next input token has been lexed and is in p.tok. func (p *exprParser) not() Expr { + p.size++ + if p.size > maxSize { + panic(&SyntaxError{Offset: p.pos, Err: "build expression too large"}) + } p.lex() if p.tok == "!" { p.lex() @@ -388,7 +398,13 @@ func splitPlusBuild(line string) (expr string, ok bool) { } // parsePlusBuildExpr parses a legacy build tag expression (as used with “// +build”). 
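Both limits introduced above surface through the package's public Parse entry point, which the new tests below exercise. A quick sketch (the exact error text for the legacy path comes from the package's unexported errComplex, so it is printed rather than matched):

    package main

    import (
        "fmt"
        "strings"

        "go/build/constraint"
    )

    func main() {
        // New //go:build syntax: more than maxSize (1000) operators now
        // produces a *SyntaxError saying "build expression too large"
        // instead of recursing without bound.
        huge := "//go:build " + strings.Repeat("a && ", 1001) + "a"
        if _, err := constraint.Parse(huge); err != nil {
            fmt.Println(err)
        }

        // Legacy // +build syntax: parsePlusBuildExpr (changed below) now
        // returns an error after 100 AND/OR operators.
        old := "// +build " + strings.Repeat("a,", 101) + "a"
        if _, err := constraint.Parse(old); err != nil {
            fmt.Println(err)
        }
    }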
-func parsePlusBuildExpr(text string) Expr { +func parsePlusBuildExpr(text string) (Expr, error) { + // Only allow up to 100 AND/OR operators for "old" syntax. + // This is much less than the limit for "new" syntax, + // but uses of old syntax were always very simple. + const maxOldSize = 100 + size := 0 + var x Expr for _, clause := range strings.Fields(text) { var y Expr @@ -414,19 +430,25 @@ func parsePlusBuildExpr(text string) Expr { if y == nil { y = z } else { + if size++; size > maxOldSize { + return nil, errComplex + } y = and(y, z) } } if x == nil { x = y } else { + if size++; size > maxOldSize { + return nil, errComplex + } x = or(x, y) } } if x == nil { x = tag("ignore") } - return x + return x, nil } // isValidTag reports whether the word is a valid build tag. diff --git a/src/go/build/constraint/expr_test.go b/src/go/build/constraint/expr_test.go index 15d189012efb7d..ac38ba69294930 100644 --- a/src/go/build/constraint/expr_test.go +++ b/src/go/build/constraint/expr_test.go @@ -222,7 +222,7 @@ var parsePlusBuildExprTests = []struct { func TestParsePlusBuildExpr(t *testing.T) { for i, tt := range parsePlusBuildExprTests { t.Run(fmt.Sprint(i), func(t *testing.T) { - x := parsePlusBuildExpr(tt.in) + x, _ := parsePlusBuildExpr(tt.in) if x.String() != tt.x.String() { t.Errorf("parsePlusBuildExpr(%q):\nhave %v\nwant %v", tt.in, x, tt.x) } @@ -319,3 +319,66 @@ func TestPlusBuildLines(t *testing.T) { }) } } + +func TestSizeLimits(t *testing.T) { + for _, tc := range []struct { + name string + expr string + }{ + { + name: "go:build or limit", + expr: "//go:build " + strings.Repeat("a || ", maxSize+2), + }, + { + name: "go:build and limit", + expr: "//go:build " + strings.Repeat("a && ", maxSize+2), + }, + { + name: "go:build and depth limit", + expr: "//go:build " + strings.Repeat("(a &&", maxSize+2), + }, + { + name: "go:build or depth limit", + expr: "//go:build " + strings.Repeat("(a ||", maxSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if syntaxErr, ok := err.(*SyntaxError); !ok || syntaxErr.Err != "build expression too large" { + if !ok { + t.Errorf("unexpected error: %v", err) + } else { + t.Errorf("unexpected syntax error: %s", syntaxErr.Err) + } + } + }) + } +} + +func TestPlusSizeLimits(t *testing.T) { + maxOldSize := 100 + for _, tc := range []struct { + name string + expr string + }{ + { + name: "+build or limit", + expr: "// +build " + strings.Repeat("a ", maxOldSize+2), + }, + { + name: "+build and limit", + expr: "// +build " + strings.Repeat("a,", maxOldSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if err != errComplex { + t.Errorf("unexpected error: got %q, want %q", err, errComplex) + } + }) + } +} diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index 17808b366f092d..f268dea1a6f9cd 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -1676,6 +1676,8 @@ func (p *parser) parseElementList() (list []ast.Expr) { } func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "LiteralValue")) } diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index eea743c2b5b261..2c33e9ef314ad3 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -598,10 +598,11 @@ var parseDepthTests = []struct { {name: "chan2", format: 
"package main; var x «<-chan »int"}, {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType {name: "map", format: "package main; var x «map[int]»int"}, - {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "slicelit", format: "package main; var x = []any{«[]any{«»}»}", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 3}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "element", format: "package main; var x = struct{x any}{x: «{«»}»}"}, {name: "dot", format: "package main; var x = «x.»x"}, {name: "index", format: "package main; var x = x«[1]»"}, {name: "slice", format: "package main; var x = x«[1:2]»"}, diff --git a/src/go/types/alias.go b/src/go/types/alias.go index af43471a324176..7adb3deb58bbc7 100644 --- a/src/go/types/alias.go +++ b/src/go/types/alias.go @@ -137,10 +137,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { // newAliasInstance creates a new alias instance for the given origin and type // arguments, recording pos as the position of its synthetic object (for error // reporting). 
-func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index beed94f3557996..a7aa6488028ecd 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2904,22 +2904,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", "go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // (file versions containing release numbers are considered invalid) 
{"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/go/types/check.go b/src/go/types/check.go index 1a5a41a3bb4b99..8a729094961fe2 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -349,7 +349,6 @@ func (check *Checker) initFiles(files []*ast.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -358,33 +357,19 @@ func (check *Checker) initFiles(files []*ast.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. - // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). + if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of setting the go.mod + // go line to an early version of Go and allowing //go:build lines + // to set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. + // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. 
+ v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -399,6 +384,13 @@ func (check *Checker) initFiles(files []*ast.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) < 0 { + return b + } + return a +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go index 7bec790b5586ad..0435f2bf261647 100644 --- a/src/go/types/instantiate.go +++ b/src/go/types/instantiate.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "go/token" + "internal/buildcfg" . "internal/types/errors" ) @@ -129,8 +130,9 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? - assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -141,7 +143,7 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go index 3f459d3883017e..da0c0c1255b63e 100644 --- a/src/go/types/issues_test.go +++ b/src/go/types/issues_test.go @@ -1131,3 +1131,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + t.Setenv("GODEBUG", "gotypesalias=1") + pkg := mustTypecheck(src, nil, nil) + T := pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/go/types/named.go b/src/go/types/named.go index b44fa9d788c345..d55b023812d108 100644 --- a/src/go/types/named.go +++ b/src/go/types/named.go @@ -285,7 +285,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil diff --git a/src/go/types/subst.go b/src/go/types/subst.go index 5ad2ff61eb1d30..6be106d3aa99d6 100644 --- a/src/go/types/subst.go +++ b/src/go/types/subst.go @@ -118,7 +118,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. 
targs, updated := subst.typeList(t.TypeArgs().list()) if updated { - return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt) + return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt) } case *Array: diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index d280bf2f5ff5cf..a1d7e6cc994e48 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -134,8 +134,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool { } for _, t := range s.terms { assert(t.typ != nil) - // x == under(x) for ~x terms - u := t.typ + // Unalias(x) == under(x) for ~x terms + u := Unalias(t.typ) if !t.tilde { u = under(u) } diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 786bafff723c96..b8eefe0da8dbba 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -177,6 +177,15 @@ func TypeOf(a any) *Type { return (*Type)(NoEscape(unsafe.Pointer(eface.Type))) } +// TypeFor returns the abi.Type for a type parameter. +func TypeFor[T any]() *Type { + var v T + if t := TypeOf(v); t != nil { + return t // optimize for T being a non-interface kind + } + return TypeOf((*T)(nil)).Elem() // only for an interface kind +} + func (t *Type) Kind() Kind { return t.Kind_ & KindMask } func (t *Type) HasName() bool { diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 669df94cc12e0d..d1023d4ebb9938 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,28 +32,46 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if int64(n) > remain { n = int(remain) } + m := n pos1 := pos n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) + // (n, nil) indicates that sendfile(2) has transferred + // the exact number of bytes we requested, or some unretryable + // error has occurred with partial bytes sent. Either way, we + // don't need to go through the following logic to check EINTR + // or fall into dstFD.pd.waitWrite; just continue to send the + // next chunk or break the loop. + if n == m { + continue + } else if err != syscall.EAGAIN && + err != syscall.EINTR && + err != syscall.EBUSY { + // Particularly, EPIPE. Errors like that would normally lead + // the subsequent sendfile(2) call to return (-1, EBADF). + break + } + } else if err != syscall.EAGAIN && err != syscall.EINTR { + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + break } if err == syscall.EINTR { continue } - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile), and other errors. - // We should end the loop when there is no error - // returned from sendfile(2) or it is not a retryable error.
- if err != syscall.EAGAIN { - break - } if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } - handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + if err == syscall.EAGAIN { + err = nil + } + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP) return } diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index d1c4d5c0d3d34d..1c4130d45da89c 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index ec675833a225dc..b7c3f81a1efdcd 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go index 08365a7cfb564d..e040d396c7808b 100644 --- a/src/internal/types/testdata/check/go1_20_19.go +++ b/src/internal/types/testdata/check/go1_20_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */) +var p = (Array)(s /* ok because file versions below go1.21 set the language version to go1.21 */) diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go index 2acd25865d4b69..5866033eafe6f8 100644 --- a/src/internal/types/testdata/check/go1_21_19.go +++ b/src/internal/types/testdata/check/go1_21_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ERROR "requires go1.20 or later" */) +var p = (Array)(s /* ok because file versions below go1.21 set the language version to go1.21 */) diff --git a/src/internal/types/testdata/check/go1_21_22.go b/src/internal/types/testdata/check/go1_21_22.go new file mode 100644 index 00000000000000..3939b7b1d868c0 --- /dev/null +++ b/src/internal/types/testdata/check/go1_21_22.go @@ -0,0 +1,16 @@ +// -lang=go1.21 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +//go:build go1.22 + +package p + +func f() { + for _ = range /* ok because of upgrade to 1.22 */ 10 { + } +} diff --git a/src/internal/types/testdata/check/go1_22_21.go b/src/internal/types/testdata/check/go1_22_21.go new file mode 100644 index 00000000000000..f910ecb59cbc78 --- /dev/null +++ b/src/internal/types/testdata/check/go1_22_21.go @@ -0,0 +1,16 @@ +// -lang=go1.22 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors.
+ +//go:build go1.21 + +package p + +func f() { + for _ = range 10 /* ERROR "requires go1.22 or later" */ { + } +} diff --git a/src/internal/types/testdata/fixedbugs/issue66285.go b/src/internal/types/testdata/fixedbugs/issue66285.go index 9811fec3f35549..4af76f05da8e41 100644 --- a/src/internal/types/testdata/fixedbugs/issue66285.go +++ b/src/internal/types/testdata/fixedbugs/issue66285.go @@ -1,14 +1,9 @@ -// -lang=go1.21 +// -lang=go1.13 // Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Note: Downgrading to go1.13 requires at least go1.21, -// hence the need for -lang=go1.21 at the top. - -//go:build go1.13 - package p import "io" diff --git a/src/internal/types/testdata/fixedbugs/issue68903.go b/src/internal/types/testdata/fixedbugs/issue68903.go new file mode 100644 index 00000000000000..b1369aa0f6faa7 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68903.go @@ -0,0 +1,24 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = [4]int +type B = map[string]interface{} + +func _[T ~A](x T) { + _ = len(x) +} + +func _[U ~A](x U) { + _ = cap(x) +} + +func _[V ~A]() { + _ = V{} +} + +func _[W ~B](a interface{}) { + _ = a.(W)["key"] +} diff --git a/src/internal/types/testdata/fixedbugs/issue68935.go b/src/internal/types/testdata/fixedbugs/issue68935.go new file mode 100644 index 00000000000000..2e72468f05eb0c --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68935.go @@ -0,0 +1,26 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = struct { + F string + G int +} + +func Make[T ~A]() T { + return T{ + F: "blah", + G: 1234, + } +} + +type N struct { + F string + G int +} + +func _() { + _ = Make[N]() +} diff --git a/src/internal/weak/pointer_test.go b/src/internal/weak/pointer_test.go index e143749230f0a5..5a861bb9ca39d7 100644 --- a/src/internal/weak/pointer_test.go +++ b/src/internal/weak/pointer_test.go @@ -5,9 +5,12 @@ package weak_test import ( + "context" "internal/weak" "runtime" + "sync" "testing" + "time" ) type T struct { @@ -128,3 +131,82 @@ func TestPointerFinalizer(t *testing.T) { t.Errorf("weak pointer is non-nil even after finalization: %v", wt) } } + +// Regression test for issue 69210. +// +// Weak-to-strong conversions must shade the new strong pointer, otherwise +// that might be creating the only strong pointer to a white object which +// is hidden in a blackened stack. +// +// Never fails if correct, fails with some high probability if incorrect. +func TestIssue69210(t *testing.T) { + if testing.Short() { + t.Skip("this is a stress test that takes seconds to run on its own") + } + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // What we're trying to do is manufacture the conditions under which this + // bug happens. Specifically, we want: + // + // 1. To create a whole bunch of objects that are only weakly-pointed-to, + // 2. To call Strong while the GC is in the mark phase, + // 3. The new strong pointer to be missed by the GC, + // 4. The following GC cycle to mark a free object. 
+ // + // Unfortunately, (2) and (3) are hard to control, but we can increase + // the likelihood by having several goroutines do (1) at once while + // another goroutine constantly keeps us in the GC with runtime.GC. + // Like throwing darts at a dart board until they land just right. + // We can increase the likelihood of (4) by adding some delay after + // creating the strong pointer, but only if it's non-nil. If it's nil, + // that means it was already collected in which case there's no chance + // of triggering the bug, so we want to retry as fast as possible. + // Our heap here is tiny, so the GCs will go by fast. + // + // As of 2024-09-03, removing the line that shades pointers during + // the weak-to-strong conversion causes this test to fail about 50% + // of the time. + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + runtime.GC() + + select { + case <-ctx.Done(): + return + default: + } + } + }() + for range max(runtime.GOMAXPROCS(-1)-1, 1) { + wg.Add(1) + go func() { + defer wg.Done() + for { + for range 5 { + bt := new(T) + wt := weak.Make(bt) + bt = nil + time.Sleep(1 * time.Millisecond) + bt = wt.Strong() + if bt != nil { + time.Sleep(4 * time.Millisecond) + bt.t = bt + bt.a = 12 + } + runtime.KeepAlive(bt) + } + select { + case <-ctx.Done(): + return + default: + } + } + }() + } + wg.Wait() +} diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go index 9e46c4e607d4d8..4056856f306175 100644 --- a/src/net/sendfile_unix_alt.go +++ b/src/net/sendfile_unix_alt.go @@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { if err != nil { return 0, err, false } + if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 { + return 0, nil, false + } remain = fi.Size() } diff --git a/src/net/sendfile_unix_test.go b/src/net/sendfile_unix_test.go new file mode 100644 index 00000000000000..79fb23b31010d5 --- /dev/null +++ b/src/net/sendfile_unix_test.go @@ -0,0 +1,86 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "internal/testpty" + "io" + "os" + "sync" + "syscall" + "testing" +) + +// Issue 70763: test that we don't fail on sendfile from a tty. +func TestCopyFromTTY(t *testing.T) { + pty, ttyName, err := testpty.Open() + if err != nil { + t.Skipf("skipping test because pty open failed: %v", err) + } + defer pty.Close() + + // Use syscall.Open so that the tty is blocking. 
+ ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0) + if err != nil { + t.Skipf("skipping test because tty open failed: %v", err) + } + defer syscall.Close(ttyFD) + + tty := os.NewFile(uintptr(ttyFD), "tty") + defer tty.Close() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + ch := make(chan bool) + + const data = "data\n" + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + + buf := make([]byte, len(data)) + if _, err := io.ReadFull(conn, buf); err != nil { + t.Error(err) + } + + ch <- true + }() + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + wg.Add(1) + go func() { + defer wg.Done() + if _, err := pty.Write([]byte(data)); err != nil { + t.Error(err) + } + <-ch + if err := pty.Close(); err != nil { + t.Error(err) + } + }() + + lr := io.LimitReader(tty, int64(len(data))) + if _, err := io.Copy(conn, lr); err != nil { + t.Error(err) + } +} diff --git a/src/os/copy_test.go b/src/os/copy_test.go new file mode 100644 index 00000000000000..82346ca4e57e3e --- /dev/null +++ b/src/os/copy_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "io" + "math/rand/v2" + "net" + "os" + "runtime" + "sync" + "testing" + + "golang.org/x/net/nettest" +) + +// Exercise sendfile/splice fast paths with a moderately large file. +// +// https://go.dev/issue/70000 + +func TestLargeCopyViaNetwork(t *testing.T) { + const size = 10 * 1024 * 1024 + dir := t.TempDir() + + src, err := os.Create(dir + "/src") + if err != nil { + t.Fatal(err) + } + defer src.Close() + if _, err := io.CopyN(src, newRandReader(), size); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, 0); err != nil { + t.Fatal(err) + } + + dst, err := os.Create(dir + "/dst") + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + client, server := createSocketPair(t, "tcp") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if n, err := io.Copy(dst, server); n != size || err != nil { + t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size) + } + }() + go func() { + defer wg.Done() + defer client.Close() + if n, err := io.Copy(client, src); n != size || err != nil { + t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size) + } + }() + wg.Wait() + + if _, err := dst.Seek(0, 0); err != nil { + t.Fatal(err) + } + if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil { + t.Fatal(err) + } +} + +func compareReaders(a, b io.Reader) error { + bufa := make([]byte, 4096) + bufb := make([]byte, 4096) + for { + na, erra := io.ReadFull(a, bufa) + if erra != nil && erra != io.EOF { + return erra + } + nb, errb := io.ReadFull(b, bufb) + if errb != nil && errb != io.EOF { + return errb + } + if !bytes.Equal(bufa[:na], bufb[:nb]) { + return errors.New("contents mismatch") + } + if erra == io.EOF && errb == io.EOF { + break + } + } + return nil +} + +type randReader struct { + rand *rand.Rand +} + +func newRandReader() *randReader { + return &randReader{rand.New(rand.NewPCG(0, 0))} +} + +func (r *randReader) Read(p []byte) (int, error) { + var v uint64 + var n int + for i := range p { + if n == 0 { + v = r.rand.Uint64() + n = 8 + } + p[i] = byte(v & 0xff) + v >>= 8 + n-- + } + return len(p), nil 
+} + +func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/src/os/dir.go b/src/os/dir.go index 471a29134582b3..04392193aa6b03 100644 --- a/src/os/dir.go +++ b/src/os/dir.go @@ -132,15 +132,18 @@ func ReadDir(name string) ([]DirEntry, error) { // CopyFS copies the file system fsys into the directory dir, // creating dir if necessary. // -// Newly created directories and files have their default modes -// where any bits from the file in fsys that are not part of the -// standard read, write, and execute permissions will be zeroed -// out, and standard read and write permissions are set for owner, -// group, and others while retaining any existing execute bits from -// the file in fsys. +// Files are created with mode 0o666 plus any execute permissions +// from the source, and directories are created with mode 0o777 +// (before umask). // -// Symbolic links in fsys are not supported, a *PathError with Err set -// to ErrInvalid is returned on symlink. +// CopyFS will not overwrite existing files. If a file name in fsys +// already exists in the destination, CopyFS will return an error +// such that errors.Is(err, fs.ErrExist) will be true. +// +// Symbolic links in fsys are not supported. A *PathError with Err set +// to ErrInvalid is returned when copying from a symbolic link. +// +// Symbolic links in dir are followed. // // Copying stops at and returns the first error encountered. func CopyFS(dir string, fsys fs.FS) error { @@ -174,7 +177,7 @@ func CopyFS(dir string, fsys fs.FS) error { if err != nil { return err } - w, err := OpenFile(newPath, O_CREATE|O_TRUNC|O_WRONLY, 0666|info.Mode()&0777) + w, err := OpenFile(newPath, O_CREATE|O_EXCL|O_WRONLY, 0666|info.Mode()&0777) if err != nil { return err } diff --git a/src/os/example_test.go b/src/os/example_test.go index 7437a74cd0c66d..c507d46c46303a 100644 --- a/src/os/example_test.go +++ b/src/os/example_test.go @@ -61,7 +61,7 @@ func ExampleFileMode() { log.Fatal(err) } - fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc. + fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0o400, 0o777, etc. 
switch mode := fi.Mode(); { case mode.IsRegular(): fmt.Println("regular file") diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go index cba2e151673aba..ff51247d56b72d 100644 --- a/src/os/exec_posix.go +++ b/src/os/exec_posix.go @@ -35,10 +35,11 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e } } + attrSys, shouldDupPidfd := ensurePidfd(attr.Sys) sysattr := &syscall.ProcAttr{ Dir: attr.Dir, Env: attr.Env, - Sys: ensurePidfd(attr.Sys), + Sys: attrSys, } if sysattr.Env == nil { sysattr.Env, err = execenv.Default(sysattr.Sys) @@ -63,7 +64,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e // For Windows, syscall.StartProcess above already returned a process handle. if runtime.GOOS != "windows" { var ok bool - h, ok = getPidfd(sysattr.Sys) + h, ok = getPidfd(sysattr.Sys, shouldDupPidfd) if !ok { return newPIDProcess(pid), nil } diff --git a/src/os/file.go b/src/os/file.go index c3ee31583e32f6..ad869fc4938d17 100644 --- a/src/os/file.go +++ b/src/os/file.go @@ -366,7 +366,7 @@ func Open(name string) (*File, error) { } // Create creates or truncates the named file. If the file already exists, -// it is truncated. If the file does not exist, it is created with mode 0666 +// it is truncated. If the file does not exist, it is created with mode 0o666 // (before umask). If successful, methods on the returned File can // be used for I/O; the associated file descriptor has mode O_RDWR. // If there is an error, it will be of type *PathError. @@ -602,11 +602,11 @@ func UserHomeDir() (string, error) { // On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and // ModeSticky are used. // -// On Windows, only the 0200 bit (owner writable) of mode is used; it +// On Windows, only the 0o200 bit (owner writable) of mode is used; it // controls whether the file's read-only attribute is set or cleared. // The other bits are currently unused. For compatibility with Go 1.12 -// and earlier, use a non-zero mode. Use mode 0400 for a read-only -// file and 0600 for a readable+writable file. +// and earlier, use a non-zero mode. Use mode 0o400 for a read-only +// file and 0o600 for a readable+writable file. // // On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive, // and ModeTemporary are used. diff --git a/src/os/os_test.go b/src/os/os_test.go index 878974384dbcba..f1755dfa9139f8 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -1376,8 +1376,7 @@ func TestChtimes(t *testing.T) { t.Parallel() f := newFile(t) - - f.Write([]byte("hello, world\n")) + // This should be an empty file (see #68687, #68663). f.Close() testChtimes(t, f.Name()) @@ -1395,12 +1394,9 @@ func TestChtimesOmit(t *testing.T) { func testChtimesOmit(t *testing.T, omitAt, omitMt bool) { t.Logf("omit atime: %v, mtime: %v", omitAt, omitMt) file := newFile(t) - _, err := file.Write([]byte("hello, world\n")) - if err != nil { - t.Fatal(err) - } + // This should be an empty file (see #68687, #68663). name := file.Name() - err = file.Close() + err := file.Close() if err != nil { t.Error(err) } @@ -3358,6 +3354,14 @@ func TestCopyFS(t *testing.T) { t.Fatal("comparing two directories:", err) } + // Test whether CopyFS disallows copying for disk filesystem when there is any + // existing file in the destination directory. 
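The guarantee this test locks in is easy to reproduce from user code: with O_EXCL, a second CopyFS into the same destination fails instead of truncating what is already there. A sketch, assuming an fstest.MapFS source (any fs.FS works):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
        "testing/fstest"
    )

    func main() {
        src := fstest.MapFS{
            "william": {Data: []byte("Shakespeare\n")},
        }
        dst, err := os.MkdirTemp("", "copyfs")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dst)

        if err := os.CopyFS(dst, src); err != nil {
            panic(err) // destination is empty, so the first copy succeeds
        }
        err = os.CopyFS(dst, src) // "william" already exists now
        fmt.Println(errors.Is(err, fs.ErrExist)) // true
    }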
+ if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) { + t.Errorf("CopyFS should have failed and returned error when there is "+ "any existing file in the destination directory (in disk filesystem), "+ "got: %v, expected any error that indicates <file exists>", err) + } + // Test with memory filesystem. fsys = fstest.MapFS{ "william": {Data: []byte("Shakespeare\n")}, @@ -3395,6 +3399,14 @@ func TestCopyFS(t *testing.T) { }); err != nil { t.Fatal("comparing two directories:", err) } + + // Test whether CopyFS disallows copying for memory filesystem when there is any + // existing file in the destination directory. + if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) { + t.Errorf("CopyFS should have failed and returned error when there is "+ "any existing file in the destination directory (in memory filesystem), "+ "got: %v, expected any error that indicates <file exists>", err) + } } func TestCopyFSWithSymlinks(t *testing.T) { diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 0404c4ff64b72e..0bfef7759cc679 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -8,20 +8,28 @@ // v5.3: pidfd_open syscall, clone3 syscall; // v5.4: P_PIDFD idtype support for waitid syscall; // v5.6: pidfd_getfd syscall. +// +// N.B. Alternative Linux implementations may not follow this ordering. e.g., +// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until +// 8.0. package os import ( "errors" "internal/syscall/unix" + "runtime" "sync" "syscall" "unsafe" ) -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { +// ensurePidfd initializes the PidFD field in sysAttr if it is not already set. +// It returns the original or modified SysProcAttr struct and a flag indicating +// whether the PidFD should be duplicated before using. +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { if !pidfdWorks() { - return sysAttr + return sysAttr, false } var pidfd int @@ -29,23 +37,33 @@ func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { if sysAttr == nil { return &syscall.SysProcAttr{ PidFD: &pidfd, - } + }, false } if sysAttr.PidFD == nil { newSys := *sysAttr // copy newSys.PidFD = &pidfd - return &newSys + return &newSys, false } - return sysAttr + return sysAttr, true } -func getPidfd(sysAttr *syscall.SysProcAttr) (uintptr, bool) { +// getPidfd returns the value of sysAttr.PidFD (or its duplicate if needDup is +// set) and a flag indicating whether the value can be used. +func getPidfd(sysAttr *syscall.SysProcAttr, needDup bool) (uintptr, bool) { if !pidfdWorks() { return 0, false } - return uintptr(*sysAttr.PidFD), true + h := *sysAttr.PidFD + if needDup { + dupH, e := unix.Fcntl(h, syscall.F_DUPFD_CLOEXEC, 0) + if e != nil { + return 0, false + } + h = dupH + } + return uintptr(h), true } func pidfdFind(pid int) (uintptr, error) { @@ -126,14 +144,21 @@ func pidfdWorks() bool { var checkPidfdOnce = sync.OnceValue(checkPidfd) -// checkPidfd checks whether all required pidfd-related syscalls work. -// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid -// syscall with idtype of P_PIDFD. +// checkPidfd checks whether all required pidfd-related syscalls work. This +// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with +// idtype of P_PIDFD, and clone(CLONE_PIDFD). // // Reasons for non-working pidfd syscalls include an older kernel and an // execution environment in which the above system calls are restricted by // seccomp or a similar technology.
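From user code, the plumbing that ensurePidfd and getPidfd manage is reachable through syscall.SysProcAttr.PidFD, as the new TestStartProcessWithPidfd below does via os.StartProcess. A Linux-only sketch using os/exec (/bin/true is just a convenient no-op child):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
    )

    func main() {
        // Ask for a pidfd when the child is cloned. If pidfd support is
        // missing (old kernel, QEMU user mode, restrictive seccomp), the
        // value stays -1 and the runtime falls back to pid-based waiting.
        pidfd := -1
        cmd := exec.Command("/bin/true")
        cmd.SysProcAttr = &syscall.SysProcAttr{PidFD: &pidfd}
        if err := cmd.Run(); err != nil {
            panic(err)
        }
        if pidfd != -1 {
            defer syscall.Close(pidfd)
        }
        fmt.Println("pidfd:", pidfd)
    }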
func checkPidfd() error { + // In Android version < 12, pidfd-related system calls are not allowed + // by seccomp and trigger the SIGSYS signal. See issue #69065. + if runtime.GOOS == "android" { + ignoreSIGSYS() + defer restoreSIGSYS() + } + // Get a pidfd of the current process (opening of "/proc/self" won't // work for waitid). fd, err := unix.PidFDOpen(syscall.Getpid(), 0) @@ -159,5 +184,27 @@ func checkPidfd() error { return NewSyscallError("pidfd_send_signal", err) } + // Verify that clone(CLONE_PIDFD) works. + // + // This shouldn't be necessary since pidfd_open was added in Linux 5.3, + // after CLONE_PIDFD in Linux 5.2, but some alternative Linux + // implementations may not adhere to this ordering. + if err := checkClonePidfd(); err != nil { + return err + } + return nil } + +// Provided by syscall. + +//go:linkname checkClonePidfd +func checkClonePidfd() error + +// Provided by runtime. + +//go:linkname ignoreSIGSYS +func ignoreSIGSYS() + +//go:linkname restoreSIGSYS +func restoreSIGSYS() diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index 837593706bae8e..c1f41d02d66c73 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -6,8 +6,10 @@ package os_test import ( "errors" + "internal/syscall/unix" "internal/testenv" "os" + "os/exec" "syscall" "testing" ) @@ -57,3 +59,93 @@ func TestFindProcessViaPidfd(t *testing.T) { t.Fatalf("Release: got %v, want <nil>", err) } } + +func TestStartProcessWithPidfd(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + if err := os.CheckPidfdOnce(); err != nil { + // Non-pidfd code paths tested in exec_unix_test.go. + t.Skipf("skipping: pidfd not available: %v", err) + } + + var pidfd int + p, err := os.StartProcess(testenv.GoToolPath(t), []string{"go"}, &os.ProcAttr{ + Sys: &syscall.SysProcAttr{ + PidFD: &pidfd, + }, + }) + if err != nil { + t.Fatalf("starting test process: %v", err) + } + defer syscall.Close(pidfd) + + if _, err := p.Wait(); err != nil { + t.Fatalf("Wait: got %v, want <nil>", err) + } + + // Check the pidfd is still valid + err = unix.PidFDSendSignal(uintptr(pidfd), syscall.Signal(0)) + if !errors.Is(err, syscall.ESRCH) { + t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) + } +} + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. + const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like.
+ if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/os/pidfd_other.go b/src/os/pidfd_other.go index dda4bd0feccae6..ba9cbcb93830c0 100644 --- a/src/os/pidfd_other.go +++ b/src/os/pidfd_other.go @@ -8,11 +8,11 @@ package os import "syscall" -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { - return sysAttr +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { + return sysAttr, false } -func getPidfd(_ *syscall.SysProcAttr) (uintptr, bool) { +func getPidfd(_ *syscall.SysProcAttr, _ bool) (uintptr, bool) { return 0, false } diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 8dcb9cb2172882..45867477dc26b2 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -14,15 +14,12 @@ import ( "net" . "os" "path/filepath" - "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" - - "golang.org/x/net/nettest" ) func TestCopyFileRange(t *testing.T) { @@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) { t.Fatalf("server Control error: %v", err) } } - -func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { - t.Helper() - if !nettest.TestableNetwork(proto) { - t.Skipf("%s does not support %q", runtime.GOOS, proto) - } - - ln, err := nettest.NewLocalListener(proto) - if err != nil { - t.Fatalf("NewLocalListener error: %v", err) - } - t.Cleanup(func() { - if ln != nil { - ln.Close() - } - if client != nil { - client.Close() - } - if server != nil { - server.Close() - } - }) - ch := make(chan struct{}) - go func() { - var err error - server, err = ln.Accept() - if err != nil { - t.Errorf("Accept new connection error: %v", err) - } - ch <- struct{}{} - }() - client, err = net.Dial(proto, ln.Addr().String()) - <-ch - if err != nil { - t.Fatalf("Dial new connection error: %v", err) - } - return client, server -} diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c index fcb03d0dea7e34..df0049a4f37ab3 100644 --- a/src/runtime/cgo/gcc_stack_unix.c +++ b/src/runtime/cgo/gcc_stack_unix.c @@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2]) pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); // low address #else - // We don't know how to get the current stacks, so assume they are the - // same as the default stack bounds. - pthread_attr_getstacksize(&attr, &size); - addr = __builtin_frame_address(0) + 4096 - size; + // We don't know how to get the current stacks, leave it as + // 0 and the caller will use an estimate based on the current + // SP. + addr = 0; + size = 0; #endif pthread_attr_destroy(&attr); diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index b943b1c2d6b4f8..972de4fe03277f 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 { func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { g0 := mp.g0 - inBound := sp > g0.stack.lo && sp <= g0.stack.hi - if mp.ncgo > 0 && !inBound { - // ncgo > 0 indicates that this M was in Go further up the stack - // (it called C and is now receiving a callback). - // - // !inBound indicates that we were called with SP outside the - // expected system stack bounds (C changed the stack out from - // under us between the cgocall and cgocallback?). - // - // It is not safe for the C call to change the stack out from - // under us, so throw. 
- - // Note that this case isn't possible for signal == true, as - // that is always passing a new M from needm. - - // Stack is bogus, but reset the bounds anyway so we can print. - hi := g0.stack.hi - lo := g0.stack.lo - g0.stack.hi = sp + 1024 - g0.stack.lo = sp - 32*1024 - g0.stackguard0 = g0.stack.lo + stackGuard - g0.stackguard1 = g0.stackguard0 - - print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]") - print("\n") - exit(2) - } - if !mp.isextra { // We allocated the stack for standard Ms. Don't replace the // stack bounds with estimated ones when we already initialized @@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { return } - // This M does not have Go further up the stack. However, it may have - // previously called into Go, initializing the stack bounds. Between - // that call returning and now the stack may have changed (perhaps the - // C thread is running a coroutine library). We need to update the - // stack bounds for this case. + inBound := sp > g0.stack.lo && sp <= g0.stack.hi + if inBound && mp.g0StackAccurate { + // This M has called into Go before and has the stack bounds + // initialized. We have the accurate stack bounds, and the SP + // is in bounds. We expect it to continue running within the + // same bounds. + return + } + + // We don't have accurate stack bounds (either this M has never + // called into Go before, or we couldn't get accurate bounds), or the + // current SP is not within the previous bounds (the stack may have + // changed between calls). We need to update the stack bounds. + // + // N.B. we need to update the stack bounds even if SP appears to + // already be in bounds, when our bounds are estimated dummy bounds + // (below). We may be in a different region within the same actual + // stack bounds, but our estimates were not accurate. Or the actual + // stack bounds could have shifted but still have partial overlap with + // our dummy bounds. If we failed to update in that case, we could find + // ourselves seemingly called near the bottom of the stack bounds, where + // we quickly run out of space. // Set the stack bounds to match the current stack. If we don't // actually know how big the stack is (as we don't for any scheduling // stack), assume there's at least 32 kB. If we // can get a more accurate stack bound from pthread, use that, provided - // it actually contains SP.. + // it actually contains SP. g0.stack.hi = sp + 1024 g0.stack.lo = sp - 32*1024 + mp.g0StackAccurate = false if !signal && _cgo_getstackbound != nil { // Don't adjust if called from the signal handler. // We are on the signal stack, not the pthread stack. @@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds)) // getstackbound is an unsupported no-op on Windows. // + // On Unix systems, if the API to get accurate stack bounds is + // not available, it returns zeros. + // // Don't use these bounds if they don't contain SP. Perhaps we // were called by something not using the standard thread // stack.
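	// bounds[0] holds the stack's low address and bounds[1] its high address; they mirror g0.stack.lo and g0.stack.hi below.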
if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] { g0.stack.lo = bounds[0] g0.stack.hi = bounds[1] + mp.g0StackAccurate = true } } g0.stackguard0 = g0.stack.lo + stackGuard @@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { } sp := gp.m.g0.sched.sp // system sp saved by cgocallback. + oldStack := gp.m.g0.stack + oldAccurate := gp.m.g0StackAccurate callbackUpdateSystemStack(gp.m, sp, false) // The call from C is on gp.m's g0 stack, so we must ensure @@ -338,9 +327,14 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { // stack. However, since we're returning to an earlier stack frame and // need to pair with the entersyscall() call made by cgocall, we must // save syscall* and let reentersyscall restore them. + // + // Note: savedsp and savedbp MUST be held in locals as an unsafe.Pointer. + // When we call into Go, the stack is free to be moved. If these locals + // aren't visible in the stack maps, they won't get updated properly, + // and will end up being stale when restored by reentersyscall. savedsp := unsafe.Pointer(gp.syscallsp) savedpc := gp.syscallpc - savedbp := gp.syscallbp + savedbp := unsafe.Pointer(gp.syscallbp) exitsyscall() // coming out of cgo call gp.m.incgo = false if gp.m.isextra { @@ -372,9 +366,15 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { osPreemptExtEnter(gp.m) // going back to cgo call - reentersyscall(savedpc, uintptr(savedsp), savedbp) + reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp)) gp.m.winsyscall = winsyscall + + // Restore the old g0 stack bounds + gp.m.g0.stack = oldStack + gp.m.g0.stackguard0 = oldStack.lo + stackGuard + gp.m.g0.stackguard1 = gp.m.g0.stackguard0 + gp.m.g0StackAccurate = oldAccurate } func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 3f2c271953db66..44f9eed40208c5 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -30,43 +30,48 @@ func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) { // runtime has set itself up. return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(unsafe.Pointer(dst)) { - return + if src != nil { + getg().m.p.ptr().ptrWrites++ } + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(unsafe.Pointer(dst)) { + return + } - // If we are running on the system stack then dst might be an - // address on the stack, which is OK. - gp := getg() - if gp == gp.m.g0 || gp == gp.m.gsignal { - return - } + // If we are running on the system stack then dst might be an + // address on the stack, which is OK. + gp := getg() + if gp == gp.m.g0 || gp == gp.m.gsignal { + return + } - // Allocating memory can write to various mfixalloc structs - // that look like they are non-Go memory. - if gp.m.mallocing != 0 { - return - } + // Allocating memory can write to various mfixalloc structs + // that look like they are non-Go memory. + if gp.m.mallocing != 0 { + return + } - // If the object is pinned, it's safe to store it in C memory. The GC - // ensures it will not be moved or freed. - if isPinned(src) { - return - } + // If the object is pinned, it's safe to store it in C memory. The GC + // ensures it will not be moved or freed. + if isPinned(src) { + return + } - // It's OK if writing to memory allocated by persistentalloc. - // Do this check last because it is more expensive and rarely true. - // If it is false the expense doesn't matter since we are crashing. 
- if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { - return - } + // It's OK if writing to memory allocated by persistentalloc. + // Do this check last because it is more expensive and rarely true. + // If it is false the expense doesn't matter since we are crashing. + if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { + return + } - systemstack(func() { - println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) - throw(cgoWriteBarrierFail) - }) + systemstack(func() { + println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) + throw(cgoWriteBarrierFail) + }) + */ } // cgoCheckMemmove is called when moving a block of memory. @@ -93,13 +98,16 @@ func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) { if !typ.Pointers() { return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(dst) { - return - } - cgoCheckTypedBlock(typ, src, off, size) + countWrittenPointers(typ, 1) + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + cgoCheckTypedBlock(typ, src, off, size) + */ } // cgoCheckSliceCopy is called when copying n elements of a slice. @@ -114,17 +122,20 @@ func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) { if !typ.Pointers() { return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(dst) { - return - } - p := src - for i := 0; i < n; i++ { - cgoCheckTypedBlock(typ, p, 0, typ.Size_) - p = add(p, typ.Size_) - } + countWrittenPointers(typ, n) + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + p := src + for i := 0; i < n; i++ { + cgoCheckTypedBlock(typ, p, 0, typ.Size_) + p = add(p, typ.Size_) + } + */ } // cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, @@ -190,6 +201,21 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { } } +//go:nosplit +//go:nowritebarrier +func countWrittenPointers(typ *_type, n int) { + ptrs := uint64(0) + for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize * ptrBits { + b := uint64(readUintptr(addb(typ.GCData, i/ptrBits))) + for j := range 64 { + if b&(uint64(1)<<j) != 0 { + ptrs++ + } + } + } + getg().m.p.ptr().ptrWrites += ptrs * uint64(n) +} diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go + // Perform the weak->strong conversions in the critical window. + var wg sync.WaitGroup + for _, wp := range w { + wg.Add(1) + go func() { + defer wg.Done() + wp.Strong() + }() + } + + // Make sure the GC completes. + <-done + + // Make sure all the weak->strong conversions finish. + wg.Wait() + + // The bug is triggered if there's still mark work after gcMarkDone stops the world. + // + // This can manifest in one of two ways today: + // - An exceedingly rare crash in mark termination. + // - gcMarkDone restarts, as if issue #27993 is at play. + // + // Check for the latter. This is a fairly controlled environment, so #27993 is very + // unlikely to happen (it's already rare to begin with) but we'll always _appear_ to + // trigger the same bug if weak->strong conversions aren't properly coordinated with + // mark termination. + if runtime.GCMarkDoneRestarted() { + t.Errorf("gcMarkDone restarted") + } +} diff --git a/src/runtime/internal/sys/intrinsics.go b/src/runtime/internal/sys/intrinsics.go index e6a3758447f95e..196a93df7242e5 100644 --- a/src/runtime/internal/sys/intrinsics.go +++ b/src/runtime/internal/sys/intrinsics.go @@ -119,6 +119,10 @@ const m1 = 0x3333333333333333 // 00110011 ... const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... // OnesCount64 returns the number of one bits ("population count") in x.
+// +// nosplit because this is used in the cgocheck2 code for memmoves. +// +//go:nosplit func OnesCount64(x uint64) int { // Implementation: Parallel summing of adjacent bits. // See "Hacker's Delight", Chap. 5: Counting Bits. diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index 58690e45e4d5a8..4aafc3e44d3496 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -48,6 +48,7 @@ func mutexContended(l *mutex) bool { return atomic.Load(key32(&l.key)) > mutex_locked } +//go:linkname lock func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } @@ -117,6 +118,7 @@ func lock2(l *mutex) { } } +//go:linkname unlock func unlock(l *mutex) { unlockWithRank(l) } diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go index b6ee5ec7afe269..5ca1e3d5611abb 100644 --- a/src/runtime/lock_js.go +++ b/src/runtime/lock_js.go @@ -27,6 +27,7 @@ func mutexContended(l *mutex) bool { return false } +//go:linkname lock func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } @@ -45,6 +46,7 @@ func lock2(l *mutex) { l.key = mutex_locked } +//go:linkname unlock func unlock(l *mutex) { unlockWithRank(l) } diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 32d2235ad3ab90..20a0243655465f 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -35,6 +35,7 @@ func mutexContended(l *mutex) bool { return atomic.Loaduintptr(&l.key) > locked } +//go:linkname lock func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } @@ -99,6 +100,7 @@ Loop: } } +//go:linkname unlock func unlock(l *mutex) { unlockWithRank(l) } diff --git a/src/runtime/lock_wasip1.go b/src/runtime/lock_wasip1.go index acfc62acb48e90..2c5bd3c5907916 100644 --- a/src/runtime/lock_wasip1.go +++ b/src/runtime/lock_wasip1.go @@ -23,6 +23,7 @@ func mutexContended(l *mutex) bool { return false } +//go:linkname lock func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } @@ -41,6 +42,7 @@ func lock2(l *mutex) { l.key = mutex_locked } +//go:linkname unlock func unlock(l *mutex) { unlockWithRank(l) } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 432ace728b8269..373838332f564a 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -17,6 +17,7 @@ const ( lockRankDefer lockRankSweepWaiters lockRankAssistQueue + lockRankStrongFromWeakQueue lockRankSweep lockRankTestR lockRankTestW @@ -84,64 +85,65 @@ const lockRankLeafRank lockRank = 1000 // lockNames gives the names associated with each of the above ranks. 
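// The wholesale rewrites of lockNames and lockPartialOrder below are regenerated output from mklockrank.go; the only semantic change is the new strongFromWeakQueue rank.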
var lockNames = []string{ - lockRankSysmon: "sysmon", - lockRankScavenge: "scavenge", - lockRankForcegc: "forcegc", - lockRankDefer: "defer", - lockRankSweepWaiters: "sweepWaiters", - lockRankAssistQueue: "assistQueue", - lockRankSweep: "sweep", - lockRankTestR: "testR", - lockRankTestW: "testW", - lockRankTimerSend: "timerSend", - lockRankAllocmW: "allocmW", - lockRankExecW: "execW", - lockRankCpuprof: "cpuprof", - lockRankPollCache: "pollCache", - lockRankPollDesc: "pollDesc", - lockRankWakeableSleep: "wakeableSleep", - lockRankHchan: "hchan", - lockRankAllocmR: "allocmR", - lockRankExecR: "execR", - lockRankSched: "sched", - lockRankAllg: "allg", - lockRankAllp: "allp", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", - lockRankTimers: "timers", - lockRankTimer: "timer", - lockRankNetpollInit: "netpollInit", - lockRankRoot: "root", - lockRankItab: "itab", - lockRankReflectOffs: "reflectOffs", - lockRankUserArenaState: "userArenaState", - lockRankTraceBuf: "traceBuf", - lockRankTraceStrings: "traceStrings", - lockRankFin: "fin", - lockRankSpanSetSpine: "spanSetSpine", - lockRankMspanSpecial: "mspanSpecial", - lockRankTraceTypeTab: "traceTypeTab", - lockRankGcBitsArenas: "gcBitsArenas", - lockRankProfInsert: "profInsert", - lockRankProfBlock: "profBlock", - lockRankProfMemActive: "profMemActive", - lockRankProfMemFuture: "profMemFuture", - lockRankGscan: "gscan", - lockRankStackpool: "stackpool", - lockRankStackLarge: "stackLarge", - lockRankHchanLeaf: "hchanLeaf", - lockRankWbufSpans: "wbufSpans", - lockRankMheap: "mheap", - lockRankMheapSpecial: "mheapSpecial", - lockRankGlobalAlloc: "globalAlloc", - lockRankTrace: "trace", - lockRankTraceStackTab: "traceStackTab", - lockRankPanic: "panic", - lockRankDeadlock: "deadlock", - lockRankRaceFini: "raceFini", - lockRankAllocmRInternal: "allocmRInternal", - lockRankExecRInternal: "execRInternal", - lockRankTestRInternal: "testRInternal", + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankStrongFromWeakQueue: "strongFromWeakQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankTimerSend: "timerSend", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollCache: "pollCache", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: "wakeableSleep", + lockRankHchan: "hchan", + lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankTimers: "timers", + lockRankTimer: "timer", + lockRankNetpollInit: "netpollInit", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankTraceTypeTab: "traceTypeTab", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: 
"mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", } func (rank lockRank) String() string { @@ -163,62 +165,63 @@ func (rank lockRank) String() string { // // Lock ranks that allow self-cycles list themselves. var lockPartialOrder [][]lockRank = [][]lockRank{ - lockRankSysmon: {}, - lockRankScavenge: {lockRankSysmon}, - lockRankForcegc: {lockRankSysmon}, - lockRankDefer: {}, - lockRankSweepWaiters: {}, - lockRankAssistQueue: {}, - lockRankSweep: {}, - lockRankTestR: {}, - lockRankTestW: {}, - lockRankTimerSend: {}, - lockRankAllocmW: {}, - lockRankExecW: {}, - lockRankCpuprof: {}, - lockRankPollCache: {}, - lockRankPollDesc: {}, - lockRankWakeableSleep: {}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, - lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, - lockRankRoot: {}, - lockRankItab: {}, - lockRankReflectOffs: {lockRankItab}, - lockRankUserArenaState: {}, - lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, - lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, 
lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, 
lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, 
lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, 
lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, - lockRankPanic: {}, - lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, - lockRankRaceFini: {lockRankPanic}, - lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, - lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, - lockRankTestRInternal: {lockRankTestR, lockRankTestW}, + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankStrongFromWeakQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankTimerSend: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollCache: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, 
lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, 
lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, 
lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, 
lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankPanic: {}, + lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, + lockRankRaceFini: {lockRankPanic}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, 
lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, + lockRankTestRInternal: {lockRankTestR, lockRankTestW}, } diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index b92a213245d4f7..112fd876d0e199 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1332,7 +1332,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Returns the G for which the assist credit was accounted. func deductAssistCredit(size uintptr) *g { var assistG *g - if gcBlackenEnabled != 0 { + if debug.gcnoassist == 0 && gcBlackenEnabled != 0 { // Charge the current user G for this allocation. assistG = getg() if assistG.m.curg != nil { diff --git a/src/runtime/map.go b/src/runtime/map.go index 112084f5a74091..52d56fb57a4dc6 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -1209,6 +1209,11 @@ func (h *hmap) sameSizeGrow() bool { return h.flags&sameSizeGrow != 0 } +//go:linkname sameSizeGrowForIssue69110Test +func sameSizeGrowForIssue69110Test(h *hmap) bool { + return h.sameSizeGrow() +} + // noldbuckets calculates the number of buckets prior to the current map growth. func (h *hmap) noldbuckets() uintptr { oldB := h.B @@ -1668,7 +1673,16 @@ func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) } func mapclone2(t *maptype, src *hmap) *hmap { - dst := makemap(t, src.count, nil) + hint := src.count + if overLoadFactor(hint, src.B) { + // Note: in rare cases (e.g. during a same-sized grow) the map + // can be overloaded. Make sure we don't allocate a destination + // bucket array larger than the source bucket array. + // This will cause the cloned map to be overloaded also, + // but that's better than crashing. See issue 69110. + hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) + } + dst := makemap(t, hint, nil) dst.hash0 = src.hash0 dst.nevacuate = 0 // flags do not need to be copied here, just like a new map has no flags. diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 2654c696582211..1347bd1622c685 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -130,6 +130,7 @@ package runtime import ( "internal/cpu" + "internal/goexperiment" "internal/runtime/atomic" "unsafe" ) @@ -190,6 +191,7 @@ func gcinit() { work.markDoneSema = 1 lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) lockInit(&work.assistQueue.lock, lockRankAssistQueue) + lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue) lockInit(&work.wbufSpans.lock, lockRankWbufSpans) } @@ -418,6 +420,26 @@ type workType struct { list gList } + // strongFromWeak controls how the GC interacts with weak->strong + // pointer conversions. + strongFromWeak struct { + // block is a flag set during mark termination that prevents + // new weak->strong conversions from executing by blocking the + // goroutine and enqueuing it onto q. + // + // Mutated only by one goroutine at a time in gcMarkDone, + // with globally-synchronizing events like forEachP and + // stopTheWorld. + block bool + + // q is a queue of goroutines that attempted to perform a + // weak->strong conversion during mark termination. + // + // Protected by lock. 
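+		// Blocked goroutines are drained from q and reinjected into the scheduler by gcWakeAllStrongFromWeak once block is cleared.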
+ lock mutex + q gQueue + } + // cycles is the number of completed GC cycles, where a GC // cycle is sweep termination, mark, mark termination, and // sweep. This differs from memstats.numgc, which is @@ -800,6 +822,19 @@ func gcStart(trigger gcTrigger) { // This is protected by markDoneSema. var gcMarkDoneFlushed uint32 +// gcDebugMarkDone contains fields used to debug/test mark termination. +var gcDebugMarkDone struct { + // spinAfterRaggedBarrier forces gcMarkDone to spin after it executes + // the ragged barrier. + spinAfterRaggedBarrier atomic.Bool + + // restartedDueTo27993 indicates that we restarted mark termination + // due to the bug described in issue #27993. + // + // Protected by worldsema. + restartedDueTo27993 bool +} + // gcMarkDone transitions the GC from mark to mark termination if all // reachable objects have been marked (that is, there are no grey // objects and can be no more in the future). Otherwise, it flushes @@ -842,6 +877,10 @@ top: // stop the world later, so acquire worldsema now. semacquire(&worldsema) + // Prevent weak->strong conversions from generating additional + // GC work. forEachP will guarantee that it is observed globally. + work.strongFromWeak.block = true + // Flush all local buffers and collect flushedWork flags. gcMarkDoneFlushed = 0 forEachP(waitReasonGCMarkTermination, func(pp *p) { @@ -872,6 +911,10 @@ top: goto top } + // For debugging/testing. + for gcDebugMarkDone.spinAfterRaggedBarrier.Load() { + } + // There was no global work, no local work, and no Ps // communicated work since we took markDoneSema. Therefore // there are no grey objects and no more objects can be @@ -910,6 +953,8 @@ top: } }) if restart { + gcDebugMarkDone.restartedDueTo27993 = true + getg().m.preemptoff = "" systemstack(func() { // Accumulate the time we were stopped before we had to start again. @@ -936,6 +981,11 @@ top: // start the world again. gcWakeAllAssists() + // Wake all blocked weak->strong conversions. These will run + // when we start the world again. + work.strongFromWeak.block = false + gcWakeAllStrongFromWeak() + // Likewise, release the transition lock. Blocked // workers and assists will run when we start the // world again. @@ -1113,6 +1163,14 @@ func gcMarkTermination(stw worldStop) { throw("non-concurrent sweep failed to drain all sweep queues") } + // Accumulate pointer writes before restarting the world. + ptrWrites := uint64(0) + if goexperiment.CgoCheck2 { + for _, pp := range allp { + ptrWrites += pp.ptrWrites + } + } + systemstack(func() { // The memstats updated above must be updated with the world // stopped to ensure consistency of some values, such as @@ -1215,6 +1273,19 @@ func gcMarkTermination(stw worldStop) { if work.userForced { print(" (forced)") } + if goexperiment.CgoCheck2 { + var stats heapStatsDelta + memstats.heapStats.read(&stats) + allocs := stats.tinyAllocCount + stats.largeAllocCount + allocBytes := stats.largeAlloc + for i, c := range stats.smallAllocCount { + allocs += c + allocBytes += c * uint64(class_to_size[i]) + } + print(" ", ptrWrites, "w") + print(" ", allocs, "o") + print(" ", allocBytes, "b") + } print("\n") printunlock() } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 35fd08af50c3c1..bfca2d105b7426 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -2049,8 +2049,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { handle := (*atomic.Uintptr)(u) - // Prevent preemption. 
We want to make sure that another GC cycle can't start. + // Prevent preemption. We want to make sure that another GC cycle can't start + // and that work.strongFromWeak.block can't change out from under us. mp := acquirem() + + // Yield to the GC if necessary. + if work.strongFromWeak.block { + releasem(mp) + + // Try to park and wait for mark termination. + // N.B. gcParkStrongFromWeak calls acquirem before returning. + mp = gcParkStrongFromWeak() + } + p := handle.Load() if p == 0 { releasem(mp) @@ -2073,14 +2084,67 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { // Even if we just swept some random span that doesn't contain this object, because // this object is long dead and its memory has since been reused, we'll just observe nil. ptr := unsafe.Pointer(handle.Load()) + + // This is responsible for maintaining the same GC-related + // invariants as the Yuasa part of the write barrier. During + // the mark phase, it's possible that we just created the only + // valid pointer to the object pointed to by ptr. If it's only + // ever referenced from our stack, and our stack is blackened + // already, we could fail to mark it. So, mark it now. + if gcphase != _GCoff { + shade(uintptr(ptr)) + } releasem(mp) + + // Explicitly keep ptr alive. This seems unnecessary since we return ptr, + // but let's be explicit since it's important we keep ptr alive across the + // call to shade. + KeepAlive(ptr) return ptr } +// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks. +func gcParkStrongFromWeak() *m { + // Prevent preemption as we check strongFromWeak, so it can't change out from under us. + mp := acquirem() + + for work.strongFromWeak.block { + lock(&work.strongFromWeak.lock) + releasem(mp) // N.B. Holding the lock prevents preemption. + + // Queue ourselves up. + work.strongFromWeak.q.pushBack(getg()) + + // Park. + goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2) + + // Re-acquire the current M since we're going to check the condition again. + mp = acquirem() + + // Re-check condition. We may have awoken in the next GC's mark termination phase. + } + return mp +} + +// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong +// conversions. This is used at the end of a GC cycle. +// +// work.strongFromWeak.block must be false to prevent woken goroutines +// from immediately going back to sleep. +func gcWakeAllStrongFromWeak() { + lock(&work.strongFromWeak.lock) + list := work.strongFromWeak.q.popList() + injectglist(&list) + unlock(&work.strongFromWeak.lock) +} + // Retrieves or creates a weak pointer handle for the object p. func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // First try to retrieve without allocating. if handle := getWeakHandle(p); handle != nil { + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } @@ -2105,7 +2169,17 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } - return s.handle + + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + // + // Same for handle, which is only stored in the special. + // There's a window where it might die if we don't keep it + // alive explicitly. Returning it here is probably good enough, + // but let's be defensive and explicit. 
See #70455. + KeepAlive(p) + KeepAlive(handle) + return handle } // There was an existing handle. Free the special @@ -2124,8 +2198,11 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { } // Keep p alive for the duration of the function to ensure - // that it cannot die while we're trying to this. + // that it cannot die while we're trying to do this. + // + // Same for handle, just to be defensive. KeepAlive(p) + KeepAlive(handle) return handle } @@ -2154,6 +2231,9 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr { unlock(&span.speciallock) releasem(mp) + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 1239b4a546ea39..3391afc6572509 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -50,6 +50,7 @@ NONE < defer; NONE < sweepWaiters, assistQueue, + strongFromWeakQueue, sweep; # Test only @@ -66,6 +67,7 @@ assistQueue, hchan, pollDesc, # pollDesc can interact with timers, which can lock sched. scavenge, + strongFromWeakQueue, sweep, sweepWaiters, testR, diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 006274757e66f1..ee3e59a9aa99ce 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) { for i := range p { cf := CallersFrames(p[i].Stack()) j := 0 - for ; j < len(expandedStack); j++ { + for j < len(expandedStack) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" expandedStack[j] = f.PC + 1 + j++ if !more { break } @@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok // of calling ThreadCreateProfile directly. func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) { - copy(p[0].Stack0[:], r.Stack) + i := copy(p[0].Stack0[:], r.Stack) + clear(p[0].Stack0[i:]) p = p[1:] }) } @@ -1649,7 +1651,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { return } for i, mr := range records[0:n] { - copy(p[i].Stack0[:], mr.Stack) + l := copy(p[i].Stack0[:], mr.Stack) + clear(p[i].Stack0[l:]) } return } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 6ce656c70e146e..e80d390e0d09f2 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -879,8 +879,9 @@ func runPerThreadSyscall() { } const ( - _SI_USER = 0 - _SI_TKILL = -6 + _SI_USER = 0 + _SI_TKILL = -6 + _SYS_SECCOMP = 1 ) // sigFromUser reports whether the signal was sent because of a call @@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool { return code == _SI_USER || code == _SI_TKILL } +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + code := int32(c.sigcode()) + return code == _SYS_SECCOMP +} + //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) diff --git a/src/runtime/os_unix_nonlinux.go b/src/runtime/os_unix_nonlinux.go index b98753b8fe12b7..0e8b61c3b11aa2 100644 --- a/src/runtime/os_unix_nonlinux.go +++ b/src/runtime/os_unix_nonlinux.go @@ -13,3 +13,10 @@ package runtime func (c *sigctxt) sigFromUser() bool { return c.sigcode() == _SI_USER } + +// sigFromSeccomp reports whether the signal was sent from seccomp. 
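These two predicates come together in sighandler (later in this diff): a SIGSYS is swallowed only when it was generated by a seccomp filter and the os package asked for that via ignoreSIGSYS. A standalone sketch of the classification, with constants mirroring os_linux.go (SIGSYS is signal number 31 on Linux; this is illustrative, not the runtime's dispatch code):

package sigmodel

// These values mirror os_linux.go: _SI_USER/_SI_TKILL mark signals sent by
// kill(2)/tkill(2), and _SYS_SECCOMP marks a SIGSYS delivered by a seccomp
// filter's SECCOMP_RET_TRAP action.
const (
    sigSYS     = 31 // Linux SIGSYS
    siUser     = 0
    siTkill    = -6
    sysSeccomp = 1
)

// shouldSwallowSIGSYS mirrors the check added to sighandler: drop a SIGSYS
// only when it came from seccomp and ignoring was explicitly requested.
func shouldSwallowSIGSYS(sig uint32, sigcode int32, ignoring bool) bool {
    return sig == sigSYS && sigcode == sysSeccomp && ignoring
}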
+// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + return false +} diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 391588d4acd0ec..ef373b36848437 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p) } } diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 30ef50b1c0fa7a..da4ad17d77e6fd 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -15,6 +15,7 @@ import ( "internal/syscall/unix" "internal/testenv" "io" + "iter" "math" "math/big" "os" @@ -981,7 +982,7 @@ func TestBlockProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk) @@ -991,7 +992,7 @@ func TestBlockProfile(t *testing.T) { } -func stacks(p *profile.Profile) (res [][]string) { +func profileStacks(p *profile.Profile) (res [][]string) { for _, s := range p.Sample { var stk []string for _, l := range s.Location { @@ -1004,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) { return res } +func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) { + for _, record := range records { + frames := runtime.CallersFrames(record.Stack()) + var stk []string + for { + frame, more := frames.Next() + stk = append(stk, frame.Function) + if !more { + break + } + } + res = append(res, stk) + } + return res +} + func containsStack(got [][]string, want []string) bool { for _, stk := range got { if len(stk) < len(want) { @@ -1288,7 +1305,7 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, want := range [][]string{ {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"}, } { @@ -1328,6 +1345,28 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D) } }) + + t.Run("records", func(t *testing.T) { + // Record a mutex profile using the structured record API. + var records []runtime.BlockProfileRecord + for { + n, ok := runtime.MutexProfile(records) + if ok { + records = records[:n] + break + } + records = make([]runtime.BlockProfileRecord, n*2) + } + + // Check that we see the same stack trace as the proto profile. For + // historical reasons we expect a runtime.goexit root frame here that is + // omitted in the proto profile. + stks := blockRecordStacks(records) + want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"} + if !containsStack(stks, want) { + t.Errorf("No matching stack entry for %+v", want) + } + }) } func TestMutexProfileRateAdjust(t *testing.T) { @@ -1754,6 +1793,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) { } } +// Regression test for #69998. +func TestGoroutineProfileCoro(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // Set up a goroutine to just create and run coroutine goroutines all day.
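The regression test that follows leans on iter.Pull2, which converts a push-style sequence into pull-style calls by running the sequence on one of the runtime's coroutine goroutines; those are exactly the goroutines the profiler crashed on in #69998. A self-contained example of the API (not part of the patch):

package main

import (
    "fmt"
    "iter"
)

func main() {
    seq := func(yield func(int, string) bool) {
        for i, s := range []string{"a", "b", "c"} {
            if !yield(i, s) {
                return
            }
        }
    }
    // Pull2 runs seq on a coroutine goroutine and returns a next/stop pair.
    next, stop := iter.Pull2(seq)
    defer stop() // always release the coroutine
    for {
        i, s, ok := next()
        if !ok {
            break
        }
        fmt.Println(i, s)
    }
}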
+ iterFunc := func() { + p, stop := iter.Pull2( + func(yield func(int, int) bool) { + for i := 0; i < 10000; i++ { + if !yield(i, i) { + return + } + } + }, + ) + defer stop() + for { + _, _, ok := p() + if !ok { + break + } + } + } + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + iterFunc() + select { + case <-done: + default: + } + } + }() + + // Take a goroutine profile. If the bug in #69998 is present, this will crash + // with high probability. We don't care about the output for this bug. + goroutineProf.WriteTo(io.Discard, 1) +} + func BenchmarkGoroutine(b *testing.B) { withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) { return func(b *testing.B) { @@ -2441,16 +2524,7 @@ func TestTimeVDSO(t *testing.T) { } func TestProfilerStackDepth(t *testing.T) { - // Disable sampling, otherwise it's difficult to assert anything. - oldMemRate := runtime.MemProfileRate - runtime.MemProfileRate = 1 - runtime.SetBlockProfileRate(1) - oldMutexRate := runtime.SetMutexProfileFraction(1) - t.Cleanup(func() { - runtime.MemProfileRate = oldMemRate - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(oldMutexRate) - }) + t.Cleanup(disableSampling()) const depth = 128 go produceProfileEvents(t, depth) @@ -2478,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) var stk []string for _, s := range stks { if hasPrefix(s, test.prefix) { @@ -2742,3 +2816,84 @@ runtime/pprof.inlineA`, }) } } + +func TestProfileRecordNullPadding(t *testing.T) { + // Produce events for the different profile types. + t.Cleanup(disableSampling()) + memSink = make([]byte, 1) // MemProfile + <-time.After(time.Millisecond) // BlockProfile + blockMutex(t) // MutexProfile + runtime.GC() + + // Test that all profile records are null padded. + testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile) + testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile) + testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, true) + }) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, false) + }) + // Not testing ThreadCreateProfile because it is broken, see issue 6104. 
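Both the records subtest above and the null-padding test being added here rely on the same polling idiom that the runtime's record-based profile APIs require: probe for the size, then retry with a larger buffer until the snapshot fits. A minimal standalone version (an illustrative helper, not part of the patch):

package main

import (
    "fmt"
    "runtime"
)

// collectMutexProfile grows its buffer until runtime.MutexProfile reports
// ok == true; each failed call returns the currently required record count.
func collectMutexProfile() []runtime.BlockProfileRecord {
    var records []runtime.BlockProfileRecord
    for {
        n, ok := runtime.MutexProfile(records)
        if ok {
            return records[:n]
        }
        // Double n to leave headroom: the profile may grow between calls.
        records = make([]runtime.BlockProfileRecord, n*2)
    }
}

func main() {
    fmt.Println(len(collectMutexProfile()), "mutex profile records")
}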
+} + +func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) { + stack0 := func(sr *T) *[32]uintptr { + switch t := any(sr).(type) { + case *runtime.StackRecord: + return &t.Stack0 + case *runtime.MemProfileRecord: + return &t.Stack0 + case *runtime.BlockProfileRecord: + return &t.Stack0 + default: + panic(fmt.Sprintf("unexpected type %T", sr)) + } + } + + t.Run(name, func(t *testing.T) { + var p []T + for { + n, ok := fn(p) + if ok { + p = p[:n] + break + } + p = make([]T, n*2) + for i := range p { + s0 := stack0(&p[i]) + for j := range s0 { + // Poison the Stack0 array to identify lack of zero padding + s0[j] = ^uintptr(0) + } + } + } + + if len(p) == 0 { + t.Fatal("no records found") + } + + for _, sr := range p { + for i, v := range stack0(&sr) { + if v == ^uintptr(0) { + t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr) + } + } + } + }) +} + +// disableSampling configures the profilers to capture all events, otherwise +// it's difficult to assert anything. +func disableSampling() func() { + oldMemRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + runtime.SetBlockProfileRate(1) + oldMutexRate := runtime.SetMutexProfileFraction(1) + return func() { + runtime.MemProfileRate = oldMemRate + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(oldMutexRate) + } +} diff --git a/src/runtime/proc.go b/src/runtime/proc.go index c4f175b0b76b22..e5bf3ff4f0bfc0 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -1137,6 +1137,11 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { dumpgstatus(gp) throw("casfrom_Gscanstatus: gp->status is not in scan state") } + // We're transitioning into the running state, record the timestamp for + // subsequent use. + if newval == _Grunning { + gp.lastsched = nanotime() + } releaseLockRankAndM(lockRankGscan) } @@ -1152,6 +1157,11 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { r := gp.atomicstatus.CompareAndSwap(oldval, newval) if r { acquireLockRankAndM(lockRankGscan) + // We're transitioning out of running, record how long we were in the + // state. + if oldval == _Grunning { + gp.runningnanos += nanotime() - gp.lastsched + } } return r @@ -1211,7 +1221,18 @@ func casgstatus(gp *g, oldval, newval uint32) { } } + now := nanotime() + if newval == _Grunning { + // We're transitioning into the running state, record the timestamp for + // subsequent use. + gp.lastsched = now + } + if oldval == _Grunning { + // We're transitioning out of running, record how long we were in the + // state. + gp.runningnanos += now - gp.lastsched + // Track every gTrackingPeriod time a goroutine transitions out of running. if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 { gp.tracking = true @@ -1232,7 +1253,6 @@ func casgstatus(gp *g, oldval, newval uint32) { // We transitioned out of runnable, so measure how much // time we spent in this state and add it to // runnableTime. - now := nanotime() gp.runnableTime += now - gp.trackingStamp gp.trackingStamp = 0 case _Gwaiting: @@ -1245,7 +1265,6 @@ func casgstatus(gp *g, oldval, newval uint32) { // a more representative estimate of the absolute value. // gTrackingPeriod also represents an accurate sampling period // because we can only enter this state from _Grunning. 
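The scheduler hunks above maintain one invariant: stamp gp.lastsched whenever a goroutine enters _Grunning, and fold now - lastsched into gp.runningnanos on every path out of _Grunning (casgstatus, casfrom_Gscanstatus, castogscanstatus, and casGToPreemptScan all participate). A toy model of that bookkeeping, with illustrative names, including the read-while-running case that Grunningnanos handles below:

package main

import (
    "fmt"
    "time"
)

// gClock mimics the lastsched/runningnanos pair added to the runtime's g.
type gClock struct {
    running      bool
    lastSched    int64 // when the goroutine last entered running
    runningNanos int64 // accumulated time spent running
}

func (c *gClock) setRunning(running bool, now int64) {
    if running && !c.running {
        c.lastSched = now // entering the running state
    }
    if !running && c.running {
        c.runningNanos += now - c.lastSched // leaving the running state
    }
    c.running = running
}

// total mirrors Grunningnanos: while running, include the open interval.
func (c *gClock) total(now int64) int64 {
    if c.running {
        return c.runningNanos + now - c.lastSched
    }
    return c.runningNanos
}

func main() {
    var c gClock
    c.setRunning(true, time.Now().UnixNano())
    time.Sleep(10 * time.Millisecond)
    fmt.Println(c.total(time.Now().UnixNano()), "ns spent running")
}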
- now := nanotime() sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod) gp.trackingStamp = 0 } @@ -1256,12 +1275,10 @@ func casgstatus(gp *g, oldval, newval uint32) { break } // Blocking on a lock. Write down the timestamp. - now := nanotime() gp.trackingStamp = now case _Grunnable: // We just transitioned into runnable, so record what // time that happened. - now := nanotime() gp.trackingStamp = now case _Grunning: // We're transitioning into running, so turn off @@ -1323,6 +1340,9 @@ func casGToPreemptScan(gp *g, old, new uint32) { acquireLockRankAndM(lockRankGscan) for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) { } + // We're transitioning out of running, record how long we were in the + // state. + gp.runningnanos += nanotime() - gp.lastsched } // casGFromPreempted attempts to transition gp from _Gpreempted to @@ -2539,6 +2559,7 @@ func dropm() { g0.stack.lo = 0 g0.stackguard0 = 0 g0.stackguard1 = 0 + mp.g0StackAccurate = false putExtraM(mp) @@ -3872,23 +3893,23 @@ func injectglist(glist *gList) { if glist.empty() { return } - trace := traceAcquire() - if trace.ok() { - for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { - trace.GoUnpark(gp, 0) - } - traceRelease(trace) - } // Mark all the goroutines as runnable before we put them // on the run queues. head := glist.head.ptr() var tail *g qsize := 0 + trace := traceAcquire() for gp := head; gp != nil; gp = gp.schedlink.ptr() { tail = gp qsize++ casgstatus(gp, _Gwaiting, _Grunnable) + if trace.ok() { + trace.GoUnpark(gp, 0) + } + } + if trace.ok() { + traceRelease(trace) } // Turn the gList into a gQueue. @@ -4059,6 +4080,14 @@ func dropg() { setGNoWB(&gp.m.curg, nil) } +// Grunningnanos returns the wall time spent by current g in the running state. +// A goroutine may be running on an OS thread that's descheduled by the OS +// scheduler, this time still counts towards the metric. +func Grunningnanos() int64 { + gp := getg() + return gp.runningnanos + nanotime() - gp.lastsched +} + func parkunlock_c(gp *g, lock unsafe.Pointer) bool { unlock((*mutex)(lock)) return true @@ -4290,6 +4319,8 @@ func gdestroy(gp *g) { gp.param = nil gp.labels = nil gp.timer = nil + gp.lastsched = 0 + gp.runningnanos = 0 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { // Flush assist credit to the global pool. 
This gives @@ -4415,7 +4446,13 @@ func reentersyscall(pc, sp, bp uintptr) { } if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscall") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscall") }) } @@ -4553,14 +4590,20 @@ func entersyscallblock() { sp2 := gp.sched.sp sp3 := gp.syscallsp systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } casgstatus(gp, _Grunning, _Gsyscall) if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscallblock") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s index 1670a809862a2b..74c57bb1dc9136 100644 --- a/src/runtime/rt0_aix_ppc64.s +++ b/src/runtime/rt0_aix_ppc64.s @@ -41,6 +41,8 @@ TEXT _main(SB),NOSPLIT,$-8 MOVD R12, CTR BR (CTR) +// Parameter save space required to cross-call into _cgo_sys_thread_create +#define PARAM_SPACE 16 TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 // Start with standard C stack frame layout and linkage. MOVD LR, R0 MOVD R0, 16(R1) // Save LR in caller's frame MOVW CR, R0 // Save CR in caller's frame MOVD R0, 8(R1) - MOVDU R1, -344(R1) // Allocate frame. + MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame. // Preserve callee-save registers.
- MOVD R14, 48(R1) - MOVD R15, 56(R1) - MOVD R16, 64(R1) - MOVD R17, 72(R1) - MOVD R18, 80(R1) - MOVD R19, 88(R1) - MOVD R20, 96(R1) - MOVD R21,104(R1) - MOVD R22, 112(R1) - MOVD R23, 120(R1) - MOVD R24, 128(R1) - MOVD R25, 136(R1) - MOVD R26, 144(R1) - MOVD R27, 152(R1) - MOVD R28, 160(R1) - MOVD R29, 168(R1) - MOVD g, 176(R1) // R30 - MOVD R31, 184(R1) - FMOVD F14, 192(R1) - FMOVD F15, 200(R1) - FMOVD F16, 208(R1) - FMOVD F17, 216(R1) - FMOVD F18, 224(R1) - FMOVD F19, 232(R1) - FMOVD F20, 240(R1) - FMOVD F21, 248(R1) - FMOVD F22, 256(R1) - FMOVD F23, 264(R1) - FMOVD F24, 272(R1) - FMOVD F25, 280(R1) - FMOVD F26, 288(R1) - FMOVD F27, 296(R1) - FMOVD F28, 304(R1) - FMOVD F29, 312(R1) - FMOVD F30, 320(R1) - FMOVD F31, 328(R1) + MOVD R14, 48+PARAM_SPACE(R1) + MOVD R15, 56+PARAM_SPACE(R1) + MOVD R16, 64+PARAM_SPACE(R1) + MOVD R17, 72+PARAM_SPACE(R1) + MOVD R18, 80+PARAM_SPACE(R1) + MOVD R19, 88+PARAM_SPACE(R1) + MOVD R20, 96+PARAM_SPACE(R1) + MOVD R21,104+PARAM_SPACE(R1) + MOVD R22, 112+PARAM_SPACE(R1) + MOVD R23, 120+PARAM_SPACE(R1) + MOVD R24, 128+PARAM_SPACE(R1) + MOVD R25, 136+PARAM_SPACE(R1) + MOVD R26, 144+PARAM_SPACE(R1) + MOVD R27, 152+PARAM_SPACE(R1) + MOVD R28, 160+PARAM_SPACE(R1) + MOVD R29, 168+PARAM_SPACE(R1) + MOVD g, 176+PARAM_SPACE(R1) // R30 + MOVD R31, 184+PARAM_SPACE(R1) + FMOVD F14, 192+PARAM_SPACE(R1) + FMOVD F15, 200+PARAM_SPACE(R1) + FMOVD F16, 208+PARAM_SPACE(R1) + FMOVD F17, 216+PARAM_SPACE(R1) + FMOVD F18, 224+PARAM_SPACE(R1) + FMOVD F19, 232+PARAM_SPACE(R1) + FMOVD F20, 240+PARAM_SPACE(R1) + FMOVD F21, 248+PARAM_SPACE(R1) + FMOVD F22, 256+PARAM_SPACE(R1) + FMOVD F23, 264+PARAM_SPACE(R1) + FMOVD F24, 272+PARAM_SPACE(R1) + FMOVD F25, 280+PARAM_SPACE(R1) + FMOVD F26, 288+PARAM_SPACE(R1) + FMOVD F27, 296+PARAM_SPACE(R1) + FMOVD F28, 304+PARAM_SPACE(R1) + FMOVD F29, 312+PARAM_SPACE(R1) + FMOVD F30, 320+PARAM_SPACE(R1) + FMOVD F31, 328+PARAM_SPACE(R1) // Synchronous initialization. MOVD $runtime·reginit(SB), R12 @@ -130,44 +132,44 @@ nocgo: done: // Restore saved registers. 
- MOVD 48(R1), R14 - MOVD 56(R1), R15 - MOVD 64(R1), R16 - MOVD 72(R1), R17 - MOVD 80(R1), R18 - MOVD 88(R1), R19 - MOVD 96(R1), R20 - MOVD 104(R1), R21 - MOVD 112(R1), R22 - MOVD 120(R1), R23 - MOVD 128(R1), R24 - MOVD 136(R1), R25 - MOVD 144(R1), R26 - MOVD 152(R1), R27 - MOVD 160(R1), R28 - MOVD 168(R1), R29 - MOVD 176(R1), g // R30 - MOVD 184(R1), R31 - FMOVD 196(R1), F14 - FMOVD 200(R1), F15 - FMOVD 208(R1), F16 - FMOVD 216(R1), F17 - FMOVD 224(R1), F18 - FMOVD 232(R1), F19 - FMOVD 240(R1), F20 - FMOVD 248(R1), F21 - FMOVD 256(R1), F22 - FMOVD 264(R1), F23 - FMOVD 272(R1), F24 - FMOVD 280(R1), F25 - FMOVD 288(R1), F26 - FMOVD 296(R1), F27 - FMOVD 304(R1), F28 - FMOVD 312(R1), F29 - FMOVD 320(R1), F30 - FMOVD 328(R1), F31 - - ADD $344, R1 + MOVD 48+PARAM_SPACE(R1), R14 + MOVD 56+PARAM_SPACE(R1), R15 + MOVD 64+PARAM_SPACE(R1), R16 + MOVD 72+PARAM_SPACE(R1), R17 + MOVD 80+PARAM_SPACE(R1), R18 + MOVD 88+PARAM_SPACE(R1), R19 + MOVD 96+PARAM_SPACE(R1), R20 + MOVD 104+PARAM_SPACE(R1), R21 + MOVD 112+PARAM_SPACE(R1), R22 + MOVD 120+PARAM_SPACE(R1), R23 + MOVD 128+PARAM_SPACE(R1), R24 + MOVD 136+PARAM_SPACE(R1), R25 + MOVD 144+PARAM_SPACE(R1), R26 + MOVD 152+PARAM_SPACE(R1), R27 + MOVD 160+PARAM_SPACE(R1), R28 + MOVD 168+PARAM_SPACE(R1), R29 + MOVD 176+PARAM_SPACE(R1), g // R30 + MOVD 184+PARAM_SPACE(R1), R31 + FMOVD 196+PARAM_SPACE(R1), F14 + FMOVD 200+PARAM_SPACE(R1), F15 + FMOVD 208+PARAM_SPACE(R1), F16 + FMOVD 216+PARAM_SPACE(R1), F17 + FMOVD 224+PARAM_SPACE(R1), F18 + FMOVD 232+PARAM_SPACE(R1), F19 + FMOVD 240+PARAM_SPACE(R1), F20 + FMOVD 248+PARAM_SPACE(R1), F21 + FMOVD 256+PARAM_SPACE(R1), F22 + FMOVD 264+PARAM_SPACE(R1), F23 + FMOVD 272+PARAM_SPACE(R1), F24 + FMOVD 280+PARAM_SPACE(R1), F25 + FMOVD 288+PARAM_SPACE(R1), F26 + FMOVD 296+PARAM_SPACE(R1), F27 + FMOVD 304+PARAM_SPACE(R1), F28 + FMOVD 312+PARAM_SPACE(R1), F29 + FMOVD 320+PARAM_SPACE(R1), F30 + FMOVD 328+PARAM_SPACE(R1), F31 + + ADD $344+PARAM_SPACE, R1 MOVD 8(R1), R0 MOVFL R0, $0xff diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 5defe2f615eaa4..14561330bbf281 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. 
types := []string{ - "[]main.astruct;", - "bucket<string,main.astruct>;", - "hash<string,main.astruct>;", - "main.astruct;", - "hash<string,main.astruct> * map[string]main.astruct;", + "[]main.astruct", + "bucket<string,main.astruct>", + "hash<string,main.astruct>", + "main.astruct", + "hash<string,main.astruct> * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { - t.Fatalf("could not find %s in 'info typrs astruct' output", name) + t.Fatalf("could not find %q in 'info types astruct' output", name) } } } diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 03ef74b8dc4b54..cb45504fbfc8ec 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -316,6 +316,7 @@ var debug struct { gcpacertrace int32 gcshrinkstackoff int32 gcstoptheworld int32 + gcnoassist int32 gctrace int32 invalidptr int32 madvdontneed int32 // for Linux; issue 28466 @@ -374,6 +375,7 @@ var dbgvars = []*dbgVar{ {name: "gcpacertrace", value: &debug.gcpacertrace}, {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, {name: "gcstoptheworld", value: &debug.gcstoptheworld}, + {name: "gcnoassist", value: &debug.gcnoassist}, {name: "gctrace", value: &debug.gctrace}, {name: "harddecommit", value: &debug.harddecommit}, {name: "inittrace", value: &debug.inittrace}, diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 4a789639611fb7..65051c55351c16 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -493,7 +493,6 @@ type g struct { trackingStamp int64 // timestamp of when the G last started being tracked runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking lockedm muintptr - sig uint32 writebuf []byte sigcode0 uintptr sigcode1 uintptr @@ -509,6 +508,10 @@ type g struct { timer *timer // cached timer for time.Sleep sleepWhen int64 // when to sleep until selectDone atomic.Uint32 // are we participating in a select and did someone win the race? + sig uint32 + lastsched int64 // timestamp when the G last started running + runningnanos int64 // wall time spent in the running state + // goroutineProfiled indicates the status of this goroutine's stack for the // current in-progress goroutine profile @@ -556,47 +559,48 @@ type m struct { _ uint32 // align next field to 8 bytes // Fields not known to debuggers.
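Note that these hunks only register the new gcnoassist knob; its consumer is not part of the hunks shown, so the gate below is an assumption about how the variable is read, not code from this diff. Debug variables wired into dbgvars are filled in from GODEBUG at startup, e.g. GODEBUG=gcnoassist=1:

package main

import (
    "fmt"
    "os"
    "strings"
)

// gcnoassist stands in for the runtime's debug.gcnoassist, which the
// dbgvars entry above lets parsedebugvars populate from GODEBUG.
var gcnoassist int32

func parseGCNoAssist() {
    for _, kv := range strings.Split(os.Getenv("GODEBUG"), ",") {
        if kv == "gcnoassist=1" {
            gcnoassist = 1
        }
    }
}

// gcShouldAssist is the assumed consumer: skip mark assists when set.
func gcShouldAssist() bool {
    return gcnoassist == 0
}

func main() {
    parseGCNoAssist()
    fmt.Println("GC assists enabled:", gcShouldAssist())
}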
- procid uint64 // for debuggers, but offset not hard-coded - gsignal *g // signal-handling g - goSigStack gsignalStack // Go-allocated signal handling stack - sigmask sigset // storage for saved signal mask - tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) - mstartfn func() - curg *g // current running goroutine - caughtsig guintptr // goroutine running during fatal signal - p puintptr // attached p for executing go code (nil if not executing go code) - nextp puintptr - oldp puintptr // the p that was attached before executing a syscall - id int64 - mallocing int32 - throwing throwType - preemptoff string // if != "", keep curg running on this m - locks int32 - dying int32 - profilehz int32 - spinning bool // m is out of work and is actively looking for work - blocked bool // m is blocked on a note - newSigstack bool // minit on C thread called sigaltstack - printlock int8 - incgo bool // m is executing a cgo call - isextra bool // m is an extra m - isExtraInC bool // m is an extra m that is not executing Go code - isExtraInSig bool // m is an extra m in a signal handler - freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) - needextram bool - traceback uint8 - ncgocall uint64 // number of cgo calls in total - ncgo int32 // number of cgo calls currently in progress - cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily - cgoCallers *cgoCallers // cgo traceback if crashing in cgo call - park note - alllink *m // on allm - schedlink muintptr - lockedg guintptr - createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. - lockedExt uint32 // tracking for external LockOSThread - lockedInt uint32 // tracking for internal lockOSThread - nextwaitm muintptr // next m waiting for lock + procid uint64 // for debuggers, but offset not hard-coded + gsignal *g // signal-handling g + goSigStack gsignalStack // Go-allocated signal handling stack + sigmask sigset // storage for saved signal mask + tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) + mstartfn func() + curg *g // current running goroutine + caughtsig guintptr // goroutine running during fatal signal + p puintptr // attached p for executing go code (nil if not executing go code) + nextp puintptr + oldp puintptr // the p that was attached before executing a syscall + id int64 + mallocing int32 + throwing throwType + preemptoff string // if != "", keep curg running on this m + locks int32 + dying int32 + profilehz int32 + spinning bool // m is out of work and is actively looking for work + blocked bool // m is blocked on a note + newSigstack bool // minit on C thread called sigaltstack + printlock int8 + incgo bool // m is executing a cgo call + isextra bool // m is an extra m + isExtraInC bool // m is an extra m that is not executing Go code + isExtraInSig bool // m is an extra m in a signal handler + freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) + needextram bool + g0StackAccurate bool // whether the g0 stack has accurate bounds + traceback uint8 + ncgocall uint64 // number of cgo calls in total + ncgo int32 // number of cgo calls currently in progress + cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily + cgoCallers *cgoCallers // cgo traceback if crashing in cgo call + park note + alllink *m // on allm + schedlink muintptr + lockedg guintptr + createstack [32]uintptr // 
stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. + lockedExt uint32 // tracking for external LockOSThread + lockedInt uint32 // tracking for internal lockOSThread + nextwaitm muintptr // next m waiting for lock mLockProfile mLockProfile // fields relating to runtime.lock contention profStack []uintptr // used for memory/block/mutex stack traces @@ -768,6 +772,10 @@ type p struct { // gcStopTime is the nanotime timestamp that this P last entered _Pgcstop. gcStopTime int64 + // ptrWrites counts non-nil heap pointer writes made on this P. + // Only modified if goexperiment.CgoCheck2 is enabled. + ptrWrites uint64 + // Padding is no longer needed. False sharing is now not a worry because p is large enough // that its size class is an integer multiple of the cache line size (for any of our architectures). } @@ -1095,6 +1103,7 @@ const ( waitReasonTraceProcStatus // "trace proc status" waitReasonPageTraceFlush // "page trace flush" waitReasonCoroutine // "coroutine" + waitReasonGCWeakToStrongWait // "GC weak to strong wait" ) var waitReasonStrings = [...]string{ @@ -1135,6 +1144,7 @@ var waitReasonStrings = [...]string{ waitReasonTraceProcStatus: "trace proc status", waitReasonPageTraceFlush: "page trace flush", waitReasonCoroutine: "coroutine", + waitReasonGCWeakToStrongWait: "GC weak to strong wait", } func (w waitReason) String() string { @@ -1189,6 +1199,7 @@ var ( // len(allp) == gomaxprocs; may change at safe points, otherwise // immutable. + //go:linkname allp allp []*p // Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 8ba498bdb238d5..6f40f440e807f8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -605,6 +605,19 @@ var crashing atomic.Int32 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool var testSigusr1 func(gp *g) bool +// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065. +var sigsysIgnored uint32 + +//go:linkname ignoreSIGSYS os.ignoreSIGSYS +func ignoreSIGSYS() { + atomic.Store(&sigsysIgnored, 1) +} + +//go:linkname restoreSIGSYS os.restoreSIGSYS +func restoreSIGSYS() { + atomic.Store(&sigsysIgnored, 0) +} + // sighandler is invoked when a signal occurs. The global g will be // set to a gsignal goroutine and we will be running on the alternate // signal stack. The parameter gp will be the value of the global g @@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { return } + if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 { + return + } + if flags&_SigKill != 0 { dieFromSignal(sig) } diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index 43aba98dcebcdb..a076c93b8e8df9 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {runtime.G{}, 272, 432}, // g, but exported for testing + {runtime.G{}, 272, 448}, // g, but exported for testing {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing } diff --git a/src/runtime/stack.go b/src/runtime/stack.go index cdf859a7ff1342..d43c6ace4ffcf2 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -69,7 +69,7 @@ const ( // to each stack below the usual guard area for OS-specific // purposes like signal handling. 
Used on Windows, Plan 9, // and iOS because they do not use a separate stack. - stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 + stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 // The minimum size of stack used by Go code stackMin = 2048 @@ -1330,7 +1330,7 @@ func morestackc() { } // startingStackSize is the amount of stack that new goroutines start with. -// It is a power of 2, and between _FixedStack and maxstacksize, inclusive. +// It is a power of 2, and between fixedStack and maxstacksize, inclusive. // startingStackSize is updated every GC by tracking the average size of // stacks scanned during the GC. var startingStackSize uint32 = fixedStack diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 69d720a395c48d..85b1b8c9024a73 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -454,43 +454,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6) } //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} - return syscall_SyscallN(fn, args[:nargs]...) 
+ return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) } // maxArgs should be divisible by 2, as Windows stack @@ -503,7 +497,15 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { - if len(args) > maxArgs { + return syscall_syscalln(fn, uintptr(len(args)), args...) +} + +//go:nosplit +func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) { + if n > uintptr(len(args)) { + panic("syscall: n > len(args)") // should not be reachable from user code + } + if n > maxArgs { panic("runtime: SyscallN has too many arguments") } @@ -512,7 +514,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { // calls back into Go. c := &getg().m.winsyscall c.fn = fn - c.n = uintptr(len(args)) + c.n = n if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 6a056c8d2b190c..156cf3eb8e5c71 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -1212,6 +1212,13 @@ func TestBigStackCallbackSyscall(t *testing.T) { } } +func TestSyscallStackUsage(t *testing.T) { + // Test that the stack usage of a syscall doesn't exceed the limit. + // See https://go.dev/issue/69813. + syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + var ( modwinmm = syscall.NewLazyDLL("winmm.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..7b344a349610d3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,11 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -68,6 +69,20 @@ type timer struct { // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. + // The value is incremented when about to send a value on the channel, + // and decremented after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // isSending is incremented only when t.mu is held. + // isSending is decremented only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + isSending atomic.Int32 } // init initializes a newly allocated timer t. 
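The observable contract the isSending counter restores (and which testStopResetResult exercises later in this diff): with the synchronous channel semantics of Go 1.23 timers, Stop or Reset reporting false guarantees that one value can still be received from t.C, so the drain idiom below cannot hang, assuming this goroutine is the channel's only receiver. A sketch:

package main

import (
    "fmt"
    "time"
)

// stopAndDrain relies on the guarantee being fixed here: Stop returns
// false only if the timer's value was, or is committed to being,
// delivered on t.C. Assumes no other goroutine receives from t.C.
func stopAndDrain(t *time.Timer) {
    if !t.Stop() {
        <-t.C
    }
}

func main() {
    t := time.NewTimer(time.Millisecond)
    time.Sleep(5 * time.Millisecond) // give the timer a chance to fire
    stopAndDrain(t)
    fmt.Println("timer stopped, channel drained")
}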
@@ -431,6 +446,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.period == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -490,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.maybeRunAsync() } t.trace("modify") + oldPeriod := t.period t.period = period if f != nil { t.f = f @@ -525,6 +550,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if oldPeriod == 0 && t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1047,15 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + if !async && t.isChan && t.period == 0 { + // Tell Stop/Reset that we are sending a value. + if t.isSending.Add(1) < 0 { + throw("too many concurrent timer firings") + } + } + t.unlock() if raceenabled { @@ -1028,7 +1071,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,7 +1086,21 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) + + if t.period == 0 { + // We are committed to possibly sending a value + // based on seq, so no need to keep telling + // stop/modify that we are sending. 
+ if t.isSending.Add(-1) < 0 { + throw("mismatched isSending updates") + } + } + if t.seq != seq { f = func(any, uintptr, int64) {} } diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go index 195b3e1c37f984..7c4cb5502377c0 100644 --- a/src/runtime/traceruntime.go +++ b/src/runtime/traceruntime.go @@ -99,24 +99,26 @@ const ( traceBlockDebugCall traceBlockUntilGCEnds traceBlockSleep + traceBlockGCWeakToStrongWait ) var traceBlockReasonStrings = [...]string{ - traceBlockGeneric: "unspecified", - traceBlockForever: "forever", - traceBlockNet: "network", - traceBlockSelect: "select", - traceBlockCondWait: "sync.(*Cond).Wait", - traceBlockSync: "sync", - traceBlockChanSend: "chan send", - traceBlockChanRecv: "chan receive", - traceBlockGCMarkAssist: "GC mark assist wait for work", - traceBlockGCSweep: "GC background sweeper wait", - traceBlockSystemGoroutine: "system goroutine wait", - traceBlockPreempted: "preempted", - traceBlockDebugCall: "wait for debug call", - traceBlockUntilGCEnds: "wait until GC ends", - traceBlockSleep: "sleep", + traceBlockGeneric: "unspecified", + traceBlockForever: "forever", + traceBlockNet: "network", + traceBlockSelect: "select", + traceBlockCondWait: "sync.(*Cond).Wait", + traceBlockSync: "sync", + traceBlockChanSend: "chan send", + traceBlockChanRecv: "chan receive", + traceBlockGCMarkAssist: "GC mark assist wait for work", + traceBlockGCSweep: "GC background sweeper wait", + traceBlockSystemGoroutine: "system goroutine wait", + traceBlockPreempted: "preempted", + traceBlockDebugCall: "wait for debug call", + traceBlockUntilGCEnds: "wait until GC ends", + traceBlockSleep: "sleep", + traceBlockGCWeakToStrongWait: "GC weak to strong wait", } // traceGoStopReason is an enumeration of reasons a goroutine might yield. diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go index 81134cb0bd27ff..a7873e6ad8c93e 100644 --- a/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go @@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a // Deprecated: Use [SyscallN] instead. func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) +//go:noescape func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno) diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. 
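The Linux variant of this hook (in the exec_linux.go hunk below) is the interesting one: when the caller requested a pidfd through SysProcAttr.PidFD and exec fails after the fork, the descriptor must be closed and reset to -1 or it leaks. From the caller's side the feature looks like this Linux-only sketch, using syscall.ForkExec directly:

package main

import (
    "fmt"
    "os"
    "syscall"
)

func main() {
    pidfd := -1
    pid, err := syscall.ForkExec("/bin/true", []string{"true"}, &syscall.ProcAttr{
        Files: []uintptr{0, 1, 2}, // stdin, stdout, stderr
        Sys:   &syscall.SysProcAttr{PidFD: &pidfd},
    })
    if err != nil {
        // With the cleanup hook, pidfd is -1 again here rather than leaked.
        fmt.Fprintln(os.Stderr, "fork/exec failed:", err)
        return
    }
    fmt.Println("child pid:", pid, "pidfd:", pidfd)
    syscall.Close(pidfd)
    var ws syscall.WaitStatus
    syscall.Wait4(pid, &ws, 0, nil) // reap the child
}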
+func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..dfd9a8368a9e50 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -7,6 +7,7 @@ package syscall import ( + errpkg "errors" "internal/itoa" "runtime" "unsafe" @@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att if clone3 != nil { pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0) } else { + // N.B. Keep in sync with doCheckClonePidfd. flags |= uintptr(SIGCHLD) if runtime.GOARCH == "s390x" { // On Linux/s390, the first two arguments of clone(2) are swapped. @@ -735,3 +737,90 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + if sys.PidFD != nil && *sys.PidFD != -1 { + Close(*sys.PidFD) + *sys.PidFD = -1 + } +} + +// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a +// clone. +// +//go:linkname os_checkClonePidfd os.checkClonePidfd +func os_checkClonePidfd() error { + pidfd := int32(-1) + pid, errno := doCheckClonePidfd(&pidfd) + if errno != 0 { + return errno + } + + if pidfd == -1 { + // Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process + // before returning. + + var err error + for { + var status WaitStatus + _, err = Wait4(int(pid), &status, 0, nil) + if err != EINTR { + break + } + } + if err != nil { + return err + } + + return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd") + } + + // Good: CLONE_PIDFD provided a pidfd. Reap the process and close the + // pidfd. + defer Close(int(pidfd)) + + for { + const _P_PIDFD = 3 + _, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0) + if errno != EINTR { + break + } + } + if errno != 0 { + return errno + } + + return nil +} + +// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and +// child execution. This is a separate function so we can separate the child's +// and parent's stack frames if we're using vfork. +// +// This is go:noinline because the point is to keep the stack frames of this +// and os_checkClonePidfd separate. +// +//go:noinline +func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { + flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD) + if runtime.GOARCH == "s390x" { + // On Linux/s390, the first two arguments of clone(2) are swapped. + pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd))) + } else { + pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd))) + } + if errno != 0 || pid != 0 { + // If we're in the parent, we must return immediately + // so we're not in the same stack frame as the child. 
+ // This can at most use the return PC, which the child + // will not modify, and the results of + // rawVforkSyscall, which must have been written after + // the child was replaced. + return + } + + for { + RawSyscall(SYS_EXIT_GROUP, 0, 0, 0) + } +} diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 1b90aa7e72e0ed..4747fa075834af 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) for err1 == EINTR { _, err1 = Wait4(pid, &wstatus, 0, nil) } + + // OS-specific cleanup on failure. + forkAndExecFailureCleanup(attr, sys) + return 0, err } diff --git a/src/syscall/syscall_windows_test.go b/src/syscall/syscall_windows_test.go index f67e8991591601..a6c6eff31f0c45 100644 --- a/src/syscall/syscall_windows_test.go +++ b/src/syscall/syscall_windows_test.go @@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) { } } +func TestSyscallAllocations(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + + // Test that syscall.SyscallN arguments do not escape. + // The function used (in this case GetVersion) doesn't matter + // as long as it is always available and doesn't panic. + h, err := syscall.LoadLibrary("kernel32.dll") + if err != nil { + t.Fatal(err) + } + defer syscall.FreeLibrary(h) + proc, err := syscall.GetProcAddress(h, "GetVersion") + if err != nil { + t.Fatal(err) + } + + testAllocs := func(t *testing.T, name string, fn func() error) { + t.Run(name, func(t *testing.T) { + n := int(testing.AllocsPerRun(10, func() { + if err := fn(); err != nil { + t.Fatalf("%s: %v", name, err) + } + })) + if n > 0 { + t.Errorf("allocs = %d, want 0", n) + } + }) + } + + testAllocs(t, "SyscallN", func() error { + r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) + testAllocs(t, "Syscall", func() error { + r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) +} + func FuzzUTF16FromString(f *testing.F) { f.Add("hi") // ASCII f.Add("â") // latin1 diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..285a2e748c4af7 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,119 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. +func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. 
+ <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + +// Test having a large number of goroutines wake up a ticker simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTicker(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTicker(Microsecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 100000 { + select { + case <-timer.C: + case <-After(Millisecond): + } + } + }() + } + wg.Wait() +} + +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTimer(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTimer(Nanosecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 10000 { + select { + case <-timer.C: + default: + } + timer.Reset(Nanosecond) + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. // https://golang.org/issue/38860 diff --git a/src/time/time_test.go b/src/time/time_test.go index 70eb61478480e0..c12b9117d0f5c1 100644 --- a/src/time/time_test.go +++ b/src/time/time_test.go @@ -14,6 +14,7 @@ import ( "math/rand" "os" "runtime" + "slices" "strings" "sync" "testing" @@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) { // So GMT+1 corresponds to -3600 in the Go zone, not +3600. name, offset := Now().In(loc).Zone() // The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1" - // on earlier versions; we accept both. (Issue #17276). - if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 { - t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d", - name, offset, "GMT+1", "-01", -1*60*60) + // on earlier versions; we accept both. (Issue 17276.) + wantName := []string{"GMT+1", "-01"} + // The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.) + if runtime.GOOS == "openbsd" { + wantName = append(wantName, "+01") + } + if !slices.Contains(wantName, name) || offset != -1*60*60 { + t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d", + name, offset, wantName, -1*60*60) } } diff --git a/src/unique/clone_test.go b/src/unique/clone_test.go index 69a9a540c07fa0..b0ba5b312e1466 100644 --- a/src/unique/clone_test.go +++ b/src/unique/clone_test.go @@ -27,7 +27,7 @@ func cSeq(stringOffsets ...uintptr) cloneSeq { func testCloneSeq[T any](t *testing.T, want cloneSeq) { typName := reflect.TypeFor[T]().Name() - typ := abi.TypeOf(*new(T)) + typ := abi.TypeFor[T]() t.Run(typName, func(t *testing.T) { got := makeCloneSeq(typ) if !reflect.DeepEqual(got, want) { diff --git a/src/unique/handle.go b/src/unique/handle.go index 0842ae3185f2cc..abc620f60fe14e 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -31,7 +31,7 @@ func (h Handle[T]) Value() T { // are equal if and only if the values used to produce them are equal. func Make[T comparable](value T) Handle[T] { // Find the map for type T. 
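Before the internals, the user-visible API this file implements: unique.Make canonicalizes a comparable value, equal inputs yield the same handle, and handle comparison is effectively a pointer comparison. For example (not part of the patch):

package main

import (
    "fmt"
    "unique"
)

func main() {
    h1 := unique.Make("gopher")
    h2 := unique.Make("gopher")
    fmt.Println(h1 == h2)   // true: equal values share one canonical handle
    fmt.Println(h1.Value()) // "gopher"
}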
- typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() ma, ok := uniqueMaps.Load(typ) if !ok { // This is a good time to initialize cleanup, since we must go through @@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] { toInsert *T // Keep this around to keep it alive. toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. - wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index dffe10ac728189..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -41,6 +44,7 @@ func TestHandle(t *testing.T) { s: [2]testStringStruct{testStringStruct{"y"}, testStringStruct{"z"}}, }) testHandle[testStruct](t, testStruct{0.5, "184"}) + testHandle[testEface](t, testEface("hello")) } func testHandle[T comparable](t *testing.T, value T) { @@ -93,7 +97,7 @@ func drainMaps(t *testing.T) { func checkMapsFor[T comparable](t *testing.T, value T) { // Manually load the value out of the map. - typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() a, ok := uniqueMaps.Load(typ) if !ok { return @@ -109,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. + runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +} diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go index c8e751fb613c2e..a866c9a9e30e8e 100644 --- a/test/fixedbugs/issue14636.go +++ b/test/fixedbugs/issue14636.go @@ -12,22 +12,29 @@ import ( "bytes" "log" "os/exec" + "runtime" "strings" ) func main() { - checkLinkOutput("", "-B argument must start with 0x") + // The cannot open file error indicates that the parsing of -B flag + // succeeded and it failed at a later step. 
checkLinkOutput("0", "-B argument must start with 0x") - checkLinkOutput("0x", "usage") + checkLinkOutput("0x", "cannot open file nonexistent.o") checkLinkOutput("0x0", "-B argument must have even number of digits") - checkLinkOutput("0x00", "usage") + checkLinkOutput("0x00", "cannot open file nonexistent.o") checkLinkOutput("0xYZ", "-B argument contains invalid hex digit") - checkLinkOutput("0x"+strings.Repeat("00", 32), "usage") - checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)") + + maxLen := 32 + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + maxLen = 16 + } + checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o") + checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long") } func checkLinkOutput(buildid string, message string) { - cmd := exec.Command("go", "tool", "link", "-B", buildid) + cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o") out, err := cmd.CombinedOutput() if err == nil { log.Fatalf("expected cmd/link to fail") @@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) { } if !strings.Contains(firstLine, message) { - log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine) + log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine) } } diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go index b88120f2c045ef..2b46814f9566de 100644 --- a/test/fixedbugs/issue63489a.go +++ b/test/fixedbugs/issue63489a.go @@ -1,16 +1,20 @@ -// errorcheck -lang=go1.21 +// errorcheck -lang=go1.22 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.21 with a file version of +// go1.4 while this new version tests a -lang version of go1.22 +// with a file version of go1.21. -package p - -const c = 0o123 // ERROR "file declares //go:build go1.4" +//go:build go1.21 -// ERROR "file declares //go:build go1.4" +package p -//line issue63489a.go:13:1 -const d = 0o124 +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} diff --git a/test/fixedbugs/issue63489b.go b/test/fixedbugs/issue63489b.go index 2ad590dfc33347..fd897dea97cb88 100644 --- a/test/fixedbugs/issue63489b.go +++ b/test/fixedbugs/issue63489b.go @@ -1,11 +1,20 @@ -// errorcheck -lang=go1.4 +// errorcheck -lang=go1.21 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.4 with a file version of +// go1.4 while this new version tests a -lang version of go1.1 +// with a file version of go1.21. 
+ +//go:build go1.21 package p -const c = 0o123 // ERROR "file declares //go:build go1.4" +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} diff --git a/test/fixedbugs/issue68580.go b/test/fixedbugs/issue68580.go new file mode 100644 index 00000000000000..b60a7447aaa77b --- /dev/null +++ b/test/fixedbugs/issue68580.go @@ -0,0 +1,15 @@ +// compile -goexperiment aliastypeparams + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type A[P any] = struct{ _ P } + +type N[P any] A[P] + +func f[P any](N[P]) {} + +var _ = f[int] diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go new file mode 100644 index 00000000000000..71a4bcac31a16e --- /dev/null +++ b/test/fixedbugs/issue69110.go @@ -0,0 +1,57 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "maps" + _ "unsafe" +) + +func main() { + for i := 0; i < 100; i++ { + f() + } +} + +const NB = 4 + +func f() { + // Make a map with NB buckets, at max capacity. + // 6.5 entries/bucket. + ne := NB * 13 / 2 + m := map[int]int{} + for i := 0; i < ne; i++ { + m[i] = i + } + + // delete/insert a lot, to hopefully get lots of overflow buckets + // and trigger a same-size grow. + ssg := false + for i := ne; i < ne+1000; i++ { + delete(m, i-ne) + m[i] = i + if sameSizeGrow(m) { + ssg = true + break + } + } + if !ssg { + return + } + + // Insert 1 more entry, which would ordinarily trigger a growth. + // We can't grow while growing, so we instead go over our + // target capacity. + m[-1] = -1 + + // Cloning in this state will make a map with a destination bucket + // array twice the size of the source. + _ = maps.Clone(m) +} + +//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test +func sameSizeGrow(m map[int]int) bool diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go new file mode 100644 index 00000000000000..682046601960da --- /dev/null +++ b/test/fixedbugs/issue69434.go @@ -0,0 +1,173 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. 
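+// The sequence is single-use: it reads from the underlying scanner, so
+// ranging over it a second time resumes where the first loop stopped and,
+// once the input is exhausted, yields nothing.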
+func (wr *WordReader) All() iter.Seq[string] {
+	word := make([]byte, 1024)
+	return func(yield func(string) bool) {
+		var err error
+		var n int
+		for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) {
+			if !yield(string(word[:n])) {
+				return
+			}
+		}
+		if err != io.EOF {
+			fmt.Fprintf(os.Stderr, "error reading word: %v\n", err)
+		}
+	}
+}
+
+// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map
+func removeNonAlphabetic(word string) string {
+	return strings.Map(func(r rune) rune {
+		if unicode.IsLetter(r) {
+			return unicode.ToLower(r)
+		}
+		return -1
+	}, word)
+}
+
+// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<<n)
+type ProbabilisticSkipper struct {
+	n       int
+	counter uint64
+	bitmask uint64
+}
+
+// NewProbabilisticSkipper initializes a skipper with probability exponent n
+func NewProbabilisticSkipper(n int) *ProbabilisticSkipper {
+	pr := &ProbabilisticSkipper{n: n}
+	pr.refreshCounter()
+	return pr
+}
+
+// check panics if the skipper's exponent is not the expected value
+func (pr *ProbabilisticSkipper) check(n int) {
+	if pr.n != n {
+		panic(fmt.Sprintf("check: exponent is %d, want %d", pr.n, n))
+	}
+}
+
+// refreshCounter replenishes the pool of 64 random bits that ShouldSkip
+// consumes. Each bit is set with probability 1/(1<<n), obtained by
+// AND-ing n random words together.
+func (pr *ProbabilisticSkipper) refreshCounter() {
+	if pr.n == 0 {
+		pr.bitmask = ^uint64(0)
+	} else {
+		pr.bitmask = rand.Uint64()
+		for i := 1; i < pr.n; i++ {
+			pr.bitmask &= rand.Uint64()
+		}
+	}
+	pr.counter = 64
+}
+
+// ShouldSkip reports whether an item should be skipped, which happens
+// with probability 1-1/(1<<n), so that items are retained with
+// probability 1/(1<<n).
+func (pr *ProbabilisticSkipper) ShouldSkip() bool {
+	remove := pr.bitmask&1 == 0
+	pr.bitmask >>= 1
+	pr.counter--
+	if pr.counter == 0 {
+		pr.refreshCounter()
+	}
+	return remove
+}
+
+// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method
+func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int {
+	wordReader := NewWordReader(reader)
+	words := make(map[string]struct{}, memorySize)
+
+	rounds := 0
+	roundRemover := NewProbabilisticSkipper(1)
+	wordSkipper := NewProbabilisticSkipper(rounds)
+	wordSkipper.check(rounds)
+
+	for word := range wordReader.All() {
+		wordSkipper.check(rounds)
+		if wordSkipper.ShouldSkip() {
+			delete(words, word)
+		} else {
+			words[word] = struct{}{}
+
+			if len(words) >= memorySize {
+				rounds++
+
+				wordSkipper = NewProbabilisticSkipper(rounds)
+				for w := range words {
+					if roundRemover.ShouldSkip() {
+						delete(words, w)
+					}
+				}
+			}
+		}
+		wordSkipper.check(rounds)
+	}
+
+	if len(words) == 0 {
+		return 0
+	}
+
+	invProbability := 1 << rounds
+	estimatedUniqueWords := len(words) * invProbability
+	return estimatedUniqueWords
+}
+
+func main() {
+	input := "Hello, world! This is a test. Hello, world, hello!"
+	expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated)
+	memorySize := 6
+
+	reader := strings.NewReader(input)
+	estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize)
+	if estimatedUniqueWords != expectedUniqueWords {
+		// ...
+	}
+}
diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go
new file mode 100644
index 00000000000000..fc300c848ee62f
--- /dev/null
+++ b/test/fixedbugs/issue69507.go
@@ -0,0 +1,133 @@
+// run
+
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +func main() { + err := run() + if err != nil { + panic(err) + } +} + +func run() error { + methods := "AB" + + type node struct { + tag string + choices []string + } + all := []node{ + {"000", permutations(methods)}, + } + + next := 1 + for len(all) > 0 { + cur := all[0] + k := copy(all, all[1:]) + all = all[:k] + + if len(cur.choices) == 1 { + continue + } + + var bestM map[byte][]string + bMax := len(cur.choices) + 1 + bMin := -1 + for sel := range selections(methods) { + m := make(map[byte][]string) + for _, order := range cur.choices { + x := findFirstMatch(order, sel) + m[x] = append(m[x], order) + } + + min := len(cur.choices) + 1 + max := -1 + for _, v := range m { + if len(v) < min { + min = len(v) + } + if len(v) > max { + max = len(v) + } + } + if max < bMax || (max == bMax && min > bMin) { + bestM = m + bMin = min + bMax = max + } + } + + if bMax == len(cur.choices) { + continue + } + + cc := Keys(bestM) + for c := range cc { + choices := bestM[c] + next++ + + switch c { + case 'A': + case 'B': + default: + panic("unexpected selector type " + string(c)) + } + all = append(all, node{"", choices}) + } + } + return nil +} + +func permutations(s string) []string { + if len(s) <= 1 { + return []string{s} + } + + var result []string + for i, char := range s { + rest := s[:i] + s[i+1:] + for _, perm := range permutations(rest) { + result = append(result, string(char)+perm) + } + } + return result +} + +type Seq[V any] func(yield func(V) bool) + +func selections(s string) Seq[string] { + return func(yield func(string) bool) { + for bits := 1; bits < 1<