From 362f22d2d2fd52260338ee48fc3baa573749f8ce Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 18 Jun 2024 17:00:42 -0400 Subject: [PATCH 01/66] [release-branch.go1.23] update codereview.cfg for release-branch.go1.23 Change-Id: Ib335bc903e2b8d5b7be6a158a6debe5db48e79a4 Reviewed-on: https://go-review.googlesource.com/c/go/+/593535 LUCI-TryBot-Result: Go LUCI Auto-Submit: David Chase Reviewed-by: Michael Pratt --- codereview.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/codereview.cfg b/codereview.cfg index 77a74f108eae36..3cf4bb2dd376de 100644 --- a/codereview.cfg +++ b/codereview.cfg @@ -1 +1,2 @@ -branch: master +branch: release-branch.go1.23 +parent-branch: master From f3bdcda88a5cf060592657df3d1179309bb8d028 Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Thu, 30 May 2024 14:31:40 -0700 Subject: [PATCH 02/66] [release-branch.go1.23] internal/syscall/unix: fix UTIME_OMIT for dragonfly CL 219638 added UTIME_OMIT values for various systems. The value for DragonFly BSD appears to be incorrect. The correct value is -2 (see references below), while -1 is used for UTIME_NOW. As a result, timestamp is changed to the current time instead of not touching. This should have been caught by the accompanying test case, TestChtimesWithZeroTimes, but its failures are essentially skipped on dragonfly (this is being fixed separately in a followup CL 591535). Improve formatting while at it. References: - https://github.com/DragonFlyBSD/DragonFlyBSD/blob/965b380e9609/sys/sys/stat.h#L284 - https://go.googlesource.com/sys/+/refs/tags/v0.20.0/unix/zerrors_dragonfly_amd64.go#1421 Change-Id: I432360ca982c84b7cd70d0cf01d860af9ff985fa Reviewed-on: https://go-review.googlesource.com/c/go/+/589496 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Auto-Submit: Ian Lance Taylor Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/c/go/+/593796 Reviewed-by: Michael Pratt --- src/internal/syscall/unix/at_sysnum_dragonfly.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/internal/syscall/unix/at_sysnum_dragonfly.go b/src/internal/syscall/unix/at_sysnum_dragonfly.go index 9ac1f919f147a5..a8164dcc8ec7be 100644 --- a/src/internal/syscall/unix/at_sysnum_dragonfly.go +++ b/src/internal/syscall/unix/at_sysnum_dragonfly.go @@ -6,15 +6,15 @@ package unix import "syscall" -const unlinkatTrap uintptr = syscall.SYS_UNLINKAT -const openatTrap uintptr = syscall.SYS_OPENAT -const fstatatTrap uintptr = syscall.SYS_FSTATAT - const ( + unlinkatTrap uintptr = syscall.SYS_UNLINKAT + openatTrap uintptr = syscall.SYS_OPENAT + fstatatTrap uintptr = syscall.SYS_FSTATAT + AT_EACCESS = 0x4 AT_FDCWD = 0xfffafdcd AT_REMOVEDIR = 0x2 AT_SYMLINK_NOFOLLOW = 0x1 - UTIME_OMIT = -0x1 + UTIME_OMIT = -0x2 ) From eba9e08766e694183c043f743278a86f16dc2ab3 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 20 Jun 2024 22:58:24 +0700 Subject: [PATCH 03/66] [release-branch.go1.23] cmd/compile: support generic alias type Type parameters on aliases are now allowed after #46477 accepted. 
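To illustrate what this makes compilable, a minimal sketch (assuming GOEXPERIMENT=aliastypeparams is enabled; Set and its use are illustrative names, not taken from this CL, which instead adds the Seq test shown below):

    package p

    // A parameterized type alias: Set[K] is an alias for map[K]struct{}.
    type Set[K comparable] = map[K]struct{}

    // The alias can be instantiated and used exactly like the type it denotes.
    var _ Set[string] = map[string]struct{}{"x": {}}
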
Updates #46477 Fixes #68054 Change-Id: Ic2e3b6f960a898163f47666e3a6bfe43b8cc22e2 Reviewed-on: https://go-review.googlesource.com/c/go/+/593715 Reviewed-by: Robert Griesemer Reviewed-by: Matthew Dempsky LUCI-TryBot-Result: Go LUCI Auto-Submit: Robert Griesemer Reviewed-on: https://go-review.googlesource.com/c/go/+/593797 Reviewed-by: Michael Pratt --- src/cmd/compile/internal/noder/writer.go | 13 ++++++++++++- test/fixedbugs/issue68054.go | 23 +++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue68054.go diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index 9b33fb7c6dcaa5..fe8f8f2a351394 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -543,7 +543,7 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo { case *types2.Alias: w.Code(pkgbits.TypeNamed) - w.namedType(typ.Obj(), nil) + w.namedType(splitAlias(typ)) case *types2.TypeParam: w.derived = true @@ -2958,6 +2958,9 @@ func objTypeParams(obj types2.Object) *types2.TypeParamList { if !obj.IsAlias() { return obj.Type().(*types2.Named).TypeParams() } + if alias, ok := obj.Type().(*types2.Alias); ok { + return alias.TypeParams() + } } return nil } @@ -2974,6 +2977,14 @@ func splitNamed(typ *types2.Named) (*types2.TypeName, *types2.TypeList) { return typ.Obj(), typ.TypeArgs() } +// splitAlias is like splitNamed, but for an alias type. +func splitAlias(typ *types2.Alias) (*types2.TypeName, *types2.TypeList) { + orig := typ.Origin() + base.Assertf(typ.Obj() == orig.Obj(), "alias type %v has object %v, but %v has object %v", typ, typ.Obj(), orig, orig.Obj()) + + return typ.Obj(), typ.TypeArgs() +} + func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag { if p == nil { return 0 diff --git a/test/fixedbugs/issue68054.go b/test/fixedbugs/issue68054.go new file mode 100644 index 00000000000000..5409fc90818003 --- /dev/null +++ b/test/fixedbugs/issue68054.go @@ -0,0 +1,23 @@ +// compile -goexperiment aliastypeparams + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type Seq[V any] = func(yield func(V) bool) + +func f[E any](seq Seq[E]) { + return +} + +func g() { + f(Seq[int](nil)) +} + +type T[P any] struct{} + +type A[P any] = T[P] + +var _ A[int] From 62c3a6350b2a6224481e3440eb0a9cf68353990d Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 20 Jun 2024 10:23:42 -0700 Subject: [PATCH 04/66] [release-branch.go1.23] internal/godebugs: fix old value for httpservecontentkeepheaders The pre-Go 1.23 behavior is httpservecontentkeepheaders=1. 
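For context, the Old value in this table is the default kept by modules that declare a pre-1.23 go directive, and a program can opt back into it explicitly through the usual GODEBUG knobs. A sketch of the opt-in (not part of this CL):

    // In a source file of the main package, before the package clause:
    //go:debug httpservecontentkeepheaders=1

    package main

The same setting can be applied at run time with the GODEBUG=httpservecontentkeepheaders=1 environment variable.
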
For #66343 Change-Id: If6f92853b38522f19a8908ff11ac49b12f3dc3e0 Reviewed-on: https://go-review.googlesource.com/c/go/+/593775 Reviewed-by: David Chase Auto-Submit: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/593795 Reviewed-by: Damien Neil Reviewed-by: Michael Pratt --- src/internal/godebugs/table.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index f4262b6695d032..4c084635fbe1b0 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -36,7 +36,7 @@ var All = []Info{ {Name: "http2server", Package: "net/http"}, {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, - {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "0"}, + {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, {Name: "installgoroot", Package: "go/build"}, {Name: "jstmpllitinterp", Package: "html/template", Opaque: true}, // bug #66217: remove Opaque //{Name: "multipartfiles", Package: "mime/multipart"}, From 7dff7439dcee8ff6cd83869d356accac1039c017 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Fri, 21 Jun 2024 15:42:56 +0000 Subject: [PATCH 05/66] [release-branch.go1.23] go1.23rc1 Change-Id: Ied4bb63f49d13bd7d421cf9cb269220974641b89 Reviewed-on: https://go-review.googlesource.com/c/go/+/593897 Auto-Submit: David Chase Reviewed-by: David Chase Reviewed-by: Dmitri Shuralyov LUCI-TryBot-Result: Go LUCI Auto-Submit: Gopher Robot --- VERSION | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 VERSION diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000000..e338ef70bd3b3d --- /dev/null +++ b/VERSION @@ -0,0 +1,2 @@ +go1.23rc1 +time 2024-06-20T19:20:56Z From 30b6fd60a63c738c2736e83b6a6886a032e6f269 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Tue, 16 Jul 2024 15:04:50 +0000 Subject: [PATCH 06/66] [release-branch.go1.23] go1.23rc2 Change-Id: I73a3f2e680a84aa698c6f64b1e924bb1b9a85a89 Reviewed-on: https://go-review.googlesource.com/c/go/+/598555 TryBot-Bypass: Carlos Amedee Reviewed-by: Carlos Amedee Reviewed-by: Cherry Mui Auto-Submit: Carlos Amedee Auto-Submit: Gopher Robot --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index e338ef70bd3b3d..93d84a73a6cd9f 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23rc1 -time 2024-06-20T19:20:56Z +go1.23rc2 +time 2024-07-16T00:27:32Z From 559c77592f182a2f77f2d70328cb649609517bd3 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Wed, 24 Jul 2024 10:29:13 -0700 Subject: [PATCH 07/66] [release-branch.go1.23] os: document CopyFS behavior for symlinks in destination Also clarify the permissions of created files, and note that CopyFS will not overwrite files. Update a few places in documentation to use 0oXXX for octal consts. 
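A minimal usage sketch of the documented behavior (the paths and error handling here are assumptions, not taken from the CL):

    package main

    import (
        "errors"
        "io/fs"
        "log"
        "os"
    )

    func main() {
        // Files are created with 0o666 plus the source's execute bits,
        // directories with 0o777, both before umask; existing files are
        // not overwritten.
        err := os.CopyFS("dst", os.DirFS("src"))
        if errors.Is(err, fs.ErrInvalid) {
            log.Fatal("source contains a symbolic link: ", err)
        }
        if err != nil {
            log.Fatal(err)
        }
    }
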
For #62484 Change-Id: I208ed2bde250304bc7fac2b93963ba57037e791e Reviewed-on: https://go-review.googlesource.com/c/go/+/600775 Reviewed-by: Ian Lance Taylor Reviewed-by: Russ Cox LUCI-TryBot-Result: Go LUCI (cherry picked from commit 910e6b5fae7cbf84e4a3fcfa6739e20239080bcd) Reviewed-on: https://go-review.googlesource.com/c/go/+/600815 Reviewed-by: Ian Lance Taylor --- src/os/dir.go | 18 ++++++++++-------- src/os/example_test.go | 2 +- src/os/file.go | 8 ++++---- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/os/dir.go b/src/os/dir.go index 471a29134582b3..dab75b5d436ce5 100644 --- a/src/os/dir.go +++ b/src/os/dir.go @@ -132,15 +132,17 @@ func ReadDir(name string) ([]DirEntry, error) { // CopyFS copies the file system fsys into the directory dir, // creating dir if necessary. // -// Newly created directories and files have their default modes -// where any bits from the file in fsys that are not part of the -// standard read, write, and execute permissions will be zeroed -// out, and standard read and write permissions are set for owner, -// group, and others while retaining any existing execute bits from -// the file in fsys. +// Files are created with mode 0o666 plus any execute permissions +// from the source, and directories are created with mode 0o777 +// (before umask). // -// Symbolic links in fsys are not supported, a *PathError with Err set -// to ErrInvalid is returned on symlink. +// CopyFS will not overwrite existing files, and returns an error +// if a file name in fsys already exists in the destination. +// +// Symbolic links in fsys are not supported. A *PathError with Err set +// to ErrInvalid is returned when copying from a symbolic link. +// +// Symbolic links in dir are followed. // // Copying stops at and returns the first error encountered. func CopyFS(dir string, fsys fs.FS) error { diff --git a/src/os/example_test.go b/src/os/example_test.go index 7437a74cd0c66d..c507d46c46303a 100644 --- a/src/os/example_test.go +++ b/src/os/example_test.go @@ -61,7 +61,7 @@ func ExampleFileMode() { log.Fatal(err) } - fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc. + fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0o400, 0o777, etc. switch mode := fi.Mode(); { case mode.IsRegular(): fmt.Println("regular file") diff --git a/src/os/file.go b/src/os/file.go index c3ee31583e32f6..ad869fc4938d17 100644 --- a/src/os/file.go +++ b/src/os/file.go @@ -366,7 +366,7 @@ func Open(name string) (*File, error) { } // Create creates or truncates the named file. If the file already exists, -// it is truncated. If the file does not exist, it is created with mode 0666 +// it is truncated. If the file does not exist, it is created with mode 0o666 // (before umask). If successful, methods on the returned File can // be used for I/O; the associated file descriptor has mode O_RDWR. // If there is an error, it will be of type *PathError. @@ -602,11 +602,11 @@ func UserHomeDir() (string, error) { // On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and // ModeSticky are used. // -// On Windows, only the 0200 bit (owner writable) of mode is used; it +// On Windows, only the 0o200 bit (owner writable) of mode is used; it // controls whether the file's read-only attribute is set or cleared. // The other bits are currently unused. For compatibility with Go 1.12 -// and earlier, use a non-zero mode. Use mode 0400 for a read-only -// file and 0600 for a readable+writable file. +// and earlier, use a non-zero mode. 
Use mode 0o400 for a read-only +// file and 0o600 for a readable+writable file. // // On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive, // and ModeTemporary are used. From 3509415eca0eac695c706eaf63cb4c8d8d69b7f2 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 24 Jul 2024 15:01:07 -0700 Subject: [PATCH 08/66] [release-branch.go1.23] cmd/compile: more informative panic when importing generic type alias When GOEXPERIMENT=aliastypeparams is set, type aliases may have type parameters. The compiler export data doesn't export that type parameter information yet, which leads to an index-out-of-bounds panic when a client package imports a package with a general type alias and then refers to one of the missing type parameters. This CL detects this specific case and panics with a more informative panic message explaining the shortcoming. The change is only in effect if the respective GOEXPERIMENT is enabled. Manually tested. No test addded since this is just a temporary fix (Go 1.24 will have a complete implementation), and because the existing testing framework doesn't easily support testing that a compilation panics. Together with @taking and input from @rfindley. For #68526. Change-Id: I24737b153a7e2f9b705cd29a5b70b2b9e808dffc Reviewed-on: https://go-review.googlesource.com/c/go/+/601035 Reviewed-by: Tim King LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/importer/ureader.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go index d3c7d4516f7ee8..7eda375bd52196 100644 --- a/src/cmd/compile/internal/importer/ureader.go +++ b/src/cmd/compile/internal/importer/ureader.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/syntax" "cmd/compile/internal/types2" "cmd/internal/src" + "internal/buildcfg" "internal/pkgbits" ) @@ -411,6 +412,14 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) { panic("weird") case pkgbits.ObjAlias: + if buildcfg.Experiment.AliasTypeParams && len(r.dict.bounds) > 0 { + // Temporary work-around for issue #68526: rather than panicking + // with an non-descriptive index-out-of-bounds panic when trying + // to access a missing type parameter, instead panic with a more + // descriptive error. Only needed for Go 1.23; Go 1.24 will have + // the correct implementation. + panic("importing generic type aliases is not supported in Go 1.23 (see issue #68526)") + } pos := r.pos() typ := r.typ() return newAliasTypeName(pr.enableAlias, pos, objPkg, objName, typ) From c9940fe2a9f2eb77327efca860abfbae8d94bf28 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 25 Jul 2024 17:17:44 +0700 Subject: [PATCH 09/66] [release-branch.go1.23] types2, go/types: fix instantiation of named type with generic alias The typechecker is assuming that alias instances cannot be reached from a named type. However, when type parameters on aliases are permited, it can happen. This CL changes the typechecker to propagate the correct named instance is being expanded. 
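The shape that reaches an alias instance from a named type is the one in the test added below (test/fixedbugs/issue68580.go, compile-only, under GOEXPERIMENT=aliastypeparams); roughly:

    package main

    type A[P any] = struct{ _ P } // generic alias
    type N[P any] A[P]            // named type defined via the alias

    func f[P any](N[P]) {}

    // Instantiating f forces expansion of N[int], which substitutes into the
    // alias instance A[int]; this used to trip the assertion that alias
    // instances cannot be reached from Named types.
    var _ = f[int]
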
Updates #46477 Fixes #68580 Change-Id: Id0879021f4640c0fefe277701d5096c649413811 Reviewed-on: https://go-review.googlesource.com/c/go/+/601115 Auto-Submit: Robert Griesemer Reviewed-by: Dmitri Shuralyov LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Griesemer Auto-Submit: Cuong Manh Le Reviewed-on: https://go-review.googlesource.com/c/go/+/601116 --- src/cmd/compile/internal/types2/alias.go | 4 ++-- src/cmd/compile/internal/types2/instantiate.go | 8 +++++--- src/cmd/compile/internal/types2/subst.go | 2 +- src/go/types/alias.go | 4 ++-- src/go/types/instantiate.go | 8 +++++--- src/go/types/subst.go | 2 +- test/fixedbugs/issue68580.go | 15 +++++++++++++++ 7 files changed, 31 insertions(+), 12 deletions(-) create mode 100644 test/fixedbugs/issue68580.go diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go index 5148d5db034142..07f35b1854acaf 100644 --- a/src/cmd/compile/internal/types2/alias.go +++ b/src/cmd/compile/internal/types2/alias.go @@ -134,10 +134,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { // newAliasInstance creates a new alias instance for the given origin and type // arguments, recording pos as the position of its synthetic object (for error // reporting). -func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 72227ab12256dd..308d1f550ad4fa 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/syntax" "errors" "fmt" + "internal/buildcfg" . "internal/types/errors" ) @@ -126,8 +127,9 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? - assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -138,7 +140,7 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go index 650ae846a61e85..7c4cd732501e43 100644 --- a/src/cmd/compile/internal/types2/subst.go +++ b/src/cmd/compile/internal/types2/subst.go @@ -115,7 +115,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. 
targs, updated := subst.typeList(t.TypeArgs().list()) if updated { - return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt) + return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt) } case *Array: diff --git a/src/go/types/alias.go b/src/go/types/alias.go index af43471a324176..7adb3deb58bbc7 100644 --- a/src/go/types/alias.go +++ b/src/go/types/alias.go @@ -137,10 +137,10 @@ func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { // newAliasInstance creates a new alias instance for the given origin and type // arguments, recording pos as the position of its synthetic object (for error // reporting). -func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, ctxt *Context) *Alias { +func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type, expanding *Named, ctxt *Context) *Alias { assert(len(targs) > 0) obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) - rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), nil, ctxt) + rhs := check.subst(pos, orig.fromRHS, makeSubstMap(orig.TypeParams().list(), targs), expanding, ctxt) res := check.newAlias(obj, rhs) res.orig = orig res.tparams = orig.tparams diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go index 7bec790b5586ad..0435f2bf261647 100644 --- a/src/go/types/instantiate.go +++ b/src/go/types/instantiate.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "go/token" + "internal/buildcfg" . "internal/types/errors" ) @@ -129,8 +130,9 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily case *Alias: - // TODO(gri) is this correct? - assert(expanding == nil) // Alias instances cannot be reached from Named types + if !buildcfg.Experiment.AliasTypeParams { + assert(expanding == nil) // Alias instances cannot be reached from Named types + } tparams := orig.TypeParams() // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) @@ -141,7 +143,7 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex return orig // nothing to do (minor optimization) } - return check.newAliasInstance(pos, orig, targs, ctxt) + return check.newAliasInstance(pos, orig, targs, expanding, ctxt) case *Signature: assert(expanding == nil) // function instances cannot be reached from Named types diff --git a/src/go/types/subst.go b/src/go/types/subst.go index 5ad2ff61eb1d30..6be106d3aa99d6 100644 --- a/src/go/types/subst.go +++ b/src/go/types/subst.go @@ -118,7 +118,7 @@ func (subst *subster) typ(typ Type) Type { // that has a type argument for it. targs, updated := subst.typeList(t.TypeArgs().list()) if updated { - return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.ctxt) + return subst.check.newAliasInstance(subst.pos, t.orig, targs, subst.expanding, subst.ctxt) } case *Array: diff --git a/test/fixedbugs/issue68580.go b/test/fixedbugs/issue68580.go new file mode 100644 index 00000000000000..b60a7447aaa77b --- /dev/null +++ b/test/fixedbugs/issue68580.go @@ -0,0 +1,15 @@ +// compile -goexperiment aliastypeparams + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +type A[P any] = struct{ _ P } + +type N[P any] A[P] + +func f[P any](N[P]) {} + +var _ = f[int] From 7adb01220584115365acc9ebda0317d530288a1a Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Thu, 1 Aug 2024 23:41:13 +0200 Subject: [PATCH 10/66] [release-branch.go1.23] crypto/tls: fix testHandshake close flakes The flakes were introduced by me in CL 586655. It's unclear why only FreeBSD seems affected, maybe other TCP stacks handle sending on a half-closed connection differently, or aren't as quick to propagate the RST over localhost. Updates #68155 Change-Id: I32a1b474a7d6531dbab93910c23568b867629e8c Reviewed-on: https://go-review.googlesource.com/c/go/+/602635 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Run-TryBot: Filippo Valsorda TryBot-Result: Gopher Robot Auto-Submit: Filippo Valsorda Reviewed-by: Roland Shoemaker --- src/crypto/tls/handshake_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index bc3d23d5adc24e..41c2643f2a8d28 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -491,9 +491,10 @@ func testHandshake(t *testing.T, clientConfig, serverConfig *Config) (serverStat if got := string(buf); got != sentinel { t.Errorf("read %q from TLS connection, but expected %q", got, sentinel) } - if err := cli.Close(); err != nil { - t.Errorf("failed to call cli.Close: %v", err) - } + // We discard the error because after ReadAll returns the server must + // have already closed the connection. Sending data (the closeNotify + // alert) can cause a reset, that will make Close return an error. + cli.Close() }() server := Server(s, serverConfig) err = server.Handshake() From 63b0f805cd83f97c43a45e9558d00513c2399fbf Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Wed, 7 Aug 2024 13:09:18 -0400 Subject: [PATCH 11/66] [release-branch.go1.23] go/types, types2: only use fileVersion if 1.21 or greater Only honor //go:build language version downgrades if the version specified is 1.21 or greater. Before 1.21 the version in //go:build lines didn't have the meaning of setting the file's language version. This fixes an issue that was appearing in GOPATH builds: Go 1.23 started providing -lang versions to the compiler in GOPATH mode (among other places) which it wasn't doing before. For example, take a go file with a //go:build line specifying go1.10. If that file used a 1.18 feature, that use would compile fine with a Go 1.22 toolchain. But, before this change, it would produce an error when compiling with the 1.23 toolchain because it set the language version to 1.10 and disallowed the 1.18 feature. This breaks backwards compatibility: when the build tag was added, it did not have the meaning of restricting the language version. 
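A sketch of the scenario described above (a hypothetical GOPATH-mode file, not from this CL): a file tagged go1.10 that uses a Go 1.18 language feature built fine with Go 1.22, and with this change it keeps building with Go 1.23, because //go:build tags below go1.21 no longer downgrade the language version:

    //go:build go1.10

    package p

    // Generics are a Go 1.18 feature; the go1.10 tag above predates the rule
    // that //go:build lines set the file's language version.
    func Min[T int | float64](a, b T) T {
        if a < b {
            return a
        }
        return b
    }
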
Fixes #68658 Change-Id: I4ac2b45a981cd019183d52ba324ba8f0fed93a8e Reviewed-on: https://go-review.googlesource.com/c/go/+/603895 Reviewed-by: Robert Griesemer Commit-Queue: Michael Matloob LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Matloob Reviewed-on: https://go-review.googlesource.com/c/go/+/604935 --- src/cmd/compile/internal/types2/api_test.go | 8 ++++++-- src/cmd/compile/internal/types2/check.go | 15 +++++++++++++- src/go/types/api_test.go | 8 ++++++-- src/go/types/check.go | 15 +++++++++++++- .../types/testdata/check/go1_20_19.go | 2 +- .../types/testdata/check/go1_21_19.go | 2 +- .../types/testdata/check/go1_21_22.go | 16 +++++++++++++++ .../types/testdata/check/go1_22_21.go | 16 +++++++++++++++ .../types/testdata/fixedbugs/issue66285.go | 7 +------ test/fixedbugs/issue63489a.go | 20 +++++++++++-------- 10 files changed, 87 insertions(+), 22 deletions(-) create mode 100644 src/internal/types/testdata/check/go1_21_22.go create mode 100644 src/internal/types/testdata/check/go1_22_21.go diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 5126ac51116cd9..a9dcac333b533c 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2903,17 +2903,21 @@ func TestFileVersions(t *testing.T) { {"", "go1.20", ""}, // file upgrade ignored {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.21", "go1.20", "go1.21"}, // file downgrade not permitted + {"go1.22", "go1.21", "go1.21"}, // file downgrade permitted (file and module version are >= go1.21) // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored + {"go1.21.0", "go1.21.1", "go1.21.0"}, // file upgrade ignored + {"go1.21", "go1.21.1", "go1.21"}, // file upgrade ignored {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted + {"go1.21.1", "go1.21", "go1.21.1"}, // file downgrade ignored {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.22.1", "go1.21", "go1.21"}, // file downgrade permitted (file and module version is >= go1.21) } { var src string if test.fileVersion != "" { diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 91ad474e9df315..482d888e99d6a3 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -354,12 +354,25 @@ func (check *Checker) initFiles(files []*syntax.File) { // To work around this, downgrades are only allowed when the // module's Go version is Go 1.21 or later. // + // Downgrades are also only allowed to Go versions Go 1.21 or later. + // In GOPATH mode, there's no way to set a module version and the + // -lang is set to the local toolchain version to allow the use of + // new features in GOPATH mode. 
But //go:build lines added before go1.21 + // weren't intended to downgrade, so code with //go:build lines for + // go versions earlier than 1.21 may use language features added + // in later versions and compile. + // + // We should probably change the downgradeOk condition to capture this + // instead of adding an extra condition, but to make the change simpler, + // we've tried to limit it to one line. + // TODO(gri): simplify this code after 1.23 has shipped + // // If there is no valid check.version, then we don't really know what // Go version to apply. // Legacy tools may do this, and they historically have accepted everything. // Preserve that behavior by ignoring //go:build constraints entirely in that // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { + if cmp > 0 || cmp < 0 && downgradeOk && fileVersion.cmp(go1_21) >= 0 { v = file.GoVersion } } diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index beed94f3557996..828cd5d21196b7 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2909,17 +2909,21 @@ func TestFileVersions(t *testing.T) { {"", "go1.20", ""}, // file upgrade ignored {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.21", "go1.20", "go1.21"}, // file downgrade not permitted + {"go1.22", "go1.21", "go1.21"}, // file downgrade permitted (file and module version are >= go1.21) // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored + {"go1.21.0", "go1.21.1", "go1.21.0"}, // file upgrade ignored + {"go1.21", "go1.21.1", "go1.21"}, // file upgrade ignored {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted + {"go1.21.1", "go1.21", "go1.21.1"}, // file downgrade ignored {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.22.1", "go1.21", "go1.21"}, // file downgrade permitted (file and module version is >= go1.21) } { var src string if test.fileVersion != "" { diff --git a/src/go/types/check.go b/src/go/types/check.go index 1a5a41a3bb4b99..15504eed408577 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -376,12 +376,25 @@ func (check *Checker) initFiles(files []*ast.File) { // To work around this, downgrades are only allowed when the // module's Go version is Go 1.21 or later. // + // Downgrades are also only allowed to Go versions Go 1.21 or later. + // In GOPATH mode, there's no way to set a module version and the + // -lang is set to the local toolchain version to allow the use of + // new features in GOPATH mode. But //go:build lines added before go1.21 + // weren't intended to downgrade, so code with //go:build lines for + // go versions earlier than 1.21 may use language features added + // in later versions and compile. + // + // We should probably change the downgradeOk condition to capture this + // instead of adding an extra condition, but to make the change simpler, + // we've tried to limit it to one line. 
+ // TODO(gri): simplify this code after 1.23 has shipped + // // If there is no valid check.version, then we don't really know what // Go version to apply. // Legacy tools may do this, and they historically have accepted everything. // Preserve that behavior by ignoring //go:build constraints entirely in that // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { + if cmp > 0 || cmp < 0 && downgradeOk && fileVersion.cmp(go1_21) >= 0 { v = file.GoVersion } } diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go index 08365a7cfb564d..ba1c454332322c 100644 --- a/src/internal/types/testdata/check/go1_20_19.go +++ b/src/internal/types/testdata/check/go1_20_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */) +var p = (Array)(s /* ok because downgrades below 1.21 are ignored */) diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go index 2acd25865d4b69..6c0900d0272762 100644 --- a/src/internal/types/testdata/check/go1_21_19.go +++ b/src/internal/types/testdata/check/go1_21_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ERROR "requires go1.20 or later" */) +var p = (Array)(s /* ok because downgrades below 1.21 are ignored */) diff --git a/src/internal/types/testdata/check/go1_21_22.go b/src/internal/types/testdata/check/go1_21_22.go new file mode 100644 index 00000000000000..695503ace17cb4 --- /dev/null +++ b/src/internal/types/testdata/check/go1_21_22.go @@ -0,0 +1,16 @@ +// -lang=go1.21 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +//go:build go1.22 + +package p + +func f() { + for _ = range /* ok because of upgrade to 1.22 */ 10 { + } +} \ No newline at end of file diff --git a/src/internal/types/testdata/check/go1_22_21.go b/src/internal/types/testdata/check/go1_22_21.go new file mode 100644 index 00000000000000..79e21a7de824a1 --- /dev/null +++ b/src/internal/types/testdata/check/go1_22_21.go @@ -0,0 +1,16 @@ +// -lang=go1.22 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +//go:build go1.21 + +package p + +func f() { + for _ = range 10 /* ERROR "requires go1.22 or later" */ { + } +} \ No newline at end of file diff --git a/src/internal/types/testdata/fixedbugs/issue66285.go b/src/internal/types/testdata/fixedbugs/issue66285.go index 9811fec3f35549..4af76f05da8e41 100644 --- a/src/internal/types/testdata/fixedbugs/issue66285.go +++ b/src/internal/types/testdata/fixedbugs/issue66285.go @@ -1,14 +1,9 @@ -// -lang=go1.21 +// -lang=go1.13 // Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Note: Downgrading to go1.13 requires at least go1.21, -// hence the need for -lang=go1.21 at the top. 
- -//go:build go1.13 - package p import "io" diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go index b88120f2c045ef..9b06d949bfa962 100644 --- a/test/fixedbugs/issue63489a.go +++ b/test/fixedbugs/issue63489a.go @@ -1,16 +1,20 @@ -// errorcheck -lang=go1.21 +// errorcheck -lang=go1.22 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build language downgrades below go1.21 are no longer +// supported. The original tested a downgrade from go1.21 to +// go1.4 while this new version tests a downgrade from go1.22 +// to go1.21 -package p - -const c = 0o123 // ERROR "file declares //go:build go1.4" +//go:build go1.21 -// ERROR "file declares //go:build go1.4" +package p -//line issue63489a.go:13:1 -const d = 0o124 +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} From ec7d6094e6cb273796480dc94e0f181094e726f7 Mon Sep 17 00:00:00 2001 From: Carlos Amedee Date: Tue, 13 Aug 2024 13:53:09 +0000 Subject: [PATCH 12/66] [release-branch.go1.23] revert "go/types, types2: only use fileVersion if 1.21 or greater" This reverts commit CL 604935. Reason for revert: The team has decided that this change will be added to a point release. Change-Id: I1c1032b881c3a98312a4753b9767cb7c8eed9e09 Reviewed-on: https://go-review.googlesource.com/c/go/+/605096 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Michael Matloob --- src/cmd/compile/internal/types2/api_test.go | 8 ++------ src/cmd/compile/internal/types2/check.go | 15 +------------- src/go/types/api_test.go | 8 ++------ src/go/types/check.go | 15 +------------- .../types/testdata/check/go1_20_19.go | 2 +- .../types/testdata/check/go1_21_19.go | 2 +- .../types/testdata/check/go1_21_22.go | 16 --------------- .../types/testdata/check/go1_22_21.go | 16 --------------- .../types/testdata/fixedbugs/issue66285.go | 7 ++++++- test/fixedbugs/issue63489a.go | 20 ++++++++----------- 10 files changed, 22 insertions(+), 87 deletions(-) delete mode 100644 src/internal/types/testdata/check/go1_21_22.go delete mode 100644 src/internal/types/testdata/check/go1_22_21.go diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index a9dcac333b533c..5126ac51116cd9 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2903,21 +2903,17 @@ func TestFileVersions(t *testing.T) { {"", "go1.20", ""}, // file upgrade ignored {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.20", "go1.21"}, // file downgrade not permitted - {"go1.22", "go1.21", "go1.21"}, // file downgrade permitted (file and module version are >= go1.21) + {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.21.0", "go1.21.1", "go1.21.0"}, // file upgrade ignored - {"go1.21", "go1.21.1", "go1.21"}, // file upgrade ignored {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.21.1", "go1.21", "go1.21.1"}, // file downgrade ignored 
{"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.22.1", "go1.21", "go1.21"}, // file downgrade permitted (file and module version is >= go1.21) + {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) } { var src string if test.fileVersion != "" { diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 482d888e99d6a3..91ad474e9df315 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -354,25 +354,12 @@ func (check *Checker) initFiles(files []*syntax.File) { // To work around this, downgrades are only allowed when the // module's Go version is Go 1.21 or later. // - // Downgrades are also only allowed to Go versions Go 1.21 or later. - // In GOPATH mode, there's no way to set a module version and the - // -lang is set to the local toolchain version to allow the use of - // new features in GOPATH mode. But //go:build lines added before go1.21 - // weren't intended to downgrade, so code with //go:build lines for - // go versions earlier than 1.21 may use language features added - // in later versions and compile. - // - // We should probably change the downgradeOk condition to capture this - // instead of adding an extra condition, but to make the change simpler, - // we've tried to limit it to one line. - // TODO(gri): simplify this code after 1.23 has shipped - // // If there is no valid check.version, then we don't really know what // Go version to apply. // Legacy tools may do this, and they historically have accepted everything. // Preserve that behavior by ignoring //go:build constraints entirely in that // case (!pkgVersionOk). 
- if cmp > 0 || cmp < 0 && downgradeOk && fileVersion.cmp(go1_21) >= 0 { + if cmp > 0 || cmp < 0 && downgradeOk { v = file.GoVersion } } diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index 828cd5d21196b7..beed94f3557996 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2909,21 +2909,17 @@ func TestFileVersions(t *testing.T) { {"", "go1.20", ""}, // file upgrade ignored {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.20", "go1.21"}, // file downgrade not permitted - {"go1.22", "go1.21", "go1.21"}, // file downgrade permitted (file and module version are >= go1.21) + {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.21.0", "go1.21.1", "go1.21.0"}, // file upgrade ignored - {"go1.21", "go1.21.1", "go1.21"}, // file upgrade ignored {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.21.1", "go1.21", "go1.21.1"}, // file downgrade ignored {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.22.1", "go1.21", "go1.21"}, // file downgrade permitted (file and module version is >= go1.21) + {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) } { var src string if test.fileVersion != "" { diff --git a/src/go/types/check.go b/src/go/types/check.go index 15504eed408577..1a5a41a3bb4b99 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -376,25 +376,12 @@ func (check *Checker) initFiles(files []*ast.File) { // To work around this, downgrades are only allowed when the // module's Go version is Go 1.21 or later. // - // Downgrades are also only allowed to Go versions Go 1.21 or later. - // In GOPATH mode, there's no way to set a module version and the - // -lang is set to the local toolchain version to allow the use of - // new features in GOPATH mode. But //go:build lines added before go1.21 - // weren't intended to downgrade, so code with //go:build lines for - // go versions earlier than 1.21 may use language features added - // in later versions and compile. - // - // We should probably change the downgradeOk condition to capture this - // instead of adding an extra condition, but to make the change simpler, - // we've tried to limit it to one line. - // TODO(gri): simplify this code after 1.23 has shipped - // // If there is no valid check.version, then we don't really know what // Go version to apply. // Legacy tools may do this, and they historically have accepted everything. // Preserve that behavior by ignoring //go:build constraints entirely in that // case (!pkgVersionOk). 
- if cmp > 0 || cmp < 0 && downgradeOk && fileVersion.cmp(go1_21) >= 0 { + if cmp > 0 || cmp < 0 && downgradeOk { v = file.GoVersion } } diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go index ba1c454332322c..08365a7cfb564d 100644 --- a/src/internal/types/testdata/check/go1_20_19.go +++ b/src/internal/types/testdata/check/go1_20_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ok because downgrades below 1.21 are ignored */) +var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */) diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go index 6c0900d0272762..2acd25865d4b69 100644 --- a/src/internal/types/testdata/check/go1_21_19.go +++ b/src/internal/types/testdata/check/go1_21_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ok because downgrades below 1.21 are ignored */) +var p = (Array)(s /* ERROR "requires go1.20 or later" */) diff --git a/src/internal/types/testdata/check/go1_21_22.go b/src/internal/types/testdata/check/go1_21_22.go deleted file mode 100644 index 695503ace17cb4..00000000000000 --- a/src/internal/types/testdata/check/go1_21_22.go +++ /dev/null @@ -1,16 +0,0 @@ -// -lang=go1.21 - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Check Go language version-specific errors. - -//go:build go1.22 - -package p - -func f() { - for _ = range /* ok because of upgrade to 1.22 */ 10 { - } -} \ No newline at end of file diff --git a/src/internal/types/testdata/check/go1_22_21.go b/src/internal/types/testdata/check/go1_22_21.go deleted file mode 100644 index 79e21a7de824a1..00000000000000 --- a/src/internal/types/testdata/check/go1_22_21.go +++ /dev/null @@ -1,16 +0,0 @@ -// -lang=go1.22 - -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Check Go language version-specific errors. - -//go:build go1.21 - -package p - -func f() { - for _ = range 10 /* ERROR "requires go1.22 or later" */ { - } -} \ No newline at end of file diff --git a/src/internal/types/testdata/fixedbugs/issue66285.go b/src/internal/types/testdata/fixedbugs/issue66285.go index 4af76f05da8e41..9811fec3f35549 100644 --- a/src/internal/types/testdata/fixedbugs/issue66285.go +++ b/src/internal/types/testdata/fixedbugs/issue66285.go @@ -1,9 +1,14 @@ -// -lang=go1.13 +// -lang=go1.21 // Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Note: Downgrading to go1.13 requires at least go1.21, +// hence the need for -lang=go1.21 at the top. + +//go:build go1.13 + package p import "io" diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go index 9b06d949bfa962..b88120f2c045ef 100644 --- a/test/fixedbugs/issue63489a.go +++ b/test/fixedbugs/issue63489a.go @@ -1,20 +1,16 @@ -// errorcheck -lang=go1.22 +// errorcheck -lang=go1.21 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file has been changed from its original version as -// //go:build language downgrades below go1.21 are no longer -// supported. 
The original tested a downgrade from go1.21 to -// go1.4 while this new version tests a downgrade from go1.22 -// to go1.21 - -//go:build go1.21 +//go:build go1.4 package p -func f() { - for _ = range 10 { // ERROR "file declares //go:build go1.21" - } -} +const c = 0o123 // ERROR "file declares //go:build go1.4" + +// ERROR "file declares //go:build go1.4" + +//line issue63489a.go:13:1 +const d = 0o124 From 6885bad7dd86880be6929c02085e5c7a67ff2887 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Tue, 13 Aug 2024 15:39:36 +0000 Subject: [PATCH 13/66] [release-branch.go1.23] go1.23.0 Change-Id: I2b0514157b85ca61f9f6b8931df6ac874598a045 Reviewed-on: https://go-review.googlesource.com/c/go/+/605215 Reviewed-by: Carlos Amedee Auto-Submit: Gopher Robot Auto-Submit: Carlos Amedee Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 93d84a73a6cd9f..d184425f0c2ed2 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23rc2 -time 2024-07-16T00:27:32Z +go1.23.0 +time 2024-08-07T19:21:44Z From dbecb416d1d4609a1c8185921cb9cf132ac4a11c Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Thu, 8 Aug 2024 13:11:14 -0700 Subject: [PATCH 14/66] [release-branch.go1.23] os: fix Chtimes test flakes It appears that some builders (notably, linux-arm) have some additional security software installed, which apparently reads the files created by tests. As a result, test file atime is changed, making the test fail like these: === RUN TestChtimesOmit ... os_test.go:1475: atime mismatch, got: "2024-07-30 18:42:03.450932494 +0000 UTC", want: "2024-07-30 18:42:02.450932494 +0000 UTC" === RUN TestChtimes ... os_test.go:1539: AccessTime didn't go backwards; was=2024-07-31 20:45:53.390326147 +0000 UTC, after=2024-07-31 20:45:53.394326118 +0000 UTC According to inode(7), atime is changed when more than 0 bytes are read from the file. So, one possible solution to these flakes is to make the test files empty, so no one can read more than 0 bytes from them. For #68687 For #68663 Fixes #68812 Change-Id: Ib9234567883ef7b16ff8811e3360cd26c2d6bdab Reviewed-on: https://go-review.googlesource.com/c/go/+/604315 LUCI-TryBot-Result: Go LUCI Reviewed-by: Kirill Kolyshkin Reviewed-by: Robert Griesemer Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor (cherry picked from commit 84266e1469cfa6fa8e1b41518528a96950db7562) Reviewed-on: https://go-review.googlesource.com/c/go/+/604196 --- src/os/os_test.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/os/os_test.go b/src/os/os_test.go index 878974384dbcba..94ac58dcb089ed 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -1376,8 +1376,7 @@ func TestChtimes(t *testing.T) { t.Parallel() f := newFile(t) - - f.Write([]byte("hello, world\n")) + // This should be an empty file (see #68687, #68663). f.Close() testChtimes(t, f.Name()) @@ -1395,12 +1394,9 @@ func TestChtimesOmit(t *testing.T) { func testChtimesOmit(t *testing.T, omitAt, omitMt bool) { t.Logf("omit atime: %v, mtime: %v", omitAt, omitMt) file := newFile(t) - _, err := file.Write([]byte("hello, world\n")) - if err != nil { - t.Fatal(err) - } + // This should be an empty file (see #68687, #68663). 
name := file.Name() - err = file.Close() + err := file.Close() if err != nil { t.Error(err) } From 3c9340557cf1e13c2fe61c6b894ba284711dfb85 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Fri, 16 Aug 2024 08:04:57 +0800 Subject: [PATCH 15/66] [release-branch.go1.23] os: use O_EXCL instead of O_TRUNC in CopyFS to disallow rewriting existing files does not exist On Linux, a call to creat() is equivalent to calling open() with flags equal to O_CREAT|O_WRONLY|O_TRUNC, which applies to other platforms as well in a similar manner. Thus, to force CopyFS's behavior to comply with the function comment, we need to replace O_TRUNC with O_EXCL. Fixes #68907 Change-Id: I3e2ab153609d3c8cf20ce5969d6f3ef593833cd1 Reviewed-on: https://go-review.googlesource.com/c/go/+/606095 Auto-Submit: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI Reviewed-by: Damien Neil Reviewed-by: Ian Lance Taylor (cherry picked from commit aa5d672a00f5bf64865d0e821623ed29bc416405) Reviewed-on: https://go-review.googlesource.com/c/go/+/606415 --- src/os/dir.go | 7 ++++--- src/os/os_test.go | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/os/dir.go b/src/os/dir.go index dab75b5d436ce5..04392193aa6b03 100644 --- a/src/os/dir.go +++ b/src/os/dir.go @@ -136,8 +136,9 @@ func ReadDir(name string) ([]DirEntry, error) { // from the source, and directories are created with mode 0o777 // (before umask). // -// CopyFS will not overwrite existing files, and returns an error -// if a file name in fsys already exists in the destination. +// CopyFS will not overwrite existing files. If a file name in fsys +// already exists in the destination, CopyFS will return an error +// such that errors.Is(err, fs.ErrExist) will be true. // // Symbolic links in fsys are not supported. A *PathError with Err set // to ErrInvalid is returned when copying from a symbolic link. @@ -176,7 +177,7 @@ func CopyFS(dir string, fsys fs.FS) error { if err != nil { return err } - w, err := OpenFile(newPath, O_CREATE|O_TRUNC|O_WRONLY, 0666|info.Mode()&0777) + w, err := OpenFile(newPath, O_CREATE|O_EXCL|O_WRONLY, 0666|info.Mode()&0777) if err != nil { return err } diff --git a/src/os/os_test.go b/src/os/os_test.go index 94ac58dcb089ed..f1755dfa9139f8 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -3354,6 +3354,14 @@ func TestCopyFS(t *testing.T) { t.Fatal("comparing two directories:", err) } + // Test whether CopyFS disallows copying for disk filesystem when there is any + // existing file in the destination directory. + if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) { + t.Errorf("CopyFS should have failed and returned error when there is"+ + "any existing file in the destination directory (in disk filesystem), "+ + "got: %v, expected any error that indicates ", err) + } + // Test with memory filesystem. fsys = fstest.MapFS{ "william": {Data: []byte("Shakespeare\n")}, @@ -3391,6 +3399,14 @@ func TestCopyFS(t *testing.T) { }); err != nil { t.Fatal("comparing two directories:", err) } + + // Test whether CopyFS disallows copying for memory filesystem when there is any + // existing file in the destination directory. 
+ if err := CopyFS(tmpDir, fsys); !errors.Is(err, fs.ErrExist) { + t.Errorf("CopyFS should have failed and returned error when there is"+ + "any existing file in the destination directory (in memory filesystem), "+ + "got: %v, expected any error that indicates ", err) + } } func TestCopyFSWithSymlinks(t *testing.T) { From 76346b354337c7011eddef040de7696307450e41 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 21 Aug 2024 14:38:30 +0000 Subject: [PATCH 16/66] [release-branch.go1.23] unique: use TypeFor instead of TypeOf to get type in Make Currently the first thing Make does it get the abi.Type of its argument, and uses abi.TypeOf to do it. However, this has a problem for interface types, since the type of the value stored in the interface value will bleed through. This is a classic reflection mistake. Fix this by implementing and using a generic TypeFor which matches reflect.TypeFor. This gets the type of the type parameter, which is far less ambiguous and error-prone. For #68990. Fixes #68992. Change-Id: Idd8d9a1095ef017e9cd7c7779314f7d4034f01a7 Reviewed-on: https://go-review.googlesource.com/c/go/+/607355 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI (cherry picked from commit 755c18ecdfe64df060be91fb669ca1a68527830b) Reviewed-on: https://go-review.googlesource.com/c/go/+/607435 Reviewed-by: Ian Lance Taylor --- src/internal/abi/type.go | 9 +++++++++ src/unique/clone_test.go | 2 +- src/unique/handle.go | 2 +- src/unique/handle_test.go | 3 ++- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 786bafff723c96..b8eefe0da8dbba 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -177,6 +177,15 @@ func TypeOf(a any) *Type { return (*Type)(NoEscape(unsafe.Pointer(eface.Type))) } +// TypeFor returns the abi.Type for a type parameter. +func TypeFor[T any]() *Type { + var v T + if t := TypeOf(v); t != nil { + return t // optimize for T being a non-interface kind + } + return TypeOf((*T)(nil)).Elem() // only for an interface kind +} + func (t *Type) Kind() Kind { return t.Kind_ & KindMask } func (t *Type) HasName() bool { diff --git a/src/unique/clone_test.go b/src/unique/clone_test.go index 69a9a540c07fa0..b0ba5b312e1466 100644 --- a/src/unique/clone_test.go +++ b/src/unique/clone_test.go @@ -27,7 +27,7 @@ func cSeq(stringOffsets ...uintptr) cloneSeq { func testCloneSeq[T any](t *testing.T, want cloneSeq) { typName := reflect.TypeFor[T]().Name() - typ := abi.TypeOf(*new(T)) + typ := abi.TypeFor[T]() t.Run(typName, func(t *testing.T) { got := makeCloneSeq(typ) if !reflect.DeepEqual(got, want) { diff --git a/src/unique/handle.go b/src/unique/handle.go index 0842ae3185f2cc..96d8fedb0cabe6 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -31,7 +31,7 @@ func (h Handle[T]) Value() T { // are equal if and only if the values used to produce them are equal. func Make[T comparable](value T) Handle[T] { // Find the map for type T. 
- typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() ma, ok := uniqueMaps.Load(typ) if !ok { // This is a good time to initialize cleanup, since we must go through diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index dffe10ac728189..b031bbf6852c6b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -41,6 +41,7 @@ func TestHandle(t *testing.T) { s: [2]testStringStruct{testStringStruct{"y"}, testStringStruct{"z"}}, }) testHandle[testStruct](t, testStruct{0.5, "184"}) + testHandle[testEface](t, testEface("hello")) } func testHandle[T comparable](t *testing.T, value T) { @@ -93,7 +94,7 @@ func drainMaps(t *testing.T) { func checkMapsFor[T comparable](t *testing.T, value T) { // Manually load the value out of the map. - typ := abi.TypeOf(value) + typ := abi.TypeFor[T]() a, ok := uniqueMaps.Load(typ) if !ok { return From 9166d2feec7c6fe5e4db802cca162745dac93488 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Thu, 15 Aug 2024 16:07:04 -0700 Subject: [PATCH 17/66] [release-branch.go1.23] go/types, types2: Named.cleanup must also handle *Alias types Named.cleanup is called at the end of type-checking to ensure that a named type is fully set up; specifically that it's underlying field is not (still) a Named type. Now it can also be an *Alias type. Add this case to the respective type switch. Fixes #68894. Change-Id: I29bc0024ac9d8b0152a3d97c82dd28d09d5dbd66 Reviewed-on: https://go-review.googlesource.com/c/go/+/605977 Auto-Submit: Robert Griesemer Reviewed-by: Robert Griesemer Reviewed-by: Robert Findley LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/606656 --- .../compile/internal/types2/issues_test.go | 20 +++++++++++++++++++ src/cmd/compile/internal/types2/named.go | 2 +- src/go/types/issues_test.go | 20 +++++++++++++++++++ src/go/types/named.go | 2 +- 4 files changed, 42 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 20e3f52facd9de..b339def7354e28 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -1121,3 +1121,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + conf := Config{EnableAlias: true} + pkg := mustTypecheck(src, &conf, nil) + T := pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go index 1859b27aa4edfb..02b5ecf1669ea5 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -282,7 +282,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go index 3f459d3883017e..da0c0c1255b63e 100644 --- a/src/go/types/issues_test.go +++ b/src/go/types/issues_test.go @@ -1131,3 +1131,23 @@ func f(x int) { t.Errorf("got: %s want: %s", got, want) } } + +func TestIssue68877(t *testing.T) { + const src = ` +package p + +type ( + S struct{} + A = S + T A +)` + + t.Setenv("GODEBUG", "gotypesalias=1") + pkg := mustTypecheck(src, nil, nil) + T := 
pkg.Scope().Lookup("T").(*TypeName) + got := T.String() // this must not panic (was issue) + const want = "type p.T struct{}" + if got != want { + t.Errorf("got %s, want %s", got, want) + } +} diff --git a/src/go/types/named.go b/src/go/types/named.go index b44fa9d788c345..d55b023812d108 100644 --- a/src/go/types/named.go +++ b/src/go/types/named.go @@ -285,7 +285,7 @@ func (t *Named) cleanup() { if t.TypeArgs().Len() == 0 { panic("nil underlying") } - case *Named: + case *Named, *Alias: t.under() // t.under may add entries to check.cleaners } t.check = nil From 8002845759484ad55c8194199ec065d228ede6b2 Mon Sep 17 00:00:00 2001 From: "Paul E. Murphy" Date: Mon, 19 Aug 2024 15:08:14 -0500 Subject: [PATCH 18/66] [release-branch.go1.23] runtime: on AIX, fix call to _cgo_sys_thread_create in _rt0_ppc64_aix_lib The AIX ABI requires allocating parameter save space when calling a function, even if the arguments are passed via registers. gcc sometimes uses this space. In the case of the cgo c-archive tests, it clobbered the storage space of argc/argv which prevented the test program from running the expected test. Fixes #68973 Change-Id: I8a267b463b1abb2b37ac85231f6c328f406b7515 Reviewed-on: https://go-review.googlesource.com/c/go/+/606895 Reviewed-by: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Run-TryBot: Paul Murphy TryBot-Result: Gopher Robot Reviewed-on: https://go-review.googlesource.com/c/go/+/607195 Reviewed-by: Dmitri Shuralyov --- src/runtime/rt0_aix_ppc64.s | 152 ++++++++++++++++++------------------ 1 file changed, 77 insertions(+), 75 deletions(-) diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s index 1670a809862a2b..74c57bb1dc9136 100644 --- a/src/runtime/rt0_aix_ppc64.s +++ b/src/runtime/rt0_aix_ppc64.s @@ -41,6 +41,8 @@ TEXT _main(SB),NOSPLIT,$-8 MOVD R12, CTR BR (CTR) +// Paramater save space required to cross-call into _cgo_sys_thread_create +#define PARAM_SPACE 16 TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 // Start with standard C stack frame layout and linkage. @@ -49,45 +51,45 @@ TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 MOVW CR, R0 // Save CR in caller's frame MOVD R0, 8(R1) - MOVDU R1, -344(R1) // Allocate frame. + MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame. // Preserve callee-save registers. 
- MOVD R14, 48(R1) - MOVD R15, 56(R1) - MOVD R16, 64(R1) - MOVD R17, 72(R1) - MOVD R18, 80(R1) - MOVD R19, 88(R1) - MOVD R20, 96(R1) - MOVD R21,104(R1) - MOVD R22, 112(R1) - MOVD R23, 120(R1) - MOVD R24, 128(R1) - MOVD R25, 136(R1) - MOVD R26, 144(R1) - MOVD R27, 152(R1) - MOVD R28, 160(R1) - MOVD R29, 168(R1) - MOVD g, 176(R1) // R30 - MOVD R31, 184(R1) - FMOVD F14, 192(R1) - FMOVD F15, 200(R1) - FMOVD F16, 208(R1) - FMOVD F17, 216(R1) - FMOVD F18, 224(R1) - FMOVD F19, 232(R1) - FMOVD F20, 240(R1) - FMOVD F21, 248(R1) - FMOVD F22, 256(R1) - FMOVD F23, 264(R1) - FMOVD F24, 272(R1) - FMOVD F25, 280(R1) - FMOVD F26, 288(R1) - FMOVD F27, 296(R1) - FMOVD F28, 304(R1) - FMOVD F29, 312(R1) - FMOVD F30, 320(R1) - FMOVD F31, 328(R1) + MOVD R14, 48+PARAM_SPACE(R1) + MOVD R15, 56+PARAM_SPACE(R1) + MOVD R16, 64+PARAM_SPACE(R1) + MOVD R17, 72+PARAM_SPACE(R1) + MOVD R18, 80+PARAM_SPACE(R1) + MOVD R19, 88+PARAM_SPACE(R1) + MOVD R20, 96+PARAM_SPACE(R1) + MOVD R21,104+PARAM_SPACE(R1) + MOVD R22, 112+PARAM_SPACE(R1) + MOVD R23, 120+PARAM_SPACE(R1) + MOVD R24, 128+PARAM_SPACE(R1) + MOVD R25, 136+PARAM_SPACE(R1) + MOVD R26, 144+PARAM_SPACE(R1) + MOVD R27, 152+PARAM_SPACE(R1) + MOVD R28, 160+PARAM_SPACE(R1) + MOVD R29, 168+PARAM_SPACE(R1) + MOVD g, 176+PARAM_SPACE(R1) // R30 + MOVD R31, 184+PARAM_SPACE(R1) + FMOVD F14, 192+PARAM_SPACE(R1) + FMOVD F15, 200+PARAM_SPACE(R1) + FMOVD F16, 208+PARAM_SPACE(R1) + FMOVD F17, 216+PARAM_SPACE(R1) + FMOVD F18, 224+PARAM_SPACE(R1) + FMOVD F19, 232+PARAM_SPACE(R1) + FMOVD F20, 240+PARAM_SPACE(R1) + FMOVD F21, 248+PARAM_SPACE(R1) + FMOVD F22, 256+PARAM_SPACE(R1) + FMOVD F23, 264+PARAM_SPACE(R1) + FMOVD F24, 272+PARAM_SPACE(R1) + FMOVD F25, 280+PARAM_SPACE(R1) + FMOVD F26, 288+PARAM_SPACE(R1) + FMOVD F27, 296+PARAM_SPACE(R1) + FMOVD F28, 304+PARAM_SPACE(R1) + FMOVD F29, 312+PARAM_SPACE(R1) + FMOVD F30, 320+PARAM_SPACE(R1) + FMOVD F31, 328+PARAM_SPACE(R1) // Synchronous initialization. MOVD $runtime·reginit(SB), R12 @@ -130,44 +132,44 @@ nocgo: done: // Restore saved registers. 
- MOVD 48(R1), R14 - MOVD 56(R1), R15 - MOVD 64(R1), R16 - MOVD 72(R1), R17 - MOVD 80(R1), R18 - MOVD 88(R1), R19 - MOVD 96(R1), R20 - MOVD 104(R1), R21 - MOVD 112(R1), R22 - MOVD 120(R1), R23 - MOVD 128(R1), R24 - MOVD 136(R1), R25 - MOVD 144(R1), R26 - MOVD 152(R1), R27 - MOVD 160(R1), R28 - MOVD 168(R1), R29 - MOVD 176(R1), g // R30 - MOVD 184(R1), R31 - FMOVD 196(R1), F14 - FMOVD 200(R1), F15 - FMOVD 208(R1), F16 - FMOVD 216(R1), F17 - FMOVD 224(R1), F18 - FMOVD 232(R1), F19 - FMOVD 240(R1), F20 - FMOVD 248(R1), F21 - FMOVD 256(R1), F22 - FMOVD 264(R1), F23 - FMOVD 272(R1), F24 - FMOVD 280(R1), F25 - FMOVD 288(R1), F26 - FMOVD 296(R1), F27 - FMOVD 304(R1), F28 - FMOVD 312(R1), F29 - FMOVD 320(R1), F30 - FMOVD 328(R1), F31 - - ADD $344, R1 + MOVD 48+PARAM_SPACE(R1), R14 + MOVD 56+PARAM_SPACE(R1), R15 + MOVD 64+PARAM_SPACE(R1), R16 + MOVD 72+PARAM_SPACE(R1), R17 + MOVD 80+PARAM_SPACE(R1), R18 + MOVD 88+PARAM_SPACE(R1), R19 + MOVD 96+PARAM_SPACE(R1), R20 + MOVD 104+PARAM_SPACE(R1), R21 + MOVD 112+PARAM_SPACE(R1), R22 + MOVD 120+PARAM_SPACE(R1), R23 + MOVD 128+PARAM_SPACE(R1), R24 + MOVD 136+PARAM_SPACE(R1), R25 + MOVD 144+PARAM_SPACE(R1), R26 + MOVD 152+PARAM_SPACE(R1), R27 + MOVD 160+PARAM_SPACE(R1), R28 + MOVD 168+PARAM_SPACE(R1), R29 + MOVD 176+PARAM_SPACE(R1), g // R30 + MOVD 184+PARAM_SPACE(R1), R31 + FMOVD 196+PARAM_SPACE(R1), F14 + FMOVD 200+PARAM_SPACE(R1), F15 + FMOVD 208+PARAM_SPACE(R1), F16 + FMOVD 216+PARAM_SPACE(R1), F17 + FMOVD 224+PARAM_SPACE(R1), F18 + FMOVD 232+PARAM_SPACE(R1), F19 + FMOVD 240+PARAM_SPACE(R1), F20 + FMOVD 248+PARAM_SPACE(R1), F21 + FMOVD 256+PARAM_SPACE(R1), F22 + FMOVD 264+PARAM_SPACE(R1), F23 + FMOVD 272+PARAM_SPACE(R1), F24 + FMOVD 280+PARAM_SPACE(R1), F25 + FMOVD 288+PARAM_SPACE(R1), F26 + FMOVD 296+PARAM_SPACE(R1), F27 + FMOVD 304+PARAM_SPACE(R1), F28 + FMOVD 312+PARAM_SPACE(R1), F29 + FMOVD 320+PARAM_SPACE(R1), F30 + FMOVD 328+PARAM_SPACE(R1), F31 + + ADD $344+PARAM_SPACE, R1 MOVD 8(R1), R0 MOVFL R0, $0xff From 7afe17bbdb961df3a7163f4d725bedc1c008571f Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Fri, 16 Aug 2024 15:13:39 -0400 Subject: [PATCH 19/66] [release-branch.go1.23] go/types, types2: use max(fileVersion, go1.21) if fileVersion present Change the rules for how //go:build "file versions" are applied: instead of considering whether a file version is an upgrade or downgrade from the -lang version, always use max(fileVersion, go1.21). This prevents file versions from downgrading the version below go1.21. Before Go 1.21 the //go:build version did not have the meaning of setting the file's langage version. This fixes an issue that was appearing in GOPATH builds: Go 1.23.0 started providing -lang versions to the compiler in GOPATH mode (among other places) which it wasn't doing before, and it set -lang to the toolchain version (1.23). Because the -lang version was greater than go1.21, language version used to compile the file would be set to the //go:build file version. //go:build file versions below 1.21 could cause files that could previously build to stop building. For example, take a Go file with a //go:build line specifying go1.10. If that file used a 1.18 feature, that use would compile fine with a Go 1.22 toolchain. But it would produce an error when compiling with the 1.23.0 toolchain because it set the language version to 1.10 and disallowed the 1.18 feature. This breaks backwards compatibility: when the build tag was added, it did not have the meaning of restricting the language version. 
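To make the new rule concrete, consider a small illustrative file (not part of this CL). Under this change its language version becomes max(go1.10, go1.21) = go1.21, so its use of a newer language feature keeps compiling:

	//go:build go1.10

	package p

	// Generic functions are a Go 1.18 feature. With the file version
	// clamped to at least go1.21, this file still builds even though
	// its //go:build line names go1.10.
	func Min[T int | int64](a, b T) T {
		if a < b {
			return a
		}
		return b
	}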
For #68658 Fixes #69094 Change-Id: I6cedda81a55bcccffaa3501eef9e2be6541b6ece Reviewed-on: https://go-review.googlesource.com/c/go/+/607955 LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Griesemer (cherry picked from commit aeac0b6cbfb42bc9c9301913a191bb09454d316a) Reviewed-on: https://go-review.googlesource.com/c/go/+/608935 --- src/cmd/compile/internal/types2/api_test.go | 50 ++++++++++++++----- src/cmd/compile/internal/types2/check.go | 47 +++++++---------- src/go/types/api_test.go | 50 ++++++++++++++----- src/go/types/check.go | 48 ++++++++---------- .../types/testdata/check/go1_20_19.go | 2 +- .../types/testdata/check/go1_21_19.go | 2 +- .../types/testdata/check/go1_21_22.go | 16 ++++++ .../types/testdata/check/go1_22_21.go | 16 ++++++ .../types/testdata/fixedbugs/issue66285.go | 7 +-- test/fixedbugs/issue63489a.go | 20 +++++--- test/fixedbugs/issue63489b.go | 15 ++++-- 11 files changed, 174 insertions(+), 99 deletions(-) create mode 100644 src/internal/types/testdata/check/go1_21_22.go create mode 100644 src/internal/types/testdata/check/go1_22_21.go diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 5126ac51116cd9..a6b105ace5cc33 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2898,22 +2898,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version 
specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", "go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 91ad474e9df315..ada421ba939ed4 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -327,7 +327,6 @@ func (check *Checker) initFiles(files []*syntax.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -336,33 +335,18 @@ func (check *Checker) initFiles(files []*syntax.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. - // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). 
+ if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of allowing //go:build lines + // to sometimes set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. + // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. + v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -377,6 +361,13 @@ func (check *Checker) initFiles(files []*syntax.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) > 0 { + return a + } + return b +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index beed94f3557996..a7aa6488028ecd 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2904,22 +2904,48 @@ func TestFileVersions(t *testing.T) { fileVersion string wantVersion string }{ - {"", "", ""}, // no versions specified - {"go1.19", "", "go1.19"}, // module version specified - {"", "go1.20", ""}, // file upgrade ignored - {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted - {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted - {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "", "go1"}, // no file version specified + {"go1", "goo1.22", "go1"}, // invalid file version specified + {"go1", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.19", "", "go1.19"}, // no file version specified + {"go1.19", "goo1.22", "go1.19"}, // invalid file version specified + {"go1.19", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.19", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.19", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.20", "", "go1.20"}, // no file version specified + {"go1.20", "goo1.22", "go1.20"}, // invalid file version specified + {"go1.20", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.20", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.20", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.21", "", "go1.21"}, // no file version specified + {"go1.21", "goo1.22", "go1.21"}, // invalid file version specified + {"go1.21", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.21", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.21", "go1.22", "go1.22"}, // file version specified above 1.21 + {"go1.22", "", "go1.22"}, // no file version specified + {"go1.22", "goo1.22", 
"go1.22"}, // invalid file version specified + {"go1.22", "go1.19", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.20", "go1.21"}, // file version specified below minimum of 1.21 + {"go1.22", "go1.21", "go1.21"}, // file version specified at 1.21 + {"go1.22", "go1.22", "go1.22"}, // file version specified above 1.21 // versions containing release numbers // (file versions containing release numbers are considered invalid) {"go1.19.0", "", "go1.19.0"}, // no file version specified - {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored - {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored - {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted - {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted - {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) - {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + {"go1.20.1", "go1.19.1", "go1.20.1"}, // invalid file version + {"go1.20.1", "go1.21.1", "go1.20.1"}, // invalid file version + {"go1.21.1", "go1.19.1", "go1.21.1"}, // invalid file version + {"go1.21.1", "go1.21.1", "go1.21.1"}, // invalid file version + {"go1.22.1", "go1.19.1", "go1.22.1"}, // invalid file version + {"go1.22.1", "go1.21.1", "go1.22.1"}, // invalid file version } { var src string if test.fileVersion != "" { diff --git a/src/go/types/check.go b/src/go/types/check.go index 1a5a41a3bb4b99..8a729094961fe2 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -349,7 +349,6 @@ func (check *Checker) initFiles(files []*ast.File) { check.errorf(files[0], TooNew, "package requires newer Go version %v (application built with %v)", check.version, go_current) } - downgradeOk := check.version.cmp(go1_21) >= 0 // determine Go version for each file for _, file := range check.files { @@ -358,33 +357,19 @@ func (check *Checker) initFiles(files []*ast.File) { // unlike file versions which are Go language versions only, if valid.) v := check.conf.GoVersion - fileVersion := asGoVersion(file.GoVersion) - if fileVersion.isValid() { - // use the file version, if applicable - // (file versions are either the empty string or of the form go1.dd) - if pkgVersionOk { - cmp := fileVersion.cmp(check.version) - // Go 1.21 introduced the feature of setting the go.mod - // go line to an early version of Go and allowing //go:build lines - // to “upgrade” (cmp > 0) the Go version in a given file. - // We can do that backwards compatibly. - // - // Go 1.21 also introduced the feature of allowing //go:build lines - // to “downgrade” (cmp < 0) the Go version in a given file. - // That can't be done compatibly in general, since before the - // build lines were ignored and code got the module's Go version. - // To work around this, downgrades are only allowed when the - // module's Go version is Go 1.21 or later. - // - // If there is no valid check.version, then we don't really know what - // Go version to apply. - // Legacy tools may do this, and they historically have accepted everything. - // Preserve that behavior by ignoring //go:build constraints entirely in that - // case (!pkgVersionOk). - if cmp > 0 || cmp < 0 && downgradeOk { - v = file.GoVersion - } - } + // If the file specifies a version, use max(fileVersion, go1.21). 
+ if fileVersion := asGoVersion(file.GoVersion); fileVersion.isValid() { + // Go 1.21 introduced the feature of setting the go.mod + // go line to an early version of Go and allowing //go:build lines + // to set the Go version in a given file. Versions Go 1.21 and later + // can be set backwards compatibly as that was the first version + // files with go1.21 or later build tags could be built with. + // + // Set the version to max(fileVersion, go1.21): That will allow a + // downgrade to a version before go1.22, where the for loop semantics + // change was made, while being backwards compatible with versions of + // go before the new //go:build semantics were introduced. + v = string(versionMax(fileVersion, go1_21)) // Report a specific error for each tagged file that's too new. // (Normally the build system will have filtered files by version, @@ -399,6 +384,13 @@ func (check *Checker) initFiles(files []*ast.File) { } } +func versionMax(a, b goVersion) goVersion { + if a.cmp(b) < 0 { + return b + } + return a +} + // A bailout panic is used for early termination. type bailout struct{} diff --git a/src/internal/types/testdata/check/go1_20_19.go b/src/internal/types/testdata/check/go1_20_19.go index 08365a7cfb564d..e040d396c7808b 100644 --- a/src/internal/types/testdata/check/go1_20_19.go +++ b/src/internal/types/testdata/check/go1_20_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ok because Go 1.20 ignored the //go:build go1.19 */) +var p = (Array)(s /* ok because file versions below go1.21 set the langage version to go1.21 */) diff --git a/src/internal/types/testdata/check/go1_21_19.go b/src/internal/types/testdata/check/go1_21_19.go index 2acd25865d4b69..5866033eafe6f8 100644 --- a/src/internal/types/testdata/check/go1_21_19.go +++ b/src/internal/types/testdata/check/go1_21_19.go @@ -14,4 +14,4 @@ type Slice []byte type Array [8]byte var s Slice -var p = (Array)(s /* ERROR "requires go1.20 or later" */) +var p = (Array)(s /* ok because file versions below go1.21 set the langage version to go1.21 */) diff --git a/src/internal/types/testdata/check/go1_21_22.go b/src/internal/types/testdata/check/go1_21_22.go new file mode 100644 index 00000000000000..3939b7b1d868c0 --- /dev/null +++ b/src/internal/types/testdata/check/go1_21_22.go @@ -0,0 +1,16 @@ +// -lang=go1.21 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +//go:build go1.22 + +package p + +func f() { + for _ = range /* ok because of upgrade to 1.22 */ 10 { + } +} diff --git a/src/internal/types/testdata/check/go1_22_21.go b/src/internal/types/testdata/check/go1_22_21.go new file mode 100644 index 00000000000000..f910ecb59cbc78 --- /dev/null +++ b/src/internal/types/testdata/check/go1_22_21.go @@ -0,0 +1,16 @@ +// -lang=go1.22 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. 
+ +//go:build go1.21 + +package p + +func f() { + for _ = range 10 /* ERROR "requires go1.22 or later" */ { + } +} diff --git a/src/internal/types/testdata/fixedbugs/issue66285.go b/src/internal/types/testdata/fixedbugs/issue66285.go index 9811fec3f35549..4af76f05da8e41 100644 --- a/src/internal/types/testdata/fixedbugs/issue66285.go +++ b/src/internal/types/testdata/fixedbugs/issue66285.go @@ -1,14 +1,9 @@ -// -lang=go1.21 +// -lang=go1.13 // Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Note: Downgrading to go1.13 requires at least go1.21, -// hence the need for -lang=go1.21 at the top. - -//go:build go1.13 - package p import "io" diff --git a/test/fixedbugs/issue63489a.go b/test/fixedbugs/issue63489a.go index b88120f2c045ef..2b46814f9566de 100644 --- a/test/fixedbugs/issue63489a.go +++ b/test/fixedbugs/issue63489a.go @@ -1,16 +1,20 @@ -// errorcheck -lang=go1.21 +// errorcheck -lang=go1.22 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.21 with a file version of +// go1.4 while this new version tests a -lang version of go1.22 +// with a file version of go1.21. -package p - -const c = 0o123 // ERROR "file declares //go:build go1.4" +//go:build go1.21 -// ERROR "file declares //go:build go1.4" +package p -//line issue63489a.go:13:1 -const d = 0o124 +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} diff --git a/test/fixedbugs/issue63489b.go b/test/fixedbugs/issue63489b.go index 2ad590dfc33347..fd897dea97cb88 100644 --- a/test/fixedbugs/issue63489b.go +++ b/test/fixedbugs/issue63489b.go @@ -1,11 +1,20 @@ -// errorcheck -lang=go1.4 +// errorcheck -lang=go1.21 // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.4 +// This file has been changed from its original version as +// //go:build file versions below 1.21 set the language version to 1.21. +// The original tested a -lang version of 1.4 with a file version of +// go1.4 while this new version tests a -lang version of go1.1 +// with a file version of go1.21. + +//go:build go1.21 package p -const c = 0o123 // ERROR "file declares //go:build go1.4" +func f() { + for _ = range 10 { // ERROR "file declares //go:build go1.21" + } +} From 9c939a1e60ba1fa89251b5ef43793542aa68ff4e Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Tue, 27 Aug 2024 15:34:10 +0000 Subject: [PATCH 20/66] [release-branch.go1.23] runtime: store bp on cgocallback as unsafe.Pointer As of CL 580255, the runtime tracks the frame pointer (or base pointer, bp) when entering syscalls, so that we can use fpTracebackPCs on goroutines that are sitting in syscalls. That CL mostly got things right, but missed one very subtle detail. When calling from Go->C->Go, the goroutine stack performing the calls when returning to Go is free to move around in memory due to growth, shrinking, etc. But upon returning back to C, it needs to restore gp.syscall*, including gp.syscallsp and gp.syscallbp. 
The way syscallsp currently gets updated is automagically: it's stored as an unsafe.Pointer on the stack so that it shows up in a stack map. If the stack ever moves, it'll get updated correctly. But gp.syscallbp isn't saved to the stack as an unsafe.Pointer, but rather as a uintptr, so it never gets updated! As a result, in rare circumstances, fpTracebackPCs can correctly try to use gp.syscallbp as the starting point for the traceback, but the value is stale. This change fixes the problem by just storing gp.syscallbp to the stack on cgocallback as an unsafe.Pointer, like gp.syscallsp. It also adds a comment documenting this subtlety; the lack of explanation for the unsafe.Pointer type on syscallsp meant this detail was missed -- let's not miss it again in the future. Now, we have a fix, what about a test? Unfortunately, testing this is going to be incredibly annoying because the circumstances under which gp.syscallbp are actually used for traceback are non-deterministic and hard to arrange, especially from within testprogcgo where we don't have export_test.go and can't reach into the runtime. So, instead, add a gp.syscallbp check to reentersyscall and entersyscallblock that mirrors the gp.syscallbp consistency check. This probably causes some miniscule slowdown to the syscall path, but it'll catch the issue without having to actually perform a traceback. For #69085. Fixes #69087. Change-Id: Iaf771758f1666024b854f5fbe2b2c63cbe35b201 Reviewed-on: https://go-review.googlesource.com/c/go/+/608775 Reviewed-by: Nick Ripley LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt Reviewed-by: Cherry Mui (cherry picked from commit 54fe0fd43fcf8609666c16ae6d15ed92873b1564) Reviewed-on: https://go-review.googlesource.com/c/go/+/608835 --- src/runtime/cgocall.go | 9 +++++++-- src/runtime/proc.go | 18 +++++++++++++++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index b943b1c2d6b4f8..68b1ebbac2c7e0 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -338,9 +338,14 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { // stack. However, since we're returning to an earlier stack frame and // need to pair with the entersyscall() call made by cgocall, we must // save syscall* and let reentersyscall restore them. + // + // Note: savedsp and savedbp MUST be held in locals as an unsafe.Pointer. + // When we call into Go, the stack is free to be moved. If these locals + // aren't visible in the stack maps, they won't get updated properly, + // and will end up being stale when restored by reentersyscall. 
savedsp := unsafe.Pointer(gp.syscallsp) savedpc := gp.syscallpc - savedbp := gp.syscallbp + savedbp := unsafe.Pointer(gp.syscallbp) exitsyscall() // coming out of cgo call gp.m.incgo = false if gp.m.isextra { @@ -372,7 +377,7 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { osPreemptExtEnter(gp.m) // going back to cgo call - reentersyscall(savedpc, uintptr(savedsp), savedbp) + reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp)) gp.m.winsyscall = winsyscall } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index c4f175b0b76b22..76c8b71ab9a939 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -4415,7 +4415,13 @@ func reentersyscall(pc, sp, bp uintptr) { } if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscall") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscall") }) } @@ -4553,14 +4559,20 @@ func entersyscallblock() { sp2 := gp.sched.sp sp3 := gp.syscallsp systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } casgstatus(gp, _Grunning, _Gsyscall) if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { - print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") + throw("entersyscallblock") + }) + } + if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp { + systemstack(func() { + print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n") throw("entersyscallblock") }) } From 9625a7faaefd85ce9dd0b7efbaad7731c2018200 Mon Sep 17 00:00:00 2001 From: Tim King Date: Thu, 15 Aug 2024 16:48:42 -0700 Subject: [PATCH 21/66] [release-branch.go1.23] go/types, types2: unalias tilde terms in underIs Unalias the ~T terms during underIs. Before, if T was an alias of U, it may pass T to the iteration function. The iterator function expects an underlying type, under(U), to be passed. This caused several bugs where underIs is used without eventually taking the underlying type. 
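For context, the kind of code that tripped over this is sketched below (a minimal example in the spirit of the issue68903 test added in this CL):

	package p

	type A = [4]int // alias of an array type

	// Before this change, the ~A term could reach underIs as the alias
	// rather than as [4]int, so builtins like len were rejected here.
	func f[T ~A](x T) int { return len(x) }

	var _ = f([4]int{1, 2, 3, 4})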
Fixes #68905 Change-Id: Ie8691d8dddaea00e1dcba94d17c0f1b021fc49a2 Reviewed-on: https://go-review.googlesource.com/c/go/+/606075 Reviewed-by: Robert Griesemer LUCI-TryBot-Result: Go LUCI Reviewed-by: Robert Findley (cherry picked from commit 1a90dcdaaf46d9dd0ee61781dcb9b6e05b80d926) Reviewed-on: https://go-review.googlesource.com/c/go/+/607635 --- src/cmd/compile/internal/types2/typeset.go | 4 +-- src/go/types/typeset.go | 4 +-- .../types/testdata/fixedbugs/issue68903.go | 24 +++++++++++++++++ .../types/testdata/fixedbugs/issue68935.go | 26 +++++++++++++++++++ 4 files changed, 54 insertions(+), 4 deletions(-) create mode 100644 src/internal/types/testdata/fixedbugs/issue68903.go create mode 100644 src/internal/types/testdata/fixedbugs/issue68935.go diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index 0457502e393942..a2d9e42c615ca4 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -131,8 +131,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool { } for _, t := range s.terms { assert(t.typ != nil) - // x == under(x) for ~x terms - u := t.typ + // Unalias(x) == under(x) for ~x terms + u := Unalias(t.typ) if !t.tilde { u = under(u) } diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index d280bf2f5ff5cf..a1d7e6cc994e48 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -134,8 +134,8 @@ func (s *_TypeSet) underIs(f func(Type) bool) bool { } for _, t := range s.terms { assert(t.typ != nil) - // x == under(x) for ~x terms - u := t.typ + // Unalias(x) == under(x) for ~x terms + u := Unalias(t.typ) if !t.tilde { u = under(u) } diff --git a/src/internal/types/testdata/fixedbugs/issue68903.go b/src/internal/types/testdata/fixedbugs/issue68903.go new file mode 100644 index 00000000000000..b1369aa0f6faa7 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68903.go @@ -0,0 +1,24 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = [4]int +type B = map[string]interface{} + +func _[T ~A](x T) { + _ = len(x) +} + +func _[U ~A](x U) { + _ = cap(x) +} + +func _[V ~A]() { + _ = V{} +} + +func _[W ~B](a interface{}) { + _ = a.(W)["key"] +} diff --git a/src/internal/types/testdata/fixedbugs/issue68935.go b/src/internal/types/testdata/fixedbugs/issue68935.go new file mode 100644 index 00000000000000..2e72468f05eb0c --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68935.go @@ -0,0 +1,26 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = struct { + F string + G int +} + +func Make[T ~A]() T { + return T{ + F: "blah", + G: 1234, + } +} + +type N struct { + F string + G int +} + +func _() { + _ = Make[N]() +} From 6de5a7180cc6459235895c76c792a7f15be5625d Mon Sep 17 00:00:00 2001 From: Nic Klaassen Date: Thu, 22 Aug 2024 23:37:00 +0000 Subject: [PATCH 22/66] [release-branch.go1.23] database/sql: fix panic with concurrent Conn and Close The current implementation has a panic when the database is closed concurrently with a new connection attempt. connRequestSet.CloseAndRemoveAll sets connRequestSet.s to a nil slice. If this happens between calls to connRequestSet.Add and connRequestSet.Delete, there is a panic when trying to write to the nil slice. 
This is sequence is likely to occur in DB.conn, where the mutex is released between calls to db.connRequests.Add and db.connRequests.Delete This change updates connRequestSet.CloseAndRemoveAll to set the curIdx to -1 for all pending requests before setting its internal slice to nil. CloseAndRemoveAll already iterates the full slice to close all the request channels. It seems appropriate to set curIdx to -1 before deleting the slice for 3 reasons: 1. connRequestSet.deleteIndex also sets curIdx to -1 2. curIdx will not be relevant to anything after the slice is set to nil 3. connRequestSet.Delete already checks for negative indices For #68949 Fixes #69041 Change-Id: I6b7ebc5a71b67322908271d13865fa12f2469b87 GitHub-Last-Rev: 7d2669155b24043dd9d276f915689511572f2e49 GitHub-Pull-Request: golang/go#68953 Reviewed-on: https://go-review.googlesource.com/c/go/+/607238 LUCI-TryBot-Result: Go LUCI Auto-Submit: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Reviewed-by: Brad Fitzpatrick Reviewed-by: Cherry Mui Reviewed-by: Dmitri Shuralyov (cherry picked from commit 08707d66c350927560faa11b0c195d37d281ab89) Reviewed-on: https://go-review.googlesource.com/c/go/+/609255 --- src/database/sql/sql.go | 5 +++-- src/database/sql/sql_test.go | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index de774a051093df..c247a9b506bfab 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -1368,8 +1368,8 @@ func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn db.waitDuration.Add(int64(time.Since(waitStart))) - // If we failed to delete it, that means something else - // grabbed it and is about to send on it. + // If we failed to delete it, that means either the DB was closed or + // something else grabbed it and is about to send on it. if !deleted { // TODO(bradfitz): rather than this best effort select, we // should probably start a goroutine to read from req. This best @@ -3594,6 +3594,7 @@ type connRequestAndIndex struct { // and clears the set. func (s *connRequestSet) CloseAndRemoveAll() { for _, v := range s.s { + *v.curIdx = -1 close(v.req) } s.s = nil diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index ff65e877a5af6b..110a2bae5bd247 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -4920,6 +4920,17 @@ func TestConnRequestSet(t *testing.T) { t.Error("wasn't random") } }) + t.Run("close-delete", func(t *testing.T) { + reset() + ch := make(chan connRequest) + dh := s.Add(ch) + wantLen(1) + s.CloseAndRemoveAll() + wantLen(0) + if s.Delete(dh) { + t.Error("unexpected delete after CloseAndRemoveAll") + } + }) } func BenchmarkConnRequestSet(b *testing.B) { From 3d1f1f27cf2f524dc17697f8058162ada850d61e Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Wed, 28 Aug 2024 17:45:44 -0400 Subject: [PATCH 23/66] [release-branch.go1.23] cmd: vendor golang.org/x/telemetry@internal-branch.go1.23-vendor Update x/telemetry to fix #68976 and #68946. Commands run: go get golang.org/x/telemetry@internal-branch.go1.23-vendor go mod tidy go mod vendor Fixes #68994. Fixes #68995. 
Change-Id: I63b892ad4c313aa92f21fbd4f519a0b19d725849 Reviewed-on: https://go-review.googlesource.com/c/go/+/609355 LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Pratt Reviewed-by: Robert Findley --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 4 ++-- .../internal/configstore/download.go | 11 ++++++++++ .../internal/crashmonitor/monitor.go | 4 ++-- .../x/telemetry/internal/upload/run.go | 21 ++++++++++++++++--- .../vendor/golang.org/x/telemetry/start.go | 19 ++++++++++++----- src/cmd/vendor/modules.txt | 2 +- 7 files changed, 49 insertions(+), 14 deletions(-) diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 49f02012d3103a..484fef03d10a16 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -9,7 +9,7 @@ require ( golang.org/x/mod v0.19.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.22.0 - golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 + golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 golang.org/x/term v0.20.0 golang.org/x/tools v0.22.1-0.20240618181713-f2d2ebe43e72 ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index ee671f95122344..919dbd2dc74c74 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -16,8 +16,8 @@ golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 h1:+bltxAtk8YFEQ61B/lcYQM8e+7XjLwSDbpspVaVYkz8= -golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM= +golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 h1:Lj8KbuZmoFUbI6pQ28G3Diz/5bRYD2UY5vfAmhrLZWo= +golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147/go.mod h1:amNmu/SBSm2GAF3X+9U2C0epLocdh+r5Z+7oMYO5cLM= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go index a38f371d0f51b6..e60ab7e9fdd73e 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/configstore/download.go @@ -16,6 +16,7 @@ import ( "os" "os/exec" "path/filepath" + "sync/atomic" "golang.org/x/telemetry/internal/telemetry" ) @@ -29,12 +30,22 @@ const ( // creation flag. var needNoConsole = func(cmd *exec.Cmd) {} +var downloads int64 + +// Downloads reports, for testing purposes, the number of times [Download] has +// been called. +func Downloads() int64 { + return atomic.LoadInt64(&downloads) +} + // Download fetches the requested telemetry UploadConfig using "go mod // download". If envOverlay is provided, it is appended to the environment used // for invoking the go command. // // The second result is the canonical version of the requested configuration. 
func Download(version string, envOverlay []string) (*telemetry.UploadConfig, string, error) { + atomic.AddInt64(&downloads, 1) + if version == "" { version = "latest" } diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go index f475f7eec2dfce..612f7563a74c9f 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go @@ -21,12 +21,12 @@ import ( "golang.org/x/telemetry/internal/counter" ) -// Supported reports whether the runtime supports [runtime.SetCrashOutput]. +// Supported reports whether the runtime supports [runtime/debug.SetCrashOutput]. // // TODO(adonovan): eliminate once go1.23+ is assured. func Supported() bool { return setCrashOutput != nil } -var setCrashOutput func(*os.File) error // = runtime.SetCrashOutput on go1.23+ +var setCrashOutput func(*os.File) error // = runtime/debug.SetCrashOutput on go1.23+ // Parent sets up the parent side of the crashmonitor. It requires // exclusive use of a writable pipe connected to the child process's stdin. diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go index eba13b1a573560..e9c8dc207126a1 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/upload/run.go @@ -112,9 +112,24 @@ func newUploader(rcfg RunConfig) (*uploader, error) { logger := log.New(logWriter, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) // Fetch the upload config, if it is not provided. - config, configVersion, err := configstore.Download("latest", rcfg.Env) - if err != nil { - return nil, err + var ( + config *telemetry.UploadConfig + configVersion string + ) + + if mode, _ := dir.Mode(); mode == "on" { + // golang/go#68946: only download the upload config if it will be used. + // + // TODO(rfindley): This is a narrow change aimed at minimally fixing the + // associated bug. In the future, we should read the mode only once during + // the upload process. + config, configVersion, err = configstore.Download("latest", rcfg.Env) + if err != nil { + return nil, err + } + } else { + config = &telemetry.UploadConfig{} + configVersion = "v0.0.0-0" } // Set the start time, if it is not provided. diff --git a/src/cmd/vendor/golang.org/x/telemetry/start.go b/src/cmd/vendor/golang.org/x/telemetry/start.go index 4b37a5c3945cd5..69ebcc71359405 100644 --- a/src/cmd/vendor/golang.org/x/telemetry/start.go +++ b/src/cmd/vendor/golang.org/x/telemetry/start.go @@ -206,7 +206,8 @@ func startChild(reportCrashes, upload bool, result *StartResult) { fd, err := os.Stat(telemetry.Default.DebugDir()) if err != nil { if !os.IsNotExist(err) { - log.Fatalf("failed to stat debug directory: %v", err) + log.Printf("failed to stat debug directory: %v", err) + return } } else if fd.IsDir() { // local/debug exists and is a directory. 
Set stderr to a log file path @@ -214,23 +215,31 @@ func startChild(reportCrashes, upload bool, result *StartResult) { childLogPath := filepath.Join(telemetry.Default.DebugDir(), "sidecar.log") childLog, err := os.OpenFile(childLogPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) if err != nil { - log.Fatalf("opening sidecar log file for child: %v", err) + log.Printf("opening sidecar log file for child: %v", err) + return } defer childLog.Close() cmd.Stderr = childLog } + var crashOutputFile *os.File if reportCrashes { pipe, err := cmd.StdinPipe() if err != nil { - log.Fatalf("StdinPipe: %v", err) + log.Printf("StdinPipe: %v", err) + return } - crashmonitor.Parent(pipe.(*os.File)) // (this conversion is safe) + crashOutputFile = pipe.(*os.File) // (this conversion is safe) } if err := cmd.Start(); err != nil { - log.Fatalf("can't start telemetry child process: %v", err) + // The child couldn't be started. Log the failure. + log.Printf("can't start telemetry child process: %v", err) + return + } + if reportCrashes { + crashmonitor.Parent(crashOutputFile) } result.wg.Add(1) go func() { diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index bf9c1341b94f73..22d40b9e4c1385 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -45,7 +45,7 @@ golang.org/x/sync/semaphore golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20240717194752-0b706e19b701 +# golang.org/x/telemetry v0.0.0-20240828213427-40b6b7fe7147 ## explicit; go 1.20 golang.org/x/telemetry golang.org/x/telemetry/counter From 53487e5477151ed75da50e50a0ba8f1ca64c00a3 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Mon, 10 Jun 2024 15:34:12 -0700 Subject: [PATCH 24/66] [release-branch.go1.23] go/parser: track depth in nested element lists Prevents stack exhaustion with extremely deeply nested literal values, i.e. field values in structs. 
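A rough way to exercise the new limit from outside the package is sketched below (illustrative only; the chosen depth and the exact error text are not guaranteed):

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
		"strings"
	)

	func main() {
		// Builds: package main; var x = struct{ x any }{x: {{{…}}}}
		// with ~200000 levels of nested literal values.
		depth := 200000
		src := "package main\nvar x = struct{ x any }{x: " +
			strings.Repeat("{", depth) + strings.Repeat("}", depth) + "}\n"

		_, err := parser.ParseFile(token.NewFileSet(), "depth.go", src, 0)
		fmt.Println(err) // with this change: a nesting depth error instead of unbounded recursion
	}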
Updates #69138 Fixes #69143 Fixes CVE-2024-34155 Change-Id: I2e8e33b44105cc169d7ed1ae83fb56df0c10f1ee Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1520 Reviewed-by: Robert Griesemer Reviewed-by: Damien Neil Reviewed-by: Russ Cox (cherry picked from commit eb1b038c0d01761694e7a735ef87ac9164c6568e) Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1560 Commit-Queue: Roland Shoemaker Reviewed-by: Tatiana Bradley Reviewed-on: https://go-review.googlesource.com/c/go/+/611175 Auto-Submit: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov TryBot-Bypass: Dmitri Shuralyov Reviewed-by: Michael Pratt --- src/go/parser/parser.go | 2 ++ src/go/parser/parser_test.go | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index 17808b366f092d..f268dea1a6f9cd 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -1676,6 +1676,8 @@ func (p *parser) parseElementList() (list []ast.Expr) { } func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "LiteralValue")) } diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index eea743c2b5b261..2c33e9ef314ad3 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -598,10 +598,11 @@ var parseDepthTests = []struct { {name: "chan2", format: "package main; var x «<-chan »int"}, {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType {name: "map", format: "package main; var x «map[int]»int"}, - {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit - {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "slicelit", format: "package main; var x = []any{«[]any{«»}»}", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 3}, // Parser nodes: UnaryExpr, CompositeLit + {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 3}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "element", format: "package main; var x = struct{x any}{x: «{«»}»}"}, {name: "dot", format: "package main; var x = «x.»x"}, {name: "index", format: "package main; var x = x«[1]»"}, {name: "slice", format: "package main; var x = x«[1:2]»"}, From fa8ff1a46deb6c816304441ec6740ec112e19012 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Fri, 3 May 2024 09:21:39 -0400 Subject: [PATCH 25/66] [release-branch.go1.23] encoding/gob: cover missed cases when checking ignore depth This change makes sure that we are properly checking the ignored field recursion depth in decIgnoreOpFor consistently. This prevents stack exhaustion when attempting to decode a message that contains an extremely deeply nested struct which is ignored. 
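The guard follows the usual pattern of keeping a nesting counter on the decoder state and failing once it passes a fixed bound (generic sketch only, not the real decoder types):

	package p

	import "errors"

	const maxDepth = 10000

	// walker mimics the shape of the fix: a counter on the state struct,
	// incremented on entry and decremented via defer on every recursive call.
	type walker struct {
		depth int
	}

	func (w *walker) walk(v any) error {
		w.depth++
		defer func() { w.depth-- }()
		if w.depth > maxDepth {
			return errors.New("invalid nesting depth")
		}
		if m, ok := v.(map[string]any); ok {
			for _, e := range m {
				if err := w.walk(e); err != nil {
					return err
				}
			}
		}
		return nil
	}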
Thanks to Md Sakib Anwar of The Ohio State University (anwar.40@osu.edu) for reporting this issue. Updates #69139 Fixes #69145 Fixes CVE-2024-34156 Change-Id: Iacce06be95a5892b3064f1c40fcba2e2567862d6 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1440 Reviewed-by: Russ Cox Reviewed-by: Damien Neil (cherry picked from commit 9f2ea73c5f2a7056b7da5d579a485a7216f4b20a) Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1581 Commit-Queue: Roland Shoemaker Reviewed-by: Tatiana Bradley Reviewed-on: https://go-review.googlesource.com/c/go/+/611176 Reviewed-by: Dmitri Shuralyov Auto-Submit: Dmitri Shuralyov Reviewed-by: Michael Pratt TryBot-Bypass: Dmitri Shuralyov --- src/encoding/gob/decode.go | 19 +++++++++++-------- src/encoding/gob/decoder.go | 2 ++ src/encoding/gob/gobencdec_test.go | 14 ++++++++++++++ 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go index d178b2b2fb6467..26b5f6d62b631e 100644 --- a/src/encoding/gob/decode.go +++ b/src/encoding/gob/decode.go @@ -911,8 +911,11 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg var maxIgnoreNestingDepth = 10000 // decIgnoreOpFor returns the decoding op for a field that has no destination. -func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp { - if depth > maxIgnoreNestingDepth { +func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp { + // Track how deep we've recursed trying to skip nested ignored fields. + dec.ignoreDepth++ + defer func() { dec.ignoreDepth-- }() + if dec.ignoreDepth > maxIgnoreNestingDepth { error_(errors.New("invalid nesting depth")) } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). 
@@ -938,7 +941,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, errorf("bad data: undefined type %s", wireId.string()) case wire.ArrayT != nil: elemId := wire.ArrayT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len) } @@ -946,15 +949,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, case wire.MapT != nil: keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1) - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + keyOp := dec.decIgnoreOpFor(keyId, inProgress) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreMap(state, *keyOp, *elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem - elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1) + elemOp := dec.decIgnoreOpFor(elemId, inProgress) op = func(i *decInstr, state *decoderState, value reflect.Value) { state.dec.ignoreSlice(state, *elemOp) } @@ -1115,7 +1118,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine { engine := new(decEngine) engine.instr = make([]decInstr, 1) // one item - op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp)) ovfl := overflow(dec.typeString(remoteId)) engine.instr[0] = decInstr{*op, 0, nil, ovfl} engine.numInstr = 1 @@ -1160,7 +1163,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn localField, present := srt.FieldByName(wireField.Name) // TODO(r): anonymous names if !present || !isExported(wireField.Name) { - op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0) + op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp)) engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl} continue } diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go index c4b60880130787..eae307838e201e 100644 --- a/src/encoding/gob/decoder.go +++ b/src/encoding/gob/decoder.go @@ -35,6 +35,8 @@ type Decoder struct { freeList *decoderState // list of free decoderStates; avoids reallocation countBuf []byte // used for decoding integers while parsing messages err error + // ignoreDepth tracks the depth of recursively parsed ignored fields + ignoreDepth int } // NewDecoder returns a new decoder that reads from the [io.Reader]. 
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go index ae806fc39a21fc..d30e622aa2cbe7 100644 --- a/src/encoding/gob/gobencdec_test.go +++ b/src/encoding/gob/gobencdec_test.go @@ -806,6 +806,8 @@ func TestIgnoreDepthLimit(t *testing.T) { defer func() { maxIgnoreNestingDepth = oldNestingDepth }() b := new(bytes.Buffer) enc := NewEncoder(b) + + // Nested slice typ := reflect.TypeFor[int]() nested := reflect.ArrayOf(1, typ) for i := 0; i < 100; i++ { @@ -819,4 +821,16 @@ func TestIgnoreDepthLimit(t *testing.T) { if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) } + + // Nested struct + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: typ}}) + for i := 0; i < 100; i++ { + nested = reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}) + } + badStruct = reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}})) + enc.Encode(badStruct.Interface()) + dec = NewDecoder(b) + if err := dec.Decode(&output); err == nil || err.Error() != expectedErr { + t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err) + } } From 032ac075c20c01c6c35a672d1542d3e98eab84ea Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Thu, 20 Jun 2024 10:45:30 -0700 Subject: [PATCH 26/66] [release-branch.go1.23] go/build/constraint: add parsing limits Limit the size of build constraints that we will parse. This prevents a number of stack exhaustions that can be hit when parsing overly complex constraints. The imposed limits are unlikely to ever be hit in real world usage. Updates #69141 Fixes #69149 Fixes CVE-2024-34158 Change-Id: I38b614bf04caa36eefc6a4350d848588c4cef3c4 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1540 Reviewed-by: Damien Neil Reviewed-by: Russ Cox (cherry picked from commit 0c74dc9e0da0cf1e12494b514d822b5bebbc9f04) Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/1562 Commit-Queue: Roland Shoemaker Reviewed-by: Tatiana Bradley Reviewed-on: https://go-review.googlesource.com/c/go/+/611177 Reviewed-by: Michael Pratt TryBot-Bypass: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov Auto-Submit: Dmitri Shuralyov --- src/go/build/constraint/expr.go | 28 ++++++++++-- src/go/build/constraint/expr_test.go | 65 +++++++++++++++++++++++++++- 2 files changed, 89 insertions(+), 4 deletions(-) diff --git a/src/go/build/constraint/expr.go b/src/go/build/constraint/expr.go index e59012361bef6d..0f05f8db6a48cb 100644 --- a/src/go/build/constraint/expr.go +++ b/src/go/build/constraint/expr.go @@ -16,6 +16,10 @@ import ( "unicode/utf8" ) +// maxSize is a limit used to control the complexity of expressions, in order +// to prevent stack exhaustion issues due to recursion. +const maxSize = 1000 + // An Expr is a build tag constraint expression. // The underlying concrete type is *[AndExpr], *[OrExpr], *[NotExpr], or *[TagExpr]. type Expr interface { @@ -151,7 +155,7 @@ func Parse(line string) (Expr, error) { return parseExpr(text) } if text, ok := splitPlusBuild(line); ok { - return parsePlusBuildExpr(text), nil + return parsePlusBuildExpr(text) } return nil, errNotConstraint } @@ -201,6 +205,8 @@ type exprParser struct { tok string // last token read isTag bool pos int // position (start) of last token + + size int } // parseExpr parses a boolean build tag expression. 
@@ -249,6 +255,10 @@ func (p *exprParser) and() Expr { // On entry, the next input token has not yet been lexed. // On exit, the next input token has been lexed and is in p.tok. func (p *exprParser) not() Expr { + p.size++ + if p.size > maxSize { + panic(&SyntaxError{Offset: p.pos, Err: "build expression too large"}) + } p.lex() if p.tok == "!" { p.lex() @@ -388,7 +398,13 @@ func splitPlusBuild(line string) (expr string, ok bool) { } // parsePlusBuildExpr parses a legacy build tag expression (as used with “// +build”). -func parsePlusBuildExpr(text string) Expr { +func parsePlusBuildExpr(text string) (Expr, error) { + // Only allow up to 100 AND/OR operators for "old" syntax. + // This is much less than the limit for "new" syntax, + // but uses of old syntax were always very simple. + const maxOldSize = 100 + size := 0 + var x Expr for _, clause := range strings.Fields(text) { var y Expr @@ -414,19 +430,25 @@ func parsePlusBuildExpr(text string) Expr { if y == nil { y = z } else { + if size++; size > maxOldSize { + return nil, errComplex + } y = and(y, z) } } if x == nil { x = y } else { + if size++; size > maxOldSize { + return nil, errComplex + } x = or(x, y) } } if x == nil { x = tag("ignore") } - return x + return x, nil } // isValidTag reports whether the word is a valid build tag. diff --git a/src/go/build/constraint/expr_test.go b/src/go/build/constraint/expr_test.go index 15d189012efb7d..ac38ba69294930 100644 --- a/src/go/build/constraint/expr_test.go +++ b/src/go/build/constraint/expr_test.go @@ -222,7 +222,7 @@ var parsePlusBuildExprTests = []struct { func TestParsePlusBuildExpr(t *testing.T) { for i, tt := range parsePlusBuildExprTests { t.Run(fmt.Sprint(i), func(t *testing.T) { - x := parsePlusBuildExpr(tt.in) + x, _ := parsePlusBuildExpr(tt.in) if x.String() != tt.x.String() { t.Errorf("parsePlusBuildExpr(%q):\nhave %v\nwant %v", tt.in, x, tt.x) } @@ -319,3 +319,66 @@ func TestPlusBuildLines(t *testing.T) { }) } } + +func TestSizeLimits(t *testing.T) { + for _, tc := range []struct { + name string + expr string + }{ + { + name: "go:build or limit", + expr: "//go:build " + strings.Repeat("a || ", maxSize+2), + }, + { + name: "go:build and limit", + expr: "//go:build " + strings.Repeat("a && ", maxSize+2), + }, + { + name: "go:build and depth limit", + expr: "//go:build " + strings.Repeat("(a &&", maxSize+2), + }, + { + name: "go:build or depth limit", + expr: "//go:build " + strings.Repeat("(a ||", maxSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if syntaxErr, ok := err.(*SyntaxError); !ok || syntaxErr.Err != "build expression too large" { + if !ok { + t.Errorf("unexpected error: %v", err) + } else { + t.Errorf("unexpected syntax error: %s", syntaxErr.Err) + } + } + }) + } +} + +func TestPlusSizeLimits(t *testing.T) { + maxOldSize := 100 + for _, tc := range []struct { + name string + expr string + }{ + { + name: "+build or limit", + expr: "// +build " + strings.Repeat("a ", maxOldSize+2), + }, + { + name: "+build and limit", + expr: "// +build " + strings.Repeat("a,", maxOldSize+2), + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.expr) + if err == nil { + t.Error("expression did not trigger limit") + } else if err != errComplex { + t.Errorf("unexpected error: got %q, want %q", err, errComplex) + } + }) + } +} From 69234ded30614a471c35cef5d87b0e0d3c136cd9 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Thu, 5 Sep 2024 15:06:10 +0000 Subject: 
[PATCH 27/66] [release-branch.go1.23] go1.23.1 Change-Id: I1f2dab5560d3214c8934074a53f7750d8d431936 Reviewed-on: https://go-review.googlesource.com/c/go/+/611196 Reviewed-by: Michael Pratt Reviewed-by: Dmitri Shuralyov Commit-Queue: Gopher Robot TryBot-Bypass: Dmitri Shuralyov Auto-Submit: Gopher Robot --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index d184425f0c2ed2..c0bacc9e28f531 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.0 -time 2024-08-07T19:21:44Z +go1.23.1 +time 2024-08-29T20:56:24Z From 80ff7cd35ad35e6518b539f4eb2517928c2f8945 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Tue, 27 Aug 2024 10:19:17 -0700 Subject: [PATCH 28/66] [release-branch.go1.23] cmd/cgo: correct padding required by alignment If the aligned offset isn't sufficient for the field offset, we were padding based on the aligned offset. We need to pad based on the original offset instead. Also set the Go alignment correctly for int128. We were defaulting to the maximum alignment, but since we translate int128 into an array of uint8 the correct Go alignment is 1. For #69086 Fixes #69219 Change-Id: I23ce583335c81beac2ac51f7f9336ac97ccebf09 Reviewed-on: https://go-review.googlesource.com/c/go/+/608815 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Auto-Submit: Ian Lance Taylor (cherry picked from commit c2098929056481d0dc09f5f42b8959f4db8878f2) Reviewed-on: https://go-review.googlesource.com/c/go/+/611296 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Commit-Queue: Ian Lance Taylor --- src/cmd/cgo/gcc.go | 23 ++++++++++++------ src/cmd/cgo/internal/test/cgo_test.go | 1 + src/cmd/cgo/internal/test/test.go | 34 +++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 6c23e59adf19eb..be93c4a24bb566 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -2579,6 +2579,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype) @@ -2595,9 +2600,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.PtrType: @@ -2826,6 +2830,11 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ if dt.BitSize > 0 { fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype) } + + if t.Align = t.Size; t.Align >= c.ptrSize { + t.Align = c.ptrSize + } + switch t.Size { default: fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype) @@ -2842,9 +2851,8 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ Len: c.intExpr(t.Size), Elt: c.uint8, } - } - if t.Align = t.Size; t.Align >= c.ptrSize { - t.Align = c.ptrSize + // t.Align is the alignment of the Go type. + t.Align = 1 } case *dwarf.VoidType: @@ -3110,10 +3118,11 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct } // Round off up to talign, assumed to be a power of 2. 
+ origOff := off off = (off + talign - 1) &^ (talign - 1) if f.ByteOffset > off { - fld, sizes = c.pad(fld, sizes, f.ByteOffset-off) + fld, sizes = c.pad(fld, sizes, f.ByteOffset-origOff) off = f.ByteOffset } if f.ByteOffset < off { diff --git a/src/cmd/cgo/internal/test/cgo_test.go b/src/cmd/cgo/internal/test/cgo_test.go index 5e02888b3dddd9..5393552e07a4d1 100644 --- a/src/cmd/cgo/internal/test/cgo_test.go +++ b/src/cmd/cgo/internal/test/cgo_test.go @@ -70,6 +70,7 @@ func Test31891(t *testing.T) { test31891(t) } func Test42018(t *testing.T) { test42018(t) } func Test45451(t *testing.T) { test45451(t) } func Test49633(t *testing.T) { test49633(t) } +func Test69086(t *testing.T) { test69086(t) } func TestAlign(t *testing.T) { testAlign(t) } func TestAtol(t *testing.T) { testAtol(t) } func TestBlocking(t *testing.T) { testBlocking(t) } diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go index 374689631d77ab..362be79a737bee 100644 --- a/src/cmd/cgo/internal/test/test.go +++ b/src/cmd/cgo/internal/test/test.go @@ -940,6 +940,19 @@ typedef struct { } issue67517struct; static void issue67517(issue67517struct* p) {} +// Issue 69086. +// GCC added the __int128 type in GCC 4.6, released in 2011. +typedef struct { + int a; +#ifdef __SIZEOF_INT128__ + unsigned __int128 b; +#else + uint64_t b; +#endif + unsigned char c; +} issue69086struct; +static int issue690861(issue69086struct* p) { p->b = 1234; return p->c; } +static int issue690862(unsigned long ul1, unsigned long ul2, unsigned int u, issue69086struct s) { return (int)(s.b); } */ import "C" @@ -2349,3 +2362,24 @@ func issue67517() { b: nil, }) } + +// Issue 69086. +func test69086(t *testing.T) { + var s C.issue69086struct + + typ := reflect.TypeOf(s) + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + t.Logf("field %d: name %s size %d align %d offset %d", i, f.Name, f.Type.Size(), f.Type.Align(), f.Offset) + } + + s.c = 1 + got := C.issue690861(&s) + if got != 1 { + t.Errorf("field: got %d, want 1", got) + } + got = C.issue690862(1, 2, 3, s) + if got != 1234 { + t.Errorf("call: got %d, want 1234", got) + } +} From a886959aa2fb1115096a937d3d8a2e921388752f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 29 Aug 2024 15:08:33 -0700 Subject: [PATCH 29/66] [release-branch.go1.23] runtime: size maps.Clone destination bucket array safely In rare situations, like during same-sized grows, the source map for maps.Clone may be overloaded (has more than 6.5 entries per bucket). This causes the runtime to allocate a larger bucket array for the destination map than for the source map. The maps.Clone code walks off the end of the source array if it is smaller than the destination array. This is a pretty simple fix, ensuring that the destination bucket array is never longer than the source bucket array. Maybe a better fix is to make the Clone code handle shorter source arrays correctly, but this fix is deliberately simple to reduce the risk of backporting this fix. 
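
A rough sketch of the scenario in ordinary Go, for illustration only
(whether a same-size grow actually happens here is timing-dependent; the
new test/fixedbugs/issue69110.go below arranges it deterministically via
a go:linkname hook):

    package main

    import "maps"

    func main() {
        // Fill a small map up to its load factor, then churn
        // delete/insert so the runtime may perform a same-size grow
        // while the map is still overloaded, and clone it in that state.
        m := make(map[int]int)
        for i := 0; i < 26; i++ {
            m[i] = i
        }
        for i := 26; i < 1000; i++ {
            delete(m, i-26)
            m[i] = i
        }
        // Before this fix, the clone could size its bucket array larger
        // than the source's and read past the source buckets.
        _ = maps.Clone(m)
    }
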
Fixes #69156 Change-Id: I824c93d1db690999f25a3c43b2816fc28ace7509 Reviewed-on: https://go-review.googlesource.com/c/go/+/610377 Reviewed-by: Dmitri Shuralyov Reviewed-by: Cuong Manh Le LUCI-TryBot-Result: Go LUCI Auto-Submit: Dmitri Shuralyov Reviewed-by: Keith Randall --- src/runtime/map.go | 16 +++++++++- test/fixedbugs/issue69110.go | 57 ++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue69110.go diff --git a/src/runtime/map.go b/src/runtime/map.go index 112084f5a74091..52d56fb57a4dc6 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -1209,6 +1209,11 @@ func (h *hmap) sameSizeGrow() bool { return h.flags&sameSizeGrow != 0 } +//go:linkname sameSizeGrowForIssue69110Test +func sameSizeGrowForIssue69110Test(h *hmap) bool { + return h.sameSizeGrow() +} + // noldbuckets calculates the number of buckets prior to the current map growth. func (h *hmap) noldbuckets() uintptr { oldB := h.B @@ -1668,7 +1673,16 @@ func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) } func mapclone2(t *maptype, src *hmap) *hmap { - dst := makemap(t, src.count, nil) + hint := src.count + if overLoadFactor(hint, src.B) { + // Note: in rare cases (e.g. during a same-sized grow) the map + // can be overloaded. Make sure we don't allocate a destination + // bucket array larger than the source bucket array. + // This will cause the cloned map to be overloaded also, + // but that's better than crashing. See issue 69110. + hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) + } + dst := makemap(t, hint, nil) dst.hash0 = src.hash0 dst.nevacuate = 0 // flags do not need to be copied here, just like a new map has no flags. diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go new file mode 100644 index 00000000000000..71a4bcac31a16e --- /dev/null +++ b/test/fixedbugs/issue69110.go @@ -0,0 +1,57 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "maps" + _ "unsafe" +) + +func main() { + for i := 0; i < 100; i++ { + f() + } +} + +const NB = 4 + +func f() { + // Make a map with NB buckets, at max capacity. + // 6.5 entries/bucket. + ne := NB * 13 / 2 + m := map[int]int{} + for i := 0; i < ne; i++ { + m[i] = i + } + + // delete/insert a lot, to hopefully get lots of overflow buckets + // and trigger a same-size grow. + ssg := false + for i := ne; i < ne+1000; i++ { + delete(m, i-ne) + m[i] = i + if sameSizeGrow(m) { + ssg = true + break + } + } + if !ssg { + return + } + + // Insert 1 more entry, which would ordinarily trigger a growth. + // We can't grow while growing, so we instead go over our + // target capacity. + m[-1] = -1 + + // Cloning in this state will make a map with a destination bucket + // array twice the size of the source. + _ = maps.Clone(m) +} + +//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test +func sameSizeGrow(m map[int]int) bool From 82575f76b8473effd6aff0a8690582820380d4d4 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 4 Sep 2024 03:08:26 +0000 Subject: [PATCH 30/66] [release-branch.go1.23] internal/weak: shade pointer in weak-to-strong conversion There's a bug in the weak-to-strong conversion in that creating the *only* strong pointer to some weakly-held object during the mark phase may result in that object not being properly marked. 
The exact mechanism for this is that the new strong pointer will always point to a white object (because it was only weakly referenced up until this point) and it can then be stored in a blackened stack, hiding it from the garbage collector. This "hide a white pointer in the stack" problem is pretty much exactly what the Yuasa part of the hybrid write barrier is trying to catch, so we need to do the same thing the write barrier would do: shade the pointer. Added a test and confirmed that it fails with high probability if the pointer shading is missing. For #69210. Fixes #69240. Change-Id: Iaae64ae95ea7e975c2f2c3d4d1960e74e1bd1c3f Reviewed-on: https://go-review.googlesource.com/c/go/+/610396 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Michael Pratt Auto-Submit: Michael Knyszek (cherry picked from commit 79fd633632cdbaf9ca38f7559e5abb5c07fbbd9d) Reviewed-on: https://go-review.googlesource.com/c/go/+/610696 Auto-Submit: Dmitri Shuralyov --- src/internal/weak/pointer_test.go | 82 +++++++++++++++++++++++++++++++ src/runtime/mheap.go | 27 +++++++++- 2 files changed, 108 insertions(+), 1 deletion(-) diff --git a/src/internal/weak/pointer_test.go b/src/internal/weak/pointer_test.go index e143749230f0a5..5a861bb9ca39d7 100644 --- a/src/internal/weak/pointer_test.go +++ b/src/internal/weak/pointer_test.go @@ -5,9 +5,12 @@ package weak_test import ( + "context" "internal/weak" "runtime" + "sync" "testing" + "time" ) type T struct { @@ -128,3 +131,82 @@ func TestPointerFinalizer(t *testing.T) { t.Errorf("weak pointer is non-nil even after finalization: %v", wt) } } + +// Regression test for issue 69210. +// +// Weak-to-strong conversions must shade the new strong pointer, otherwise +// that might be creating the only strong pointer to a white object which +// is hidden in a blackened stack. +// +// Never fails if correct, fails with some high probability if incorrect. +func TestIssue69210(t *testing.T) { + if testing.Short() { + t.Skip("this is a stress test that takes seconds to run on its own") + } + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // What we're trying to do is manufacture the conditions under which this + // bug happens. Specifically, we want: + // + // 1. To create a whole bunch of objects that are only weakly-pointed-to, + // 2. To call Strong while the GC is in the mark phase, + // 3. The new strong pointer to be missed by the GC, + // 4. The following GC cycle to mark a free object. + // + // Unfortunately, (2) and (3) are hard to control, but we can increase + // the likelihood by having several goroutines do (1) at once while + // another goroutine constantly keeps us in the GC with runtime.GC. + // Like throwing darts at a dart board until they land just right. + // We can increase the likelihood of (4) by adding some delay after + // creating the strong pointer, but only if it's non-nil. If it's nil, + // that means it was already collected in which case there's no chance + // of triggering the bug, so we want to retry as fast as possible. + // Our heap here is tiny, so the GCs will go by fast. + // + // As of 2024-09-03, removing the line that shades pointers during + // the weak-to-strong conversion causes this test to fail about 50% + // of the time. 
+ + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + runtime.GC() + + select { + case <-ctx.Done(): + return + default: + } + } + }() + for range max(runtime.GOMAXPROCS(-1)-1, 1) { + wg.Add(1) + go func() { + defer wg.Done() + for { + for range 5 { + bt := new(T) + wt := weak.Make(bt) + bt = nil + time.Sleep(1 * time.Millisecond) + bt = wt.Strong() + if bt != nil { + time.Sleep(4 * time.Millisecond) + bt.t = bt + bt.a = 12 + } + runtime.KeepAlive(bt) + } + select { + case <-ctx.Done(): + return + default: + } + } + }() + } + wg.Wait() +} diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 35fd08af50c3c1..a91055387ef35b 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -2073,7 +2073,22 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { // Even if we just swept some random span that doesn't contain this object, because // this object is long dead and its memory has since been reused, we'll just observe nil. ptr := unsafe.Pointer(handle.Load()) + + // This is responsible for maintaining the same GC-related + // invariants as the Yuasa part of the write barrier. During + // the mark phase, it's possible that we just created the only + // valid pointer to the object pointed to by ptr. If it's only + // ever referenced from our stack, and our stack is blackened + // already, we could fail to mark it. So, mark it now. + if gcphase != _GCoff { + shade(uintptr(ptr)) + } releasem(mp) + + // Explicitly keep ptr alive. This seems unnecessary since we return ptr, + // but let's be explicit since it's important we keep ptr alive across the + // call to shade. + KeepAlive(ptr) return ptr } @@ -2081,6 +2096,9 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // First try to retrieve without allocating. if handle := getWeakHandle(p); handle != nil { + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } @@ -2105,6 +2123,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } + + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return s.handle } @@ -2124,7 +2146,7 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { } // Keep p alive for the duration of the function to ensure - // that it cannot die while we're trying to this. + // that it cannot die while we're trying to do this. KeepAlive(p) return handle } @@ -2154,6 +2176,9 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr { unlock(&span.speciallock) releasem(mp) + // Keep p alive for the duration of the function to ensure + // that it cannot die while we're trying to do this. + KeepAlive(p) return handle } From e6598e7baafa5650f82b9575c053a52c2601bf8f Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 22 Aug 2024 16:22:53 +0800 Subject: [PATCH 31/66] [release-branch.go1.23] os: dup pidfd if caller sets PidFD manually For #68984. Fixes #69119. 
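
The case being fixed, sketched for illustration only (Linux-specific; the
executable path is arbitrary and error handling is elided): the caller sets
SysProcAttr.PidFD itself and keeps using that descriptor after the process
has been waited for, so the os package must operate on a duplicate rather
than take over the caller's copy.

    package main

    import (
        "os"
        "syscall"
    )

    func main() {
        var pidfd int
        p, err := os.StartProcess("/bin/true", []string{"true"}, &os.ProcAttr{
            Sys: &syscall.SysProcAttr{PidFD: &pidfd},
        })
        if err != nil {
            return
        }
        p.Wait()
        // The caller still owns pidfd and is responsible for closing it.
        syscall.Close(pidfd)
    }
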
Change-Id: I16d25777cb38a337cd4204a8147eaf866c3df9e1 Reviewed-on: https://go-review.googlesource.com/c/go/+/607695 Reviewed-by: Kirill Kolyshkin Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI Commit-Queue: Ian Lance Taylor Reviewed-by: David Chase Auto-Submit: Ian Lance Taylor (cherry picked from commit 239666cd7343d46c40a5b929c8bec8b532dbe83f) Reviewed-on: https://go-review.googlesource.com/c/go/+/611415 Auto-Submit: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov TryBot-Bypass: Dmitri Shuralyov --- src/os/exec_posix.go | 5 +++-- src/os/pidfd_linux.go | 27 ++++++++++++++++++++------- src/os/pidfd_linux_test.go | 32 ++++++++++++++++++++++++++++++++ src/os/pidfd_other.go | 6 +++--- 4 files changed, 58 insertions(+), 12 deletions(-) diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go index cba2e151673aba..ff51247d56b72d 100644 --- a/src/os/exec_posix.go +++ b/src/os/exec_posix.go @@ -35,10 +35,11 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e } } + attrSys, shouldDupPidfd := ensurePidfd(attr.Sys) sysattr := &syscall.ProcAttr{ Dir: attr.Dir, Env: attr.Env, - Sys: ensurePidfd(attr.Sys), + Sys: attrSys, } if sysattr.Env == nil { sysattr.Env, err = execenv.Default(sysattr.Sys) @@ -63,7 +64,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e // For Windows, syscall.StartProcess above already returned a process handle. if runtime.GOOS != "windows" { var ok bool - h, ok = getPidfd(sysattr.Sys) + h, ok = getPidfd(sysattr.Sys, shouldDupPidfd) if !ok { return newPIDProcess(pid), nil } diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 0404c4ff64b72e..545cfe9613b8b4 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -19,9 +19,12 @@ import ( "unsafe" ) -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { +// ensurePidfd initializes the PidFD field in sysAttr if it is not already set. +// It returns the original or modified SysProcAttr struct and a flag indicating +// whether the PidFD should be duplicated before using. +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { if !pidfdWorks() { - return sysAttr + return sysAttr, false } var pidfd int @@ -29,23 +32,33 @@ func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { if sysAttr == nil { return &syscall.SysProcAttr{ PidFD: &pidfd, - } + }, false } if sysAttr.PidFD == nil { newSys := *sysAttr // copy newSys.PidFD = &pidfd - return &newSys + return &newSys, false } - return sysAttr + return sysAttr, true } -func getPidfd(sysAttr *syscall.SysProcAttr) (uintptr, bool) { +// getPidfd returns the value of sysAttr.PidFD (or its duplicate if needDup is +// set) and a flag indicating whether the value can be used. 
+func getPidfd(sysAttr *syscall.SysProcAttr, needDup bool) (uintptr, bool) { if !pidfdWorks() { return 0, false } - return uintptr(*sysAttr.PidFD), true + h := *sysAttr.PidFD + if needDup { + dupH, e := unix.Fcntl(h, syscall.F_DUPFD_CLOEXEC, 0) + if e != nil { + return 0, false + } + h = dupH + } + return uintptr(h), true } func pidfdFind(pid int) (uintptr, error) { diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index 837593706bae8e..fa0877037baadc 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -6,6 +6,7 @@ package os_test import ( "errors" + "internal/syscall/unix" "internal/testenv" "os" "syscall" @@ -57,3 +58,34 @@ func TestFindProcessViaPidfd(t *testing.T) { t.Fatalf("Release: got %v, want ", err) } } + +func TestStartProcessWithPidfd(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + if err := os.CheckPidfdOnce(); err != nil { + // Non-pidfd code paths tested in exec_unix_test.go. + t.Skipf("skipping: pidfd not available: %v", err) + } + + var pidfd int + p, err := os.StartProcess(testenv.GoToolPath(t), []string{"go"}, &os.ProcAttr{ + Sys: &syscall.SysProcAttr{ + PidFD: &pidfd, + }, + }) + if err != nil { + t.Fatalf("starting test process: %v", err) + } + defer syscall.Close(pidfd) + + if _, err := p.Wait(); err != nil { + t.Fatalf("Wait: got %v, want ", err) + } + + // Check the pidfd is still valid + err = unix.PidFDSendSignal(uintptr(pidfd), syscall.Signal(0)) + if !errors.Is(err, syscall.ESRCH) { + t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) + } +} diff --git a/src/os/pidfd_other.go b/src/os/pidfd_other.go index dda4bd0feccae6..ba9cbcb93830c0 100644 --- a/src/os/pidfd_other.go +++ b/src/os/pidfd_other.go @@ -8,11 +8,11 @@ package os import "syscall" -func ensurePidfd(sysAttr *syscall.SysProcAttr) *syscall.SysProcAttr { - return sysAttr +func ensurePidfd(sysAttr *syscall.SysProcAttr) (*syscall.SysProcAttr, bool) { + return sysAttr, false } -func getPidfd(_ *syscall.SysProcAttr) (uintptr, bool) { +func getPidfd(_ *syscall.SysProcAttr, _ bool) (uintptr, bool) { return 0, false } From a74951c5af5498db5d4be0c14dcaa45fb452e23a Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 4 Sep 2024 16:46:33 +0000 Subject: [PATCH 32/66] [release-branch.go1.23] unique: don't retain uncloned input as key Currently the unique package tries to clone strings that get stored in its internal map to avoid retaining large strings. However, this falls over entirely due to the fact that the original string is *still* stored in the map as a key. Whoops. Fix this by storing the cloned value in the map instead. This change also adds a test which fails without this change. For #69370. Fixes #69383. Change-Id: I1a6bb68ed79b869ea12ab6be061a5ae4b4377ddb Reviewed-on: https://go-review.googlesource.com/c/go/+/610738 Reviewed-by: Michael Pratt Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit 21ac23a96f204dfb558a8d3071380c1d105a93ba) Reviewed-on: https://go-review.googlesource.com/c/go/+/612295 Auto-Submit: Tim King --- src/unique/handle.go | 7 ++++--- src/unique/handle_test.go | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/src/unique/handle.go b/src/unique/handle.go index 96d8fedb0cabe6..abc620f60fe14e 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] { toInsert *T // Keep this around to keep it alive. 
toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. - wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index b031bbf6852c6b..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -110,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. + runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +} From c8c6f9abfbbd2f949285a527036a3d0dbd991e74 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 6 Sep 2024 12:19:01 -0700 Subject: [PATCH 33/66] [release-branch.go1.23] syscall: on exec failure, close pidfd For #69284 Fixes #69402 Change-Id: I6350209302778ba5e44fa03d0b9e680d2b4ec192 Reviewed-on: https://go-review.googlesource.com/c/go/+/611495 LUCI-TryBot-Result: Go LUCI Reviewed-by: roger peppe Reviewed-by: Tim King Auto-Submit: Ian Lance Taylor Reviewed-by: Dmitri Shuralyov (cherry picked from commit 8926ca9c5ec3ea0b51e413e87f737aeb1422ea48) Reviewed-on: https://go-review.googlesource.com/c/go/+/613616 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Reviewed-by: Tobias Klauser --- src/os/pidfd_linux_test.go | 60 +++++++++++++++++++++++++++++++++++++ src/syscall/exec_bsd.go | 5 ++++ src/syscall/exec_freebsd.go | 5 ++++ src/syscall/exec_libc.go | 5 ++++ src/syscall/exec_libc2.go | 5 ++++ src/syscall/exec_linux.go | 8 +++++ src/syscall/exec_unix.go | 4 +++ 7 files changed, 92 insertions(+) diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index fa0877037baadc..c1f41d02d66c73 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -9,6 +9,7 @@ import ( "internal/syscall/unix" "internal/testenv" "os" + "os/exec" "syscall" "testing" ) @@ -89,3 +90,62 @@ func TestStartProcessWithPidfd(t *testing.T) { t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) } } + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. 
+ const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like. + if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..26844121910e8f 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -735,3 +735,11 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. 
+func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) {
+	if sys.PidFD != nil && *sys.PidFD != -1 {
+		Close(*sys.PidFD)
+		*sys.PidFD = -1
+	}
+}
diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
index 1b90aa7e72e0ed..4747fa075834af 100644
--- a/src/syscall/exec_unix.go
+++ b/src/syscall/exec_unix.go
@@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
 	for err1 == EINTR {
 		_, err1 = Wait4(pid, &wstatus, 0, nil)
 	}
+
+	// OS-specific cleanup on failure.
+	forkAndExecFailureCleanup(attr, sys)
+
 	return 0, err
 }
 

From fbddfae62f19b5f04555aa593970ac4c6f5a38e5 Mon Sep 17 00:00:00 2001
From: Cuong Manh Le
Date: Wed, 18 Sep 2024 22:39:05 +0700
Subject: [PATCH 34/66] [release-branch.go1.23] cmd/compile: fix wrong escape
 analysis for rangefunc

CL 584596 added a "-range" suffix to the name of the closure generated
for a rangefunc loop body. However, this breaks the condition that
escape analysis uses to check whether a closure is contained within a
function, which is "F.funcN" for outer function "F" and closure "funcN".

Fix this by also accepting the new "-rangeN" form in that condition.

Updates #69434
Fixes #69511

Change-Id: I411de8f63b69a6514a9e9504d49d62e00ce4115d
Reviewed-on: https://go-review.googlesource.com/c/go/+/614096
Reviewed-by: David Chase
Reviewed-by: Cherry Mui
LUCI-TryBot-Result: Go LUCI
Auto-Submit: Cuong Manh Le
Reviewed-on: https://go-review.googlesource.com/c/go/+/614195
---
 src/cmd/compile/internal/escape/solve.go |   4 +-
 test/fixedbugs/issue69434.go             | 173 +++++++++++++++++++++++
 test/fixedbugs/issue69507.go             | 133 +++++++++++++++++
 3 files changed, 308 insertions(+), 2 deletions(-)
 create mode 100644 test/fixedbugs/issue69434.go
 create mode 100644 test/fixedbugs/issue69507.go

diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index 2675a16a241fe3..ef17bc48ef2342 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -318,9 +318,9 @@ func containsClosure(f, c *ir.Func) bool {
 		return false
 	}
 
-	// Closures within function Foo are named like "Foo.funcN..."
+	// Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN".
 	// TODO(mdempsky): Better way to recognize this.
 	fn := f.Sym().Name
 	cn := c.Sym().Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+	return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-')
 }
diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go
new file mode 100644
index 00000000000000..682046601960da
--- /dev/null
+++ b/test/fixedbugs/issue69434.go
@@ -0,0 +1,173 @@
+// run
+
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. +func (wr *WordReader) All() iter.Seq[string] { + word := make([]byte, 1024) + return func(yield func(string) bool) { + var err error + var n int + for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) { + if !yield(string(word[:n])) { + return + } + } + if err != io.EOF { + fmt.Fprintf(os.Stderr, "error reading word: %v\n", err) + } + } +} + +// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map +func removeNonAlphabetic(word string) string { + return strings.Map(func(r rune) rune { + if unicode.IsLetter(r) { + return unicode.ToLower(r) + } + return -1 + }, word) +} + +// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<>= 1 + pr.counter-- + if pr.counter == 0 { + pr.refreshCounter() + } + return remove +} + +// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method +func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int { + wordReader := NewWordReader(reader) + words := make(map[string]struct{}, memorySize) + + rounds := 0 + roundRemover := NewProbabilisticSkipper(1) + wordSkipper := NewProbabilisticSkipper(rounds) + wordSkipper.check(rounds) + + for word := range wordReader.All() { + wordSkipper.check(rounds) + if wordSkipper.ShouldSkip() { + delete(words, word) + } else { + words[word] = struct{}{} + + if len(words) >= memorySize { + rounds++ + + wordSkipper = NewProbabilisticSkipper(rounds) + for w := range words { + if roundRemover.ShouldSkip() { + delete(words, w) + } + } + } + } + wordSkipper.check(rounds) + } + + if len(words) == 0 { + return 0 + } + + invProbability := 1 << rounds + estimatedUniqueWords := len(words) * invProbability + return estimatedUniqueWords +} + +func main() { + input := "Hello, world! This is a test. Hello, world, hello!" + expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated) + memorySize := 6 + + reader := strings.NewReader(input) + estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize) + if estimatedUniqueWords != expectedUniqueWords { + // ... + } +} diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go new file mode 100644 index 00000000000000..fc300c848ee62f --- /dev/null +++ b/test/fixedbugs/issue69507.go @@ -0,0 +1,133 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func main() { + err := run() + if err != nil { + panic(err) + } +} + +func run() error { + methods := "AB" + + type node struct { + tag string + choices []string + } + all := []node{ + {"000", permutations(methods)}, + } + + next := 1 + for len(all) > 0 { + cur := all[0] + k := copy(all, all[1:]) + all = all[:k] + + if len(cur.choices) == 1 { + continue + } + + var bestM map[byte][]string + bMax := len(cur.choices) + 1 + bMin := -1 + for sel := range selections(methods) { + m := make(map[byte][]string) + for _, order := range cur.choices { + x := findFirstMatch(order, sel) + m[x] = append(m[x], order) + } + + min := len(cur.choices) + 1 + max := -1 + for _, v := range m { + if len(v) < min { + min = len(v) + } + if len(v) > max { + max = len(v) + } + } + if max < bMax || (max == bMax && min > bMin) { + bestM = m + bMin = min + bMax = max + } + } + + if bMax == len(cur.choices) { + continue + } + + cc := Keys(bestM) + for c := range cc { + choices := bestM[c] + next++ + + switch c { + case 'A': + case 'B': + default: + panic("unexpected selector type " + string(c)) + } + all = append(all, node{"", choices}) + } + } + return nil +} + +func permutations(s string) []string { + if len(s) <= 1 { + return []string{s} + } + + var result []string + for i, char := range s { + rest := s[:i] + s[i+1:] + for _, perm := range permutations(rest) { + result = append(result, string(char)+perm) + } + } + return result +} + +type Seq[V any] func(yield func(V) bool) + +func selections(s string) Seq[string] { + return func(yield func(string) bool) { + for bits := 1; bits < 1< Date: Fri, 6 Sep 2024 17:19:34 -0700 Subject: [PATCH 35/66] [release-branch.go1.23] runtime: if stop/reset races with running timer, return correct result The timer code is careful to ensure that if stop/reset is called while a timer is being run, we cancel the run. However, the code failed to ensure that in that case stop/reset returned true, meaning that the timer had been stopped. In the racing case stop/reset could see that t.when had been set to zero, and return false, even though the timer had not and never would fire. Fix this by tracking whether a timer run is in progress, and using that to reliably detect that the run was cancelled, meaning that stop/reset should return true. For #69312 Fixes #69333 Change-Id: I78e870063eb96650638f12c056e32c931417c84a Reviewed-on: https://go-review.googlesource.com/c/go/+/611496 Reviewed-by: David Chase Reviewed-by: Cuong Manh Le Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI Auto-Submit: Ian Lance Taylor (cherry picked from commit 2ebaff4890596ed6064e2dcbbe5e68bc93bed882) Reviewed-on: https://go-review.googlesource.com/c/go/+/616096 Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- src/runtime/time.go | 82 +++++++++++++++++++++++++++++++++++++++--- src/time/sleep_test.go | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 5 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..b43cf9589bf90b 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,40 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. 
mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // The lowest zero bit is set when about to send a value on the channel, + // and cleared after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // An isSending bit is set only when t.mu is held. + // An isSending bit is cleared only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + // + // Setting and clearing Uint8 bits handles the case of + // a timer that is reset concurrently with unlockAndRun. + // If the reset timer runs immediately, we can wind up with + // concurrent calls to unlockAndRun for the same timer. + // Using matched bit set and clear in unlockAndRun + // ensures that the value doesn't get temporarily out of sync. + // + // We use a uint8 to keep the timer struct small. + // This means that we can only support up to 8 concurrent + // runs of a timer, where a concurrent run can only occur if + // we start a run, unlock the timer, the timer is reset to a new + // value (or the ticker fires again), it is ready to run, + // and it is actually run, all before the first run completes. + // Since completing a run is fast, even 2 concurrent timer runs are + // nearly impossible, so this should be safe in practice. + isSending atomic.Uint8 + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -431,6 +461,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -525,6 +564,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1061,24 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + var isSendingClear uint8 + if !async && t.isChan { + // Tell Stop/Reset that we are sending a value. + // Set the lowest zero bit. + // We do this awkward step because atomic.Uint8 + // doesn't support Add or CompareAndSwap. + // We only set bits with t locked. 
+ v := t.isSending.Load() + i := sys.TrailingZeros8(^v) + if i == 8 { + throw("too many concurrent timer firings") + } + isSendingClear = 1 << i + t.isSending.Or(isSendingClear) + } + t.unlock() if raceenabled { @@ -1028,7 +1094,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,6 +1109,10 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) if t.seq != seq { f = func(any, uintptr, int64) {} @@ -1053,6 +1122,9 @@ func (t *timer) unlockAndRun(now int64) { f(arg, seq, delay) if !async && t.isChan { + // We are no longer sending a value. + t.isSending.And(^isSendingClear) + unlock(&t.sendLock) } diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..5357ed23c8e352 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,68 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. +func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. + <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
 // https://golang.org/issue/38860

From ed07b321aef7632f956ce991dd10fdd7e1abd827 Mon Sep 17 00:00:00 2001
From: Gopher Robot
Date: Tue, 1 Oct 2024 16:56:29 +0000
Subject: [PATCH 36/66] [release-branch.go1.23] go1.23.2

Change-Id: I904d2e951796dd4142d6e9de4a55af07852bca51
Reviewed-on: https://go-review.googlesource.com/c/go/+/617019
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Michael Knyszek
Auto-Submit: Gopher Robot
Reviewed-by: David Chase
---
 VERSION | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/VERSION b/VERSION
index c0bacc9e28f531..7c5b4094049322 100644
--- a/VERSION
+++ b/VERSION
@@ -1,2 +1,2 @@
-go1.23.1
-time 2024-08-29T20:56:24Z
+go1.23.2
+time 2024-09-28T01:34:15Z

From f8080edefd60c8740915f922cf8a4352e6658174 Mon Sep 17 00:00:00 2001
From: Shulhan
Date: Sat, 13 Jul 2024 12:18:04 +0700
Subject: [PATCH 37/66] [release-branch.go1.23] runtime: fix TestGdbAutotmpTypes
 on gdb version 15

On Arch Linux with gdb version 15.1, the TestGdbAutotmpTypes test
prints the following output:

----
~/src/go/src/runtime
$ go test -run=TestGdbAutotmpTypes -v
=== RUN   TestGdbAutotmpTypes
=== PAUSE TestGdbAutotmpTypes
=== CONT  TestGdbAutotmpTypes
    runtime-gdb_test.go:78: gdb version 15.1
    runtime-gdb_test.go:570: gdb output: Loading Go Runtime support.
Target 'exec' cannot support this command.
Breakpoint 1 at 0x46e416: file /tmp/TestGdbAutotmpTypes750485513/001/main.go, line 8.
This GDB supports auto-downloading debuginfo from the following URLs:
Enable debuginfod for this session? (y or [n]) [answered N; input not from terminal]
Debuginfod has been disabled.
To make this setting permanent, add 'set debuginfod enabled off' to .gdbinit.
[New LWP 355373]
[New LWP 355374]
[New LWP 355375]
[New LWP 355376]

Thread 1 "a.exe" hit Breakpoint 1, main.main () at /tmp/TestGdbAutotmpTypes750485513/001/main.go:8
8	func main() {
9		var iface interface{} = map[string]astruct{}
All types matching regular expression "astruct":

File runtime:
	[]main.astruct
	bucket<string,main.astruct>
	hash<string,main.astruct>
	main.astruct
	typedef hash<string,main.astruct> * map[string]main.astruct;
	typedef noalg.[8]main.astruct noalg.[8]main.astruct;
	noalg.map.bucket[string]main.astruct
    runtime-gdb_test.go:587: could not find []main.astruct; in 'info typrs astruct' output
!!! FAIL
exit status 1
FAIL	runtime	0.273s
$
----

In the output under "File runtime", each line no longer ends with ";",
while the test checks for strings that end with it.

While at it, print the expected string with "%q" instead of "%s" for a
better error message.

For #67089
Fixes #69746

Change-Id: If6019ee68c0d8e495c920f98568741462c7d0fd0
Reviewed-on: https://go-review.googlesource.com/c/go/+/598135
Reviewed-by: David Chase
Reviewed-by: Meng Zhuo
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Michael Pratt
(cherry picked from commit ff695ca2e3ea37dcb688d470e86ed64849c61f2e)
Reviewed-on: https://go-review.googlesource.com/c/go/+/617455
Reviewed-by: Michael Knyszek
Auto-Submit: Michael Knyszek
---
 src/runtime/runtime-gdb_test.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index 5defe2f615eaa4..14561330bbf281 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) {
 
 	// Check that the backtrace matches the source code.
 	types := []string{
-		"[]main.astruct;",
-		"bucket<string,main.astruct>;",
-		"hash<string,main.astruct>;",
-		"main.astruct;",
-		"hash<string,main.astruct> * map[string]main.astruct;",
+		"[]main.astruct",
+		"bucket<string,main.astruct>",
+		"hash<string,main.astruct>",
+		"main.astruct",
+		"hash<string,main.astruct> * map[string]main.astruct",
 	}
 	for _, name := range types {
 		if !strings.Contains(sgot, name) {
-			t.Fatalf("could not find %s in 'info typrs astruct' output", name)
+			t.Fatalf("could not find %q in 'info typrs astruct' output", name)
 		}
 	}
 }

From 9563300f6e262589ae25c71d778bfcd646d4a750 Mon Sep 17 00:00:00 2001
From: cions
Date: Tue, 24 Sep 2024 01:27:40 +0000
Subject: [PATCH 38/66] [release-branch.go1.23] os: ignore SIGSYS in checkPidfd

In Android version 11 and earlier, pidfd-related system calls are not
allowed by the seccomp policy, which causes crashes due to SIGSYS
signals.

For #69065
Fixes #69640

Change-Id: Ib29631639a5cf221ac11b4d82390cb79436b8657
GitHub-Last-Rev: aad6b3b32c81795f86bc4a9e81aad94899daf520
GitHub-Pull-Request: golang/go#69543
Reviewed-on: https://go-review.googlesource.com/c/go/+/614277
Auto-Submit: Ian Lance Taylor
LUCI-TryBot-Result: Go LUCI
Reviewed-by: David Chase
Reviewed-by: Ian Lance Taylor
(cherry picked from commit a3a05ed04cb53c53bdacded2d16f0f3e5facdbb0)
Reviewed-on: https://go-review.googlesource.com/c/go/+/616077
Reviewed-by: Michael Knyszek
Reviewed-by: Kirill Kolyshkin
Reviewed-by: Dmitri Shuralyov
Reviewed-by: Mauri de Souza Meneguzzo
Auto-Submit: Michael Knyszek
---
 src/os/pidfd_linux.go           | 16 ++++++++++++++++
 src/runtime/os_linux.go         | 13 +++++++++++--
 src/runtime/os_unix_nonlinux.go |  7 +++++++
 src/runtime/signal_unix.go      | 17 +++++++++++++++++
 4 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go
index 545cfe9613b8b4..01a98ca17c286f 100644
--- a/src/os/pidfd_linux.go
+++ b/src/os/pidfd_linux.go
@@ -14,6 +14,7 @@ package os
 import (
 	"errors"
 	"internal/syscall/unix"
+	"runtime"
 	"sync"
 	"syscall"
 	"unsafe"
 )
@@ -147,6 +148,13 @@ var checkPidfdOnce = sync.OnceValue(checkPidfd)
 // execution environment in which the above system calls are restricted by
 // seccomp or a similar technology.
 func checkPidfd() error {
+	// In Android version < 12, pidfd-related system calls are not allowed
+	// by seccomp and trigger the SIGSYS signal. See issue #69065.
+	if runtime.GOOS == "android" {
+		ignoreSIGSYS()
+		defer restoreSIGSYS()
+	}
+
 	// Get a pidfd of the current process (opening of "/proc/self" won't
 	// work for waitid).
 	fd, err := unix.PidFDOpen(syscall.Getpid(), 0)
@@ -174,3 +182,11 @@ func checkPidfd() error {
 
 	return nil
 }
+
+// Provided by runtime.
+//
+//go:linkname ignoreSIGSYS
+func ignoreSIGSYS()
+
+//go:linkname restoreSIGSYS
+func restoreSIGSYS()
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 6ce656c70e146e..e80d390e0d09f2 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -879,8 +879,9 @@ func runPerThreadSyscall() {
 }
 
 const (
-	_SI_USER  = 0
-	_SI_TKILL = -6
+	_SI_USER     = 0
+	_SI_TKILL    = -6
+	_SYS_SECCOMP = 1
 )
 
 // sigFromUser reports whether the signal was sent because of a call
@@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool {
 	return code == _SI_USER || code == _SI_TKILL
 }
 
+// sigFromSeccomp reports whether the signal was sent from seccomp.
+// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + code := int32(c.sigcode()) + return code == _SYS_SECCOMP +} + //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) diff --git a/src/runtime/os_unix_nonlinux.go b/src/runtime/os_unix_nonlinux.go index b98753b8fe12b7..0e8b61c3b11aa2 100644 --- a/src/runtime/os_unix_nonlinux.go +++ b/src/runtime/os_unix_nonlinux.go @@ -13,3 +13,10 @@ package runtime func (c *sigctxt) sigFromUser() bool { return c.sigcode() == _SI_USER } + +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + return false +} diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 8ba498bdb238d5..6f40f440e807f8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -605,6 +605,19 @@ var crashing atomic.Int32 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool var testSigusr1 func(gp *g) bool +// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065. +var sigsysIgnored uint32 + +//go:linkname ignoreSIGSYS os.ignoreSIGSYS +func ignoreSIGSYS() { + atomic.Store(&sigsysIgnored, 1) +} + +//go:linkname restoreSIGSYS os.restoreSIGSYS +func restoreSIGSYS() { + atomic.Store(&sigsysIgnored, 0) +} + // sighandler is invoked when a signal occurs. The global g will be // set to a gsignal goroutine and we will be running on the alternate // signal stack. The parameter gp will be the value of the global g @@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { return } + if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 { + return + } + if flags&_SigKill != 0 { dieFromSignal(sig) } From cc16cdf48f228caebc55c982ed5b1b187ff39fcc Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 2 Oct 2024 13:38:25 -0700 Subject: [PATCH 39/66] [release-branch.go1.23] runtime: clear isSending bit earlier I've done some more testing of the new isSending field. I'm not able to get more than 2 bits set. That said, with this change it's significantly less likely to have even 2 bits set. The idea here is to clear the bit before possibly locking the channel we are sending the value on, thus avoiding some delay and some serialization. For #69312 For #69333 Change-Id: I8b5f167f162bbcbcbf7ea47305967f349b62b0f4 Reviewed-on: https://go-review.googlesource.com/c/go/+/617596 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- src/runtime/time.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index b43cf9589bf90b..7abd15ee862608 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -1114,6 +1114,11 @@ func (t *timer) unlockAndRun(now int64) { // started to send the value. That lets them correctly return // true meaning that no value was sent. lock(&t.sendLock) + + // We are committed to possibly sending a value based on seq, + // so no need to keep telling stop/modify that we are sending. + t.isSending.And(^isSendingClear) + if t.seq != seq { f = func(any, uintptr, int64) {} } @@ -1122,9 +1127,6 @@ func (t *timer) unlockAndRun(now int64) { f(arg, seq, delay) if !async && t.isChan { - // We are no longer sending a value. 
- t.isSending.And(^isSendingClear) - unlock(&t.sendLock) } From 7fc83126731de12449f7b38c32e2e318c439a6d4 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Tue, 11 Jun 2024 16:34:38 -0400 Subject: [PATCH 40/66] [release-branch.go1.23] os: add clone(CLONE_PIDFD) check to pidfd feature check clone(CLONE_PIDFD) was added in Linux 5.2 and pidfd_open was added in Linux 5.3. Thus our feature check for pidfd_open should be sufficient to ensure that clone(CLONE_PIDFD) works. Unfortuantely, some alternative Linux implementations may not follow this strict ordering. For example, QEMU 7.2 (Dec 2022) added pidfd_open, but clone(CLONE_PIDFD) was only added in QEMU 8.0 (Apr 2023). Debian bookworm provides QEMU 7.2 by default. For #68976. Fixes #69259. Change-Id: Ie3f3dc51f0cd76944871bf98690abf59f68fd7bf Reviewed-on: https://go-review.googlesource.com/c/go/+/592078 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui (cherry picked from commit 7a5fc9b34deb8d9fe22c9d060a5839827344fcc2) Reviewed-on: https://go-review.googlesource.com/c/go/+/612218 --- src/os/pidfd_linux.go | 24 ++++++++++-- src/syscall/exec_linux.go | 81 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 3 deletions(-) diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 01a98ca17c286f..0bfef7759cc679 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -8,6 +8,10 @@ // v5.3: pidfd_open syscall, clone3 syscall; // v5.4: P_PIDFD idtype support for waitid syscall; // v5.6: pidfd_getfd syscall. +// +// N.B. Alternative Linux implementations may not follow this ordering. e.g., +// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until +// 8.0. package os @@ -140,9 +144,9 @@ func pidfdWorks() bool { var checkPidfdOnce = sync.OnceValue(checkPidfd) -// checkPidfd checks whether all required pidfd-related syscalls work. -// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid -// syscall with idtype of P_PIDFD. +// checkPidfd checks whether all required pidfd-related syscalls work. This +// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with +// idtype of P_PIDFD, and clone(CLONE_PIDFD). // // Reasons for non-working pidfd syscalls include an older kernel and an // execution environment in which the above system calls are restricted by @@ -180,9 +184,23 @@ func checkPidfd() error { return NewSyscallError("pidfd_send_signal", err) } + // Verify that clone(CLONE_PIDFD) works. + // + // This shouldn't be necessary since pidfd_open was added in Linux 5.3, + // after CLONE_PIDFD in Linux 5.2, but some alternative Linux + // implementations may not adhere to this ordering. + if err := checkClonePidfd(); err != nil { + return err + } + return nil } +// Provided by syscall. +// +//go:linkname checkClonePidfd +func checkClonePidfd() error + // Provided by runtime. // //go:linkname ignoreSIGSYS diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 26844121910e8f..3e15676fcb3d5f 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -7,6 +7,7 @@ package syscall import ( + errpkg "errors" "internal/itoa" "runtime" "unsafe" @@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att if clone3 != nil { pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0) } else { + // N.B. Keep in sync with doCheckClonePidfd. flags |= uintptr(SIGCHLD) if runtime.GOARCH == "s390x" { // On Linux/s390, the first two arguments of clone(2) are swapped. 
@@ -743,3 +745,82 @@ func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { *sys.PidFD = -1 } } + +// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a +// clone. +// +//go:linkname os_checkClonePidfd os.checkClonePidfd +func os_checkClonePidfd() error { + pidfd := int32(-1) + pid, errno := doCheckClonePidfd(&pidfd) + if errno != 0 { + return errno + } + + if pidfd == -1 { + // Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process + // before returning. + + var err error + for { + var status WaitStatus + _, err = Wait4(int(pid), &status, 0, nil) + if err != EINTR { + break + } + } + if err != nil { + return err + } + + return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd") + } + + // Good: CLONE_PIDFD provided a pidfd. Reap the process and close the + // pidfd. + defer Close(int(pidfd)) + + for { + const _P_PIDFD = 3 + _, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0) + if errno != EINTR { + break + } + } + if errno != 0 { + return errno + } + + return nil +} + +// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and +// child execution. This is a separate function so we can separate the child's +// and parent's stack frames if we're using vfork. +// +// This is go:noinline because the point is to keep the stack frames of this +// and os_checkClonePidfd separate. +// +//go:noinline +func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { + flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD) + if runtime.GOARCH == "s390x" { + // On Linux/s390, the first two arguments of clone(2) are swapped. + pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd))) + } else { + pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd))) + } + if errno != 0 || pid != 0 { + // If we're in the parent, we must return immediately + // so we're not in the same stack frame as the child. + // This can at most use the return PC, which the child + // will not modify, and the results of + // rawVforkSyscall, which must have been written after + // the child was replaced. + return + } + + for { + RawSyscall(SYS_EXIT, 0, 0, 0) + } +} From 6495ce0495041ba28fdbad8ae8b0e0996481e6f4 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Wed, 2 Oct 2024 17:20:12 -0400 Subject: [PATCH 41/66] [release-branch.go1.23] syscall: use SYS_EXIT_GROUP in CLONE_PIDFD feature check child Inside Google we have seen issues with QEMU user mode failing to wake a parent waitid when this child exits with SYS_EXIT. This bug appears to not affect SYS_EXIT_GROUP. It is currently unclear if this is a general QEMU or specific to Google's configuration, but SYS_EXIT and SYS_EXIT_GROUP are semantically equivalent here, so we can use the latter here in case this is a general QEMU bug. For #68976. For #69259. 
Change-Id: I34e51088c9a6b7493a060e2a719a3cc4a3d54aa0 Reviewed-on: https://go-review.googlesource.com/c/go/+/617417 Reviewed-by: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI (cherry picked from commit 47a99359206f0dd41228deda0aa31f1e769cc156) Reviewed-on: https://go-review.googlesource.com/c/go/+/617716 --- src/syscall/exec_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 3e15676fcb3d5f..dfd9a8368a9e50 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -821,6 +821,6 @@ func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { } for { - RawSyscall(SYS_EXIT, 0, 0, 0) + RawSyscall(SYS_EXIT_GROUP, 0, 0, 0) } } From 35c010ad6db5113f51e1867ab3d0108754a3264c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisend=C3=B6rfer?= Date: Fri, 30 Aug 2024 08:17:19 +0200 Subject: [PATCH 42/66] [release-branch.go1.23] runtime: fix GoroutineProfile stacks not getting null terminated Fix a regression introduced in CL 572396 causing goroutine stacks not getting null terminated. This bug impacts callers that reuse the []StackRecord slice for multiple calls to GoroutineProfile. See https://github.com/felixge/fgprof/issues/33 for an example of the problem. Add a test case to prevent similar regressions in the future. Use null padding instead of null termination to be consistent with other profile types and because it's less code to implement. Also fix the ThreadCreateProfile code path. Fixes #69258 Change-Id: I0b9414f6c694c304bc03a5682586f619e9bf0588 Reviewed-on: https://go-review.googlesource.com/c/go/+/609815 Reviewed-by: Tim King LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt (cherry picked from commit 49e542aa85b7c2d9f6cf50de00843b455bc1e635) Reviewed-on: https://go-review.googlesource.com/c/go/+/621277 Reviewed-by: Cherry Mui --- src/runtime/mprof.go | 6 ++- src/runtime/pprof/pprof_test.go | 92 +++++++++++++++++++++++++++++---- 2 files changed, 86 insertions(+), 12 deletions(-) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 006274757e66f1..82b7fa68aecbde 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1270,7 +1270,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok // of calling ThreadCreateProfile directly. func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) { - copy(p[0].Stack0[:], r.Stack) + i := copy(p[0].Stack0[:], r.Stack) + clear(p[0].Stack0[i:]) p = p[1:] }) } @@ -1649,7 +1650,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { return } for i, mr := range records[0:n] { - copy(p[i].Stack0[:], mr.Stack) + l := copy(p[i].Stack0[:], mr.Stack) + clear(p[i].Stack0[l:]) } return } diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 30ef50b1c0fa7a..d16acf54dabd98 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2441,16 +2441,7 @@ func TestTimeVDSO(t *testing.T) { } func TestProfilerStackDepth(t *testing.T) { - // Disable sampling, otherwise it's difficult to assert anything. 
- oldMemRate := runtime.MemProfileRate - runtime.MemProfileRate = 1 - runtime.SetBlockProfileRate(1) - oldMutexRate := runtime.SetMutexProfileFraction(1) - t.Cleanup(func() { - runtime.MemProfileRate = oldMemRate - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(oldMutexRate) - }) + t.Cleanup(disableSampling()) const depth = 128 go produceProfileEvents(t, depth) @@ -2742,3 +2733,84 @@ runtime/pprof.inlineA`, }) } } + +func TestProfileRecordNullPadding(t *testing.T) { + // Produce events for the different profile types. + t.Cleanup(disableSampling()) + memSink = make([]byte, 1) // MemProfile + <-time.After(time.Millisecond) // BlockProfile + blockMutex(t) // MutexProfile + runtime.GC() + + // Test that all profile records are null padded. + testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile) + testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile) + testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, true) + }) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, false) + }) + // Not testing ThreadCreateProfile because it is broken, see issue 6104. +} + +func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) { + stack0 := func(sr *T) *[32]uintptr { + switch t := any(sr).(type) { + case *runtime.StackRecord: + return &t.Stack0 + case *runtime.MemProfileRecord: + return &t.Stack0 + case *runtime.BlockProfileRecord: + return &t.Stack0 + default: + panic(fmt.Sprintf("unexpected type %T", sr)) + } + } + + t.Run(name, func(t *testing.T) { + var p []T + for { + n, ok := fn(p) + if ok { + p = p[:n] + break + } + p = make([]T, n*2) + for i := range p { + s0 := stack0(&p[i]) + for j := range s0 { + // Poison the Stack0 array to identify lack of zero padding + s0[j] = ^uintptr(0) + } + } + } + + if len(p) == 0 { + t.Fatal("no records found") + } + + for _, sr := range p { + for i, v := range stack0(&sr) { + if v == ^uintptr(0) { + t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr) + } + } + } + }) +} + +// disableSampling configures the profilers to capture all events, otherwise +// it's difficult to assert anything. +func disableSampling() func() { + oldMemRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + runtime.SetBlockProfileRate(1) + oldMutexRate := runtime.SetMutexProfileFraction(1) + return func() { + runtime.MemProfileRate = oldMemRate + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(oldMutexRate) + } +} From 8d79bf799b46875b52bef6a47f89b73ead824160 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 14 Oct 2024 11:46:17 -0700 Subject: [PATCH 43/66] [release-branch.go1.23] runtime: don't frob isSending for tickers The Ticker Stop and Reset methods don't report a value, so we don't need to track whether they are interrupting a send. This includes a test that used to fail about 2% of the time on my laptop when run under x/tools/cmd/stress. 
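For reference, the signatures involved (from the time package) show why only timers need the tracking:

	func (t *Timer) Stop() bool        // reports whether the call stopped a pending fire
	func (t *Timer) Reset(d Duration) bool
	func (t *Ticker) Stop()            // no result to report
	func (t *Ticker) Reset(d Duration)

Only the Timer methods return a result that depends on whether a send was in flight, which is the question isSending exists to answer; the Ticker methods report nothing, so the bookkeeping can be skipped for periodic timers.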
For #69880 Fixes #69882 Change-Id: Ic6d14b344594149dd3c24b37bbe4e42e83f9a9ad Reviewed-on: https://go-review.googlesource.com/c/go/+/620136 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor Auto-Submit: Michael Knyszek Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Knyszek (cherry picked from commit 48849e0866f64a40d04a9151e44e5a73acdfc17b) Reviewed-on: https://go-review.googlesource.com/c/go/+/620137 Reviewed-by: Dmitri Shuralyov --- src/runtime/time.go | 17 +++++++++++------ src/time/sleep_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index 7abd15ee862608..19b4ac99010529 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -33,6 +33,7 @@ type timer struct { // isSending is used to handle races between running a // channel timer and stopping or resetting the timer. // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. // The lowest zero bit is set when about to send a value on the channel, // and cleared after sending the value. // The stop/reset code uses this to detect whether it @@ -467,7 +468,7 @@ func (t *timer) stop() bool { // send from actually happening. That means // that we should return true: the timer was // stopped, even though t.when may be zero. - if t.isSending.Load() > 0 { + if t.period == 0 && t.isSending.Load() > 0 { pending = true } } @@ -529,6 +530,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.maybeRunAsync() } t.trace("modify") + oldPeriod := t.period t.period = period if f != nil { t.f = f @@ -570,7 +572,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // send from actually happening. That means // that we should return true: the timer was // stopped, even though t.when may be zero. - if t.isSending.Load() > 0 { + if oldPeriod == 0 && t.isSending.Load() > 0 { pending = true } } @@ -1064,7 +1066,7 @@ func (t *timer) unlockAndRun(now int64) { async := debug.asynctimerchan.Load() != 0 var isSendingClear uint8 - if !async && t.isChan { + if !async && t.isChan && t.period == 0 { // Tell Stop/Reset that we are sending a value. // Set the lowest zero bit. // We do this awkward step because atomic.Uint8 @@ -1115,9 +1117,12 @@ func (t *timer) unlockAndRun(now int64) { // true meaning that no value was sent. lock(&t.sendLock) - // We are committed to possibly sending a value based on seq, - // so no need to keep telling stop/modify that we are sending. - t.isSending.And(^isSendingClear) + if t.period == 0 { + // We are committed to possibly sending a value + // based on seq, so no need to keep telling + // stop/modify that we are sending. + t.isSending.And(^isSendingClear) + } if t.seq != seq { f = func(any, uintptr, int64) {} diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 5357ed23c8e352..520ff957d09fc1 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -847,6 +847,31 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { wg.Wait() } +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. 
+func TestMultiWakeup(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTicker(Microsecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 100000 { + select { + case <-timer.C: + case <-After(Millisecond): + } + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. // https://golang.org/issue/38860 From 58babf6e0bf58cd81bb5a71744a1c195fba2d6c8 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Mon, 21 Oct 2024 17:34:22 +0000 Subject: [PATCH 44/66] [release-branch.go1.23] runtime,time: use atomic.Int32 for isSending This change switches isSending to be an atomic.Int32 instead of an atomic.Uint8. The Int32 version is managed as a counter, which is something that we couldn't do with Uint8 without adding a new intrinsic which may not be available on all architectures. That is, instead of only being able to support 8 concurrent timer firings on the same timer because we only have 8 independent bits to set for each concurrent timer firing, we can now have 2^31-1 concurrent timer firings before running into any issues. Like the fact that each bit-set was matched with a clear, here we match increments with decrements to indicate that we're in the "sending on a channel" critical section in the timer code, so we can report the correct result back on Stop or Reset. We choose an Int32 instead of a Uint32 because it's easier to check for obviously bad values (negative values are always bad) and 2^31-1 concurrent timer firings should be enough for anyone. Previously, we avoided anything bigger than a Uint8 because we could pack it into some padding in the runtime.timer struct. But it turns out that the type that actually matters, runtime.timeTimer, is exactly 96 bytes in size. This means its in the next size class up in the 112 byte size class because of an allocation header. We thus have some free space to work with. This change increases the size of this struct from 96 bytes to 104 bytes. (I'm not sure if runtime.timer is often allocated directly, but if it is, we get lucky in the same way too. It's exactly 80 bytes in size, which means its in the 96-byte size class, leaving us with some space to work with.) Fixes #69978 For #69969. Related to #69880 and #69312 and #69882. Change-Id: I9fd59cb6a69365c62971d1f225490a65c58f3e77 Cq-Include-Trybots: luci.golang.try:go1.23-linux-amd64-longtest Reviewed-on: https://go-review.googlesource.com/c/go/+/621616 Reviewed-by: Ian Lance Taylor Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit 6a49f81edc7aa8aa12e26a1a0ed8819a3e5c7b5e) Reviewed-on: https://go-review.googlesource.com/c/go/+/621856 Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Pratt --- src/runtime/time.go | 59 +++++++++++++----------------------------- src/time/sleep_test.go | 30 +++++++++++++++++++-- 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index 19b4ac99010529..7b344a349610d3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -30,36 +30,6 @@ type timer struct { state uint8 // state bits isChan bool // timer has a channel; immutable; can be read without lock - // isSending is used to handle races between running a - // channel timer and stopping or resetting the timer. - // It is used only for channel timers (t.isChan == true). 
- // It is not used for tickers. - // The lowest zero bit is set when about to send a value on the channel, - // and cleared after sending the value. - // The stop/reset code uses this to detect whether it - // stopped the channel send. - // - // An isSending bit is set only when t.mu is held. - // An isSending bit is cleared only when t.sendLock is held. - // isSending is read only when both t.mu and t.sendLock are held. - // - // Setting and clearing Uint8 bits handles the case of - // a timer that is reset concurrently with unlockAndRun. - // If the reset timer runs immediately, we can wind up with - // concurrent calls to unlockAndRun for the same timer. - // Using matched bit set and clear in unlockAndRun - // ensures that the value doesn't get temporarily out of sync. - // - // We use a uint8 to keep the timer struct small. - // This means that we can only support up to 8 concurrent - // runs of a timer, where a concurrent run can only occur if - // we start a run, unlock the timer, the timer is reset to a new - // value (or the ticker fires again), it is ready to run, - // and it is actually run, all before the first run completes. - // Since completing a run is fast, even 2 concurrent timer runs are - // nearly impossible, so this should be safe in practice. - isSending atomic.Uint8 - blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) @@ -99,6 +69,20 @@ type timer struct { // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. + // The value is incremented when about to send a value on the channel, + // and decremented after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // isSending is incremented only when t.mu is held. + // isSending is decremented only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + isSending atomic.Int32 } // init initializes a newly allocated timer t. @@ -1065,20 +1049,11 @@ func (t *timer) unlockAndRun(now int64) { } async := debug.asynctimerchan.Load() != 0 - var isSendingClear uint8 if !async && t.isChan && t.period == 0 { // Tell Stop/Reset that we are sending a value. - // Set the lowest zero bit. - // We do this awkward step because atomic.Uint8 - // doesn't support Add or CompareAndSwap. - // We only set bits with t locked. - v := t.isSending.Load() - i := sys.TrailingZeros8(^v) - if i == 8 { + if t.isSending.Add(1) < 0 { throw("too many concurrent timer firings") } - isSendingClear = 1 << i - t.isSending.Or(isSendingClear) } t.unlock() @@ -1121,7 +1096,9 @@ func (t *timer) unlockAndRun(now int64) { // We are committed to possibly sending a value // based on seq, so no need to keep telling // stop/modify that we are sending. 
- t.isSending.And(^isSendingClear) + if t.isSending.Add(-1) < 0 { + throw("mismatched isSending updates") + } } if t.seq != seq { diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 520ff957d09fc1..285a2e748c4af7 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -847,9 +847,9 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { wg.Wait() } -// Test having a large number of goroutines wake up a timer simultaneously. +// Test having a large number of goroutines wake up a ticker simultaneously. // This used to trigger a crash when run under x/tools/cmd/stress. -func TestMultiWakeup(t *testing.T) { +func TestMultiWakeupTicker(t *testing.T) { if testing.Short() { t.Skip("-short") } @@ -872,6 +872,32 @@ func TestMultiWakeup(t *testing.T) { wg.Wait() } +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTimer(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTimer(Nanosecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 10000 { + select { + case <-timer.C: + default: + } + timer.Reset(Nanosecond) + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. // https://golang.org/issue/38860 From cfe0ae0b7070048ceda021988b01fbc6a8589a1b Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 23 Oct 2024 16:28:52 +0000 Subject: [PATCH 45/66] [release-branch.go1.23] runtime: uphold goroutine profile invariants in coroswitch Goroutine profiles require checking in with the profiler before any goroutine starts running. coroswitch is a place where a goroutine may start running, but where we do not check in with the profiler, which leads to crashes. Fix this by checking in with the profiler the same way execute does. For #69998. Fixes #70001. Change-Id: Idef6dd31b70a73dd1c967b56c307c7a46a26ba73 Reviewed-on: https://go-review.googlesource.com/c/go/+/622016 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI (cherry picked from commit 2a98a1849f059ffa94ab23a1ab7d8fa0fd0b48dd) Reviewed-on: https://go-review.googlesource.com/c/go/+/622375 Reviewed-by: Michael Pratt --- src/runtime/coro.go | 12 +++++++++ src/runtime/pprof/pprof_test.go | 45 +++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/src/runtime/coro.go b/src/runtime/coro.go index 30ada455e4985e..d93817f92f1caa 100644 --- a/src/runtime/coro.go +++ b/src/runtime/coro.go @@ -208,6 +208,18 @@ func coroswitch_m(gp *g) { // directly if possible. setGNoWB(&mp.curg, gnext) setMNoWB(&gnext.m, mp) + + // Synchronize with any out-standing goroutine profile. We're about to start + // executing, and an invariant of the profiler is that we tryRecordGoroutineProfile + // whenever a goroutine is about to start running. + // + // N.B. We must do this before transitioning to _Grunning but after installing gnext + // in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile + // may allocate). + if goroutineProfile.active { + tryRecordGoroutineProfile(gnext, nil, osyield) + } + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { // The CAS failed: use casgstatus, which will take care of // coordinating with the garbage collector about the state change. 
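The missing check-in is reachable from ordinary user code because iter.Pull is implemented on top of the runtime's coroutines, so draining a pulled iterator while a goroutine profile is being taken exercises coroswitch. A minimal sketch of that shape (the full regression test is in the pprof test diff below):

	next, stop := iter.Pull(slices.Values([]int{1, 2, 3})) // starts a coroutine
	defer stop()
	for {
		if _, ok := next(); !ok {
			break
		}
	}
	// concurrently, in another goroutine:
	// pprof.Lookup("goroutine").WriteTo(io.Discard, 1)
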
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index d16acf54dabd98..41952ff147c4b7 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -15,6 +15,7 @@ import ( "internal/syscall/unix" "internal/testenv" "io" + "iter" "math" "math/big" "os" @@ -1754,6 +1755,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) { } } +// Regression test for #69998. +func TestGoroutineProfileCoro(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // Set up a goroutine to just create and run coroutine goroutines all day. + iterFunc := func() { + p, stop := iter.Pull2( + func(yield func(int, int) bool) { + for i := 0; i < 10000; i++ { + if !yield(i, i) { + return + } + } + }, + ) + defer stop() + for { + _, _, ok := p() + if !ok { + break + } + } + } + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + iterFunc() + select { + case <-done: + default: + } + } + }() + + // Take a goroutine profile. If the bug in #69998 is present, this will crash + // with high probability. We don't care about the output for this bug. + goroutineProf.WriteTo(io.Discard, 1) +} + func BenchmarkGoroutine(b *testing.B) { withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) { return func(b *testing.B) { From 5472853843bd9ae72ddc107a556558d13e39b035 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 7 Oct 2024 11:11:06 -0400 Subject: [PATCH 46/66] [release-branch.go1.23] cmd/link: generate Mach-O UUID when -B flag is specified Currently, on Mach-O, the Go linker doesn't generate LC_UUID in internal linking mode. This causes some macOS system tools unable to track the binary, as well as in some cases the binary unable to access local network on macOS 15. This CL makes the linker start generate LC_UUID. Currently, the UUID is generated if the -B flag is specified. And we'll make it generate UUID by default in a later CL. The -B flag is currently for generating GNU build ID on ELF, which is a similar concept to Mach-O's UUID. Instead of introducing another flag, we just use the same flag and the same setting. Specifically, "-B gobuildid" will generate a UUID based on the Go build ID. Updates #68678. Fixes #69992. 
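With this change a Mach-O UUID can be requested the same way a GNU build ID is on ELF, e.g. via go build -ldflags="-B gobuildid". The value is derived from the Go build ID rather than generated randomly, which keeps the output deterministic for a given build. In outline it is a 16-byte digest of the build ID with the UUID version/variant bits forced, roughly (a sketch; digestOfBuildID is a placeholder for the hash used inside uuidFromGoBuildId, which is shown only in part in the diff below):

	sum := digestOfBuildID(buildID) // placeholder for the internal hash of the build ID string
	uuid := sum[:16]
	uuid[6] = uuid[6]&0x0f | 0x30 // UUIDv3 version bits
	uuid[8] = uuid[8]&0x3f | 0xc0 // variant bits, as set by uuidFromGoBuildId
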
Cq-Include-Trybots: luci.golang.try:go1.23-darwin-amd64_14,go1.23-darwin-arm64_13 Change-Id: I90089a78ba144110bf06c1c6836daf2d737ff10a Reviewed-on: https://go-review.googlesource.com/c/go/+/618595 Reviewed-by: Michael Knyszek Reviewed-by: Ingo Oeser Reviewed-by: Than McIntosh LUCI-TryBot-Result: Go LUCI (cherry picked from commit 20ed60311848ca40e51cb430fa602dd83a9c726f) Reviewed-on: https://go-review.googlesource.com/c/go/+/622595 Reviewed-by: Michael Pratt --- src/cmd/link/internal/ld/elf.go | 14 ++++++++++--- src/cmd/link/internal/ld/macho.go | 16 ++++++++++++++ src/cmd/link/internal/ld/macho_update_uuid.go | 2 +- src/cmd/link/internal/ld/main.go | 6 +++++- test/fixedbugs/issue14636.go | 21 ++++++++++++------- 5 files changed, 47 insertions(+), 12 deletions(-) diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 0d8455d92e336b..bc484dedf6ed54 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int { return int(sh.Size) } -func addbuildinfo(val string) { +func addbuildinfo(ctxt *Link) { + val := *flagHostBuildid if val == "gobuildid" { buildID := *flagBuildid if buildID == "" { Exitf("-B gobuildid requires a Go build ID supplied via -buildid") } + if ctxt.IsDarwin() { + buildinfo = uuidFromGoBuildId(buildID) + return + } + hashedBuildID := notsha256.Sum256([]byte(buildID)) buildinfo = hashedBuildID[:20] @@ -821,11 +827,13 @@ func addbuildinfo(val string) { if !strings.HasPrefix(val, "0x") { Exitf("-B argument must start with 0x: %s", val) } - ov := val val = val[2:] - const maxLen = 32 + maxLen := 32 + if ctxt.IsDarwin() { + maxLen = 16 + } if hex.DecodedLen(len(val)) > maxLen { Exitf("-B option too long (max %d digits): %s", maxLen, ov) } diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 34624c25a9f333..c5a85f0e75e7cf 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr { return &machohdr } +// Create a new Mach-O load command. ndata is the number of 32-bit words for +// the data (not including the load command header). func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { if arch.PtrSize == 8 && (ndata&1 != 0) { ndata++ @@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) { } } + if ctxt.IsInternal() && len(buildinfo) > 0 { + ml := newMachoLoad(ctxt.Arch, LC_UUID, 4) + // Mach-O UUID is 16 bytes + if len(buildinfo) < 16 { + buildinfo = append(buildinfo, make([]byte, 16)...) + } + // By default, buildinfo is already in UUIDv3 format + // (see uuidFromGoBuildId). + ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo) + ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:]) + ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:]) + ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:]) + } + if ctxt.IsInternal() && ctxt.NeedCodeSign() { ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) ml.data[0] = uint32(codesigOff) diff --git a/src/cmd/link/internal/ld/macho_update_uuid.go b/src/cmd/link/internal/ld/macho_update_uuid.go index de27e655d59bf4..40e0c11ed19d6e 100644 --- a/src/cmd/link/internal/ld/macho_update_uuid.go +++ b/src/cmd/link/internal/ld/macho_update_uuid.go @@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte { // to use this UUID flavor than any of the others. 
This is similar // to how other linkers handle this (for example this code in lld: // https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524). - rv[6] &= 0xcf + rv[6] &= 0x0f rv[6] |= 0x30 rv[8] &= 0x3f rv[8] |= 0xc0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 56e865d8a53287..12bc896c66c3d7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -95,6 +95,7 @@ var ( flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table + flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID") flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") @@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) { flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible") - objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) @@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) { *flagBuildid = "go-openbsd" } + if *flagHostBuildid != "" { + addbuildinfo(ctxt) + } + // enable benchmarking var bench *benchmark.Metrics if len(*benchmarkFlag) != 0 { diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go index c8e751fb613c2e..a866c9a9e30e8e 100644 --- a/test/fixedbugs/issue14636.go +++ b/test/fixedbugs/issue14636.go @@ -12,22 +12,29 @@ import ( "bytes" "log" "os/exec" + "runtime" "strings" ) func main() { - checkLinkOutput("", "-B argument must start with 0x") + // The cannot open file error indicates that the parsing of -B flag + // succeeded and it failed at a later step. 
checkLinkOutput("0", "-B argument must start with 0x") - checkLinkOutput("0x", "usage") + checkLinkOutput("0x", "cannot open file nonexistent.o") checkLinkOutput("0x0", "-B argument must have even number of digits") - checkLinkOutput("0x00", "usage") + checkLinkOutput("0x00", "cannot open file nonexistent.o") checkLinkOutput("0xYZ", "-B argument contains invalid hex digit") - checkLinkOutput("0x"+strings.Repeat("00", 32), "usage") - checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)") + + maxLen := 32 + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + maxLen = 16 + } + checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o") + checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long") } func checkLinkOutput(buildid string, message string) { - cmd := exec.Command("go", "tool", "link", "-B", buildid) + cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o") out, err := cmd.CombinedOutput() if err == nil { log.Fatalf("expected cmd/link to fail") @@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) { } if !strings.Contains(firstLine, message) { - log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine) + log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine) } } From 6ba3a8a6ba5214ec88b83e39148de8cd540a6e94 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Wed, 23 Oct 2024 16:01:08 -0700 Subject: [PATCH 47/66] [release-branch.go1.23] internal/poll: keep copying after successful Sendfile return on BSD The BSD implementation of poll.SendFile incorrectly halted copying after succesfully writing one full chunk of data. Adjust the copy loop to match the Linux and Solaris implementations. In testing, empirically macOS appears to sometimes return EAGAIN from sendfile after successfully copying a full chunk. Add a check to all implementations to return nil after successfully copying all data if the last sendfile call returns EAGAIN. For #70000 For #70020 Change-Id: I57ba649491fc078c7330310b23e1cfd85135c8ff Reviewed-on: https://go-review.googlesource.com/c/go/+/622235 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor (cherry picked from commit bd388c0216bcb33d7325b0ad9722a3be8155a289) Reviewed-on: https://go-review.googlesource.com/c/go/+/622696 --- src/internal/poll/sendfile_bsd.go | 19 ++-- src/internal/poll/sendfile_linux.go | 3 + src/internal/poll/sendfile_solaris.go | 3 + src/os/copy_test.go | 154 ++++++++++++++++++++++++++ src/os/readfrom_linux_test.go | 41 ------- 5 files changed, 171 insertions(+), 49 deletions(-) create mode 100644 src/os/copy_test.go diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 669df94cc12e0d..0b0966815deedd 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -38,22 +38,25 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, pos += int64(n) written += int64(n) remain -= int64(n) + continue + } else if err != syscall.EAGAIN && err != syscall.EINTR { + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. 
+ break } if err == syscall.EINTR { continue } - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile), and other errors. - // We should end the loop when there is no error - // returned from sendfile(2) or it is not a retryable error. - if err != syscall.EAGAIN { - break - } if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index d1c4d5c0d3d34d..1c4130d45da89c 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index ec675833a225dc..b7c3f81a1efdcd 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/os/copy_test.go b/src/os/copy_test.go new file mode 100644 index 00000000000000..82346ca4e57e3e --- /dev/null +++ b/src/os/copy_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "io" + "math/rand/v2" + "net" + "os" + "runtime" + "sync" + "testing" + + "golang.org/x/net/nettest" +) + +// Exercise sendfile/splice fast paths with a moderately large file. 
+// +// https://go.dev/issue/70000 + +func TestLargeCopyViaNetwork(t *testing.T) { + const size = 10 * 1024 * 1024 + dir := t.TempDir() + + src, err := os.Create(dir + "/src") + if err != nil { + t.Fatal(err) + } + defer src.Close() + if _, err := io.CopyN(src, newRandReader(), size); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, 0); err != nil { + t.Fatal(err) + } + + dst, err := os.Create(dir + "/dst") + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + client, server := createSocketPair(t, "tcp") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if n, err := io.Copy(dst, server); n != size || err != nil { + t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size) + } + }() + go func() { + defer wg.Done() + defer client.Close() + if n, err := io.Copy(client, src); n != size || err != nil { + t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size) + } + }() + wg.Wait() + + if _, err := dst.Seek(0, 0); err != nil { + t.Fatal(err) + } + if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil { + t.Fatal(err) + } +} + +func compareReaders(a, b io.Reader) error { + bufa := make([]byte, 4096) + bufb := make([]byte, 4096) + for { + na, erra := io.ReadFull(a, bufa) + if erra != nil && erra != io.EOF { + return erra + } + nb, errb := io.ReadFull(b, bufb) + if errb != nil && errb != io.EOF { + return errb + } + if !bytes.Equal(bufa[:na], bufb[:nb]) { + return errors.New("contents mismatch") + } + if erra == io.EOF && errb == io.EOF { + break + } + } + return nil +} + +type randReader struct { + rand *rand.Rand +} + +func newRandReader() *randReader { + return &randReader{rand.New(rand.NewPCG(0, 0))} +} + +func (r *randReader) Read(p []byte) (int, error) { + var v uint64 + var n int + for i := range p { + if n == 0 { + v = r.rand.Uint64() + n = 8 + } + p[i] = byte(v & 0xff) + v >>= 8 + n-- + } + return len(p), nil +} + +func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 8dcb9cb2172882..45867477dc26b2 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -14,15 +14,12 @@ import ( "net" . 
"os" "path/filepath" - "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" - - "golang.org/x/net/nettest" ) func TestCopyFileRange(t *testing.T) { @@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) { t.Fatalf("server Control error: %v", err) } } - -func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { - t.Helper() - if !nettest.TestableNetwork(proto) { - t.Skipf("%s does not support %q", runtime.GOOS, proto) - } - - ln, err := nettest.NewLocalListener(proto) - if err != nil { - t.Fatalf("NewLocalListener error: %v", err) - } - t.Cleanup(func() { - if ln != nil { - ln.Close() - } - if client != nil { - client.Close() - } - if server != nil { - server.Close() - } - }) - ch := make(chan struct{}) - go func() { - var err error - server, err = ln.Accept() - if err != nil { - t.Errorf("Accept new connection error: %v", err) - } - ch <- struct{}{} - }() - client, err = net.Dial(proto, ln.Addr().String()) - <-ch - if err != nil { - t.Fatalf("Dial new connection error: %v", err) - } - return client, server -} From 958f3a0309855bc2e362e2951c70849ebec76f30 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Thu, 24 Oct 2024 13:10:54 +0800 Subject: [PATCH 48/66] [release-branch.go1.23] internal/poll: handle the special case of sendfile(2) sending the full chunk CL 622235 would fix #70000 while resulting in one extra sendfile(2) system call when sendfile(2) returns (>0, EAGAIN). That's also why I left sendfile_bsd.go behind, and didn't make it line up with other two implementations: sendfile_linux.go and sendfile_solaris.go. Unlike sendfile(2)'s on Linux and Solaris that always return (0, EAGAIN), sendfile(2)'s on *BSD and macOS may return (>0, EAGAIN) when using a socket marked for non-blocking I/O. In that case, the current code will try to re-call sendfile(2) immediately, which will most likely get us a (0, EAGAIN). After that, it goes to `dstFD.pd.waitWrite(dstFD.isFile)` below, which should have been done in the first place. Thus, the real problem that leads to #70000 is that the old code doesn't handle the special case of sendfile(2) sending the exact number of bytes the caller requested. Fixes #70000 Fixes #70020 Change-Id: I6073d6b9feb58b3d7e114ec21e4e80d9727bca66 Reviewed-on: https://go-review.googlesource.com/c/go/+/622255 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor TryBot-Result: Gopher Robot Reviewed-by: Damien Neil Run-TryBot: Andy Pan Reviewed-on: https://go-review.googlesource.com/c/go/+/622697 --- src/internal/poll/sendfile_bsd.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 0b0966815deedd..341e07ca1fed6a 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,13 +32,28 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if int64(n) > remain { n = int(remain) } + m := n pos1 := pos n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) - continue + // (n, nil) indicates that sendfile(2) has transferred + // the exact number of bytes we requested, or some unretryable + // error have occurred with partial bytes sent. Either way, we + // don't need to go through the following logic to check EINTR + // or fell into dstFD.pd.waitWrite, just continue to send the + // next chunk or break the loop. 
+ if n == m { + continue + } else if err != syscall.EAGAIN && + err != syscall.EINTR && + err != syscall.EBUSY { + // Particularly, EPIPE. Errors like that would normally lead + // the subsequent sendfile(2) call to (-1, EBADF). + break + } } else if err != syscall.EAGAIN && err != syscall.EINTR { // This includes syscall.ENOSYS (no kernel // support) and syscall.EINVAL (fd types which From a0d15cb9c8f3c35c96129857984d25446041f29e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisend=C3=B6rfer?= Date: Sat, 7 Sep 2024 13:44:09 +0200 Subject: [PATCH 49/66] [release-branch.go1.23] runtime: fix MutexProfile missing root frames Fix a regression introduced in CL 598515 causing runtime.MutexProfile stack traces to omit their root frames. In most cases this was merely causing the `runtime.goexit` frame to go missing. But in the case of runtime._LostContendedRuntimeLock, an empty stack trace was being produced. Add a test that catches this regression by checking for a stack trace with the `runtime.goexit` frame. Also fix a separate problem in expandFrame that could cause out-of-bounds panics when profstackdepth is set to a value below 32. There is no test for this fix because profstackdepth can't be changed at runtime right now. Fixes #69865 Change-Id: I1600fe62548ea84981df0916d25072c3ddf1ea1a Reviewed-on: https://go-review.googlesource.com/c/go/+/611615 Reviewed-by: David Chase Reviewed-by: Nick Ripley Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI (cherry picked from commit c64ca8c6ef13723b9f25f4b5e1c7b6986b958d2e) Reviewed-on: https://go-review.googlesource.com/c/go/+/621276 Reviewed-by: Cherry Mui --- src/runtime/mprof.go | 3 ++- src/runtime/pprof/mprof_test.go | 2 +- src/runtime/pprof/pprof_test.go | 46 ++++++++++++++++++++++++++++++--- 3 files changed, 45 insertions(+), 6 deletions(-) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 82b7fa68aecbde..ee3e59a9aa99ce 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) { for i := range p { cf := CallersFrames(p[i].Stack()) j := 0 - for ; j < len(expandedStack); j++ { + for j < len(expandedStack) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" expandedStack[j] = f.PC + 1 + j++ if !more { break } diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 391588d4acd0ec..ef373b36848437 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p) diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 41952ff147c4b7..da4ad17d77e6fd 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -982,7 +982,7 @@ func TestBlockProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk) @@ -992,7 +992,7 @@ func TestBlockProfile(t *testing.T) { } -func stacks(p *profile.Profile) (res [][]string) { +func profileStacks(p *profile.Profile) (res [][]string) { for _, s := range p.Sample { var stk []string for _, l := range s.Location { @@ -1005,6 +1005,22 @@ func 
stacks(p *profile.Profile) (res [][]string) { return res } +func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) { + for _, record := range records { + frames := runtime.CallersFrames(record.Stack()) + var stk []string + for { + frame, more := frames.Next() + stk = append(stk, frame.Function) + if !more { + break + } + } + res = append(res, stk) + } + return res +} + func containsStack(got [][]string, want []string) bool { for _, stk := range got { if len(stk) < len(want) { @@ -1289,7 +1305,7 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, want := range [][]string{ {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"}, } { @@ -1329,6 +1345,28 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D) } }) + + t.Run("records", func(t *testing.T) { + // Record a mutex profile using the structured record API. + var records []runtime.BlockProfileRecord + for { + n, ok := runtime.MutexProfile(records) + if ok { + records = records[:n] + break + } + records = make([]runtime.BlockProfileRecord, n*2) + } + + // Check that we see the same stack trace as the proto profile. For + // historical reason we expect a runtime.goexit root frame here that is + // omitted in the proto profile. + stks := blockRecordStacks(records) + want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"} + if !containsStack(stks, want) { + t.Errorf("No matching stack entry for %+v", want) + } + }) } func TestMutexProfileRateAdjust(t *testing.T) { @@ -2514,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) var stk []string for _, s := range stks { if hasPrefix(s, test.prefix) { From 1207de4f6c3739eb4339ff9eb5a794e9bdd7c4d2 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 8 Oct 2024 18:10:17 +0200 Subject: [PATCH 50/66] [release-branch.go1.23] runtime: reduce syscall.SyscallX stack usage syscall.SyscallX consumes a lot of stack space, which is a problem because they are nosplit functions. They used to use less stack space, but CL 563315, that landed in Go 1.23, increased the stack usage by a lot. This CL reduces the stack usage back to the previous level. Fixes #69848 Updates #69813 Change-Id: Iddedd28b693c66a258da687389768055c493fc2e Reviewed-on: https://go-review.googlesource.com/c/go/+/618497 Reviewed-by: Cherry Mui Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit fa7343aca326aad061ab877c1a4cebb96c4355c1) Reviewed-on: https://go-review.googlesource.com/c/go/+/623516 Reviewed-by: Dmitri Shuralyov Reviewed-by: Michael Pratt --- src/runtime/syscall_windows.go | 30 +++++++++++++++-------------- src/runtime/syscall_windows_test.go | 7 +++++++ 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 69d720a395c48d..85b1b8c9024a73 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -454,43 +454,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3} - return syscall_SyscallN(fn, args[:nargs]...) 
+ return syscall_syscalln(fn, nargs, a1, a2, a3) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6) } //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) } // maxArgs should be divisible by 2, as Windows stack @@ -503,7 +497,15 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { - if len(args) > maxArgs { + return syscall_syscalln(fn, uintptr(len(args)), args...) +} + +//go:nosplit +func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) { + if n > uintptr(len(args)) { + panic("syscall: n > len(args)") // should not be reachable from user code + } + if n > maxArgs { panic("runtime: SyscallN has too many arguments") } @@ -512,7 +514,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { // calls back into Go. c := &getg().m.winsyscall c.fn = fn - c.n = uintptr(len(args)) + c.n = n if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 6a056c8d2b190c..156cf3eb8e5c71 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -1212,6 +1212,13 @@ func TestBigStackCallbackSyscall(t *testing.T) { } } +func TestSyscallStackUsage(t *testing.T) { + // Test that the stack usage of a syscall doesn't exceed the limit. + // See https://go.dev/issue/69813. 
+ syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + var ( modwinmm = syscall.NewLazyDLL("winmm.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") From c390a1c22e8951263e6c01346a4281d604b25062 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Wed, 6 Nov 2024 22:21:42 +0000 Subject: [PATCH 51/66] [release-branch.go1.23] go1.23.3 Change-Id: I065005a4a18f801d09ad3ebc886e90a6dd1df69a Reviewed-on: https://go-review.googlesource.com/c/go/+/626137 Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Auto-Submit: Gopher Robot Reviewed-by: David Chase --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 7c5b4094049322..66e6565be501e5 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.2 -time 2024-09-28T01:34:15Z +go1.23.3 +time 2024-11-06T18:46:45Z From 3726f07c4650d266b58b828432b3f8dab6d553d7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 25 Oct 2024 14:04:22 -0400 Subject: [PATCH 52/66] [release-branch.go1.23] cmd/compile: use a non-fragile test for "does f contain closure c?" The old test relied on naming conventions. The new test uses an explicit parent pointer chain initialized when the closures are created (in the same place that the names used in the older fragile test were assigned). Fixes #70198. Change-Id: Ie834103c7096e4505faaff3bed1fc6e918a21211 Reviewed-on: https://go-review.googlesource.com/c/go/+/622656 Reviewed-by: Keith Randall Reviewed-by: Keith Randall Reviewed-by: Cuong Manh Le LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/625535 Auto-Submit: Dmitri Shuralyov --- src/cmd/compile/internal/escape/solve.go | 11 +++++---- src/cmd/compile/internal/ir/func.go | 6 +++++ src/cmd/compile/internal/ir/sizeof_test.go | 2 +- .../internal/rangefunc/rangefunc_test.go | 24 +++++++++++++++++++ 4 files changed, 37 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go index ef17bc48ef2342..32f5a771a34a66 100644 --- a/src/cmd/compile/internal/escape/solve.go +++ b/src/cmd/compile/internal/escape/solve.go @@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool { return false } - // Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN". - // TODO(mdempsky): Better way to recognize this. - fn := f.Sym().Name - cn := c.Sym().Name - return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-') + for p := c.ClosureParent; p != nil; p = p.ClosureParent { + if p == f { + return true + } + } + return false } diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index d0c8ee359befff..4fa9055b4b2c0b 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -51,6 +51,8 @@ import ( // the generated ODCLFUNC, but there is no // pointer from the Func back to the OMETHVALUE. type Func struct { + // if you add or remove a field, don't forget to update sizeof_test.go + miniNode Body Nodes @@ -76,6 +78,9 @@ type Func struct { // Populated during walk. Closures []*Func + // Parent of a closure + ClosureParent *Func + // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th // scope's parent is stored at Parents[i-1]. 
@@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, fn.Nname.Defn = fn pkg.Funcs = append(pkg.Funcs, fn) + fn.ClosureParent = outerfn return fn } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 68d2865595b716..6331cceb4a59b4 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 176, 296}, + {Func{}, 180, 304}, {Name{}, 96, 168}, } diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go index 97ab254395332a..e488c3cf377cae 100644 --- a/src/cmd/compile/internal/rangefunc/rangefunc_test.go +++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go @@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) { t.Errorf("Expected y=3, got y=%d\n", y) } } + +func Bug70035(s1, s2, s3 []string) string { + var c1 string + for v1 := range slices.Values(s1) { + var c2 string + for v2 := range slices.Values(s2) { + var c3 string + for v3 := range slices.Values(s3) { + c3 = c3 + v3 + } + c2 = c2 + v2 + c3 + } + c1 = c1 + v1 + c2 + } + return c1 +} + +func Test70035(t *testing.T) { + got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"}) + want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC" + if got != want { + t.Errorf("got %v, want %v", got, want) + } +} From 777f43ab27bde4c662cd0a663f807f74f3fbab0f Mon Sep 17 00:00:00 2001 From: Dmitri Shuralyov Date: Mon, 4 Nov 2024 17:36:26 -0500 Subject: [PATCH 53/66] [release-branch.go1.23]time: accept "+01" in TestLoadFixed on OpenBSD This stops the test from failing with a known failure mode, and creates time to look into what the next steps should be, if any. For #69840 Fixes #70239 Change-Id: I060903d256ed65c5dfcd70ae76eb361cab63186f Reviewed-on: https://go-review.googlesource.com/c/go/+/625197 Auto-Submit: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor Reviewed-by: Eric Grosse (cherry picked from commit bea9b91f0f4be730c880edbe496ab25c9b742cad) Reviewed-on: https://go-review.googlesource.com/c/go/+/627575 Reviewed-by: Dmitri Shuralyov TryBot-Bypass: Dmitri Shuralyov Auto-Submit: Ian Lance Taylor --- src/time/time_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/time/time_test.go b/src/time/time_test.go index 70eb61478480e0..c12b9117d0f5c1 100644 --- a/src/time/time_test.go +++ b/src/time/time_test.go @@ -14,6 +14,7 @@ import ( "math/rand" "os" "runtime" + "slices" "strings" "sync" "testing" @@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) { // So GMT+1 corresponds to -3600 in the Go zone, not +3600. name, offset := Now().In(loc).Zone() // The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1" - // on earlier versions; we accept both. (Issue #17276). - if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 { - t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d", - name, offset, "GMT+1", "-01", -1*60*60) + // on earlier versions; we accept both. (Issue 17276.) + wantName := []string{"GMT+1", "-01"} + // The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.) 
+ if runtime.GOOS == "openbsd" { + wantName = append(wantName, "+01") + } + if !slices.Contains(wantName, name) || offset != -1*60*60 { + t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d", + name, offset, wantName, -1*60*60) } } From 847cb6f9ca43da48cb10e98808a74a40b41242fa Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 5 Nov 2024 16:01:45 +0100 Subject: [PATCH 54/66] [release-branch.go1.23] syscall: mark SyscallN as noescape syscall.SyscallN is implemented by runtime.syscall_syscalln, which makes sure that the variadic argument doesn't escape. There is no need to worry about the lifetime of the elements of the variadic argument, as the compiler will keep them live until the function returns. For #70197 Fixes #70202 Change-Id: I12991f0be12062eea68f2b103fa0a794c1b527eb Reviewed-on: https://go-review.googlesource.com/c/go/+/625297 Reviewed-by: Ian Lance Taylor Reviewed-by: Alex Brainman Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI (cherry picked from commit 7fff741016c8157e107cce8013ee3ca621725384) Reviewed-on: https://go-review.googlesource.com/c/go/+/630196 Reviewed-by: Dmitri Shuralyov Auto-Submit: Ian Lance Taylor Reviewed-by: Quim Muntal --- src/syscall/dll_windows.go | 1 + src/syscall/syscall_windows_test.go | 45 +++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go index 81134cb0bd27ff..a7873e6ad8c93e 100644 --- a/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go @@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a // Deprecated: Use [SyscallN] instead. func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) +//go:noescape func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno) diff --git a/src/syscall/syscall_windows_test.go b/src/syscall/syscall_windows_test.go index f67e8991591601..a6c6eff31f0c45 100644 --- a/src/syscall/syscall_windows_test.go +++ b/src/syscall/syscall_windows_test.go @@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) { } } +func TestSyscallAllocations(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + + // Test that syscall.SyscallN arguments do not escape. + // The function used (in this case GetVersion) doesn't matter + // as long as it is always available and doesn't panic. 
+ h, err := syscall.LoadLibrary("kernel32.dll") + if err != nil { + t.Fatal(err) + } + defer syscall.FreeLibrary(h) + proc, err := syscall.GetProcAddress(h, "GetVersion") + if err != nil { + t.Fatal(err) + } + + testAllocs := func(t *testing.T, name string, fn func() error) { + t.Run(name, func(t *testing.T) { + n := int(testing.AllocsPerRun(10, func() { + if err := fn(); err != nil { + t.Fatalf("%s: %v", name, err) + } + })) + if n > 0 { + t.Errorf("allocs = %d, want 0", n) + } + }) + } + + testAllocs(t, "SyscallN", func() error { + r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) + testAllocs(t, "Syscall", func() error { + r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0) + if r0 == 0 { + return syscall.Errno(e1) + } + return nil + }) +} + func FuzzUTF16FromString(f *testing.F) { f.Add("hi") // ASCII f.Add("â") // latin1 From d8adc6c4c7cc21d607a97aeaa6b7ffc4c2d76e65 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Fri, 1 Nov 2024 21:54:07 +0000 Subject: [PATCH 55/66] [release-branch.go1.23] runtime: prevent weak->strong conversions during mark termination Currently it's possible for weak->strong conversions to create more GC work during mark termination. When a weak->strong conversion happens during the mark phase, we need to mark the newly-strong pointer, since it may now be the only pointer to that object. In other words, the object could be white. But queueing new white objects creates GC work, and if this happens during mark termination, we could end up violating mark termination invariants. In the parlance of the mark termination algorithm, the weak->strong conversion is a non-monotonic source of GC work, unlike the write barriers (which will eventually only see black objects). This change fixes the problem by forcing weak->strong conversions to block during mark termination. We can do this efficiently by setting a global flag before the ragged barrier that is checked at each weak->strong conversion. If the flag is set, then the conversions block. The ragged barrier ensures that all Ps have observed the flag and that any weak->strong conversions which completed before the ragged barrier have their newly-minted strong pointers visible in GC work queues if necessary. We later unset the flag and wake all the blocked goroutines during the mark termination STW. There are a few subtleties that we need to account for. For one, it's possible that a goroutine which blocked in a weak->strong conversion wakes up only to find it's mark termination time again, so we need to recheck the global flag on wake. We should also stay non-preemptible while performing the check, so that if the check *does* appear as true, it cannot switch back to false while we're actively trying to block. If it switches to false while we try to block, then we'll be stuck in the queue until the following GC. All-in-all, this CL is more complicated than I would have liked, but it's the only idea so far that is clearly correct to me at a high level. This change adds a test which is somewhat invasive as it manipulates mark termination, but hopefully that infrastructure will be useful for debugging, fixing, and regression testing mark termination whenever we do fix it. For #69803. Fixes #70323. 
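To make the shape of that mechanism easier to follow before reading the diff, here is a rough standalone sketch of the same blocking pattern built from ordinary sync primitives. The convGate type and its method names are invented for illustration only; the real runtime code in this patch uses its own mutex, a gQueue of parked goroutines, gopark, and injectglist rather than sync.Cond.

    package main

    import (
        "fmt"
        "sync"
    )

    // convGate is a toy stand-in for work.strongFromWeak: a flag that, while
    // set, makes "conversions" wait, plus a wake-all when the flag is cleared.
    type convGate struct {
        mu      sync.Mutex
        blocked bool
        cond    *sync.Cond
    }

    func newConvGate() *convGate {
        g := &convGate{}
        g.cond = sync.NewCond(&g.mu)
        return g
    }

    // enter is what a weak->strong conversion would do: if the gate is closed,
    // wait; on wake, re-check the flag, since it may be "mark termination"
    // time again by the time this goroutine runs.
    func (g *convGate) enter() {
        g.mu.Lock()
        for g.blocked {
            g.cond.Wait()
        }
        g.mu.Unlock()
    }

    // block corresponds to setting the flag before the ragged barrier;
    // unblock corresponds to clearing it and waking all waiters during the
    // mark termination stop-the-world.
    func (g *convGate) block() {
        g.mu.Lock()
        g.blocked = true
        g.mu.Unlock()
    }

    func (g *convGate) unblock() {
        g.mu.Lock()
        g.blocked = false
        g.mu.Unlock()
        g.cond.Broadcast()
    }

    func main() {
        g := newConvGate()
        g.block()
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                g.enter() // waits here while the gate is closed
                fmt.Println("conversion", i, "proceeded")
            }(i)
        }
        g.unblock()
        wg.Wait()
    }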
Change-Id: Ie314e6fd357c9e2a07a9be21f217f75f7aba8c4a Reviewed-on: https://go-review.googlesource.com/c/go/+/623615 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui (cherry picked from commit 80d306da50aef6334bcb65fb02f5728cb9513691) Reviewed-on: https://go-review.googlesource.com/c/go/+/627615 TryBot-Bypass: Dmitri Shuralyov Auto-Submit: Dmitri Shuralyov --- src/runtime/export_test.go | 27 +++++ src/runtime/gc_test.go | 77 ++++++++++++ src/runtime/lockrank.go | 235 ++++++++++++++++++------------------ src/runtime/mgc.go | 49 ++++++++ src/runtime/mheap.go | 48 +++++++- src/runtime/mklockrank.go | 2 + src/runtime/runtime2.go | 2 + src/runtime/traceruntime.go | 32 ++--- 8 files changed, 340 insertions(+), 132 deletions(-) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index d55da1028dbb1c..4502fa72a10371 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1886,3 +1886,30 @@ func (m *TraceMap) PutString(s string) (uint64, bool) { func (m *TraceMap) Reset() { m.traceMap.reset() } + +func SetSpinInGCMarkDone(spin bool) { + gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin) +} + +func GCMarkDoneRestarted() bool { + // Only read this outside of the GC. If we're running during a GC, just report false. + mp := acquirem() + if gcphase != _GCoff { + releasem(mp) + return false + } + restarted := gcDebugMarkDone.restartedDueTo27993 + releasem(mp) + return restarted +} + +func GCMarkDoneResetRestartFlag() { + mp := acquirem() + for gcphase != _GCoff { + releasem(mp) + Gosched() + mp = acquirem() + } + gcDebugMarkDone.restartedDueTo27993 = false + releasem(mp) +} diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 908f6322466b17..4b92b200674386 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -6,6 +6,8 @@ package runtime_test import ( "fmt" + "internal/testenv" + "internal/weak" "math/bits" "math/rand" "os" @@ -787,3 +789,78 @@ func TestMemoryLimitNoGCPercent(t *testing.T) { func TestMyGenericFunc(t *testing.T) { runtime.MyGenericFunc[int]() } + +func TestWeakToStrongMarkTermination(t *testing.T) { + testenv.MustHaveParallelism(t) + + type T struct { + a *int + b int + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + w := make([]weak.Pointer[T], 2048) + + // Make sure there's no out-standing GC from a previous test. + runtime.GC() + + // Create many objects with a weak pointers to them. + for i := range w { + x := new(T) + x.a = new(int) + w[i] = weak.Make(x) + } + + // Reset the restart flag. + runtime.GCMarkDoneResetRestartFlag() + + // Prevent mark termination from completing. + runtime.SetSpinInGCMarkDone(true) + + // Start a GC, and wait a little bit to get something spinning in mark termination. + // Simultaneously, fire off another goroutine to disable spinning. If everything's + // working correctly, then weak.Strong will block, so we need to make sure something + // prevents the GC from continuing to spin. + done := make(chan struct{}) + go func() { + runtime.GC() + done <- struct{}{} + }() + go func() { + time.Sleep(100 * time.Millisecond) + + // Let mark termination continue. + runtime.SetSpinInGCMarkDone(false) + }() + time.Sleep(10 * time.Millisecond) + + // Perform many weak->strong conversions in the critical window. + var wg sync.WaitGroup + for _, wp := range w { + wg.Add(1) + go func() { + defer wg.Done() + wp.Strong() + }() + } + + // Make sure the GC completes. + <-done + + // Make sure all the weak->strong conversions finish. 
+ wg.Wait() + + // The bug is triggered if there's still mark work after gcMarkDone stops the world. + // + // This can manifest in one of two ways today: + // - An exceedingly rare crash in mark termination. + // - gcMarkDone restarts, as if issue #27993 is at play. + // + // Check for the latter. This is a fairly controlled environment, so #27993 is very + // unlikely to happen (it's already rare to begin with) but we'll always _appear_ to + // trigger the same bug if weak->strong conversions aren't properly coordinated with + // mark termination. + if runtime.GCMarkDoneRestarted() { + t.Errorf("gcMarkDone restarted") + } +} diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 432ace728b8269..373838332f564a 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -17,6 +17,7 @@ const ( lockRankDefer lockRankSweepWaiters lockRankAssistQueue + lockRankStrongFromWeakQueue lockRankSweep lockRankTestR lockRankTestW @@ -84,64 +85,65 @@ const lockRankLeafRank lockRank = 1000 // lockNames gives the names associated with each of the above ranks. var lockNames = []string{ - lockRankSysmon: "sysmon", - lockRankScavenge: "scavenge", - lockRankForcegc: "forcegc", - lockRankDefer: "defer", - lockRankSweepWaiters: "sweepWaiters", - lockRankAssistQueue: "assistQueue", - lockRankSweep: "sweep", - lockRankTestR: "testR", - lockRankTestW: "testW", - lockRankTimerSend: "timerSend", - lockRankAllocmW: "allocmW", - lockRankExecW: "execW", - lockRankCpuprof: "cpuprof", - lockRankPollCache: "pollCache", - lockRankPollDesc: "pollDesc", - lockRankWakeableSleep: "wakeableSleep", - lockRankHchan: "hchan", - lockRankAllocmR: "allocmR", - lockRankExecR: "execR", - lockRankSched: "sched", - lockRankAllg: "allg", - lockRankAllp: "allp", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", - lockRankTimers: "timers", - lockRankTimer: "timer", - lockRankNetpollInit: "netpollInit", - lockRankRoot: "root", - lockRankItab: "itab", - lockRankReflectOffs: "reflectOffs", - lockRankUserArenaState: "userArenaState", - lockRankTraceBuf: "traceBuf", - lockRankTraceStrings: "traceStrings", - lockRankFin: "fin", - lockRankSpanSetSpine: "spanSetSpine", - lockRankMspanSpecial: "mspanSpecial", - lockRankTraceTypeTab: "traceTypeTab", - lockRankGcBitsArenas: "gcBitsArenas", - lockRankProfInsert: "profInsert", - lockRankProfBlock: "profBlock", - lockRankProfMemActive: "profMemActive", - lockRankProfMemFuture: "profMemFuture", - lockRankGscan: "gscan", - lockRankStackpool: "stackpool", - lockRankStackLarge: "stackLarge", - lockRankHchanLeaf: "hchanLeaf", - lockRankWbufSpans: "wbufSpans", - lockRankMheap: "mheap", - lockRankMheapSpecial: "mheapSpecial", - lockRankGlobalAlloc: "globalAlloc", - lockRankTrace: "trace", - lockRankTraceStackTab: "traceStackTab", - lockRankPanic: "panic", - lockRankDeadlock: "deadlock", - lockRankRaceFini: "raceFini", - lockRankAllocmRInternal: "allocmRInternal", - lockRankExecRInternal: "execRInternal", - lockRankTestRInternal: "testRInternal", + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankStrongFromWeakQueue: "strongFromWeakQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankTimerSend: "timerSend", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollCache: "pollCache", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: 
"wakeableSleep", + lockRankHchan: "hchan", + lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankTimers: "timers", + lockRankTimer: "timer", + lockRankNetpollInit: "netpollInit", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankTraceTypeTab: "traceTypeTab", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: "mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", } func (rank lockRank) String() string { @@ -163,62 +165,63 @@ func (rank lockRank) String() string { // // Lock ranks that allow self-cycles list themselves. var lockPartialOrder [][]lockRank = [][]lockRank{ - lockRankSysmon: {}, - lockRankScavenge: {lockRankSysmon}, - lockRankForcegc: {lockRankSysmon}, - lockRankDefer: {}, - lockRankSweepWaiters: {}, - lockRankAssistQueue: {}, - lockRankSweep: {}, - lockRankTestR: {}, - lockRankTestW: {}, - lockRankTimerSend: {}, - lockRankAllocmW: {}, - lockRankExecW: {}, - lockRankCpuprof: {}, - lockRankPollCache: {}, - lockRankPollDesc: {}, - lockRankWakeableSleep: {}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, - lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, 
lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, - lockRankRoot: {}, - lockRankItab: {}, - lockRankReflectOffs: {lockRankItab}, - lockRankUserArenaState: {}, - lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, - lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, 
lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, 
lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, 
lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, - lockRankPanic: {}, - lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, - lockRankRaceFini: {lockRankPanic}, - lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, - lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, - lockRankTestRInternal: {lockRankTestR, lockRankTestW}, + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankStrongFromWeakQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankTimerSend: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollCache: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, 
+ lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, 
lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, 
lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, 
lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, 
lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankPanic: {}, + lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, + lockRankRaceFini: {lockRankPanic}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, + lockRankTestRInternal: {lockRankTestR, lockRankTestW}, } diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 2654c696582211..f72edc2afe95da 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -190,6 +190,7 @@ func gcinit() { work.markDoneSema = 1 lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) lockInit(&work.assistQueue.lock, lockRankAssistQueue) + lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue) lockInit(&work.wbufSpans.lock, lockRankWbufSpans) } @@ -418,6 +419,26 @@ type workType struct { list gList } + // strongFromWeak controls how the GC interacts with weak->strong + // pointer conversions. + strongFromWeak struct { + // block is a flag set during mark termination that prevents + // new weak->strong conversions from executing by blocking the + // goroutine and enqueuing it onto q. + // + // Mutated only by one goroutine at a time in gcMarkDone, + // with globally-synchronizing events like forEachP and + // stopTheWorld. + block bool + + // q is a queue of goroutines that attempted to perform a + // weak->strong conversion during mark termination. + // + // Protected by lock. + lock mutex + q gQueue + } + // cycles is the number of completed GC cycles, where a GC // cycle is sweep termination, mark, mark termination, and // sweep. This differs from memstats.numgc, which is @@ -800,6 +821,19 @@ func gcStart(trigger gcTrigger) { // This is protected by markDoneSema. var gcMarkDoneFlushed uint32 +// gcDebugMarkDone contains fields used to debug/test mark termination. +var gcDebugMarkDone struct { + // spinAfterRaggedBarrier forces gcMarkDone to spin after it executes + // the ragged barrier. + spinAfterRaggedBarrier atomic.Bool + + // restartedDueTo27993 indicates that we restarted mark termination + // due to the bug described in issue #27993. + // + // Protected by worldsema. 
+ restartedDueTo27993 bool +} + // gcMarkDone transitions the GC from mark to mark termination if all // reachable objects have been marked (that is, there are no grey // objects and can be no more in the future). Otherwise, it flushes @@ -842,6 +876,10 @@ top: // stop the world later, so acquire worldsema now. semacquire(&worldsema) + // Prevent weak->strong conversions from generating additional + // GC work. forEachP will guarantee that it is observed globally. + work.strongFromWeak.block = true + // Flush all local buffers and collect flushedWork flags. gcMarkDoneFlushed = 0 forEachP(waitReasonGCMarkTermination, func(pp *p) { @@ -872,6 +910,10 @@ top: goto top } + // For debugging/testing. + for gcDebugMarkDone.spinAfterRaggedBarrier.Load() { + } + // There was no global work, no local work, and no Ps // communicated work since we took markDoneSema. Therefore // there are no grey objects and no more objects can be @@ -910,6 +952,8 @@ top: } }) if restart { + gcDebugMarkDone.restartedDueTo27993 = true + getg().m.preemptoff = "" systemstack(func() { // Accumulate the time we were stopped before we had to start again. @@ -936,6 +980,11 @@ top: // start the world again. gcWakeAllAssists() + // Wake all blocked weak->strong conversions. These will run + // when we start the world again. + work.strongFromWeak.block = false + gcWakeAllStrongFromWeak() + // Likewise, release the transition lock. Blocked // workers and assists will run when we start the // world again. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index a91055387ef35b..b43f912cea2f8a 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -2049,8 +2049,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { handle := (*atomic.Uintptr)(u) - // Prevent preemption. We want to make sure that another GC cycle can't start. + // Prevent preemption. We want to make sure that another GC cycle can't start + // and that work.strongFromWeak.block can't change out from under us. mp := acquirem() + + // Yield to the GC if necessary. + if work.strongFromWeak.block { + releasem(mp) + + // Try to park and wait for mark termination. + // N.B. gcParkStrongFromWeak calls acquirem before returning. + mp = gcParkStrongFromWeak() + } + p := handle.Load() if p == 0 { releasem(mp) @@ -2092,6 +2103,41 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { return ptr } +// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks. +func gcParkStrongFromWeak() *m { + // Prevent preemption as we check strongFromWeak, so it can't change out from under us. + mp := acquirem() + + for work.strongFromWeak.block { + lock(&work.strongFromWeak.lock) + releasem(mp) // N.B. Holding the lock prevents preemption. + + // Queue ourselves up. + work.strongFromWeak.q.pushBack(getg()) + + // Park. + goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2) + + // Re-acquire the current M since we're going to check the condition again. + mp = acquirem() + + // Re-check condition. We may have awoken in the next GC's mark termination phase. + } + return mp +} + +// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong +// conversions. This is used at the end of a GC cycle. +// +// work.strongFromWeak.block must be false to prevent woken goroutines +// from immediately going back to sleep. 
+func gcWakeAllStrongFromWeak() { + lock(&work.strongFromWeak.lock) + list := work.strongFromWeak.q.popList() + injectglist(&list) + unlock(&work.strongFromWeak.lock) +} + // Retrieves or creates a weak pointer handle for the object p. func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // First try to retrieve without allocating. diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 1239b4a546ea39..3391afc6572509 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -50,6 +50,7 @@ NONE < defer; NONE < sweepWaiters, assistQueue, + strongFromWeakQueue, sweep; # Test only @@ -66,6 +67,7 @@ assistQueue, hchan, pollDesc, # pollDesc can interact with timers, which can lock sched. scavenge, + strongFromWeakQueue, sweep, sweepWaiters, testR, diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 4a789639611fb7..074ce5e538bc9d 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -1095,6 +1095,7 @@ const ( waitReasonTraceProcStatus // "trace proc status" waitReasonPageTraceFlush // "page trace flush" waitReasonCoroutine // "coroutine" + waitReasonGCWeakToStrongWait // "GC weak to strong wait" ) var waitReasonStrings = [...]string{ @@ -1135,6 +1136,7 @@ var waitReasonStrings = [...]string{ waitReasonTraceProcStatus: "trace proc status", waitReasonPageTraceFlush: "page trace flush", waitReasonCoroutine: "coroutine", + waitReasonGCWeakToStrongWait: "GC weak to strong wait", } func (w waitReason) String() string { diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go index 195b3e1c37f984..7c4cb5502377c0 100644 --- a/src/runtime/traceruntime.go +++ b/src/runtime/traceruntime.go @@ -99,24 +99,26 @@ const ( traceBlockDebugCall traceBlockUntilGCEnds traceBlockSleep + traceBlockGCWeakToStrongWait ) var traceBlockReasonStrings = [...]string{ - traceBlockGeneric: "unspecified", - traceBlockForever: "forever", - traceBlockNet: "network", - traceBlockSelect: "select", - traceBlockCondWait: "sync.(*Cond).Wait", - traceBlockSync: "sync", - traceBlockChanSend: "chan send", - traceBlockChanRecv: "chan receive", - traceBlockGCMarkAssist: "GC mark assist wait for work", - traceBlockGCSweep: "GC background sweeper wait", - traceBlockSystemGoroutine: "system goroutine wait", - traceBlockPreempted: "preempted", - traceBlockDebugCall: "wait for debug call", - traceBlockUntilGCEnds: "wait until GC ends", - traceBlockSleep: "sleep", + traceBlockGeneric: "unspecified", + traceBlockForever: "forever", + traceBlockNet: "network", + traceBlockSelect: "select", + traceBlockCondWait: "sync.(*Cond).Wait", + traceBlockSync: "sync", + traceBlockChanSend: "chan send", + traceBlockChanRecv: "chan receive", + traceBlockGCMarkAssist: "GC mark assist wait for work", + traceBlockGCSweep: "GC background sweeper wait", + traceBlockSystemGoroutine: "system goroutine wait", + traceBlockPreempted: "preempted", + traceBlockDebugCall: "wait for debug call", + traceBlockUntilGCEnds: "wait until GC ends", + traceBlockSleep: "sleep", + traceBlockGCWeakToStrongWait: "GC weak to strong wait", } // traceGoStopReason is an enumeration of reasons a goroutine might yield. 
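For context on the API the new regression test exercises: internal/weak is a runtime-internal package (not importable from user code), weak.Make creates a pointer that does not by itself keep its referent alive, and Strong performs the weak->strong conversion discussed above, returning either a usable pointer or nil once the object has been reclaimed. A minimal sketch under those assumptions follows; the TestWeakSketch name and its assertions are illustrative, not part of this patch.

    package runtime_test

    import (
        "internal/weak"
        "runtime"
        "testing"
    )

    func TestWeakSketch(t *testing.T) {
        type T struct{ v int }

        x := &T{v: 42}
        wp := weak.Make(x) // weak pointer: does not keep x alive on its own

        // While a strong reference (x) still exists, Strong returns the object.
        if got := wp.Strong(); got == nil || got.v != 42 {
            t.Fatalf("expected live object, got %v", got)
        }

        // Drop the only strong reference and run the GC. Once the object has
        // been collected, Strong returns nil; collection timing is not
        // guaranteed, so the result is logged rather than asserted.
        x = nil
        runtime.GC()
        if wp.Strong() != nil {
            t.Log("object not yet reclaimed; GC timing is not deterministic")
        }
    }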
From be062b7f61486db3c93741e794bd51eda5cc6fce Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 20 Nov 2024 19:12:58 +0000 Subject: [PATCH 56/66] [release-branch.go1.23] runtime: explicitly keep handle alive during getOrAddWeakHandle getOrAddWeakHandle is very careful about keeping its input alive across the operation, but not very careful about keeping the heap-allocated handle it creates alive. In fact, there's a window in this function where it is *only* visible via the special. Specifically, the window of time between when the handle is stored in the special and when the special actually becomes visible to the GC. (If we fail to add the special because it already exists, that case is fine. We don't even use the same handle value, but the one we obtain from the attached GC-visible special, *and* we return that value, so it remains live.) For #70455. Fixes #70469. Change-Id: Iadaff0cfb93bcaf61ba2b05be7fa0519c481de82 Reviewed-on: https://go-review.googlesource.com/c/go/+/630316 Auto-Submit: Michael Knyszek Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mheap.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index b43f912cea2f8a..bfca2d105b7426 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -2172,8 +2172,14 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // Keep p alive for the duration of the function to ensure // that it cannot die while we're trying to do this. + // + // Same for handle, which is only stored in the special. + // There's a window where it might die if we don't keep it + // alive explicitly. Returning it here is probably good enough, + // but let's be defensive and explicit. See #70455. KeepAlive(p) - return s.handle + KeepAlive(handle) + return handle } // There was an existing handle. Free the special @@ -2193,7 +2199,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { // Keep p alive for the duration of the function to ensure // that it cannot die while we're trying to do this. + // + // Same for handle, just to be defensive. KeepAlive(p) + KeepAlive(handle) return handle } From 25f042daecda1058baa25b213f1692d22ff5fb73 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 12 Nov 2024 23:23:12 +0100 Subject: [PATCH 57/66] [release-branch.go1.23] runtime: reserve 4kB for system stack on windows-386 The failures in #70288 are consistent with and strongly imply stack corruption during fault handling, and debug prints show that the Go code run during fault handling is running about 300 bytes above the bottom of the goroutine stack. That should be okay, but that implies the DLL code that called Go's handler was running near the bottom of the stack too, and maybe it called other deeper things before or after the Go handler and smashed the stack that way. stackSystem is already 4096 bytes on amd64; making it match that on 386 makes the flaky failures go away. It's a little unsatisfying not to be able to say exactly what is overflowing the stack, but the circumstantial evidence is very strong that it's Windows. For #70288. Fixes #70475. 
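As a rough worked example of the constant selection in the diff below (not part of the patch): the goos and goarch flags are 0-or-1 integer constants, so the old formula gave windows/386 only 512*4 = 2048 bytes while windows/amd64 already got 512*8 = 4096, and the new formula pins both at 4096.

package main

import "fmt"

func main() {
	// Illustrative arithmetic only; the real constants live in internal/goos
	// and internal/goarch. PtrSize is 4 on 386 and 8 on amd64.
	const isWindows = 1
	const ptrSize386, ptrSizeAmd64 = 4, 8

	oldWin386 := isWindows * 512 * ptrSize386     // old stackSystem on windows/386
	oldWinAmd64 := isWindows * 512 * ptrSizeAmd64 // old stackSystem on windows/amd64
	newWin := isWindows * 4096                    // new stackSystem on any windows GOARCH

	fmt.Println(oldWin386, oldWinAmd64, newWin) // 2048 4096 4096
}
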
Change-Id: Ife89385873d5e5062a71629dbfee40825edefa49 Reviewed-on: https://go-review.googlesource.com/c/go/+/627375 Reviewed-by: Ian Lance Taylor Auto-Submit: Russ Cox LUCI-TryBot-Result: Go LUCI (cherry picked from commit 7eeb0a188eb644486da9f77bae0375d91433d0bf) Reviewed-on: https://go-review.googlesource.com/c/go/+/632196 Reviewed-by: Cherry Mui Reviewed-by: Dmitri Shuralyov Auto-Submit: Veronica Silina --- src/runtime/stack.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index cdf859a7ff1342..d43c6ace4ffcf2 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -69,7 +69,7 @@ const ( // to each stack below the usual guard area for OS-specific // purposes like signal handling. Used on Windows, Plan 9, // and iOS because they do not use a separate stack. - stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 + stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 // The minimum size of stack used by Go code stackMin = 2048 @@ -1330,7 +1330,7 @@ func morestackc() { } // startingStackSize is the amount of stack that new goroutines start with. -// It is a power of 2, and between _FixedStack and maxstacksize, inclusive. +// It is a power of 2, and between fixedStack and maxstacksize, inclusive. // startingStackSize is updated every GC by tracking the average size of // stacks scanned during the GC. var startingStackSize uint32 = fixedStack From 5164a865e3de723f07976edac234c4d6a814128e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisend=C3=B6rfer?= Date: Tue, 26 Nov 2024 09:10:22 +0100 Subject: [PATCH 58/66] [release-branch.go1.23] cmd/trace: also show end stack traces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a regression that appeared in 1.23 when it comes to the stack traces shown in the trace viewer. In 1.22 and earlier, the viewer was always showing end stack traces. In 1.23 and later the viewer started to exclusively show start stack traces. Showing only the start stack traces made it impossible to see the last stack trace produced by a goroutine. It also made it hard to understand why a goroutine went off-cpu, as one had to hunt down the next running slice of the same goroutine. Emit end stack traces in addition to start stack traces to fix the issue. Fixes #70592 Change-Id: Ib22ea61388c1d94cdbc99fae2d207c4dce011a59 Reviewed-on: https://go-review.googlesource.com/c/go/+/631895 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt Auto-Submit: Felix Geisendörfer Reviewed-by: Nick Ripley Reviewed-by: Michael Knyszek (cherry picked from commit 6405e60ca6be798c1f8c1d0365fd63b89b524df5) Reviewed-on: https://go-review.googlesource.com/c/go/+/632075 Reviewed-by: Veronica Silina Auto-Submit: Veronica Silina --- src/cmd/trace/gstate.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/cmd/trace/gstate.go b/src/cmd/trace/gstate.go index 638d492670a6e7..4b380db9f53cd7 100644 --- a/src/cmd/trace/gstate.go +++ b/src/cmd/trace/gstate.go @@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { if gs.lastStopStack != trace.NoStack { stk = ctx.Stack(viewerFrames(gs.lastStopStack)) } + var endStk int + if stack != trace.NoStack { + endStk = ctx.Stack(viewerFrames(stack)) + } // Check invariants. 
if gs.startRunningTime == 0 { panic("silently broken trace or generator invariant (startRunningTime != 0) not held") @@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { Dur: ts.Sub(gs.startRunningTime), Resource: uint64(gs.executing), Stack: stk, + EndStack: endStk, }) // Flush completed ranges. From 194de8fbfaf4c3ed54e1a3c1b14fc67a830b8d95 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Tue, 3 Dec 2024 17:22:32 +0000 Subject: [PATCH 59/66] [release-branch.go1.23] go1.23.4 Change-Id: I8d26e5231e868476949390ec900f0273c816d807 Reviewed-on: https://go-review.googlesource.com/c/go/+/633217 Reviewed-by: Veronica Silina Auto-Submit: Gopher Robot LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 66e6565be501e5..139c590eb87892 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.3 -time 2024-11-06T18:46:45Z +go1.23.4 +time 2024-11-27T20:27:20Z From 69c8cfe29bb677614c01f8dae3901d6954411af8 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 9 Aug 2024 10:50:00 -0700 Subject: [PATCH 60/66] [release-branch.go1.23] cmd/compile/internal/importer: enable aliases Flips the pkgReader.enableAlias flag to true when reading unified IR. This was disabled while resolving #66873. This resolves the TODO to flip it back to true. Fixes #70394 Fixes #70517 Updates #66873 Change-Id: Ifd52b0f9510d6bcf151de1c9a18d71ab548c14e4 Reviewed-on: https://go-review.googlesource.com/c/go/+/604099 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase (cherry picked from commit 209ed1a9c75d17046285c416b74a14bb89799757) Reviewed-on: https://go-review.googlesource.com/c/go/+/631855 Commit-Queue: Tim King Reviewed-by: Robert Griesemer --- .../internal/importer/gcimporter_test.go | 17 +++++++++++++++++ .../compile/internal/importer/testdata/alias.go | 7 +++++++ src/cmd/compile/internal/importer/ureader.go | 8 +++----- 3 files changed, 27 insertions(+), 5 deletions(-) create mode 100644 src/cmd/compile/internal/importer/testdata/alias.go diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go index 7fe4445dad7638..ffeddea0c9d588 100644 --- a/src/cmd/compile/internal/importer/gcimporter_test.go +++ b/src/cmd/compile/internal/importer/gcimporter_test.go @@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) { compileAndImportPkg(t, "issue25596") } +func TestIssue70394(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + pkg := compileAndImportPkg(t, "alias") + obj := lookupObj(t, pkg.Scope(), "A") + + typ := obj.Type() + if _, ok := typ.(*types2.Alias); !ok { + t.Fatalf("type of %s is %s, wanted an alias", obj, typ) + } +} + func importPkg(t *testing.T, path, srcDir string) *types2.Package { pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) if err != nil { diff --git a/src/cmd/compile/internal/importer/testdata/alias.go b/src/cmd/compile/internal/importer/testdata/alias.go new file mode 100644 index 00000000000000..51492fc943ea0c --- /dev/null +++ b/src/cmd/compile/internal/importer/testdata/alias.go @@ -0,0 +1,7 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testdata + +type A = int32 diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go index 7eda375bd52196..9d267e6db411c0 100644 --- a/src/cmd/compile/internal/importer/ureader.go +++ b/src/cmd/compile/internal/importer/ureader.go @@ -29,11 +29,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input pr := pkgReader{ PkgDecoder: input, - ctxt: ctxt, - imports: imports, - // Currently, the compiler panics when using Alias types. - // TODO(gri) set to true once this is fixed (issue #66873) - enableAlias: false, + ctxt: ctxt, + imports: imports, + enableAlias: true, posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)), pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)), From 59b7d40774b29bd1da1aa624f13233111aff4ad2 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 22 Jul 2024 16:23:43 -0400 Subject: [PATCH 61/66] [release-branch.go1.23] runtime: update and restore g0 stack bounds at cgocallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, at a cgo callback where there is already a Go frame on the stack (i.e. C->Go->C->Go), we require that at the inner Go callback the SP is within the g0's stack bounds set by a previous callback. This is to prevent that the C code switches stack while having a Go frame on the stack, which we don't really support. But this could also happen when we cannot get accurate stack bounds, e.g. when pthread_getattr_np is not available. Since the stack bounds are just estimates based on the current SP, if there are multiple C->Go callbacks with various stack depth, it is possible that the SP of a later callback falls out of a previous call's estimate. This leads to runtime throw in a seemingly reasonable program. This CL changes it to save the old g0 stack bounds at cgocallback, update the bounds, and restore the old bounds at return. So each callback will get its own stack bounds based on the current SP, and when it returns, the outer callback has the its old stack bounds restored. Also, at a cgo callback when there is no Go frame on the stack, we currently always get new stack bounds. We do this because if we can only get estimated bounds based on the SP, and the stack depth varies a lot between two C->Go calls, the previous estimates may be off and we fall out or nearly fall out of the previous bounds. But this causes a performance problem: the pthread API to get accurate stack bounds (pthread_getattr_np) is very slow when called on the main thread. Getting the stack bounds every time significantly slows down repeated C->Go calls on the main thread. This CL fixes it by "caching" the stack bounds if they are accurate. I.e. at the second time Go calls into C, if the previous stack bounds are accurate, and the current SP is in bounds, we can be sure it is the same stack and we don't need to update the bounds. This avoids the repeated calls to pthread_getattr_np. If we cannot get the accurate bounds, we continue to update the stack bounds based on the SP, and that operation is very cheap. On a Linux/AMD64 machine with glibc: name old time/op new time/op delta CgoCallbackMainThread-8 96.4µs ± 3% 0.1µs ± 2% -99.92% (p=0.000 n=10+9) Updates #68285. Updates #68587. Fixes #69988. 
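A compact sketch of the decision order described above: keep cached bounds when they are known to be accurate and still contain SP, otherwise fall back to the cheap SP-based estimate, then upgrade to accurate bounds if the platform can report real bounds that contain SP. The stackBounds type and updateBounds helper here are invented for illustration; the real logic is callbackUpdateSystemStack in the diff further below.

package main

import "fmt"

type stackBounds struct {
	lo, hi   uintptr
	accurate bool
}

func updateBounds(b stackBounds, sp uintptr, getBounds func() (lo, hi uintptr, ok bool)) stackBounds {
	inBounds := sp > b.lo && sp <= b.hi
	if inBounds && b.accurate {
		// Cached accurate bounds still contain SP: reuse them and skip the
		// expensive pthread_getattr_np-style query.
		return b
	}
	// Otherwise estimate: assume at least 32 KiB of stack below SP and a
	// little headroom above it.
	b = stackBounds{lo: sp - 32*1024, hi: sp + 1024, accurate: false}
	if lo, hi, ok := getBounds(); ok && sp > lo && sp <= hi {
		// The platform reported real bounds containing SP: adopt them and
		// remember they are accurate for the next callback.
		b = stackBounds{lo: lo, hi: hi, accurate: true}
	}
	return b
}

func main() {
	sp := uintptr(0x7fff0000)
	noInfo := func() (uintptr, uintptr, bool) { return 0, 0, false }
	b := updateBounds(stackBounds{}, sp, noInfo)
	fmt.Printf("estimated bounds %#x-%#x accurate=%v\n", b.lo, b.hi, b.accurate)
	// A second callback at a nearby SP with accurate bounds would return b unchanged.
}
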
Change-Id: I3422badd5ad8ff63e1a733152d05fb7a44d5d435 Reviewed-on: https://go-review.googlesource.com/c/go/+/600296 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt (cherry picked from commit 76a8409eb81eda553363783dcdd9d6224368ae0e) Reviewed-on: https://go-review.googlesource.com/c/go/+/635775 --- .../internal/testcarchive/carchive_test.go | 57 +++++++++++-- .../testcarchive/testdata/libgo10/a.go | 12 +++ .../testcarchive/testdata/libgo9/a.go | 22 ++++- .../internal/testcarchive/testdata/main10.c | 22 +++++ .../internal/testcarchive/testdata/main9.c | 16 +++- src/runtime/cgo/gcc_stack_unix.c | 9 +- src/runtime/cgocall.go | 73 ++++++++-------- src/runtime/proc.go | 1 + src/runtime/runtime2.go | 83 ++++++++++--------- 9 files changed, 201 insertions(+), 94 deletions(-) create mode 100644 src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go create mode 100644 src/cmd/cgo/internal/testcarchive/testdata/main10.c diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go index a8eebead25dc9f..c263b82d5768f4 100644 --- a/src/cmd/cgo/internal/testcarchive/carchive_test.go +++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go @@ -33,7 +33,7 @@ import ( "unicode" ) -var globalSkip = func(t *testing.T) {} +var globalSkip = func(t testing.TB) {} // Program to run. var bin []string @@ -59,12 +59,12 @@ func TestMain(m *testing.M) { func testMain(m *testing.M) int { if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { - globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") } + globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") } return m.Run() } if runtime.GOOS == "linux" { if _, err := os.Stat("/etc/alpine-release"); err == nil { - globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") } + globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") } return m.Run() } } @@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) { } } -// Issue 59294. Test calling Go function from C after using some -// stack space. +// Issue 59294 and 68285. Test calling Go function from C after with +// various stack space. func TestDeepStack(t *testing.T) { globalSkip(t) testenv.MustHaveGoBuild(t) @@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) { } } +func BenchmarkCgoCallbackMainThread(b *testing.B) { + // Benchmark for calling into Go fron C main thread. + // See issue #68587. + // + // It uses a subprocess, which is a C binary that calls + // Go on the main thread b.N times. There is some overhead + // for launching the subprocess. It is probably fine when + // b.N is large. + + globalSkip(b) + testenv.MustHaveGoBuild(b) + testenv.MustHaveCGO(b) + testenv.MustHaveBuildMode(b, "c-archive") + + if !testWork { + defer func() { + os.Remove("testp10" + exeSuffix) + os.Remove("libgo10.a") + os.Remove("libgo10.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10") + out, err := cmd.CombinedOutput() + b.Logf("%v\n%s", cmd.Args, out) + if err != nil { + b.Fatal(err) + } + + ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + b.Logf("%v\n%s", ccArgs, out) + if err != nil { + b.Fatal(err) + } + + argv := cmdToRun("./testp10") + argv = append(argv, fmt.Sprint(b.N)) + cmd = exec.Command(argv[0], argv[1:]...) 
+ + b.ResetTimer() + err = cmd.Run() + if err != nil { + b.Fatal(err) + } +} + func TestSharedObject(t *testing.T) { // Test that we can put a Go c-archive into a C shared object. globalSkip(t) diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go new file mode 100644 index 00000000000000..803a0fa5f1cb35 --- /dev/null +++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export GoF +func GoF() {} + +func main() {} diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go index acb08d90ecd5bf..3528bef654ddb3 100644 --- a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go +++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go @@ -6,9 +6,29 @@ package main import "runtime" +// extern void callGoWithVariousStack(int); import "C" func main() {} //export GoF -func GoF() { runtime.GC() } +func GoF(p int32) { + runtime.GC() + if p != 0 { + panic("panic") + } +} + +//export callGoWithVariousStackAndGoFrame +func callGoWithVariousStackAndGoFrame(p int32) { + if p != 0 { + defer func() { + e := recover() + if e == nil { + panic("did not panic") + } + runtime.GC() + }() + } + C.callGoWithVariousStack(C.int(p)); +} diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main10.c b/src/cmd/cgo/internal/testcarchive/testdata/main10.c new file mode 100644 index 00000000000000..53c3c83a99e35c --- /dev/null +++ b/src/cmd/cgo/internal/testcarchive/testdata/main10.c @@ -0,0 +1,22 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include +#include + +#include "libgo10.h" + +int main(int argc, char **argv) { + int n, i; + + if (argc != 2) { + perror("wrong arg"); + return 2; + } + n = atoi(argv[1]); + for (i = 0; i < n; i++) + GoF(); + + return 0; +} diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main9.c b/src/cmd/cgo/internal/testcarchive/testdata/main9.c index 95ad4dea49fb1a..e641d8a8027a5f 100644 --- a/src/cmd/cgo/internal/testcarchive/testdata/main9.c +++ b/src/cmd/cgo/internal/testcarchive/testdata/main9.c @@ -6,19 +6,27 @@ void use(int *x) { (*x)++; } -void callGoFWithDeepStack() { +void callGoFWithDeepStack(int p) { int x[10000]; use(&x[0]); use(&x[9999]); - GoF(); + GoF(p); use(&x[0]); use(&x[9999]); } +void callGoWithVariousStack(int p) { + GoF(0); // call GoF without using much stack + callGoFWithDeepStack(p); // call GoF with a deep stack + GoF(0); // again on a shallow stack +} + int main() { - GoF(); // call GoF without using much stack - callGoFWithDeepStack(); // call GoF with a deep stack + callGoWithVariousStack(0); + + callGoWithVariousStackAndGoFrame(0); // normal execution + callGoWithVariousStackAndGoFrame(1); // panic and recover } diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c index fcb03d0dea7e34..df0049a4f37ab3 100644 --- a/src/runtime/cgo/gcc_stack_unix.c +++ b/src/runtime/cgo/gcc_stack_unix.c @@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2]) pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); // low address #else - // We don't know how to get the current stacks, so assume they are the - // same as the default stack bounds. - pthread_attr_getstacksize(&attr, &size); - addr = __builtin_frame_address(0) + 4096 - size; + // We don't know how to get the current stacks, leave it as + // 0 and the caller will use an estimate based on the current + // SP. + addr = 0; + size = 0; #endif pthread_attr_destroy(&attr); diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 68b1ebbac2c7e0..972de4fe03277f 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 { func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { g0 := mp.g0 - inBound := sp > g0.stack.lo && sp <= g0.stack.hi - if mp.ncgo > 0 && !inBound { - // ncgo > 0 indicates that this M was in Go further up the stack - // (it called C and is now receiving a callback). - // - // !inBound indicates that we were called with SP outside the - // expected system stack bounds (C changed the stack out from - // under us between the cgocall and cgocallback?). - // - // It is not safe for the C call to change the stack out from - // under us, so throw. - - // Note that this case isn't possible for signal == true, as - // that is always passing a new M from needm. - - // Stack is bogus, but reset the bounds anyway so we can print. - hi := g0.stack.hi - lo := g0.stack.lo - g0.stack.hi = sp + 1024 - g0.stack.lo = sp - 32*1024 - g0.stackguard0 = g0.stack.lo + stackGuard - g0.stackguard1 = g0.stackguard0 - - print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]") - print("\n") - exit(2) - } - if !mp.isextra { // We allocated the stack for standard Ms. Don't replace the // stack bounds with estimated ones when we already initialized @@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { return } - // This M does not have Go further up the stack. 
However, it may have - // previously called into Go, initializing the stack bounds. Between - // that call returning and now the stack may have changed (perhaps the - // C thread is running a coroutine library). We need to update the - // stack bounds for this case. + inBound := sp > g0.stack.lo && sp <= g0.stack.hi + if inBound && mp.g0StackAccurate { + // This M has called into Go before and has the stack bounds + // initialized. We have the accurate stack bounds, and the SP + // is in bounds. We expect it continues to run within the same + // bounds. + return + } + + // We don't have an accurate stack bounds (either it never calls + // into Go before, or we couldn't get the accurate bounds), or the + // current SP is not within the previous bounds (the stack may have + // changed between calls). We need to update the stack bounds. // // N.B. we need to update the stack bounds even if SP appears to - // already be in bounds. Our "bounds" may actually be estimated dummy - // bounds (below). The actual stack bounds could have shifted but still - // have partial overlap with our dummy bounds. If we failed to update - // in that case, we could find ourselves seemingly called near the - // bottom of the stack bounds, where we quickly run out of space. + // already be in bounds, if our bounds are estimated dummy bounds + // (below). We may be in a different region within the same actual + // stack bounds, but our estimates were not accurate. Or the actual + // stack bounds could have shifted but still have partial overlap with + // our dummy bounds. If we failed to update in that case, we could find + // ourselves seemingly called near the bottom of the stack bounds, where + // we quickly run out of space. // Set the stack bounds to match the current stack. If we don't // actually know how big the stack is, like we don't know how big any // scheduling stack is, but we assume there's at least 32 kB. If we // can get a more accurate stack bound from pthread, use that, provided - // it actually contains SP.. + // it actually contains SP. g0.stack.hi = sp + 1024 g0.stack.lo = sp - 32*1024 + mp.g0StackAccurate = false if !signal && _cgo_getstackbound != nil { // Don't adjust if called from the signal handler. // We are on the signal stack, not the pthread stack. @@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds)) // getstackbound is an unsupported no-op on Windows. // + // On Unix systems, if the API to get accurate stack bounds is + // not available, it returns zeros. + // // Don't use these bounds if they don't contain SP. Perhaps we // were called by something not using the standard thread // stack. if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] { g0.stack.lo = bounds[0] g0.stack.hi = bounds[1] + mp.g0StackAccurate = true } } g0.stackguard0 = g0.stack.lo + stackGuard @@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { } sp := gp.m.g0.sched.sp // system sp saved by cgocallback. 
+ oldStack := gp.m.g0.stack + oldAccurate := gp.m.g0StackAccurate callbackUpdateSystemStack(gp.m, sp, false) // The call from C is on gp.m's g0 stack, so we must ensure @@ -380,6 +369,12 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) { reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp)) gp.m.winsyscall = winsyscall + + // Restore the old g0 stack bounds + gp.m.g0.stack = oldStack + gp.m.g0.stackguard0 = oldStack.lo + stackGuard + gp.m.g0.stackguard1 = gp.m.g0.stackguard0 + gp.m.g0StackAccurate = oldAccurate } func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 76c8b71ab9a939..d5cfaa391647d8 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -2539,6 +2539,7 @@ func dropm() { g0.stack.lo = 0 g0.stackguard0 = 0 g0.stackguard1 = 0 + mp.g0StackAccurate = false putExtraM(mp) diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 074ce5e538bc9d..4a1ee37a14a08d 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -556,47 +556,48 @@ type m struct { _ uint32 // align next field to 8 bytes // Fields not known to debuggers. - procid uint64 // for debuggers, but offset not hard-coded - gsignal *g // signal-handling g - goSigStack gsignalStack // Go-allocated signal handling stack - sigmask sigset // storage for saved signal mask - tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) - mstartfn func() - curg *g // current running goroutine - caughtsig guintptr // goroutine running during fatal signal - p puintptr // attached p for executing go code (nil if not executing go code) - nextp puintptr - oldp puintptr // the p that was attached before executing a syscall - id int64 - mallocing int32 - throwing throwType - preemptoff string // if != "", keep curg running on this m - locks int32 - dying int32 - profilehz int32 - spinning bool // m is out of work and is actively looking for work - blocked bool // m is blocked on a note - newSigstack bool // minit on C thread called sigaltstack - printlock int8 - incgo bool // m is executing a cgo call - isextra bool // m is an extra m - isExtraInC bool // m is an extra m that is not executing Go code - isExtraInSig bool // m is an extra m in a signal handler - freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) - needextram bool - traceback uint8 - ncgocall uint64 // number of cgo calls in total - ncgo int32 // number of cgo calls currently in progress - cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily - cgoCallers *cgoCallers // cgo traceback if crashing in cgo call - park note - alllink *m // on allm - schedlink muintptr - lockedg guintptr - createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. 
- lockedExt uint32 // tracking for external LockOSThread - lockedInt uint32 // tracking for internal lockOSThread - nextwaitm muintptr // next m waiting for lock + procid uint64 // for debuggers, but offset not hard-coded + gsignal *g // signal-handling g + goSigStack gsignalStack // Go-allocated signal handling stack + sigmask sigset // storage for saved signal mask + tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) + mstartfn func() + curg *g // current running goroutine + caughtsig guintptr // goroutine running during fatal signal + p puintptr // attached p for executing go code (nil if not executing go code) + nextp puintptr + oldp puintptr // the p that was attached before executing a syscall + id int64 + mallocing int32 + throwing throwType + preemptoff string // if != "", keep curg running on this m + locks int32 + dying int32 + profilehz int32 + spinning bool // m is out of work and is actively looking for work + blocked bool // m is blocked on a note + newSigstack bool // minit on C thread called sigaltstack + printlock int8 + incgo bool // m is executing a cgo call + isextra bool // m is an extra m + isExtraInC bool // m is an extra m that is not executing Go code + isExtraInSig bool // m is an extra m in a signal handler + freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) + needextram bool + g0StackAccurate bool // whether the g0 stack has accurate bounds + traceback uint8 + ncgocall uint64 // number of cgo calls in total + ncgo int32 // number of cgo calls currently in progress + cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily + cgoCallers *cgoCallers // cgo traceback if crashing in cgo call + park note + alllink *m // on allm + schedlink muintptr + lockedg guintptr + createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it. + lockedExt uint32 // tracking for external LockOSThread + lockedInt uint32 // tracking for internal lockOSThread + nextwaitm muintptr // next m waiting for lock mLockProfile mLockProfile // fields relating to runtime.lock contention profStack []uintptr // used for memory/block/mutex stack traces From 1576793c513c5cd8396d1a5b004b546e82efc033 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 11 Dec 2024 14:18:42 -0800 Subject: [PATCH 62/66] [release-branch.go1.23] net: don't use sendfile for irregular files; handle EOPNOTSUPP/ENOTSUP This is not a cherry pick, because the code has changed on tip. 
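The new check in the diff below rejects sources whose mode marks them as something other than a regular file before attempting sendfile. A small illustrative example of the same mode test at the application level; the regularFile helper is invented here and only borrows the mask used by the patch.

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// regularFile reports whether f looks like something a sendfile-style fast
// path can handle, using the same mode mask as the patch below.
func regularFile(f *os.File) (bool, error) {
	fi, err := f.Stat()
	if err != nil {
		return false, err
	}
	const special = fs.ModeSymlink | fs.ModeDevice | fs.ModeCharDevice | fs.ModeIrregular
	return fi.Mode()&special == 0, nil
}

func main() {
	f, err := os.Open("/dev/null") // a character device, so sendfile should be skipped
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	ok, err := regularFile(f)
	fmt.Println(ok, err) // false <nil> on Unix systems
}
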
For #70763 Fixes #70789 Change-Id: If9fcfee17e86a746cf8c72293dc34f80b430f6e6 Reviewed-on: https://go-review.googlesource.com/c/go/+/635397 Auto-Submit: Ian Lance Taylor Reviewed-by: Dmitri Shuralyov Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI --- src/internal/poll/sendfile_bsd.go | 2 +- src/net/sendfile_unix_alt.go | 3 ++ src/net/sendfile_unix_test.go | 86 +++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 src/net/sendfile_unix_test.go diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 341e07ca1fed6a..d1023d4ebb9938 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -72,6 +72,6 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if err == syscall.EAGAIN { err = nil } - handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP) return } diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go index 9e46c4e607d4d8..4056856f306175 100644 --- a/src/net/sendfile_unix_alt.go +++ b/src/net/sendfile_unix_alt.go @@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { if err != nil { return 0, err, false } + if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 { + return 0, nil, false + } remain = fi.Size() } diff --git a/src/net/sendfile_unix_test.go b/src/net/sendfile_unix_test.go new file mode 100644 index 00000000000000..79fb23b31010d5 --- /dev/null +++ b/src/net/sendfile_unix_test.go @@ -0,0 +1,86 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "internal/testpty" + "io" + "os" + "sync" + "syscall" + "testing" +) + +// Issue 70763: test that we don't fail on sendfile from a tty. +func TestCopyFromTTY(t *testing.T) { + pty, ttyName, err := testpty.Open() + if err != nil { + t.Skipf("skipping test because pty open failed: %v", err) + } + defer pty.Close() + + // Use syscall.Open so that the tty is blocking. 
+ ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0) + if err != nil { + t.Skipf("skipping test because tty open failed: %v", err) + } + defer syscall.Close(ttyFD) + + tty := os.NewFile(uintptr(ttyFD), "tty") + defer tty.Close() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + ch := make(chan bool) + + const data = "data\n" + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + + buf := make([]byte, len(data)) + if _, err := io.ReadFull(conn, buf); err != nil { + t.Error(err) + } + + ch <- true + }() + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + wg.Add(1) + go func() { + defer wg.Done() + if _, err := pty.Write([]byte(data)); err != nil { + t.Error(err) + } + <-ch + if err := pty.Close(); err != nil { + t.Error(err) + } + }() + + lr := io.LimitReader(tty, int64(len(data))) + if _, err := io.Copy(conn, lr); err != nil { + t.Error(err) + } +} From 3417000c69bc89c173cfafcf3f2a06a408b32880 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Thu, 2 Jan 2025 01:34:40 +0100 Subject: [PATCH 63/66] [release-branch.go1.23] crypto/tls: fix Config.Time in tests using expired certificates Updates #71077 Fixes #71104 Change-Id: I6a6a465685f3bd50a5bb35a160f87b59b74fa6af Reviewed-on: https://go-review.googlesource.com/c/go/+/639655 Auto-Submit: Ian Lance Taylor Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Auto-Submit: Filippo Valsorda Auto-Submit: Damien Neil Reviewed-by: Joel Sing Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/c/go/+/640315 Reviewed-by: Filippo Valsorda --- src/crypto/tls/handshake_client_test.go | 30 +++++++++++++++---------- src/crypto/tls/handshake_server_test.go | 2 ++ src/crypto/tls/handshake_test.go | 5 +++++ src/crypto/tls/tls_test.go | 6 ++--- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go index 501f9c6755f9e3..3c87916bcf0bb7 100644 --- a/src/crypto/tls/handshake_client_test.go +++ b/src/crypto/tls/handshake_client_test.go @@ -852,6 +852,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } issuer, err := x509.ParseCertificate(testRSACertificateIssuer) @@ -868,6 +869,7 @@ func testResumption(t *testing.T, version uint16) { ClientSessionCache: NewLRUClientSessionCache(32), RootCAs: rootCAs, ServerName: "example.golang", + Time: testTime, } testResumeState := func(test string, didResume bool) { @@ -914,7 +916,7 @@ func testResumption(t *testing.T, version uint16) { // An old session ticket is replaced with a ticket encrypted with a fresh key. ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("ResumeWithOldTicket", true) if bytes.Equal(ticket, getTicket()) { t.Fatal("old first ticket matches the fresh one") @@ -922,13 +924,13 @@ func testResumption(t *testing.T, version uint16) { // Once the session master secret is expired, a full handshake should occur. 
ticket = getTicket() - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + time.Minute) } testResumeState("ResumeWithExpiredTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("expired first ticket matches the fresh one") } - serverConfig.Time = func() time.Time { return time.Now() } // reset the time back + serverConfig.Time = testTime // reset the time back key1 := randomKey() serverConfig.SetSessionTicketKeys([][32]byte{key1}) @@ -945,11 +947,11 @@ func testResumption(t *testing.T, version uint16) { testResumeState("KeyChangeFinish", true) // Age the session ticket a bit, but not yet expired. - serverConfig.Time = func() time.Time { return time.Now().Add(24*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*time.Hour + time.Minute) } testResumeState("OldSessionTicket", true) ticket = getTicket() // Expire the session ticket, which would force a full handshake. - serverConfig.Time = func() time.Time { return time.Now().Add(24*8*time.Hour + time.Minute) } + serverConfig.Time = func() time.Time { return testTime().Add(24*8*time.Hour + 2*time.Minute) } testResumeState("ExpiredSessionTicket", false) if bytes.Equal(ticket, getTicket()) { t.Fatal("new ticket wasn't provided after old ticket expired") @@ -957,7 +959,7 @@ func testResumption(t *testing.T, version uint16) { // Age the session ticket a bit at a time, but don't expire it. d := 0 * time.Hour - serverConfig.Time = func() time.Time { return time.Now().Add(d) } + serverConfig.Time = func() time.Time { return testTime().Add(d) } deleteTicket() testResumeState("GetFreshSessionTicket", false) for i := 0; i < 13; i++ { @@ -968,7 +970,7 @@ func testResumption(t *testing.T, version uint16) { // handshake occurs for TLS 1.2. Resumption should still occur for // TLS 1.3 since the client should be using a fresh ticket sent over // by the server. 
- d += 12 * time.Hour + d += 12*time.Hour + time.Minute if version == VersionTLS13 { testResumeState("ExpiredSessionTicket", true) } else { @@ -984,6 +986,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } serverConfig.SetSessionTicketKeys([][32]byte{key2}) @@ -1009,6 +1012,7 @@ func testResumption(t *testing.T, version uint16) { CurvePreferences: []CurveID{CurveP521, CurveP384, CurveP256}, MaxVersion: version, Certificates: testConfig.Certificates, + Time: testTime, } testResumeState("InitialHandshake", false) testResumeState("WithHelloRetryRequest", true) @@ -1018,6 +1022,7 @@ func testResumption(t *testing.T, version uint16) { MaxVersion: version, CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } } @@ -1736,6 +1741,7 @@ func testVerifyConnection(t *testing.T, version uint16) { serverConfig := &Config{ MaxVersion: version, Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, ClientCAs: rootCAs, NextProtos: []string{"protocol1"}, } @@ -1749,6 +1755,7 @@ func testVerifyConnection(t *testing.T, version uint16) { RootCAs: rootCAs, ServerName: "example.golang", Certificates: []Certificate{testConfig.Certificates[0]}, + Time: testTime, NextProtos: []string{"protocol1"}, } test.configureClient(clientConfig, &clientCalled) @@ -1791,8 +1798,6 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - sentinelErr := errors.New("TestVerifyPeerCertificate") verifyPeerCertificateCallback := func(called *bool, rawCerts [][]byte, validatedChains [][]*x509.Certificate) error { @@ -2038,7 +2043,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config.ServerName = "example.golang" config.ClientAuth = RequireAndVerifyClientCert config.ClientCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version config.Certificates = make([]Certificate, 1) config.Certificates[0].Certificate = [][]byte{testRSACertificate} @@ -2055,7 +2060,7 @@ func testVerifyPeerCertificate(t *testing.T, version uint16) { config := testConfig.Clone() config.ServerName = "example.golang" config.RootCAs = rootCAs - config.Time = now + config.Time = testTime config.MaxVersion = version test.configureClient(config, &clientCalled) clientErr := Client(c, config).Handshake() @@ -2368,7 +2373,7 @@ func testGetClientCertificate(t *testing.T, version uint16) { serverConfig.RootCAs = x509.NewCertPool() serverConfig.RootCAs.AddCert(issuer) serverConfig.ClientCAs = serverConfig.RootCAs - serverConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + serverConfig.Time = testTime serverConfig.MaxVersion = version clientConfig := testConfig.Clone() @@ -2539,6 +2544,7 @@ func testResumptionKeepsOCSPAndSCT(t *testing.T, ver uint16) { ClientSessionCache: NewLRUClientSessionCache(32), ServerName: "example.golang", RootCAs: roots, + Time: testTime, } serverConfig := testConfig.Clone() serverConfig.MaxVersion = ver diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 94d3d0f6dc87bc..bbfe44bd97daa2 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -501,6 +501,7 @@ func testCrossVersionResume(t *testing.T, 
version uint16) { serverConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, Certificates: testConfig.Certificates, + Time: testTime, } clientConfig := &Config{ CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA}, @@ -508,6 +509,7 @@ func testCrossVersionResume(t *testing.T, version uint16) { ClientSessionCache: NewLRUClientSessionCache(1), ServerName: "servername", MinVersion: VersionTLS12, + Time: testTime, } // Establish a session at TLS 1.3. diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index 41c2643f2a8d28..803aa736578f8c 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -519,6 +519,11 @@ func fromHex(s string) []byte { return b } +// testTime is 2016-10-20T17:32:09.000Z, which is within the validity period of +// [testRSACertificate], [testRSACertificateIssuer], [testRSA2048Certificate], +// [testRSA2048CertificateIssuer], and [testECDSACertificate]. +var testTime = func() time.Time { return time.Unix(1476984729, 0) } + var testRSACertificate = fromHex("3082024b308201b4a003020102020900e8f09d3fe25beaa6300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301a310b3009060355040a1302476f310b300906035504031302476f30819f300d06092a864886f70d010101050003818d0030818902818100db467d932e12270648bc062821ab7ec4b6a25dfe1e5245887a3647a5080d92425bc281c0be97799840fb4f6d14fd2b138bc2a52e67d8d4099ed62238b74a0b74732bc234f1d193e596d9747bf3589f6c613cc0b041d4d92b2b2423775b1c3bbd755dce2054cfa163871d1e24c4f31d1a508baab61443ed97a77562f414c852d70203010001a38193308190300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b06010505070302300c0603551d130101ff0402300030190603551d0e041204109f91161f43433e49a6de6db680d79f60301b0603551d230414301280104813494d137e1631bba301d5acab6e7b30190603551d1104123010820e6578616d706c652e676f6c616e67300d06092a864886f70d01010b0500038181009d30cc402b5b50a061cbbae55358e1ed8328a9581aa938a495a1ac315a1a84663d43d32dd90bf297dfd320643892243a00bccf9c7db74020015faad3166109a276fd13c3cce10c5ceeb18782f16c04ed73bbb343778d0c1cf10fa1d8408361c94c722b9daedb4606064df4c1b33ec0d1bd42d4dbfe3d1360845c21d33be9fae7") var testRSACertificateIssuer = fromHex("3082021930820182a003020102020900ca5e4e811a965964300d06092a864886f70d01010b0500301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f74301e170d3136303130313030303030305a170d3235303130313030303030305a301f310b3009060355040a1302476f3110300e06035504031307476f20526f6f7430819f300d06092a864886f70d010101050003818d0030818902818100d667b378bb22f34143b6cd2008236abefaf2852adf3ab05e01329e2c14834f5105df3f3073f99dab5442d45ee5f8f57b0111c8cb682fbb719a86944eebfffef3406206d898b8c1b1887797c9c5006547bb8f00e694b7a063f10839f269f2c34fff7a1f4b21fbcd6bfdfb13ac792d1d11f277b5c5b48600992203059f2a8f8cc50203010001a35d305b300e0603551d0f0101ff040403020204301d0603551d250416301406082b0601050507030106082b06010505070302300f0603551d130101ff040530030101ff30190603551d0e041204104813494d137e1631bba301d5acab6e7b300d06092a864886f70d01010b050003818100c1154b4bab5266221f293766ae4138899bd4c5e36b13cee670ceeaa4cbdf4f6679017e2fe649765af545749fe4249418a56bd38a04b81e261f5ce86b8d5c65413156a50d12449554748c59a30c515bc36a59d38bddf51173e899820b282e40aa78c806526fd184fb6b4cf186ec728edffa585440d2b3225325f7ab580e87dd76") diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index fc5040635fbbf7..13c5ddced2cddb 100644 --- a/src/crypto/tls/tls_test.go +++ 
b/src/crypto/tls/tls_test.go @@ -1112,8 +1112,6 @@ func TestConnectionState(t *testing.T) { rootCAs := x509.NewCertPool() rootCAs.AddCert(issuer) - now := func() time.Time { return time.Unix(1476984729, 0) } - const alpnProtocol = "golang" const serverName = "example.golang" var scts = [][]byte{[]byte("dummy sct 1"), []byte("dummy sct 2")} @@ -1129,7 +1127,7 @@ func TestConnectionState(t *testing.T) { } t.Run(name, func(t *testing.T) { config := &Config{ - Time: now, + Time: testTime, Rand: zeroSource{}, Certificates: make([]Certificate, 1), MaxVersion: v, @@ -1760,7 +1758,7 @@ func testVerifyCertificates(t *testing.T, version uint16) { var serverVerifyPeerCertificates, clientVerifyPeerCertificates bool clientConfig := testConfig.Clone() - clientConfig.Time = func() time.Time { return time.Unix(1476984729, 0) } + clientConfig.Time = testTime clientConfig.MaxVersion = version clientConfig.MinVersion = version clientConfig.RootCAs = rootCAs From 1dde0b484489653136a54df9932cc8d1c0fb6d1b Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Mon, 23 Dec 2024 17:21:07 +0000 Subject: [PATCH 64/66] [release-branch.go1.23] runtime: hold traceAcquire across casgstatus in injectglist Currently injectglist emits all the trace events before actually calling casgstatus on each goroutine. This is a problem, since tracing can observe an inconsistent state (gstatus does not match tracer's 'emitted an event' state). This change fixes the problem by having injectglist do what every other scheduler function does, and that's wrap each call to casgstatus in traceAcquire/traceRelease. For #70883. Fixes #71147. Change-Id: I857e96cec01688013597e8efc0c4c3d0b72d3a70 Reviewed-on: https://go-review.googlesource.com/c/go/+/638558 Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI (cherry picked from commit f025d19e7b3f0c66242760c213cc2b54cb100f69) Reviewed-on: https://go-review.googlesource.com/c/go/+/641378 Auto-Submit: Michael Pratt --- src/runtime/proc.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index d5cfaa391647d8..e3cdf71911be45 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -3873,23 +3873,23 @@ func injectglist(glist *gList) { if glist.empty() { return } - trace := traceAcquire() - if trace.ok() { - for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { - trace.GoUnpark(gp, 0) - } - traceRelease(trace) - } // Mark all the goroutines as runnable before we put them // on the run queues. head := glist.head.ptr() var tail *g qsize := 0 + trace := traceAcquire() for gp := head; gp != nil; gp = gp.schedlink.ptr() { tail = gp qsize++ casgstatus(gp, _Gwaiting, _Grunnable) + if trace.ok() { + trace.GoUnpark(gp, 0) + } + } + if trace.ok() { + traceRelease(trace) } // Turn the gList into a gQueue. From 29a6c8311c29924b49373acf4f04b7c8fdb2f798 Mon Sep 17 00:00:00 2001 From: Gourav Kumar Date: Fri, 17 Jan 2025 13:08:14 +0530 Subject: [PATCH 65/66] local changes after patch for memory regions --- src/runtime/cgocheck.go | 124 +++++++++++++++---------- src/runtime/internal/sys/intrinsics.go | 4 + src/runtime/mgc.go | 22 +++++ src/runtime/runtime2.go | 4 + 4 files changed, 105 insertions(+), 49 deletions(-) diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 3f2c271953db66..44f9eed40208c5 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -30,43 +30,48 @@ func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) { // runtime has set itself up. 
return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(unsafe.Pointer(dst)) { - return + if src != nil { + getg().m.p.ptr().ptrWrites++ } + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(unsafe.Pointer(dst)) { + return + } - // If we are running on the system stack then dst might be an - // address on the stack, which is OK. - gp := getg() - if gp == gp.m.g0 || gp == gp.m.gsignal { - return - } + // If we are running on the system stack then dst might be an + // address on the stack, which is OK. + gp := getg() + if gp == gp.m.g0 || gp == gp.m.gsignal { + return + } - // Allocating memory can write to various mfixalloc structs - // that look like they are non-Go memory. - if gp.m.mallocing != 0 { - return - } + // Allocating memory can write to various mfixalloc structs + // that look like they are non-Go memory. + if gp.m.mallocing != 0 { + return + } - // If the object is pinned, it's safe to store it in C memory. The GC - // ensures it will not be moved or freed. - if isPinned(src) { - return - } + // If the object is pinned, it's safe to store it in C memory. The GC + // ensures it will not be moved or freed. + if isPinned(src) { + return + } - // It's OK if writing to memory allocated by persistentalloc. - // Do this check last because it is more expensive and rarely true. - // If it is false the expense doesn't matter since we are crashing. - if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { - return - } + // It's OK if writing to memory allocated by persistentalloc. + // Do this check last because it is more expensive and rarely true. + // If it is false the expense doesn't matter since we are crashing. + if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { + return + } - systemstack(func() { - println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) - throw(cgoWriteBarrierFail) - }) + systemstack(func() { + println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) + throw(cgoWriteBarrierFail) + }) + */ } // cgoCheckMemmove is called when moving a block of memory. @@ -93,13 +98,16 @@ func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) { if !typ.Pointers() { return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(dst) { - return - } - cgoCheckTypedBlock(typ, src, off, size) + countWrittenPointers(typ, 1) + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + cgoCheckTypedBlock(typ, src, off, size) + */ } // cgoCheckSliceCopy is called when copying n elements of a slice. 
@@ -114,17 +122,20 @@ func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) { if !typ.Pointers() { return } - if !cgoIsGoPointer(src) { - return - } - if cgoIsGoPointer(dst) { - return - } - p := src - for i := 0; i < n; i++ { - cgoCheckTypedBlock(typ, p, 0, typ.Size_) - p = add(p, typ.Size_) - } + countWrittenPointers(typ, n) + /* + if !cgoIsGoPointer(src) { + return + } + if cgoIsGoPointer(dst) { + return + } + p := src + for i := 0; i < n; i++ { + cgoCheckTypedBlock(typ, p, 0, typ.Size_) + p = add(p, typ.Size_) + } + */ } // cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, @@ -190,6 +201,21 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { } } +//go:nosplit +//go:nowritebarrier +func countWrittenPointers(typ *_type, n int) { + ptrs := uint64(0) + for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize * ptrBits { + b := uint64(readUintptr(addb(typ.GCData, i/ptrBits))) + for j := range 64 { + if b&(uint64(1)< Date: Fri, 17 Jan 2025 18:59:55 +0530 Subject: [PATCH 66/66] cockroach go diff patch --- src/context/context.go | 60 ++++++++++++++++++++++++++------ src/crypto/md5/md5.go | 9 +++++ src/crypto/md5/md5_test.go | 31 +++++++++++++++-- src/crypto/sha256/sha256.go | 9 +++++ src/crypto/sha256/sha256_test.go | 52 +++++++++++++++++++++++++++ src/runtime/extern.go | 4 +++ src/runtime/lock_futex.go | 2 ++ src/runtime/lock_js.go | 2 ++ src/runtime/lock_sema.go | 2 ++ src/runtime/lock_wasip1.go | 2 ++ src/runtime/malloc.go | 2 +- src/runtime/proc.go | 38 +++++++++++++++++--- src/runtime/runtime1.go | 2 ++ src/runtime/runtime2.go | 6 +++- src/runtime/sizeof_test.go | 2 +- 15 files changed, 203 insertions(+), 20 deletions(-) diff --git a/src/context/context.go b/src/context/context.go index 763d4f777ffb86..73739bc90ea79f 100644 --- a/src/context/context.go +++ b/src/context/context.go @@ -59,6 +59,7 @@ import ( "sync" "sync/atomic" "time" + _ "unsafe" // for go:linkname ) // A Context carries a deadline, a cancellation signal, and other values across @@ -361,6 +362,7 @@ type stopCtx struct { var goroutines atomic.Int32 // &cancelCtxKey is the key that a cancelCtx returns itself for. +//go:linkname cancelCtxKey var cancelCtxKey int // parentCancelCtx returns the underlying *cancelCtx for parent. @@ -477,17 +479,7 @@ func (c *cancelCtx) propagateCancel(parent Context, child canceler) { if p, ok := parentCancelCtx(parent); ok { // parent is a *cancelCtx, or derives from one. - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err, p.cause) - } else { - if p.children == nil { - p.children = make(map[canceler]struct{}) - } - p.children[child] = struct{}{} - } - p.mu.Unlock() + p.addChild(child) return } @@ -515,6 +507,22 @@ func (c *cancelCtx) propagateCancel(parent Context, child canceler) { }() } +// addChild adds child to the list of children. +// NB: CockroachDB runtime patch. +func (c *cancelCtx) addChild(child canceler) { + c.mu.Lock() + if c.err != nil { + // parent has already been canceled + child.cancel(false, c.err, c.cause) + } else { + if c.children == nil { + c.children = make(map[canceler]struct{}) + } + c.children[child] = struct{}{} + } + c.mu.Unlock() +} + type stringer interface { String() string } @@ -790,3 +798,33 @@ func value(c Context, key any) any { } } } + +// CockroachDB runtime patch. +// cancelerAdapter invokes f when cancel context completes. 
+type cancelerAdapter struct { + *cancelCtx + f func() +} + +func (c *cancelerAdapter) cancel(removeFromParent bool, err, cause error) { + if removeFromParent { + removeChild(c.cancelCtx, c) + } + c.f() +} + +// PropagateCancel arranges for f to be invoked when parent is done. +// Parent must be one of the cancelable contexts. +// Returns true if cancellation will be propagated, false if the parent +// is not cancelable. +// This is similar to AfterFunc(), but does not spin up goroutine, and instead +// invokes f on whatever goroutine completed parent context. +func PropagateCancel(parent Context, f func()) bool { + p, ok := parent.Value(&cancelCtxKey).(*cancelCtx) + if !ok { + return false + } + a := cancelerAdapter{cancelCtx: p, f: f} + p.addChild(&a) + return true +} diff --git a/src/crypto/md5/md5.go b/src/crypto/md5/md5.go index 843678702bf93f..979b4533221858 100644 --- a/src/crypto/md5/md5.go +++ b/src/crypto/md5/md5.go @@ -27,6 +27,10 @@ const Size = 16 // The blocksize of MD5 in bytes. const BlockSize = 64 +// The maximum number of bytes that can be passed to block. +const maxAsmIters = 1024 +const maxAsmSize = BlockSize * maxAsmIters // 64KiB + const ( init0 = 0x67452301 init1 = 0xEFCDAB89 @@ -130,6 +134,11 @@ func (d *digest) Write(p []byte) (nn int, err error) { if len(p) >= BlockSize { n := len(p) &^ (BlockSize - 1) if haveAsm { + for n > maxAsmSize { + block(d, p[:maxAsmSize]) + p = p[maxAsmSize:] + n -= maxAsmSize + } block(d, p[:n]) } else { blockGeneric(d, p[:n]) diff --git a/src/crypto/md5/md5_test.go b/src/crypto/md5/md5_test.go index a5b661126dd716..5285a13724d23d 100644 --- a/src/crypto/md5/md5_test.go +++ b/src/crypto/md5/md5_test.go @@ -121,10 +121,11 @@ func TestGoldenMarshal(t *testing.T) { func TestLarge(t *testing.T) { const N = 10000 + const offsets = 4 ok := "2bb571599a4180e1d542f76904adc3df" // md5sum of "0123456789" * 1000 - block := make([]byte, 10004) + block := make([]byte, N+offsets) c := New() - for offset := 0; offset < 4; offset++ { + for offset := 0; offset < offsets; offset++ { for i := 0; i < N; i++ { block[offset+i] = '0' + byte(i%10) } @@ -143,6 +144,32 @@ func TestLarge(t *testing.T) { } } +func TestExtraLarge(t *testing.T) { + const N = 100000 + const offsets = 4 + ok := "13572e9e296cff52b79c52148313c3a5" // md5sum of "0123456789" * 10000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("md5 TestExtraLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + + // Tests that blockGeneric (pure Go) and block (in assembly for amd64, 386, arm) match. func TestBlockGeneric(t *testing.T) { gen, asm := New().(*digest), New().(*digest) diff --git a/src/crypto/sha256/sha256.go b/src/crypto/sha256/sha256.go index 68244fd63b0c1e..a2f669fa9ccb96 100644 --- a/src/crypto/sha256/sha256.go +++ b/src/crypto/sha256/sha256.go @@ -28,6 +28,10 @@ const Size224 = 28 // The blocksize of SHA256 and SHA224 in bytes. const BlockSize = 64 +// The maximum number of bytes that can be passed to block. 
+const maxAsmIters = 1024 +const maxAsmSize = BlockSize * maxAsmIters // 64KiB + const ( chunk = 64 init0 = 0x6A09E667 @@ -186,6 +190,11 @@ func (d *digest) Write(p []byte) (nn int, err error) { } if len(p) >= chunk { n := len(p) &^ (chunk - 1) + for n > maxAsmSize { + block(d, p[:maxAsmSize]) + p = p[maxAsmSize:] + n -= maxAsmSize + } block(d, p[:n]) p = p[n:] } diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go index d91f01e9ba3a5f..f5dd4025d25ecf 100644 --- a/src/crypto/sha256/sha256_test.go +++ b/src/crypto/sha256/sha256_test.go @@ -184,6 +184,58 @@ func TestGoldenMarshal(t *testing.T) { } } + + +func TestLarge(t *testing.T) { + const N = 10000 + const offsets = 4 + ok := "4c207598af7a20db0e3334dd044399a40e467cb81b37f7ba05a4f76dcbd8fd59" // sha256sum of "0123456789" * 1000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("sha256 TestLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + +func TestExtraLarge(t *testing.T) { + const N = 100000 + const offsets = 4 + ok := "aca9e593cc629cbaa94cd5a07dc029424aad93e5129e5d11f8dcd2f139c16cc0" // sha256sum of "0123456789" * 10000 + block := make([]byte, N+offsets) + c := New() + for offset := 0; offset < offsets; offset++ { + for i := 0; i < N; i++ { + block[offset+i] = '0' + byte(i%10) + } + for blockSize := 10; blockSize <= N; blockSize *= 10 { + blocks := N / blockSize + b := block[offset : offset+blockSize] + c.Reset() + for i := 0; i < blocks; i++ { + c.Write(b) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != ok { + t.Fatalf("sha256 TestExtraLarge offset=%d, blockSize=%d = %s want %s", offset, blockSize, s, ok) + } + } + } +} + func TestMarshalTypeMismatch(t *testing.T) { h1 := New() h2 := New224() diff --git a/src/runtime/extern.go b/src/runtime/extern.go index 2019be4ddec4f3..fce67adb7d80d3 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -89,6 +89,10 @@ It is a comma-separated list of name=val pairs setting these named variables: making every garbage collection a stop-the-world event. Setting gcstoptheworld=2 also disables concurrent sweeping after the garbage collection finishes. + gcnoassist: setting gcnoassist=1 disables garbage collection assist, minimizing + garbage collection overhead for user goroutines at the expense of a higher risk + of out-of-memory failures with high allocation rates. + gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard error at each collection, summarizing the amount of memory collected and the length of the pause. The format of this line is subject to change. 
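Note: the gcnoassist knob documented above is read from GODEBUG at process start, so it is enabled from the environment rather than from code. The sketch below is illustrative only and not part of the patch; the program name and allocation sizes are made up, and it assumes a runtime built with this change (run as GODEBUG=gcnoassist=1 ./allocheavy).

package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

func main() {
	// With GODEBUG=gcnoassist=1 on the patched runtime, allocating goroutines
	// are never drafted into GC assist work; background workers do all the
	// marking, keeping allocation latency flat at the cost of a higher risk
	// of heap growth under sustained allocation.
	debug.SetGCPercent(50) // trigger GC often enough to make the effect visible
	var keep [][]byte
	for i := 0; i < 4096; i++ {
		keep = append(keep, make([]byte, 64<<10))
	}
	fmt.Println("allocated", len(keep), "blocks, GODEBUG:", os.Getenv("GODEBUG"))
}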
diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go
index 58690e45e4d5a8..4aafc3e44d3496 100644
--- a/src/runtime/lock_futex.go
+++ b/src/runtime/lock_futex.go
@@ -48,6 +48,7 @@ func mutexContended(l *mutex) bool {
 	return atomic.Load(key32(&l.key)) > mutex_locked
 }
 
+//go:linkname lock
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -117,6 +118,7 @@ func lock2(l *mutex) {
 	}
 }
 
+//go:linkname unlock
 func unlock(l *mutex) {
 	unlockWithRank(l)
 }
diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go
index b6ee5ec7afe269..5ca1e3d5611abb 100644
--- a/src/runtime/lock_js.go
+++ b/src/runtime/lock_js.go
@@ -27,6 +27,7 @@ func mutexContended(l *mutex) bool {
 	return false
 }
 
+//go:linkname lock
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -45,6 +46,7 @@ func lock2(l *mutex) {
 	l.key = mutex_locked
 }
 
+//go:linkname unlock
 func unlock(l *mutex) {
 	unlockWithRank(l)
 }
diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go
index 32d2235ad3ab90..20a0243655465f 100644
--- a/src/runtime/lock_sema.go
+++ b/src/runtime/lock_sema.go
@@ -35,6 +35,7 @@ func mutexContended(l *mutex) bool {
 	return atomic.Loaduintptr(&l.key) > locked
 }
 
+//go:linkname lock
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -99,6 +100,7 @@ Loop:
 	}
 }
 
+//go:linkname unlock
 func unlock(l *mutex) {
 	unlockWithRank(l)
 }
diff --git a/src/runtime/lock_wasip1.go b/src/runtime/lock_wasip1.go
index acfc62acb48e90..2c5bd3c5907916 100644
--- a/src/runtime/lock_wasip1.go
+++ b/src/runtime/lock_wasip1.go
@@ -23,6 +23,7 @@ func mutexContended(l *mutex) bool {
 	return false
 }
 
+//go:linkname lock
 func lock(l *mutex) {
 	lockWithRank(l, getLockRank(l))
 }
@@ -41,6 +42,7 @@ func lock2(l *mutex) {
 	l.key = mutex_locked
 }
 
+//go:linkname unlock
 func unlock(l *mutex) {
 	unlockWithRank(l)
 }
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index b92a213245d4f7..112fd876d0e199 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1332,7 +1332,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 // Returns the G for which the assist credit was accounted.
 func deductAssistCredit(size uintptr) *g {
 	var assistG *g
-	if gcBlackenEnabled != 0 {
+	if debug.gcnoassist == 0 && gcBlackenEnabled != 0 {
 		// Charge the current user G for this allocation.
 		assistG = getg()
 		if assistG.m.curg != nil {
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index e3cdf71911be45..e5bf3ff4f0bfc0 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -1137,6 +1137,11 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
 		dumpgstatus(gp)
 		throw("casfrom_Gscanstatus: gp->status is not in scan state")
 	}
+	// We're transitioning into the running state, record the timestamp for
+	// subsequent use.
+	if newval == _Grunning {
+		gp.lastsched = nanotime()
+	}
 	releaseLockRankAndM(lockRankGscan)
 }
 
@@ -1152,6 +1157,11 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
 			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
 			if r {
 				acquireLockRankAndM(lockRankGscan)
+				// We're transitioning out of running, record how long we were in the
+				// state.
+				if oldval == _Grunning {
+					gp.runningnanos += nanotime() - gp.lastsched
+				}
 			}
 			return r
 
@@ -1211,7 +1221,18 @@ func casgstatus(gp *g, oldval, newval uint32) {
 		}
 	}
 
+	now := nanotime()
+	if newval == _Grunning {
+		// We're transitioning into the running state, record the timestamp for
+		// subsequent use.
+ gp.lastsched = now + } + if oldval == _Grunning { + // We're transitioning out of running, record how long we were in the + // state. + gp.runningnanos += now - gp.lastsched + // Track every gTrackingPeriod time a goroutine transitions out of running. if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 { gp.tracking = true @@ -1232,7 +1253,6 @@ func casgstatus(gp *g, oldval, newval uint32) { // We transitioned out of runnable, so measure how much // time we spent in this state and add it to // runnableTime. - now := nanotime() gp.runnableTime += now - gp.trackingStamp gp.trackingStamp = 0 case _Gwaiting: @@ -1245,7 +1265,6 @@ func casgstatus(gp *g, oldval, newval uint32) { // a more representative estimate of the absolute value. // gTrackingPeriod also represents an accurate sampling period // because we can only enter this state from _Grunning. - now := nanotime() sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod) gp.trackingStamp = 0 } @@ -1256,12 +1275,10 @@ func casgstatus(gp *g, oldval, newval uint32) { break } // Blocking on a lock. Write down the timestamp. - now := nanotime() gp.trackingStamp = now case _Grunnable: // We just transitioned into runnable, so record what // time that happened. - now := nanotime() gp.trackingStamp = now case _Grunning: // We're transitioning into running, so turn off @@ -1323,6 +1340,9 @@ func casGToPreemptScan(gp *g, old, new uint32) { acquireLockRankAndM(lockRankGscan) for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) { } + // We're transitioning out of running, record how long we were in the + // state. + gp.runningnanos += nanotime() - gp.lastsched } // casGFromPreempted attempts to transition gp from _Gpreempted to @@ -4060,6 +4080,14 @@ func dropg() { setGNoWB(&gp.m.curg, nil) } +// Grunningnanos returns the wall time spent by current g in the running state. +// A goroutine may be running on an OS thread that's descheduled by the OS +// scheduler, this time still counts towards the metric. +func Grunningnanos() int64 { + gp := getg() + return gp.runningnanos + nanotime() - gp.lastsched +} + func parkunlock_c(gp *g, lock unsafe.Pointer) bool { unlock((*mutex)(lock)) return true @@ -4291,6 +4319,8 @@ func gdestroy(gp *g) { gp.param = nil gp.labels = nil gp.timer = nil + gp.lastsched = 0 + gp.runningnanos = 0 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { // Flush assist credit to the global pool. 
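The proc.go changes above expose the per-goroutine running time through runtime.Grunningnanos. A minimal usage sketch, assuming a toolchain built with this patched runtime; the spin helper is illustrative only:

package main

import (
	"fmt"
	"runtime"
	"time"
)

// spin burns CPU so the goroutine accumulates time in the running state.
func spin(d time.Duration) {
	deadline := time.Now().Add(d)
	for time.Now().Before(deadline) {
	}
}

func main() {
	done := make(chan time.Duration)
	go func() {
		start := runtime.Grunningnanos() // running-state nanos for this goroutine
		spin(50 * time.Millisecond)
		time.Sleep(50 * time.Millisecond) // _Gwaiting time does not count
		done <- time.Duration(runtime.Grunningnanos() - start)
	}()
	fmt.Println("goroutine running time:", <-done) // roughly 50ms, not 100ms
}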
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 03ef74b8dc4b54..cb45504fbfc8ec 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -316,6 +316,7 @@ var debug struct {
 	gcpacertrace int32
 	gcshrinkstackoff int32
 	gcstoptheworld int32
+	gcnoassist int32
 	gctrace int32
 	invalidptr int32
 	madvdontneed int32 // for Linux; issue 28466
@@ -374,6 +375,7 @@ var dbgvars = []*dbgVar{
 	{name: "gcpacertrace", value: &debug.gcpacertrace},
 	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
 	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
+	{name: "gcnoassist", value: &debug.gcnoassist},
 	{name: "gctrace", value: &debug.gctrace},
 	{name: "harddecommit", value: &debug.harddecommit},
 	{name: "inittrace", value: &debug.inittrace},
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 9434dfa4ae8608..65051c55351c16 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -493,7 +493,6 @@ type g struct {
 	trackingStamp int64 // timestamp of when the G last started being tracked
 	runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
 	lockedm muintptr
-	sig uint32
 	writebuf []byte
 	sigcode0 uintptr
 	sigcode1 uintptr
@@ -509,6 +508,10 @@ type g struct {
 	timer *timer // cached timer for time.Sleep
 	sleepWhen int64 // when to sleep until
 	selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
+	sig uint32
+	lastsched int64 // timestamp when the G last started running
+	runningnanos int64 // wall time spent in the running state
+
 
 	// goroutineProfiled indicates the status of this goroutine's stack for the
 	// current in-progress goroutine profile
@@ -1196,6 +1199,7 @@ var (
 
 	// len(allp) == gomaxprocs; may change at safe points, otherwise
 	// immutable.
+	//go:linkname allp
 	allp []*p
 
 	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index 43aba98dcebcdb..a076c93b8e8df9 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{runtime.G{}, 272, 432}, // g, but exported for testing
+		{runtime.G{}, 272, 448}, // g, but exported for testing
 		{runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
 	}
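For completeness, a usage sketch of the context.PropagateCancel helper added earlier in this patch. It is illustrative only and assumes the patched standard library; the callback runs synchronously inside cancel(), on whichever goroutine cancels the context.

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Register a cleanup without spawning a watcher goroutine the way
	// context.AfterFunc does; ok is false if ctx is not cancelable.
	ok := context.PropagateCancel(ctx, func() {
		fmt.Println("ctx canceled: releasing resources")
	})
	fmt.Println("registered:", ok) // true for a WithCancel context

	cancel() // the callback above runs before cancel returns
}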