diff -Nru golang-1.5.1/debian/changelog golang-1.5.2/debian/changelog
--- golang-1.5.1/debian/changelog 2015-11-24 11:09:15.000000000 +1300
+++ golang-1.5.2/debian/changelog 2015-12-09 10:36:24.000000000 +1300
@@ -1,3 +1,10 @@
+golang (2:1.5.2-0ubuntu1) xenial; urgency=medium
+
+ * New upstream release. (LP: #1524071)
+ - Drop d/patches/qemu-compat.patch and d/patches/support-new-relocations.patch.
+
+ -- Michael Hudson-Doyle Wed, 09 Dec 2015 10:34:00 +1300
+
golang (2:1.5.1-0ubuntu4) xenial; urgency=medium
* Add d/patches/support-new-relocations.patch to fix ftbfs on xenial.
diff -Nru golang-1.5.1/debian/patches/qemu-compat.patch golang-1.5.2/debian/patches/qemu-compat.patch
--- golang-1.5.1/debian/patches/qemu-compat.patch 2015-11-13 23:33:43.000000000 +1300
+++ golang-1.5.2/debian/patches/qemu-compat.patch 1970-01-01 12:00:00.000000000 +1200
@@ -1,17 +0,0 @@
-Description: handle a quirk of rt_sigaction under user-mode qemu
-Origin: https://go.googlesource.com/go/+/d10675089d74db0408f2432eae3bd89a8e1c2d6a
-Bug: https://github.com/golang/go/issues/13024
-Applied-Upstream: commit:d10675089d74db0408f2432eae3bd89a8e1c2d6a
-
---- a/src/runtime/os1_linux.go
-+++ b/src/runtime/os1_linux.go
-@@ -293,7 +293,8 @@
- fn = funcPC(sigtramp)
- }
- sa.sa_handler = fn
-- if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
-+ // Qemu rejects rt_sigaction of SIGRTMAX (64).
-+ if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 && i != 64 {
- throw("rt_sigaction failure")
- }
- }
diff -Nru golang-1.5.1/debian/patches/series golang-1.5.2/debian/patches/series
--- golang-1.5.1/debian/patches/series 2015-11-24 11:09:15.000000000 +1300
+++ golang-1.5.2/debian/patches/series 2015-12-09 10:33:36.000000000 +1300
@@ -1,3 +1 @@
armhf-elf-header.patch
-qemu-compat.patch
-support-new-relocations.patch
diff -Nru golang-1.5.1/debian/patches/support-new-relocations.patch golang-1.5.2/debian/patches/support-new-relocations.patch
--- golang-1.5.1/debian/patches/support-new-relocations.patch 2015-11-24 11:09:15.000000000 +1300
+++ golang-1.5.2/debian/patches/support-new-relocations.patch 1970-01-01 12:00:00.000000000 +1200
@@ -1,84 +0,0 @@
-Description: support new 386/amd64 relocations generated by new bintuils
-Origin: https://go.googlesource.com/go/+/914db9f060b1fd3eb1f74d48f3bd46a73d4ae9c7
-Bug: https://github.com/golang/go/issues/13114
-Applied-Upstream: commit:914db9f060b1fd3eb1f74d48f3bd46a73d4ae9c7
-
---- a/src/cmd/link/internal/amd64/asm.go
-+++ b/src/cmd/link/internal/amd64/asm.go
-@@ -141,7 +141,7 @@
-
- return
-
-- case 256 + ld.R_X86_64_GOTPCREL:
-+ case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
- if targ.Type != obj.SDYNIMPORT {
- // have symbol
- if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
---- a/src/cmd/link/internal/ld/elf.go
-+++ b/src/cmd/link/internal/ld/elf.go
-@@ -349,6 +349,23 @@
- R_X86_64_GOTTPOFF = 22
- R_X86_64_TPOFF32 = 23
- R_X86_64_COUNT = 24
-+ R_X86_64_PC64 = 24
-+ R_X86_64_GOTOFF64 = 25
-+ R_X86_64_GOTPC32 = 26
-+ R_X86_64_GOT64 = 27
-+ R_X86_64_GOTPCREL64 = 28
-+ R_X86_64_GOTPC64 = 29
-+ R_X86_64_GOTPLT64 = 30
-+ R_X86_64_PLTOFF64 = 31
-+ R_X86_64_SIZE32 = 32
-+ R_X86_64_SIZE64 = 33
-+ R_X86_64_GOTPC32_TLSDEC = 34
-+ R_X86_64_TLSDESC_CALL = 35
-+ R_X86_64_TLSDESC = 36
-+ R_X86_64_IRELATIVE = 37
-+ R_X86_64_PC32_BND = 40
-+ R_X86_64_GOTPCRELX = 41
-+ R_X86_64_REX_GOTPCRELX = 42
- R_AARCH64_ABS64 = 257
- R_AARCH64_ABS32 = 258
- R_AARCH64_CALL26 = 283
-@@ -454,7 +471,11 @@
- R_386_TLS_DTPMOD32 = 35
- R_386_TLS_DTPOFF32 = 36
- R_386_TLS_TPOFF32 = 37
-- R_386_COUNT = 38
-+ R_386_TLS_GOTDESC = 39
-+ R_386_TLS_DESC_CALL = 40
-+ R_386_TLS_DESC = 41
-+ R_386_IRELATIVE = 42
-+ R_386_GOT32X = 43
- R_PPC_NONE = 0
- R_PPC_ADDR32 = 1
- R_PPC_ADDR24 = 2
---- a/src/cmd/link/internal/ld/ldelf.go
-+++ b/src/cmd/link/internal/ld/ldelf.go
-@@ -1001,12 +1001,15 @@
- '6' | R_X86_64_PC32<<24,
- '6' | R_X86_64_PLT32<<24,
- '6' | R_X86_64_GOTPCREL<<24,
-+ '6' | R_X86_64_GOTPCRELX<<24,
-+ '6' | R_X86_64_REX_GOTPCRELX<<24,
- '8' | R_386_32<<24,
- '8' | R_386_PC32<<24,
- '8' | R_386_GOT32<<24,
- '8' | R_386_PLT32<<24,
- '8' | R_386_GOTOFF<<24,
- '8' | R_386_GOTPC<<24,
-+ '8' | R_386_GOT32X<<24,
- '9' | R_PPC64_REL24<<24:
- *siz = 4
-
---- a/src/cmd/link/internal/x86/asm.go
-+++ b/src/cmd/link/internal/x86/asm.go
-@@ -78,7 +78,7 @@
-
- return
-
-- case 256 + ld.R_386_GOT32:
-+ case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X:
- if targ.Type != obj.SDYNIMPORT {
- // have symbol
- if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
diff -Nru golang-1.5.1/doc/conduct.html golang-1.5.2/doc/conduct.html
--- golang-1.5.1/doc/conduct.html 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/doc/conduct.html 2015-12-03 13:52:58.000000000 +1300
@@ -0,0 +1,273 @@
+
+
+
+
+About the Code of Conduct
+
+Why have a Code of Conduct?
+
+
+Online communities include people from many different backgrounds.
+The Go contributors are committed to providing a friendly, safe and welcoming
+environment for all, regardless of age, disability, gender, nationality, race,
+religion, sexuality, or similar personal characteristic.
+
+
+
+The first goal of the Code of Conduct is to specify a baseline standard
+of behavior so that people with different social values and communication
+styles can talk about Go effectively, productively, and respectfully.
+
+
+
+The second goal is to provide a mechanism for resolving conflicts in the
+community when they arise.
+
+
+
+The third goal of the Code of Conduct is to make our community welcoming to
+people from different backgrounds.
+Diversity is critical to the project; for Go to be successful, it needs
+contributors and users from all backgrounds.
+(See Go, Open Source, Community.)
+
+
+
+With that said, a healthy community must allow for disagreement and debate.
+The Code of Conduct is not a mechanism for people to silence others with whom
+they disagree.
+
+
+Where does the Code of Conduct apply?
+
+
+If you participate in or contribute to the Go ecosystem in any way,
+you are encouraged to follow the Code of Conduct while doing so.
+
+
+
+Explicit enforcement of the Code of Conduct applies to the
+official forums operated by the Go project (“Go spaces”):
+
+
+
+
+
+Other Go groups (such as conferences, meetups, and other unofficial forums) are
+encouraged to adopt this Code of Conduct. Those groups must provide their own
+moderators and/or working group (see below).
+
+
+Gopher values
+
+
+These are the values to which people in the Go community (“Gophers”) should aspire.
+
+
+
+- Be friendly and welcoming
+
- Be patient
+
+ - Remember that people have varying communication styles and that not
+ everyone is using their native language.
+ (Meaning and tone can be lost in translation.)
+
+ - Be thoughtful
+
+ - Productive communication requires effort.
+ Think about how your words will be interpreted.
+
- Remember that sometimes it is best to refrain entirely from commenting.
+
+ - Be respectful
+
+ - In particular, respect differences of opinion.
+
+ - Be charitable
+
+ - Interpret the arguments of others in good faith, do not seek to disagree.
+
- When we do disagree, try to understand why.
+
+ - Avoid destructive behavior:
+
+ - Derailing: stay on topic; if you want to talk about something else,
+ start a new conversation.
+
- Unconstructive criticism: don't merely decry the current state of affairs;
+ offer—or at least solicit—suggestions as to how things may be improved.
+
- Snarking (pithy, unproductive, sniping comments)
+
- Discussing potentially offensive or sensitive issues;
+ this all too often leads to unnecessary conflict.
+
- Microaggressions: brief and commonplace verbal, behavioral and
+ environmental indignities that communicate hostile, derogatory or negative
+ slights and insults to a person or group.
+
+
+
+
+People are complicated.
+You should expect to be misunderstood and to misunderstand others;
+when this inevitably occurs, resist the urge to be defensive or assign blame.
+Try not to take offense where no offense was intended.
+Give people the benefit of the doubt.
+Even if the intent was to provoke, do not rise to it.
+It is the responsibility of all parties to de-escalate conflict when it arises.
+
+
+Unwelcome behavior
+
+
+These actions are explicitly forbidden in Go spaces:
+
+
+
+- Insulting, demeaning, hateful, or threatening remarks.
+
- Discrimination based on age, disability, gender, nationality, race,
+ religion, sexuality, or similar personal characteristic.
+
- Bullying or systematic harassment.
+
- Unwelcome sexual advances.
+
- Incitement to any of these.
+
+
+Moderation
+
+
+The Go spaces are not free speech venues; they are for discussion about Go.
+These spaces have moderators.
+The goal of the moderators is to facilitate civil discussion about Go.
+
+
+
+When using the official Go spaces you should act in the spirit of the “Gopher
+values”.
+If you conduct yourself in a way that is explicitly forbidden by the CoC,
+you will be warned and asked to stop.
+If you do not stop, you will be removed from our community spaces temporarily.
+Repeated, wilful breaches of the CoC will result in a permanent ban.
+
+
+
+Moderators are held to a higher standard than other community members.
+If a moderator creates an inappropriate situation, they should expect less
+leeway than others, and should expect to be removed from their position if they
+cannot adhere to the CoC.
+
+
+
+Complaints about moderator actions must be handled using the reporting process
+below.
+
+
+Reporting issues
+
+
+The Code of Conduct Working Group is a group of people that represent the Go
+community. They are responsible for handling conduct-related issues.
+Their purpose is to de-escalate conflicts and try to resolve issues to the
+satisfaction of all parties. They are:
+
+
+
+ - Aditya Mukerjee <dev@chimeracoder.net>
+
- Andrew Gerrand <adg@golang.org>
+
- Dave Cheney <dave@cheney.net>
+
- Jason Buberel <jbuberel@google.com>
+
- Peggy Li <peggyli.224@gmail.com>
+
- Sarah Adams <sadams.codes@gmail.com>
+
- Steve Francia <steve.francia@gmail.com>
+
- Verónica López <gveronicalg@gmail.com>
+
+
+
+If you encounter a conduct-related issue, you should report it to the
+Working Group using the process described below.
+Do not post about the issue publicly or try to rally sentiment against a
+particular individual or group.
+
+
+
+- Mail conduct@golang.org or
+ submit an anonymous report.
+
+ - Your message will reach the Working Group.
+
- Reports are confidential within the Working Group.
+
- Should you choose to remain anonymous then the Working Group cannot
+ notify you of the outcome of your report.
+
- You may contact a member of the group directly if you do not feel
+ comfortable contacting the group as a whole. That member will then raise
+ the issue with the Working Group as a whole, preserving the privacy of the
+ reporter (if desired).
+
- If your report concerns a member of the Working Group they will be recused
+ from Working Group discussions of the report.
+
- The Working Group will strive to handle reports with discretion and
+ sensitivity, to protect the privacy of the involved parties,
+ and to avoid conflicts of interest.
+
+ - You should receive a response within 48 hours (likely sooner).
+ (Should you choose to contact a single Working Group member,
+ it may take longer to receive a response.)
+
- The Working Group will meet to review the incident and determine what happened.
+
+ - With the permission of person reporting the incident, the Working Group
+ may reach out to other community members for more context.
+
+ - The Working Group will reach a decision as to how to act. These may include:
+
+ - Nothing.
+
- A request for a private or public apology.
+
- A private or public warning.
+
- An imposed vacation (for instance, asking someone to abstain for a week
+ from a mailing list or IRC).
+
- A permanent or temporary ban from some or all Go spaces.
+
+ - The Working Group will reach out to the original reporter to let them know
+ the decision.
+
- Appeals to the decision may be made to the Working Group,
+ or to any of its members directly.
+
+
+
+Note that the goal of the Code of Conduct and the Working Group is to resolve
+conflicts in the most harmonious way possible.
+We hope that in most cases issues may be resolved through polite discussion and
+mutual agreement.
+Bannings and other forceful measures are to be employed only as a last resort.
+
+
+
+Changes to the Code of Conduct (including to the members of the Working Group)
+should be proposed using the
+change proposal process.
+
+
+Summary
+
+
+- Treat everyone with respect and kindness.
+
- Be thoughtful in how you communicate.
+
- Don’t be destructive or inflammatory.
+
- If you encounter an issue, please mail conduct@golang.org.
+
+
+Acknowledgements
+
+
+Parts of this document were derived from the Code of Conduct documents of the
+Django, FreeBSD, and Rust projects.
+
diff -Nru golang-1.5.1/doc/contrib.html golang-1.5.2/doc/contrib.html
--- golang-1.5.1/doc/contrib.html 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/doc/contrib.html 2015-12-03 13:52:58.000000000 +1300
@@ -94,10 +94,16 @@
Security-related issues should be reported to
-security@golang.org.
+security@golang.org.
See the security policy for more details.
+
+Community-related issues should be reported to
+conduct@golang.org.
+See the Code of Conduct for more details.
+
+
diff -Nru golang-1.5.1/doc/devel/release.html golang-1.5.2/doc/devel/release.html
--- golang-1.5.1/doc/devel/release.html 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/doc/devel/release.html 2015-12-03 13:52:58.000000000 +1300
@@ -22,6 +22,14 @@
Minor revisions
+go1.5.2 (released 2015/12/02) includes bug fixes to the compiler, linker, and
+the mime/multipart
, net
, and runtime
+packages.
+See the Go
+1.5.2 milestone on our issue tracker for details.
+
+
+
go1.5.1 (released 2015/09/08) includes bug fixes to the compiler, assembler, and
the fmt
, net/textproto
, net/http
, and
runtime
packages.
@@ -48,6 +56,11 @@
See the Go 1.4.2 milestone on our issue tracker for details.
+
+go1.4.3 (released 2015/09/22) includes security fixes to the net/http
package and bug fixes to the runtime
package.
+See the Go 1.4.3 milestone on our issue tracker for details.
+
+
go1.3 (released 2014/06/18)
diff -Nru golang-1.5.1/doc/help.html golang-1.5.2/doc/help.html
--- golang-1.5.1/doc/help.html 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/doc/help.html 2015-12-03 13:52:58.000000000 +1300
@@ -48,3 +48,9 @@
meet to talk about Go. Find a chapter near you.
+
+
+Guidelines for participating in Go community spaces
+and a reporting process for handling issues.
+
+
diff -Nru golang-1.5.1/doc/install-source.html golang-1.5.2/doc/install-source.html
--- golang-1.5.1/doc/install-source.html 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/doc/install-source.html 2015-12-03 13:52:58.000000000 +1300
@@ -167,7 +167,7 @@
$ git clone https://go.googlesource.com/go
$ cd go
-$ git checkout go1.5
+$ git checkout go1.5.2
(Optional) Switch to the master branch
@@ -346,7 +346,7 @@
golang-announce
mailing list.
Each announcement mentions the latest release tag, for instance,
-go1.5
.
+go1.5.2
.
diff -Nru golang-1.5.1/misc/cgo/test/cgo_test.go golang-1.5.2/misc/cgo/test/cgo_test.go
--- golang-1.5.1/misc/cgo/test/cgo_test.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/misc/cgo/test/cgo_test.go 2015-12-03 13:52:58.000000000 +1300
@@ -65,5 +65,6 @@
func Test9557(t *testing.T) { test9557(t) }
func Test10303(t *testing.T) { test10303(t, 10) }
func Test11925(t *testing.T) { test11925(t) }
+func Test12030(t *testing.T) { test12030(t) }
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
diff -Nru golang-1.5.1/misc/cgo/test/issue12030.go golang-1.5.2/misc/cgo/test/issue12030.go
--- golang-1.5.1/misc/cgo/test/issue12030.go 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/misc/cgo/test/issue12030.go 2015-12-03 13:52:58.000000000 +1300
@@ -0,0 +1,35 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 12030. sprintf is defined in both ntdll and msvcrt,
+// Normally we want the one in the msvcrt.
+
+package cgotest
+
+/*
+#include
+#include
+void issue12030conv(char *buf, double x) {
+ sprintf(buf, "d=%g", x);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "testing"
+ "unsafe"
+)
+
+func test12030(t *testing.T) {
+ buf := (*C.char)(C.malloc(256))
+ defer C.free(unsafe.Pointer(buf))
+ for _, f := range []float64{1.0, 2.0, 3.14} {
+ C.issue12030conv(buf, C.double(f))
+ got := C.GoString(buf)
+ if want := fmt.Sprintf("d=%g", f); got != want {
+ t.Fatalf("C.sprintf failed for %g: %q != %q", f, got, want)
+ }
+ }
+}
diff -Nru golang-1.5.1/misc/cgo/test/issue1635.go golang-1.5.2/misc/cgo/test/issue1635.go
--- golang-1.5.1/misc/cgo/test/issue1635.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/misc/cgo/test/issue1635.go 2015-12-03 13:52:58.000000000 +1300
@@ -14,6 +14,11 @@
printf("scatter = %p\n", p);
}
+// Adding this explicit extern declaration makes this a test for
+// https://gcc.gnu.org/PR68072 aka https://golang.org/issue/13344 .
+// It used to cause a cgo error when building with GCC 6.
+extern int hola;
+
// this example is in issue 3253
int hola = 0;
int testHola() { return hola; }
diff -Nru golang-1.5.1/misc/cgo/test/setgid_linux.go golang-1.5.2/misc/cgo/test/setgid_linux.go
--- golang-1.5.1/misc/cgo/test/setgid_linux.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/misc/cgo/test/setgid_linux.go 2015-12-03 13:52:58.000000000 +1300
@@ -14,11 +14,14 @@
import "C"
import (
+ "os"
+ "os/signal"
+ "syscall"
"testing"
"time"
)
-func testSetgid(t *testing.T) {
+func runTestSetgid() bool {
c := make(chan bool)
go func() {
C.setgid(0)
@@ -26,7 +29,21 @@
}()
select {
case <-c:
+ return true
case <-time.After(5 * time.Second):
+ return false
+ }
+
+}
+
+func testSetgid(t *testing.T) {
+ if !runTestSetgid() {
t.Error("setgid hung")
}
+
+ // Now try it again after using signal.Notify.
+ signal.Notify(make(chan os.Signal, 1), syscall.SIGINT)
+ if !runTestSetgid() {
+ t.Error("setgid hung after signal.Notify")
+ }
}
diff -Nru golang-1.5.1/misc/ios/go_darwin_arm_exec.go golang-1.5.2/misc/ios/go_darwin_arm_exec.go
--- golang-1.5.1/misc/ios/go_darwin_arm_exec.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/misc/ios/go_darwin_arm_exec.go 2015-12-03 13:52:58.000000000 +1300
@@ -160,9 +160,6 @@
}
defer os.Chdir(oldwd)
- type waitPanic struct {
- err error
- }
defer func() {
if r := recover(); r != nil {
if w, ok := r.(waitPanic); ok {
@@ -174,14 +171,96 @@
}()
defer exec.Command("killall", "ios-deploy").Run() // cleanup
-
exec.Command("killall", "ios-deploy").Run()
var opts options
opts, args = parseArgs(args)
// ios-deploy invokes lldb to give us a shell session with the app.
- cmd = exec.Command(
+ s, err := newSession(appdir, args, opts)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ b := s.out.Bytes()
+ if err == nil && !debug {
+ i := bytes.Index(b, []byte("(lldb) process continue"))
+ if i > 0 {
+ b = b[i:]
+ }
+ }
+ os.Stdout.Write(b)
+ }()
+
+ // Script LLDB. Oh dear.
+ s.do(`process handle SIGHUP --stop false --pass true --notify false`)
+ s.do(`process handle SIGPIPE --stop false --pass true --notify false`)
+ s.do(`process handle SIGUSR1 --stop false --pass true --notify false`)
+ s.do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
+ s.do(`process handle SIGBUS --stop false --pass true --notify false`) // does not work
+
+ if opts.lldb {
+ _, err := io.Copy(s.in, os.Stdin)
+ if err != io.EOF {
+ return err
+ }
+ return nil
+ }
+
+ s.do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
+
+ s.doCmd("run", "stop reason = breakpoint", 20*time.Second)
+
+ // Move the current working directory into the faux gopath.
+ if pkgpath != "src" {
+ s.do(`breakpoint delete 1`)
+ s.do(`expr char* $mem = (char*)malloc(512)`)
+ s.do(`expr $mem = (char*)getwd($mem, 512)`)
+ s.do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
+ s.do(`call (void)chdir($mem)`)
+ }
+
+ startTestsLen := s.out.Len()
+ fmt.Fprintln(s.in, `process continue`)
+
+ passed := func(out *buf) bool {
+ // Just to make things fun, lldb sometimes translates \n into \r\n.
+ return s.out.LastIndex([]byte("\nPASS\n")) > startTestsLen ||
+ s.out.LastIndex([]byte("\nPASS\r")) > startTestsLen ||
+ s.out.LastIndex([]byte("\n(lldb) PASS\n")) > startTestsLen ||
+ s.out.LastIndex([]byte("\n(lldb) PASS\r")) > startTestsLen
+ }
+ err = s.wait("test completion", passed, opts.timeout)
+ if passed(s.out) {
+ // The returned lldb error code is usually non-zero.
+ // We check for test success by scanning for the final
+ // PASS returned by the test harness, assuming the worst
+ // in its absence.
+ return nil
+ }
+ return err
+}
+
+type lldbSession struct {
+ cmd *exec.Cmd
+ in *os.File
+ out *buf
+ timedout chan struct{}
+ exited chan error
+}
+
+func newSession(appdir string, args []string, opts options) (*lldbSession, error) {
+ lldbr, in, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ s := &lldbSession{
+ in: in,
+ out: new(buf),
+ exited: make(chan error),
+ }
+
+ s.cmd = exec.Command(
// lldb tries to be clever with terminals.
// So we wrap it in script(1) and be clever
// right back at it.
@@ -198,267 +277,120 @@
"--bundle", appdir,
)
if debug {
- log.Println(strings.Join(cmd.Args, " "))
+ log.Println(strings.Join(s.cmd.Args, " "))
}
- lldbr, lldb, err := os.Pipe()
- if err != nil {
- return err
- }
- w := new(bufWriter)
+ var out io.Writer = s.out
if opts.lldb {
- mw := io.MultiWriter(w, os.Stderr)
- cmd.Stdout = mw
- cmd.Stderr = mw
- } else {
- cmd.Stdout = w
- cmd.Stderr = w // everything of interest is on stderr
+ out = io.MultiWriter(out, os.Stderr)
}
- cmd.Stdin = lldbr
+ s.cmd.Stdout = out
+ s.cmd.Stderr = out // everything of interest is on stderr
+ s.cmd.Stdin = lldbr
- if err := cmd.Start(); err != nil {
- return fmt.Errorf("ios-deploy failed to start: %v", err)
+ if err := s.cmd.Start(); err != nil {
+ return nil, fmt.Errorf("ios-deploy failed to start: %v", err)
}
// Manage the -test.timeout here, outside of the test. There is a lot
// of moving parts in an iOS test harness (notably lldb) that can
// swallow useful stdio or cause its own ruckus.
- var timedout chan struct{}
if opts.timeout > 1*time.Second {
- timedout = make(chan struct{})
+ s.timedout = make(chan struct{})
time.AfterFunc(opts.timeout-1*time.Second, func() {
- close(timedout)
+ close(s.timedout)
})
}
- exited := make(chan error)
go func() {
- exited <- cmd.Wait()
+ s.exited <- s.cmd.Wait()
}()
- waitFor := func(stage, str string, timeout time.Duration) error {
- select {
- case <-timedout:
- w.printBuf()
- if p := cmd.Process; p != nil {
- p.Kill()
- }
- return fmt.Errorf("timeout (stage %s)", stage)
- case err := <-exited:
- w.printBuf()
- return fmt.Errorf("failed (stage %s): %v", stage, err)
- case i := <-w.find(str, timeout):
- if i < 0 {
- log.Printf("timed out on stage %q, retrying", stage)
- return errRetry
- }
- w.clearTo(i + len(str))
- return nil
- }
+ cond := func(out *buf) bool {
+ i0 := s.out.LastIndex([]byte("(lldb)"))
+ i1 := s.out.LastIndex([]byte("fruitstrap"))
+ i2 := s.out.LastIndex([]byte(" connect"))
+ return i0 > 0 && i1 > 0 && i2 > 0
}
- do := func(cmd string) {
- fmt.Fprintln(lldb, cmd)
- if err := waitFor(fmt.Sprintf("prompt after %q", cmd), "(lldb)", 0); err != nil {
- panic(waitPanic{err})
- }
- }
-
- // Wait for installation and connection.
- if err := waitFor("ios-deploy before run", "(lldb)", 0); err != nil {
- // Retry if we see a rare and longstanding ios-deploy bug.
- // https://github.com/phonegap/ios-deploy/issues/11
- // Assertion failed: (AMDeviceStartService(device, CFSTR("com.apple.debugserver"), &gdbfd, NULL) == 0)
- log.Printf("%v, retrying", err)
- return errRetry
- }
-
- // Script LLDB. Oh dear.
- do(`process handle SIGHUP --stop false --pass true --notify false`)
- do(`process handle SIGPIPE --stop false --pass true --notify false`)
- do(`process handle SIGUSR1 --stop false --pass true --notify false`)
- do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
- do(`process handle SIGBUS --stop false --pass true --notify false`) // does not work
-
- if opts.lldb {
- _, err := io.Copy(lldb, os.Stdin)
- if err != io.EOF {
- return err
- }
- return nil
+ if err := s.wait("lldb start", cond, 5*time.Second); err != nil {
+ fmt.Printf("lldb start error: %v\n", err)
+ return nil, errRetry
}
+ return s, nil
+}
- do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
+func (s *lldbSession) do(cmd string) { s.doCmd(cmd, "(lldb)", 0) }
- fmt.Fprintln(lldb, `run`)
- if err := waitFor("br getwd", "stop reason = breakpoint", 20*time.Second); err != nil {
- // At this point we see several flaky errors from the iOS
- // build infrastructure. The most common is never reaching
- // the breakpoint, which we catch with a timeout. Very
- // occasionally lldb can produce errors like:
- //
- // Breakpoint 1: no locations (pending).
- // WARNING: Unable to resolve breakpoint to any actual locations.
- //
- // As no actual test code has been executed by this point,
- // we treat all errors as recoverable.
- if err != errRetry {
- log.Printf("%v, retrying", err)
- err = errRetry
- }
- return err
+func (s *lldbSession) doCmd(cmd string, waitFor string, extraTimeout time.Duration) {
+ startLen := s.out.Len()
+ fmt.Fprintln(s.in, cmd)
+ cond := func(out *buf) bool {
+ i := s.out.LastIndex([]byte(waitFor))
+ return i > startLen
}
- if err := waitFor("br getwd prompt", "(lldb)", 0); err != nil {
- return err
+ if err := s.wait(fmt.Sprintf("running cmd %q", cmd), cond, extraTimeout); err != nil {
+ panic(waitPanic{err})
}
+}
- // Move the current working directory into the faux gopath.
- if pkgpath != "src" {
- do(`breakpoint delete 1`)
- do(`expr char* $mem = (char*)malloc(512)`)
- do(`expr $mem = (char*)getwd($mem, 512)`)
- do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
- do(`call (void)chdir($mem)`)
- }
-
- // Run the tests.
- w.trimSuffix("(lldb) ")
- fmt.Fprintln(lldb, `process continue`)
-
- // Wait for the test to complete.
- select {
- case <-timedout:
- w.printBuf()
- if p := cmd.Process; p != nil {
- p.Kill()
- }
- return errors.New("timeout running tests")
- case <-w.find("\nPASS", 0):
- passed := w.isPass()
- w.printBuf()
- if passed {
- return nil
- }
- return errors.New("test failure")
- case err := <-exited:
- // The returned lldb error code is usually non-zero.
- // We check for test success by scanning for the final
- // PASS returned by the test harness, assuming the worst
- // in its absence.
- if w.isPass() {
- err = nil
- } else if err == nil {
- err = errors.New("test failure")
+func (s *lldbSession) wait(reason string, cond func(out *buf) bool, extraTimeout time.Duration) error {
+ doTimeout := 1*time.Second + extraTimeout
+ doTimedout := time.After(doTimeout)
+ for {
+ select {
+ case <-s.timedout:
+ if p := s.cmd.Process; p != nil {
+ p.Kill()
+ }
+ return fmt.Errorf("test timeout (%s)", reason)
+ case <-doTimedout:
+ return fmt.Errorf("command timeout (%s for %v)", reason, doTimeout)
+ case err := <-s.exited:
+ return fmt.Errorf("exited (%s: %v)", reason, err)
+ default:
+ if cond(s.out) {
+ return nil
+ }
+ time.Sleep(20 * time.Millisecond)
}
- w.printBuf()
- return err
}
}
-type bufWriter struct {
- mu sync.Mutex
- buf []byte
- suffix []byte // remove from each Write
-
- findTxt []byte // search buffer on each Write
- findCh chan int // report find position
- findAfter *time.Timer
+type buf struct {
+ mu sync.Mutex
+ buf []byte
}
-func (w *bufWriter) Write(in []byte) (n int, err error) {
+func (w *buf) Write(in []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
-
- n = len(in)
- in = bytes.TrimSuffix(in, w.suffix)
-
- if debug {
- inTxt := strings.Replace(string(in), "\n", "\\n", -1)
- findTxt := strings.Replace(string(w.findTxt), "\n", "\\n", -1)
- fmt.Printf("debug --> %s <-- debug (findTxt='%s')\n", inTxt, findTxt)
- }
-
w.buf = append(w.buf, in...)
-
- if len(w.findTxt) > 0 {
- if i := bytes.Index(w.buf, w.findTxt); i >= 0 {
- w.findCh <- i
- close(w.findCh)
- w.findTxt = nil
- w.findCh = nil
- if w.findAfter != nil {
- w.findAfter.Stop()
- w.findAfter = nil
- }
- }
- }
- return n, nil
+ return len(in), nil
}
-func (w *bufWriter) trimSuffix(p string) {
+func (w *buf) LastIndex(sep []byte) int {
w.mu.Lock()
defer w.mu.Unlock()
- w.suffix = []byte(p)
+ return bytes.LastIndex(w.buf, sep)
}
-func (w *bufWriter) printBuf() {
+func (w *buf) Bytes() []byte {
w.mu.Lock()
defer w.mu.Unlock()
- fmt.Fprintf(os.Stderr, "%s", w.buf)
- w.buf = nil
-}
-func (w *bufWriter) clearTo(i int) {
- w.mu.Lock()
- defer w.mu.Unlock()
- w.buf = w.buf[i:]
+ b := make([]byte, len(w.buf))
+ copy(b, w.buf)
+ return b
}
-// find returns a channel that will have exactly one byte index sent
-// to it when the text str appears in the buffer. If the text does not
-// appear before timeout, -1 is sent.
-//
-// A timeout of zero means no timeout.
-func (w *bufWriter) find(str string, timeout time.Duration) <-chan int {
+func (w *buf) Len() int {
w.mu.Lock()
defer w.mu.Unlock()
- if len(w.findTxt) > 0 {
- panic(fmt.Sprintf("find(%s): already trying to find %s", str, w.findTxt))
- }
- txt := []byte(str)
- ch := make(chan int, 1)
- if i := bytes.Index(w.buf, txt); i >= 0 {
- ch <- i
- close(ch)
- } else {
- w.findTxt = txt
- w.findCh = ch
- if timeout > 0 {
- w.findAfter = time.AfterFunc(timeout, func() {
- w.mu.Lock()
- defer w.mu.Unlock()
- if w.findCh == ch {
- w.findTxt = nil
- w.findCh = nil
- w.findAfter = nil
- ch <- -1
- close(ch)
- }
- })
- }
- }
- return ch
+ return len(w.buf)
}
-func (w *bufWriter) isPass() bool {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- // The final stdio of lldb is non-deterministic, so we
- // scan the whole buffer.
- //
- // Just to make things fun, lldb sometimes translates \n
- // into \r\n.
- return bytes.Contains(w.buf, []byte("\nPASS\n")) || bytes.Contains(w.buf, []byte("\nPASS\r"))
+type waitPanic struct {
+ err error
}
type options struct {
diff -Nru golang-1.5.1/src/cmd/cgo/gcc.go golang-1.5.2/src/cmd/cgo/gcc.go
--- golang-1.5.1/src/cmd/cgo/gcc.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/cgo/gcc.go 2015-12-03 13:52:58.000000000 +1300
@@ -490,6 +490,11 @@
name, _ := e.Val(dwarf.AttrName).(string)
typOff, _ := e.Val(dwarf.AttrType).(dwarf.Offset)
if name == "" || typOff == 0 {
+ if e.Val(dwarf.AttrSpecification) != nil {
+ // Since we are reading all the DWARF,
+ // assume we will see the variable elsewhere.
+ break
+ }
fatalf("malformed DWARF TagVariable entry")
}
if !strings.HasPrefix(name, "__cgo__") {
diff -Nru golang-1.5.1/src/cmd/compile/internal/gc/const.go golang-1.5.2/src/cmd/compile/internal/gc/const.go
--- golang-1.5.1/src/cmd/compile/internal/gc/const.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/compile/internal/gc/const.go 2015-12-03 13:52:59.000000000 +1300
@@ -1279,20 +1279,28 @@
return
num:
+ // Note: n.Val().Ctype() can be CTxxx (not a constant) here
+ // in the case of an untyped non-constant value, like 1<= uint64(len(data)) || p.Off+p.Filesz >= uint64(len(data)) || p.Filesz < 16 {
+ if p.Type != elf.PT_NOTE || p.Filesz < 16 {
continue
}
- note := data[p.Off : p.Off+p.Filesz]
+ var note []byte
+ if p.Off+p.Filesz < uint64(len(data)) {
+ note = data[p.Off : p.Off+p.Filesz]
+ } else {
+ // For some linkers, such as the Solaris linker,
+ // the buildid may not be found in data (which
+ // likely contains the first 16kB of the file)
+ // or even the first few megabytes of the file
+ // due to differences in note segment placement;
+ // in that case, extract the note data manually.
+ _, err = f.Seek(int64(p.Off), 0)
+ if err != nil {
+ return "", err
+ }
+
+ note = make([]byte, p.Filesz)
+ _, err = io.ReadFull(f, note)
+ if err != nil {
+ return "", err
+ }
+ }
nameSize := ef.ByteOrder.Uint32(note)
valSize := ef.ByteOrder.Uint32(note[4:])
tag := ef.ByteOrder.Uint32(note[8:])
@@ -114,3 +135,42 @@
// No note. Treat as successful but build ID empty.
return "", nil
}
+
+// The Go build ID is stored at the beginning of the Mach-O __text segment.
+// The caller has already opened filename, to get f, and read a few kB out, in data.
+// Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount
+// of other junk placed in the file ahead of the main text.
+func readMachoGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) {
+ // If the data we want has already been read, don't worry about Mach-O parsing.
+ // This is both an optimization and a hedge against the Mach-O parsing failing
+ // in the future due to, for example, the name of the __text section changing.
+ if b, err := readRawGoBuildID(filename, data); b != "" && err == nil {
+ return b, err
+ }
+
+ mf, err := macho.NewFile(f)
+ if err != nil {
+ return "", &os.PathError{Path: filename, Op: "parse", Err: err}
+ }
+
+ sect := mf.Section("__text")
+ if sect == nil {
+ // Every binary has a __text section. Something is wrong.
+ return "", &os.PathError{Path: filename, Op: "parse", Err: fmt.Errorf("cannot find __text section")}
+ }
+
+ // It should be in the first few bytes, but read a lot just in case,
+ // especially given our past problems on OS X with the build ID moving.
+ // There shouldn't be much difference between reading 4kB and 32kB:
+ // the hard part is getting to the data, not transferring it.
+ n := sect.Size
+ if n > uint64(BuildIDReadSize) {
+ n = uint64(BuildIDReadSize)
+ }
+ buf := make([]byte, n)
+ if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil {
+ return "", err
+ }
+
+ return readRawGoBuildID(filename, buf)
+}
diff -Nru golang-1.5.1/src/cmd/go/note_test.go golang-1.5.2/src/cmd/go/note_test.go
--- golang-1.5.1/src/cmd/go/note_test.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/go/note_test.go 2015-12-03 13:52:59.000000000 +1300
@@ -11,6 +11,26 @@
)
func TestNoteReading(t *testing.T) {
+ testNoteReading(t)
+}
+
+func TestNoteReading2K(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skipf("2kB is not enough on %s", runtime.GOOS)
+ }
+ // Set BuildIDReadSize to 2kB to exercise Mach-O parsing more strictly.
+ defer func(old int) {
+ main.BuildIDReadSize = old
+ }(main.BuildIDReadSize)
+ main.BuildIDReadSize = 2 * 1024
+
+ testNoteReading(t)
+}
+
+func testNoteReading(t *testing.T) {
+ if runtime.GOOS == "dragonfly" {
+ t.Skipf("TestNoteReading is broken on dragonfly - golang.org/issue/13364", runtime.GOOS)
+ }
tg := testgo(t)
defer tg.cleanup()
tg.tempFile("hello.go", `package main; func main() { print("hello, world\n") }`)
@@ -33,9 +53,6 @@
// no external linking
t.Logf("no external linking - skipping linkmode=external test")
- case "solaris":
- t.Logf("skipping - golang.org/issue/12178")
-
default:
tg.run("build", "-ldflags", "-buildid="+buildID+" -linkmode=external", "-o", tg.path("hello.exe"), tg.path("hello.go"))
id, err := main.ReadBuildIDFromBinary(tg.path("hello.exe"))
diff -Nru golang-1.5.1/src/cmd/go/pkg.go golang-1.5.2/src/cmd/go/pkg.go
--- golang-1.5.1/src/cmd/go/pkg.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/go/pkg.go 2015-12-03 13:52:59.000000000 +1300
@@ -1781,8 +1781,17 @@
goBuildEnd = []byte("\"\n \xff")
elfPrefix = []byte("\x7fELF")
+
+ machoPrefixes = [][]byte{
+ {0xfe, 0xed, 0xfa, 0xce},
+ {0xfe, 0xed, 0xfa, 0xcf},
+ {0xce, 0xfa, 0xed, 0xfe},
+ {0xcf, 0xfa, 0xed, 0xfe},
+ }
)
+var BuildIDReadSize = 32 * 1024 // changed for testing
+
// ReadBuildIDFromBinary reads the build ID from a binary.
//
// ELF binaries store the build ID in a proper PT_NOTE section.
@@ -1797,10 +1806,11 @@
return "", &os.PathError{Op: "parse", Path: filename, Err: errBuildIDUnknown}
}
- // Read the first 16 kB of the binary file.
+ // Read the first 32 kB of the binary file.
// That should be enough to find the build ID.
// In ELF files, the build ID is in the leading headers,
- // which are typically less than 4 kB, not to mention 16 kB.
+ // which are typically less than 4 kB, not to mention 32 kB.
+ // In Mach-O files, there's no limit, so we have to parse the file.
// On other systems, we're trying to read enough that
// we get the beginning of the text segment in the read.
// The offset where the text segment begins in a hello
@@ -1808,7 +1818,6 @@
//
// Plan 9: 0x20
// Windows: 0x600
- // Mach-O: 0x2000
//
f, err := os.Open(filename)
if err != nil {
@@ -1816,7 +1825,7 @@
}
defer f.Close()
- data := make([]byte, 16*1024)
+ data := make([]byte, BuildIDReadSize)
_, err = io.ReadFull(f, data)
if err == io.ErrUnexpectedEOF {
err = nil
@@ -1828,7 +1837,17 @@
if bytes.HasPrefix(data, elfPrefix) {
return readELFGoBuildID(filename, f, data)
}
+ for _, m := range machoPrefixes {
+ if bytes.HasPrefix(data, m) {
+ return readMachoGoBuildID(filename, f, data)
+ }
+ }
+
+ return readRawGoBuildID(filename, data)
+}
+// readRawGoBuildID finds the raw build ID stored in text segment data.
+func readRawGoBuildID(filename string, data []byte) (id string, err error) {
i := bytes.Index(data, goBuildPrefix)
if i < 0 {
// Missing. Treat as successful but build ID empty.
diff -Nru golang-1.5.1/src/cmd/internal/obj/ppc64/asm9.go golang-1.5.2/src/cmd/internal/obj/ppc64/asm9.go
--- golang-1.5.1/src/cmd/internal/obj/ppc64/asm9.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/internal/obj/ppc64/asm9.go 2015-12-03 13:52:59.000000000 +1300
@@ -2173,7 +2173,7 @@
r = int(p.To.Reg)
}
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.To.Reg), uint32(v)&31)
- if p.As == ASRAD && (v&0x20 != 0) {
+ if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
o1 |= 1 << 1 /* mb[5] */
}
diff -Nru golang-1.5.1/src/cmd/link/internal/amd64/asm.go golang-1.5.2/src/cmd/link/internal/amd64/asm.go
--- golang-1.5.1/src/cmd/link/internal/amd64/asm.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/link/internal/amd64/asm.go 2015-12-03 13:52:59.000000000 +1300
@@ -141,7 +141,7 @@
return
- case 256 + ld.R_X86_64_GOTPCREL:
+ case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
if targ.Type != obj.SDYNIMPORT {
// have symbol
if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
diff -Nru golang-1.5.1/src/cmd/link/internal/ld/elf.go golang-1.5.2/src/cmd/link/internal/ld/elf.go
--- golang-1.5.1/src/cmd/link/internal/ld/elf.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/link/internal/ld/elf.go 2015-12-03 13:52:59.000000000 +1300
@@ -348,7 +348,23 @@
R_X86_64_DTPOFF32 = 21
R_X86_64_GOTTPOFF = 22
R_X86_64_TPOFF32 = 23
- R_X86_64_COUNT = 24
+ R_X86_64_PC64 = 24
+ R_X86_64_GOTOFF64 = 25
+ R_X86_64_GOTPC32 = 26
+ R_X86_64_GOT64 = 27
+ R_X86_64_GOTPCREL64 = 28
+ R_X86_64_GOTPC64 = 29
+ R_X86_64_GOTPLT64 = 30
+ R_X86_64_PLTOFF64 = 31
+ R_X86_64_SIZE32 = 32
+ R_X86_64_SIZE64 = 33
+ R_X86_64_GOTPC32_TLSDEC = 34
+ R_X86_64_TLSDESC_CALL = 35
+ R_X86_64_TLSDESC = 36
+ R_X86_64_IRELATIVE = 37
+ R_X86_64_PC32_BND = 40
+ R_X86_64_GOTPCRELX = 41
+ R_X86_64_REX_GOTPCRELX = 42
R_AARCH64_ABS64 = 257
R_AARCH64_ABS32 = 258
R_AARCH64_CALL26 = 283
@@ -382,7 +398,6 @@
R_ALPHA_GLOB_DAT = 25
R_ALPHA_JMP_SLOT = 26
R_ALPHA_RELATIVE = 27
- R_ALPHA_COUNT = 28
R_ARM_NONE = 0
R_ARM_PC24 = 1
R_ARM_ABS32 = 2
@@ -422,7 +437,6 @@
R_ARM_RABS32 = 253
R_ARM_RPC24 = 254
R_ARM_RBASE = 255
- R_ARM_COUNT = 38
R_386_NONE = 0
R_386_32 = 1
R_386_PC32 = 2
@@ -454,7 +468,11 @@
R_386_TLS_DTPMOD32 = 35
R_386_TLS_DTPOFF32 = 36
R_386_TLS_TPOFF32 = 37
- R_386_COUNT = 38
+ R_386_TLS_GOTDESC = 39
+ R_386_TLS_DESC_CALL = 40
+ R_386_TLS_DESC = 41
+ R_386_IRELATIVE = 42
+ R_386_GOT32X = 43
R_PPC_NONE = 0
R_PPC_ADDR32 = 1
R_PPC_ADDR24 = 2
@@ -492,7 +510,6 @@
R_PPC_SECTOFF_LO = 34
R_PPC_SECTOFF_HI = 35
R_PPC_SECTOFF_HA = 36
- R_PPC_COUNT = 37
R_PPC_TLS = 67
R_PPC_DTPMOD32 = 68
R_PPC_TPREL16 = 69
@@ -533,7 +550,6 @@
R_PPC_EMB_RELST_HA = 114
R_PPC_EMB_BIT_FLD = 115
R_PPC_EMB_RELSDA = 116
- R_PPC_EMB_COUNT = R_PPC_EMB_RELSDA - R_PPC_EMB_NADDR32 + 1
R_PPC64_REL24 = R_PPC_REL24
R_PPC64_JMP_SLOT = R_PPC_JMP_SLOT
R_PPC64_ADDR64 = 38
@@ -1723,10 +1739,6 @@
Addstring(shstrtab, ".note.go.pkg-list")
Addstring(shstrtab, ".note.go.deps")
}
-
- if buildid != "" {
- Addstring(shstrtab, ".note.go.buildid")
- }
}
hasinitarr := Linkshared
diff -Nru golang-1.5.1/src/cmd/link/internal/ld/ldelf.go golang-1.5.2/src/cmd/link/internal/ld/ldelf.go
--- golang-1.5.1/src/cmd/link/internal/ld/ldelf.go 2015-09-09 13:24:01.000000000 +1200
+++ golang-1.5.2/src/cmd/link/internal/ld/ldelf.go 2015-12-03 13:52:59.000000000 +1300
@@ -1001,12 +1001,15 @@
'6' | R_X86_64_PC32<<24,
'6' | R_X86_64_PLT32<<24,
'6' | R_X86_64_GOTPCREL<<24,
+ '6' | R_X86_64_GOTPCRELX<<24,
+ '6' | R_X86_64_REX_GOTPCRELX<<24,
'8' | R_386_32<<24,
'8' | R_386_PC32<<24,
'8' | R_386_GOT32<<24,
'8' | R_386_PLT32<<24,
'8' | R_386_GOTOFF<<24,
'8' | R_386_GOTPC<<24,
+ '8' | R_386_GOT32X<<24,
'9' | R_PPC64_REL24<<24:
*siz = 4
diff -Nru golang-1.5.1/src/cmd/link/internal/ld/lib.go golang-1.5.2/src/cmd/link/internal/ld/lib.go
--- golang-1.5.1/src/cmd/link/internal/ld/lib.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/cmd/link/internal/ld/lib.go 2015-12-03 13:52:59.000000000 +1300
@@ -886,8 +886,8 @@
mayberemoveoutfile()
argv := []string{"ar", "-q", "-c", "-s", outfile}
- argv = append(argv, hostobjCopy()...)
argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ argv = append(argv, hostobjCopy()...)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "archive: %s\n", strings.Join(argv, " "))
@@ -1012,8 +1012,8 @@
argv = append(argv, "-Qunused-arguments")
}
- argv = append(argv, hostobjCopy()...)
argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ argv = append(argv, hostobjCopy()...)
if Linkshared {
seenDirs := make(map[string]bool)
diff -Nru golang-1.5.1/src/cmd/link/internal/x86/asm.go golang-1.5.2/src/cmd/link/internal/x86/asm.go
--- golang-1.5.1/src/cmd/link/internal/x86/asm.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/cmd/link/internal/x86/asm.go 2015-12-03 13:52:59.000000000 +1300
@@ -78,7 +78,7 @@
return
- case 256 + ld.R_386_GOT32:
+ case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X:
if targ.Type != obj.SDYNIMPORT {
// have symbol
if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
diff -Nru golang-1.5.1/src/mime/multipart/multipart.go golang-1.5.2/src/mime/multipart/multipart.go
--- golang-1.5.1/src/mime/multipart/multipart.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/mime/multipart/multipart.go 2015-12-03 13:53:01.000000000 +1300
@@ -25,6 +25,11 @@
var emptyParams = make(map[string]string)
+// This constant needs to be at least 76 for this package to work correctly.
+// This is because \r\n--separator_of_len_70- would fill the buffer and it
+// wouldn't be safe to consume a single byte from it.
+const peekBufferSize = 4096
+
// A Part represents a single part in a multipart body.
type Part struct {
// The headers of the body, if any, with the keys canonicalized
@@ -91,7 +96,7 @@
func NewReader(r io.Reader, boundary string) *Reader {
b := []byte("\r\n--" + boundary + "--")
return &Reader{
- bufReader: bufio.NewReader(r),
+ bufReader: bufio.NewReaderSize(r, peekBufferSize),
nl: b[:2],
nlDashBoundary: b[:len(b)-2],
dashBoundaryDash: b[2:],
@@ -148,7 +153,7 @@
// the read request. No need to parse more at the moment.
return p.buffer.Read(d)
}
- peek, err := p.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
+ peek, err := p.mr.bufReader.Peek(peekBufferSize) // TODO(bradfitz): add buffer size accessor
// Look for an immediate empty part without a leading \r\n
// before the boundary separator. Some MIME code makes empty
@@ -229,6 +234,7 @@
expectNewPart := false
for {
line, err := r.bufReader.ReadSlice('\n')
+
if err == io.EOF && r.isFinalBoundary(line) {
// If the buffer ends in "--boundary--" without the
// trailing "\r\n", ReadSlice will return an error
@@ -343,13 +349,17 @@
// peekBufferSeparatorIndex returns the index of mr.nlDashBoundary in
// peek and whether it is a real boundary (and not a prefix of an
// unrelated separator). To be the end, the peek buffer must contain a
-// newline after the boundary.
+// newline after the boundary or contain the ending boundary (--separator--).
func (mr *Reader) peekBufferSeparatorIndex(peek []byte) (idx int, isEnd bool) {
idx = bytes.Index(peek, mr.nlDashBoundary)
if idx == -1 {
return
}
+
peek = peek[idx+len(mr.nlDashBoundary):]
+ if len(peek) == 0 || len(peek) == 1 && peek[0] == '-' {
+ return idx, false
+ }
if len(peek) > 1 && peek[0] == '-' && peek[1] == '-' {
return idx, true
}
diff -Nru golang-1.5.1/src/mime/multipart/multipart_test.go golang-1.5.2/src/mime/multipart/multipart_test.go
--- golang-1.5.1/src/mime/multipart/multipart_test.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/mime/multipart/multipart_test.go 2015-12-03 13:53:01.000000000 +1300
@@ -616,6 +616,54 @@
},
},
},
+ // Issue 12662: Check that we don't consume the leading \r if the peekBuffer
+ // ends in '\r\n--separator-'
+ {
+ name: "peek buffer boundary condition",
+ sep: "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+ in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1),
+ want: []headerBody{
+ {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+ strings.Repeat("A", peekBufferSize-65),
+ },
+ },
+ },
+ // Issue 12662: Same test as above with \r\n at the end
+ {
+ name: "peek buffer boundary condition",
+ sep: "00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+ in: strings.Replace(`--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize-65)+"\n--00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--\n", "\n", "\r\n", -1),
+ want: []headerBody{
+ {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+ strings.Repeat("A", peekBufferSize-65),
+ },
+ },
+ },
+ // Issue 12662v2: We want to make sure that for short buffers that end with
+ // '\r\n--separator-' we always consume at least one (valid) symbol from the
+ // peekBuffer
+ {
+ name: "peek buffer boundary condition",
+ sep: "aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db",
+ in: strings.Replace(`--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db
+Content-Disposition: form-data; name="block"; filename="block"
+Content-Type: application/octet-stream
+
+`+strings.Repeat("A", peekBufferSize)+"\n--aaaaaaaaaa00ffded004d4dd0fdf945fbdef9d9050cfd6a13a821846299b27fc71b9db--", "\n", "\r\n", -1),
+ want: []headerBody{
+ {textproto.MIMEHeader{"Content-Type": {`application/octet-stream`}, "Content-Disposition": {`form-data; name="block"; filename="block"`}},
+ strings.Repeat("A", peekBufferSize),
+ },
+ },
+ },
roundTripParseTest(),
}
diff -Nru golang-1.5.1/src/net/interface_windows.go golang-1.5.2/src/net/interface_windows.go
--- golang-1.5.1/src/net/interface_windows.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/net/interface_windows.go 2015-12-03 13:53:01.000000000 +1300
@@ -48,7 +48,7 @@
return nil, os.NewSyscallError("wsaioctl", err)
}
iilen := ret / uint32(unsafe.Sizeof(iia[0]))
- return iia[:iilen-1], nil
+ return iia[:iilen], nil
}
func bytesEqualIP(a []byte, b []int8) bool {
diff -Nru golang-1.5.1/src/net/net_windows_test.go golang-1.5.2/src/net/net_windows_test.go
--- golang-1.5.1/src/net/net_windows_test.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/net/net_windows_test.go 2015-12-03 13:53:01.000000000 +1300
@@ -6,10 +6,13 @@
import (
"bufio"
+ "bytes"
"fmt"
"io"
"os"
"os/exec"
+ "sort"
+ "strings"
"syscall"
"testing"
"time"
@@ -163,3 +166,53 @@
t.Fatalf(`"%s" received from recv, but "abc" expected`, s)
}
}
+
+func isWindowsXP(t *testing.T) bool {
+ v, err := syscall.GetVersion()
+ if err != nil {
+ t.Fatalf("GetVersion failed: %v", err)
+ }
+ major := byte(v)
+ return major < 6
+}
+
+func listInterfacesWithNetsh() ([]string, error) {
+ out, err := exec.Command("netsh", "interface", "ip", "show", "config").CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("netsh failed: %v: %q", err, string(out))
+ }
+ lines := bytes.Split(out, []byte{'\r', '\n'})
+ names := make([]string, 0)
+ for _, line := range lines {
+ f := bytes.Split(line, []byte{'"'})
+ if len(f) == 3 {
+ names = append(names, string(f[1]))
+ }
+ }
+ return names, nil
+}
+
+func TestInterfaceList(t *testing.T) {
+ if isWindowsXP(t) {
+ t.Skip("Windows XP netsh command does not provide required functionality")
+ }
+ ift, err := Interfaces()
+ if err != nil {
+ t.Fatal(err)
+ }
+ have := make([]string, 0)
+ for _, ifi := range ift {
+ have = append(have, ifi.Name)
+ }
+ sort.Strings(have)
+
+ want, err := listInterfacesWithNetsh()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(want)
+
+ if strings.Join(want, "/") != strings.Join(have, "/") {
+ t.Fatalf("unexpected interface list %q, want %q", have, want)
+ }
+}
diff -Nru golang-1.5.1/src/runtime/asm_amd64p32.s golang-1.5.2/src/runtime/asm_amd64p32.s
--- golang-1.5.1/src/runtime/asm_amd64p32.s 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/runtime/asm_amd64p32.s 2015-12-03 13:53:01.000000000 +1300
@@ -627,15 +627,18 @@
MOVL ptr+0(FP), DI
MOVL n+4(FP), CX
MOVQ CX, BX
- ANDQ $7, BX
- SHRQ $3, CX
+ ANDQ $3, BX
+ SHRQ $2, CX
MOVQ $0, AX
CLD
REP
- STOSQ
+ STOSL
MOVQ BX, CX
REP
STOSB
+ // Note: we zero only 4 bytes at a time so that the tail is at most
+ // 3 bytes. That guarantees that we aren't zeroing pointers with STOSB.
+ // See issue 13160.
RET
TEXT runtime·getcallerpc(SB),NOSPLIT,$8-12
diff -Nru golang-1.5.1/src/runtime/asm_amd64.s golang-1.5.2/src/runtime/asm_amd64.s
--- golang-1.5.1/src/runtime/asm_amd64.s 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/runtime/asm_amd64.s 2015-12-03 13:53:01.000000000 +1300
@@ -661,6 +661,8 @@
// come in on the m->g0 stack already.
get_tls(CX)
MOVQ g(CX), R8
+ CMPQ R8, $0
+ JEQ nosave
MOVQ g_m(R8), R8
MOVQ m_g0(R8), SI
MOVQ g(CX), DI
@@ -670,11 +672,11 @@
CMPQ SI, DI
JEQ nosave
+ // Switch to system stack.
MOVQ m_g0(R8), SI
CALL gosave<>(SB)
MOVQ SI, g(CX)
MOVQ (g_sched+gobuf_sp)(SI), SP
-nosave:
// Now on a scheduling stack (a pthread-created stack).
// Make sure we have enough room for 4 stack-backed fast-call
@@ -700,6 +702,29 @@
MOVL AX, ret+16(FP)
RET
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown
+ // (see needm/dropm on Solaris, for example).
+ // This code is like the above sequence but without saving/restoring g
+ // and without worrying about the stack moving out from under us
+ // (because we're on a system stack, not a goroutine stack).
+ // The above code could be used directly if already on a system stack,
+ // but then the only path through this code would be a rare case on Solaris.
+ // Using this code for all "already on system stack" calls exercises it more,
+ // which should help keep it correct.
+ SUBQ $64, SP
+ ANDQ $~15, SP
+ MOVQ $0, 48(SP) // where above code stores g, in case someone looks during debugging
+ MOVQ DX, 40(SP) // save original stack pointer
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+ MOVQ 40(SP), SI // restore original stack pointer
+ MOVQ SI, SP
+ MOVL AX, ret+16(FP)
+ RET
+
// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
// Turn the fn into a Go func (by taking its address) and call
// cgocallback_gofunc.
diff -Nru golang-1.5.1/src/runtime/cgo/cgo.go golang-1.5.2/src/runtime/cgo/cgo.go
--- golang-1.5.1/src/runtime/cgo/cgo.go 2015-09-09 13:24:02.000000000 +1200
+++ golang-1.5.2/src/runtime/cgo/cgo.go 2015-12-03 13:53:01.000000000 +1300
@@ -20,7 +20,9 @@
#cgo !android,linux LDFLAGS: -lpthread
#cgo netbsd LDFLAGS: -lpthread
#cgo openbsd LDFLAGS: -lpthread
-#cgo windows LDFLAGS: -lm -mthreads
+// we must explicitly link msvcrt, because runtime needs ntdll, and ntdll
+// exports some incompatible libc functions. See golang.org/issue/12030.
+#cgo windows LDFLAGS: -lmsvcrt -lm -mthreads
#cgo CFLAGS: -Wall -Werror
diff -Nru golang-1.5.1/src/runtime/malloc.go golang-1.5.2/src/runtime/malloc.go
--- golang-1.5.1/src/runtime/malloc.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/malloc.go 2015-12-03 13:53:01.000000000 +1300
@@ -401,7 +401,7 @@
if p == h.arena_end {
h.arena_end = new_end
h.arena_reserved = reserved
- } else if p+p_size <= h.arena_start+_MaxArena32 {
+ } else if h.arena_start <= p && p+p_size <= h.arena_start+_MaxArena32 {
// Keep everything page-aligned.
// Our pages are bigger than hardware pages.
h.arena_end = p + p_size
@@ -411,7 +411,10 @@
h.arena_used = used
h.arena_reserved = reserved
} else {
- var stat uint64
+ // We haven't added this allocation to
+ // the stats, so subtract it from a
+ // fake stat (but avoid underflow).
+ stat := uint64(p_size)
sysFree((unsafe.Pointer)(p), p_size, &stat)
}
}
diff -Nru golang-1.5.1/src/runtime/memclr_386.s golang-1.5.2/src/runtime/memclr_386.s
--- golang-1.5.1/src/runtime/memclr_386.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memclr_386.s 2015-12-03 13:53:01.000000000 +1300
@@ -21,7 +21,8 @@
CMPL BX, $2
JBE _1or2
CMPL BX, $4
- JBE _3or4
+ JB _3
+ JE _4
CMPL BX, $8
JBE _5through8
CMPL BX, $16
@@ -68,9 +69,13 @@
RET
_0:
RET
-_3or4:
+_3:
MOVW AX, (DI)
- MOVW AX, -2(DI)(BX*1)
+ MOVB AX, 2(DI)
+ RET
+_4:
+ // We need a separate case for 4 to make sure we clear pointers atomically.
+ MOVL AX, (DI)
RET
_5through8:
MOVL AX, (DI)
diff -Nru golang-1.5.1/src/runtime/memclr_amd64.s golang-1.5.2/src/runtime/memclr_amd64.s
--- golang-1.5.1/src/runtime/memclr_amd64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memclr_amd64.s 2015-12-03 13:53:01.000000000 +1300
@@ -23,7 +23,8 @@
CMPQ BX, $4
JBE _3or4
CMPQ BX, $8
- JBE _5through8
+ JB _5through7
+ JE _8
CMPQ BX, $16
JBE _9through16
PXOR X0, X0
@@ -71,10 +72,14 @@
MOVW AX, (DI)
MOVW AX, -2(DI)(BX*1)
RET
-_5through8:
+_5through7:
MOVL AX, (DI)
MOVL AX, -4(DI)(BX*1)
RET
+_8:
+ // We need a separate case for 8 to make sure we clear pointers atomically.
+ MOVQ AX, (DI)
+ RET
_9through16:
MOVQ AX, (DI)
MOVQ AX, -8(DI)(BX*1)
diff -Nru golang-1.5.1/src/runtime/memclr_arm64.s golang-1.5.2/src/runtime/memclr_arm64.s
--- golang-1.5.1/src/runtime/memclr_arm64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memclr_arm64.s 2015-12-03 13:53:01.000000000 +1300
@@ -8,11 +8,30 @@
TEXT runtime·memclr(SB),NOSPLIT,$0-16
MOVD ptr+0(FP), R3
MOVD n+8(FP), R4
- CMP $0, R4
- BEQ done
- ADD R3, R4, R4
+ // TODO(mwhudson): this is written this way to avoid tickling
+ // warnings from addpool when written as AND $7, R4, R6 (see
+ // https://golang.org/issue/12708)
+ AND $~7, R4, R5 // R5 is N&~7
+ SUB R5, R4, R6 // R6 is N&7
+
+ CMP $0, R5
+ BEQ nowords
+
+ ADD R3, R5, R5
+
+wordloop: // TODO: Optimize for unaligned ptr.
+ MOVD.P $0, 8(R3)
+ CMP R3, R5
+ BNE wordloop
+nowords:
+ CMP $0, R6
+ BEQ done
+
+ ADD R3, R6, R6
+
+byteloop:
MOVBU.P $0, 1(R3)
- CMP R3, R4
- BNE -2(PC)
+ CMP R3, R6
+ BNE byteloop
done:
RET
diff -Nru golang-1.5.1/src/runtime/memclr_plan9_386.s golang-1.5.2/src/runtime/memclr_plan9_386.s
--- golang-1.5.1/src/runtime/memclr_plan9_386.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memclr_plan9_386.s 2015-12-03 13:53:01.000000000 +1300
@@ -16,7 +16,8 @@
CMPL BX, $2
JBE _1or2
CMPL BX, $4
- JBE _3or4
+ JB _3
+ JE _4
CMPL BX, $8
JBE _5through8
CMPL BX, $16
@@ -35,9 +36,13 @@
RET
_0:
RET
-_3or4:
+_3:
MOVW AX, (DI)
- MOVW AX, -2(DI)(BX*1)
+ MOVB AX, 2(DI)
+ RET
+_4:
+ // We need a separate case for 4 to make sure we clear pointers atomically.
+ MOVL AX, (DI)
RET
_5through8:
MOVL AX, (DI)
diff -Nru golang-1.5.1/src/runtime/memclr_ppc64x.s golang-1.5.2/src/runtime/memclr_ppc64x.s
--- golang-1.5.1/src/runtime/memclr_ppc64x.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memclr_ppc64x.s 2015-12-03 13:53:01.000000000 +1300
@@ -10,11 +10,22 @@
TEXT runtime·memclr(SB),NOSPLIT,$0-16
MOVD ptr+0(FP), R3
MOVD n+8(FP), R4
- CMP R4, $0
+ SRADCC $3, R4, R6 // R6 is the number of words to zero
+ BEQ bytes
+
+ SUB $8, R3
+ MOVD R6, CTR
+ MOVDU R0, 8(R3)
+ BC 25, 0, -1(PC) // bdnz+ $-4
+ ADD $8, R3
+
+bytes:
+ ANDCC $7, R4, R7 // R7 is the number of bytes to zero
BEQ done
SUB $1, R3
- MOVD R4, CTR
+ MOVD R7, CTR
MOVBU R0, 1(R3)
- BC 25, 0, -1(PC) // bdnz+ $-4
+ BC 25, 0, -1(PC) // bdnz+ $-4
+
done:
RET
diff -Nru golang-1.5.1/src/runtime/mem_linux.go golang-1.5.2/src/runtime/mem_linux.go
--- golang-1.5.1/src/runtime/mem_linux.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/mem_linux.go 2015-12-03 13:53:01.000000000 +1300
@@ -69,29 +69,89 @@
}
func sysUnused(v unsafe.Pointer, n uintptr) {
- var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
- if s != 0 && (uintptr(v)%s != 0 || n%s != 0) {
- // See issue 8832
- // Linux kernel bug: https://bugzilla.kernel.org/show_bug.cgi?id=93111
- // Mark the region as NOHUGEPAGE so the kernel's khugepaged
- // doesn't undo our DONTNEED request. khugepaged likes to migrate
- // regions which are only partially mapped to huge pages, including
- // regions with some DONTNEED marks. That needlessly allocates physical
- // memory for our DONTNEED regions.
- madvise(v, n, _MADV_NOHUGEPAGE)
+ // By default, Linux's "transparent huge page" support will
+ // merge pages into a huge page if there's even a single
+ // present regular page, undoing the effects of the DONTNEED
+ // below. On amd64, that means khugepaged can turn a single
+ // 4KB page to 2MB, bloating the process's RSS by as much as
+ // 512X. (See issue #8832 and Linux kernel bug
+ // https://bugzilla.kernel.org/show_bug.cgi?id=93111)
+ //
+ // To work around this, we explicitly disable transparent huge
+ // pages when we release pages of the heap. However, we have
+ // to do this carefully because changing this flag tends to
+ // split the VMA (memory mapping) containing v in to three
+ // VMAs in order to track the different values of the
+ // MADV_NOHUGEPAGE flag in the different regions. There's a
+ // default limit of 65530 VMAs per address space (sysctl
+ // vm.max_map_count), so we must be careful not to create too
+ // many VMAs (see issue #12233).
+ //
+ // Since huge pages are huge, there's little use in adjusting
+ // the MADV_NOHUGEPAGE flag on a fine granularity, so we avoid
+ // exploding the number of VMAs by only adjusting the
+ // MADV_NOHUGEPAGE flag on a large granularity. This still
+ // gets most of the benefit of huge pages while keeping the
+ // number of VMAs under control. With hugePageSize = 2MB, even
+ // a pessimal heap can reach 128GB before running out of VMAs.
+ if hugePageSize != 0 {
+ var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
+
+ // If it's a large allocation, we want to leave huge
+ // pages enabled. Hence, we only adjust the huge page
+ // flag on the huge pages containing v and v+n-1, and
+ // only if those aren't aligned.
+ var head, tail uintptr
+ if uintptr(v)%s != 0 {
+ // Compute huge page containing v.
+ head = uintptr(v) &^ (s - 1)
+ }
+ if (uintptr(v)+n)%s != 0 {
+ // Compute huge page containing v+n-1.
+ tail = (uintptr(v) + n - 1) &^ (s - 1)
+ }
+
+ // Note that madvise will return EINVAL if the flag is
+ // already set, which is quite likely. We ignore
+ // errors.
+ if head != 0 && head+hugePageSize == tail {
+ // head and tail are different but adjacent,
+ // so do this in one call.
+ madvise(unsafe.Pointer(head), 2*hugePageSize, _MADV_NOHUGEPAGE)
+ } else {
+ // Advise the huge pages containing v and v+n-1.
+ if head != 0 {
+ madvise(unsafe.Pointer(head), hugePageSize, _MADV_NOHUGEPAGE)
+ }
+ if tail != 0 && tail != head {
+ madvise(unsafe.Pointer(tail), hugePageSize, _MADV_NOHUGEPAGE)
+ }
+ }
}
+
madvise(v, n, _MADV_DONTNEED)
}
func sysUsed(v unsafe.Pointer, n uintptr) {
if hugePageSize != 0 {
- // Undo the NOHUGEPAGE marks from sysUnused. There is no alignment check
- // around this call as spans may have been merged in the interim.
- // Note that this might enable huge pages for regions which were
- // previously disabled. Unfortunately there is no easy way to detect
- // what the previous state was, and in any case we probably want huge
- // pages to back our heap if the kernel can arrange that.
- madvise(v, n, _MADV_HUGEPAGE)
+ // Partially undo the NOHUGEPAGE marks from sysUnused
+ // for whole huge pages between v and v+n. This may
+ // leave huge pages off at the end points v and v+n
+ // even though allocations may cover these entire huge
+ // pages. We could detect this and undo NOHUGEPAGE on
+ // the end points as well, but it's probably not worth
+ // the cost because when neighboring allocations are
+ // freed sysUnused will just set NOHUGEPAGE again.
+ var s uintptr = hugePageSize
+
+ // Round v up to a huge page boundary.
+ beg := (uintptr(v) + (s - 1)) &^ (s - 1)
+ // Round v+n down to a huge page boundary.
+ end := (uintptr(v) + n) &^ (s - 1)
+
+ if beg < end {
+ madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
+ }
}
}
diff -Nru golang-1.5.1/src/runtime/memmove_386.s golang-1.5.2/src/runtime/memmove_386.s
--- golang-1.5.1/src/runtime/memmove_386.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_386.s 2015-12-03 13:53:01.000000000 +1300
@@ -43,7 +43,8 @@
CMPL BX, $2
JBE move_1or2
CMPL BX, $4
- JBE move_3or4
+ JB move_3
+ JE move_4
CMPL BX, $8
JBE move_5through8
CMPL BX, $16
@@ -118,11 +119,16 @@
RET
move_0:
RET
-move_3or4:
+move_3:
MOVW (SI), AX
- MOVW -2(SI)(BX*1), CX
+ MOVB 2(SI), CX
MOVW AX, (DI)
- MOVW CX, -2(DI)(BX*1)
+ MOVB CX, 2(DI)
+ RET
+move_4:
+ // We need a separate case for 4 to make sure we write pointers atomically.
+ MOVL (SI), AX
+ MOVL AX, (DI)
RET
move_5through8:
MOVL (SI), AX
diff -Nru golang-1.5.1/src/runtime/memmove_amd64.s golang-1.5.2/src/runtime/memmove_amd64.s
--- golang-1.5.1/src/runtime/memmove_amd64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_amd64.s 2015-12-03 13:53:01.000000000 +1300
@@ -50,7 +50,8 @@
CMPQ BX, $4
JBE move_3or4
CMPQ BX, $8
- JBE move_5through8
+ JB move_5through7
+ JE move_8
CMPQ BX, $16
JBE move_9through16
CMPQ BX, $32
@@ -131,12 +132,17 @@
MOVW AX, (DI)
MOVW CX, -2(DI)(BX*1)
RET
-move_5through8:
+move_5through7:
MOVL (SI), AX
MOVL -4(SI)(BX*1), CX
MOVL AX, (DI)
MOVL CX, -4(DI)(BX*1)
RET
+move_8:
+ // We need a separate case for 8 to make sure we write pointers atomically.
+ MOVQ (SI), AX
+ MOVQ AX, (DI)
+ RET
move_9through16:
MOVQ (SI), AX
MOVQ -8(SI)(BX*1), CX
diff -Nru golang-1.5.1/src/runtime/memmove_arm64.s golang-1.5.2/src/runtime/memmove_arm64.s
--- golang-1.5.1/src/runtime/memmove_arm64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_arm64.s 2015-12-03 13:53:01.000000000 +1300
@@ -14,23 +14,78 @@
RET
check:
+ AND $~7, R5, R7 // R7 is N&~7
+ // TODO(mwhudson): this is written this way to avoid tickling
+ // warnings from addpool when written as AND $7, R5, R6 (see
+ // https://golang.org/issue/12708)
+ SUB R7, R5, R6 // R6 is N&7
+
CMP R3, R4
BLT backward
- ADD R3, R5
-loop:
- MOVBU.P 1(R4), R6
- MOVBU.P R6, 1(R3)
- CMP R3, R5
- BNE loop
+ // Copying forward proceeds by copying R7/8 words then copying R6 bytes.
+ // R3 and R4 are advanced as we copy.
+
+ // (There may be implementations of armv8 where copying by bytes until
+ // at least one of source or dest is word aligned is a worthwhile
+ // optimization, but the on the one tested so far (xgene) it did not
+ // make a significance difference.)
+
+ CMP $0, R7 // Do we need to do any word-by-word copying?
+ BEQ noforwardlarge
+
+ ADD R3, R7, R9 // R9 points just past where we copy by word
+
+forwardlargeloop:
+ MOVD.P 8(R4), R8 // R8 is just a scratch register
+ MOVD.P R8, 8(R3)
+ CMP R3, R9
+ BNE forwardlargeloop
+
+noforwardlarge:
+ CMP $0, R6 // Do we need to do any byte-by-byte copying?
+ BNE forwardtail
+ RET
+
+forwardtail:
+ ADD R3, R6, R9 // R9 points just past the destination memory
+
+forwardtailloop:
+ MOVBU.P 1(R4), R8
+ MOVBU.P R8, 1(R3)
+ CMP R3, R9
+ BNE forwardtailloop
RET
backward:
- ADD R5, R4
- ADD R3, R5
-loop1:
- MOVBU.W -1(R4), R6
- MOVBU.W R6, -1(R5)
- CMP R3, R5
- BNE loop1
+ // Copying backwards proceeds by copying R6 bytes then copying R7/8 words.
+ // R3 and R4 are advanced to the end of the destination/source buffers
+ // respectively and moved back as we copy.
+
+ ADD R4, R5, R4 // R4 points just past the last source byte
+ ADD R3, R5, R3 // R3 points just past the last destination byte
+
+ CMP $0, R6 // Do we need to do any byte-by-byte copying?
+ BEQ nobackwardtail
+
+ SUB R6, R3, R9 // R9 points at the lowest destination byte that should be copied by byte.
+backwardtailloop:
+ MOVBU.W -1(R4), R8
+ MOVBU.W R8, -1(R3)
+ CMP R9, R3
+ BNE backwardtailloop
+
+nobackwardtail:
+ CMP $0, R7 // Do we need to do any word-by-word copying?
+ BNE backwardlarge
+ RET
+
+backwardlarge:
+ SUB R7, R3, R9 // R9 points at the lowest destination byte
+
+backwardlargeloop:
+ MOVD.W -8(R4), R8
+ MOVD.W R8, -8(R3)
+ CMP R9, R3
+ BNE backwardlargeloop
RET
diff -Nru golang-1.5.1/src/runtime/memmove_nacl_amd64p32.s golang-1.5.2/src/runtime/memmove_nacl_amd64p32.s
--- golang-1.5.1/src/runtime/memmove_nacl_amd64p32.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_nacl_amd64p32.s 2015-12-03 13:53:01.000000000 +1300
@@ -4,6 +4,9 @@
#include "textflag.h"
+// This could use MOVSQ, but we use MOVSL so that if an object ends in
+// a 4 byte pointer, we copy it as a unit instead of byte by byte.
+
TEXT runtime·memmove(SB), NOSPLIT, $0-12
MOVL to+0(FP), DI
MOVL from+4(FP), SI
@@ -14,9 +17,9 @@
forward:
MOVL BX, CX
- SHRL $3, CX
- ANDL $7, BX
- REP; MOVSQ
+ SHRL $2, CX
+ ANDL $3, BX
+ REP; MOVSL
MOVL BX, CX
REP; MOVSB
RET
@@ -32,15 +35,18 @@
STD
MOVL BX, CX
- SHRL $3, CX
- ANDL $7, BX
- SUBL $8, DI
- SUBL $8, SI
- REP; MOVSQ
- ADDL $7, DI
- ADDL $7, SI
+ SHRL $2, CX
+ ANDL $3, BX
+ SUBL $4, DI
+ SUBL $4, SI
+ REP; MOVSL
+ ADDL $3, DI
+ ADDL $3, SI
MOVL BX, CX
REP; MOVSB
CLD
+ // Note: we copy only 4 bytes at a time so that the tail is at most
+ // 3 bytes. That guarantees that we aren't copying pointers with MOVSB.
+ // See issue 13160.
RET
diff -Nru golang-1.5.1/src/runtime/memmove_plan9_386.s golang-1.5.2/src/runtime/memmove_plan9_386.s
--- golang-1.5.1/src/runtime/memmove_plan9_386.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_plan9_386.s 2015-12-03 13:53:01.000000000 +1300
@@ -39,7 +39,8 @@
CMPL BX, $2
JBE move_1or2
CMPL BX, $4
- JBE move_3or4
+ JB move_3
+ JE move_4
CMPL BX, $8
JBE move_5through8
CMPL BX, $16
@@ -104,11 +105,16 @@
RET
move_0:
RET
-move_3or4:
+move_3:
MOVW (SI), AX
- MOVW -2(SI)(BX*1), CX
+ MOVB 2(SI), CX
MOVW AX, (DI)
- MOVW CX, -2(DI)(BX*1)
+ MOVB CX, 2(DI)
+ RET
+move_4:
+ // We need a separate case for 4 to make sure we write pointers atomically.
+ MOVL (SI), AX
+ MOVL AX, (DI)
RET
move_5through8:
MOVL (SI), AX
diff -Nru golang-1.5.1/src/runtime/memmove_plan9_amd64.s golang-1.5.2/src/runtime/memmove_plan9_amd64.s
--- golang-1.5.1/src/runtime/memmove_plan9_amd64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_plan9_amd64.s 2015-12-03 13:53:01.000000000 +1300
@@ -43,7 +43,8 @@
CMPQ BX, $4
JBE move_3or4
CMPQ BX, $8
- JBE move_5through8
+ JB move_5through7
+ JE move_8
CMPQ BX, $16
JBE move_9through16
@@ -113,12 +114,17 @@
MOVW AX, (DI)
MOVW CX, -2(DI)(BX*1)
RET
-move_5through8:
+move_5through7:
MOVL (SI), AX
MOVL -4(SI)(BX*1), CX
MOVL AX, (DI)
MOVL CX, -4(DI)(BX*1)
RET
+move_8:
+ // We need a separate case for 8 to make sure we write pointers atomically.
+ MOVQ (SI), AX
+ MOVQ AX, (DI)
+ RET
move_9through16:
MOVQ (SI), AX
MOVQ -8(SI)(BX*1), CX
diff -Nru golang-1.5.1/src/runtime/memmove_ppc64x.s golang-1.5.2/src/runtime/memmove_ppc64x.s
--- golang-1.5.1/src/runtime/memmove_ppc64x.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_ppc64x.s 2015-12-03 13:53:01.000000000 +1300
@@ -16,25 +16,73 @@
RET
check:
- CMP R3, R4
- BGT backward
+ ANDCC $7, R5, R7 // R7 is the number of bytes to copy and CR0[EQ] is set if there are none.
+ SRAD $3, R5, R6 // R6 is the number of words to copy
+ CMP R6, $0, CR1 // CR1[EQ] is set if there are no words to copy.
+ CMP R3, R4, CR2
+ BC 12, 9, backward // I think you should be able to write this as "BGT CR2, backward"
+
+ // Copying forward proceeds by copying R6 words then copying R7 bytes.
+ // R3 and R4 are advanced as we copy. Becuase PPC64 lacks post-increment
+ // load/store, R3 and R4 point before the bytes that are to be copied.
+
+ BC 12, 6, noforwardlarge // "BEQ CR1, noforwardlarge"
+
+ MOVD R6, CTR
+
+ SUB $8, R3
+ SUB $8, R4
+
+forwardlargeloop:
+ MOVDU 8(R4), R8
+ MOVDU R8, 8(R3)
+ BC 16, 0, forwardlargeloop // "BDNZ"
+
+ ADD $8, R3
+ ADD $8, R4
+
+noforwardlarge:
+ BNE forwardtail // Tests the bit set by ANDCC above
+ RET
+
+forwardtail:
SUB $1, R3
- ADD R3, R5
SUB $1, R4
-loop:
- MOVBU 1(R4), R6
- MOVBU R6, 1(R3)
- CMP R3, R5
- BNE loop
+ MOVD R7, CTR
+
+forwardtailloop:
+ MOVBZU 1(R4), R8
+ MOVBZU R8, 1(R3)
+ BC 16, 0, forwardtailloop
RET
backward:
- ADD R5, R4
- ADD R3, R5
-loop1:
- MOVBU -1(R4), R6
- MOVBU R6, -1(R5)
- CMP R3, R5
- BNE loop1
+ // Copying backwards proceeds by copying R7 bytes then copying R6 words.
+ // R3 and R4 are advanced to the end of the destination/source buffers
+ // respectively and moved back as we copy.
+
+ ADD R5, R4, R4
+ ADD R3, R5, R3
+
+ BEQ nobackwardtail
+
+ MOVD R7, CTR
+
+backwardtailloop:
+ MOVBZU -1(R4), R8
+ MOVBZU R8, -1(R3)
+ BC 16, 0, backwardtailloop
+
+nobackwardtail:
+ BC 4, 6, backwardlarge // "BNE CR1"
+ RET
+
+backwardlarge:
+ MOVD R6, CTR
+
+backwardlargeloop:
+ MOVDU -8(R4), R8
+ MOVDU R8, -8(R3)
+ BC 16, 0, backwardlargeloop // "BDNZ"
RET
diff -Nru golang-1.5.1/src/runtime/memmove_test.go golang-1.5.2/src/runtime/memmove_test.go
--- golang-1.5.1/src/runtime/memmove_test.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/memmove_test.go 2015-12-03 13:53:01.000000000 +1300
@@ -116,6 +116,41 @@
func BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }
func BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }
+func bmMemmoveUnaligned(b *testing.B, n int) {
+ x := make([]byte, n+1)
+ y := make([]byte, n)
+ b.SetBytes(int64(n))
+ for i := 0; i < b.N; i++ {
+ copy(x[1:], y)
+ }
+}
+
+func BenchmarkMemmoveUnaligned0(b *testing.B) { bmMemmoveUnaligned(b, 0) }
+func BenchmarkMemmoveUnaligned1(b *testing.B) { bmMemmoveUnaligned(b, 1) }
+func BenchmarkMemmoveUnaligned2(b *testing.B) { bmMemmoveUnaligned(b, 2) }
+func BenchmarkMemmoveUnaligned3(b *testing.B) { bmMemmoveUnaligned(b, 3) }
+func BenchmarkMemmoveUnaligned4(b *testing.B) { bmMemmoveUnaligned(b, 4) }
+func BenchmarkMemmoveUnaligned5(b *testing.B) { bmMemmoveUnaligned(b, 5) }
+func BenchmarkMemmoveUnaligned6(b *testing.B) { bmMemmoveUnaligned(b, 6) }
+func BenchmarkMemmoveUnaligned7(b *testing.B) { bmMemmoveUnaligned(b, 7) }
+func BenchmarkMemmoveUnaligned8(b *testing.B) { bmMemmoveUnaligned(b, 8) }
+func BenchmarkMemmoveUnaligned9(b *testing.B) { bmMemmoveUnaligned(b, 9) }
+func BenchmarkMemmoveUnaligned10(b *testing.B) { bmMemmoveUnaligned(b, 10) }
+func BenchmarkMemmoveUnaligned11(b *testing.B) { bmMemmoveUnaligned(b, 11) }
+func BenchmarkMemmoveUnaligned12(b *testing.B) { bmMemmoveUnaligned(b, 12) }
+func BenchmarkMemmoveUnaligned13(b *testing.B) { bmMemmoveUnaligned(b, 13) }
+func BenchmarkMemmoveUnaligned14(b *testing.B) { bmMemmoveUnaligned(b, 14) }
+func BenchmarkMemmoveUnaligned15(b *testing.B) { bmMemmoveUnaligned(b, 15) }
+func BenchmarkMemmoveUnaligned16(b *testing.B) { bmMemmoveUnaligned(b, 16) }
+func BenchmarkMemmoveUnaligned32(b *testing.B) { bmMemmoveUnaligned(b, 32) }
+func BenchmarkMemmoveUnaligned64(b *testing.B) { bmMemmoveUnaligned(b, 64) }
+func BenchmarkMemmoveUnaligned128(b *testing.B) { bmMemmoveUnaligned(b, 128) }
+func BenchmarkMemmoveUnaligned256(b *testing.B) { bmMemmoveUnaligned(b, 256) }
+func BenchmarkMemmoveUnaligned512(b *testing.B) { bmMemmoveUnaligned(b, 512) }
+func BenchmarkMemmoveUnaligned1024(b *testing.B) { bmMemmoveUnaligned(b, 1024) }
+func BenchmarkMemmoveUnaligned2048(b *testing.B) { bmMemmoveUnaligned(b, 2048) }
+func BenchmarkMemmoveUnaligned4096(b *testing.B) { bmMemmoveUnaligned(b, 4096) }
+
func TestMemclr(t *testing.T) {
size := 512
if testing.Short() {
diff -Nru golang-1.5.1/src/runtime/mgcmark.go golang-1.5.2/src/runtime/mgcmark.go
--- golang-1.5.1/src/runtime/mgcmark.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/mgcmark.go 2015-12-03 13:53:01.000000000 +1300
@@ -152,6 +152,11 @@
}
// Record allocation.
+ if gp.gcalloc+size < gp.gcalloc {
+ // gcalloc would overflow, or it's set to a sentinel
+ // value to prevent recursive assist.
+ return
+ }
gp.gcalloc += size
if !allowAssist {
@@ -292,7 +297,12 @@
// more, so go around again after performing an
// interruptible sleep for 100 us (the same as the
// getfull barrier) to let other mutators run.
+
+ // timeSleep may allocate, so avoid recursive assist.
+ gcalloc := gp.gcalloc
+ gp.gcalloc = ^uintptr(0)
timeSleep(100 * 1000)
+ gp.gcalloc = gcalloc
goto retry
}
}
@@ -355,6 +365,8 @@
throw("g already has stack barriers")
}
+ gcLockStackBarriers(gp)
+
case _GCmarktermination:
if int(gp.stkbarPos) == len(gp.stkbar) {
// gp hit all of the stack barriers (or there
@@ -409,6 +421,9 @@
if gcphase == _GCmarktermination {
gcw.dispose()
}
+ if gcphase == _GCscan {
+ gcUnlockStackBarriers(gp)
+ }
gp.gcscanvalid = true
}
@@ -562,6 +577,8 @@
print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
}
+ gcLockStackBarriers(gp)
+
// Remove stack barriers that we didn't hit.
for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
gcRemoveStackBarrier(gp, stkbar)
@@ -571,6 +588,8 @@
// adjust them.
gp.stkbarPos = 0
gp.stkbar = gp.stkbar[:0]
+
+ gcUnlockStackBarriers(gp)
}
// gcRemoveStackBarrier removes a single stack barrier. It is the
@@ -589,22 +608,36 @@
printlock()
print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
print("gp.stkbar=")
- gcPrintStkbars(gp.stkbar)
- print(", gp.stkbarPos=", gp.stkbarPos, ", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
+ gcPrintStkbars(gp, -1)
+ print(", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
throw("stack barrier lost")
}
*lrPtr = uintreg(stkbar.savedLRVal)
}
-// gcPrintStkbars prints a []stkbar for debugging.
-func gcPrintStkbars(stkbar []stkbar) {
+// gcPrintStkbars prints the stack barriers of gp for debugging. It
+// places a "@@@" marker at gp.stkbarPos. If marker >= 0, it will also
+// place a "==>" marker before the marker'th entry.
+func gcPrintStkbars(gp *g, marker int) {
print("[")
- for i, s := range stkbar {
+ for i, s := range gp.stkbar {
if i > 0 {
print(" ")
}
+ if i == int(gp.stkbarPos) {
+ print("@@@ ")
+ }
+ if i == marker {
+ print("==> ")
+ }
print("*", hex(s.savedLRPtr), "=", hex(s.savedLRVal))
}
+ if int(gp.stkbarPos) == len(gp.stkbar) {
+ print(" @@@")
+ }
+ if marker == len(gp.stkbar) {
+ print(" ==>")
+ }
print("]")
}
@@ -617,6 +650,7 @@
//
//go:nosplit
func gcUnwindBarriers(gp *g, sp uintptr) {
+ gcLockStackBarriers(gp)
// On LR machines, if there is a stack barrier on the return
// from the frame containing sp, this will mark it as hit even
// though it isn't, but it's okay to be conservative.
@@ -625,9 +659,12 @@
gcRemoveStackBarrier(gp, gp.stkbar[gp.stkbarPos])
gp.stkbarPos++
}
+ gcUnlockStackBarriers(gp)
if debugStackBarrier && gp.stkbarPos != before {
print("skip barriers below ", hex(sp), " in goid=", gp.goid, ": ")
- gcPrintStkbars(gp.stkbar[before:gp.stkbarPos])
+ // We skipped barriers between the "==>" marker
+ // (before) and the "@@@" marker (gp.stkbarPos).
+ gcPrintStkbars(gp, int(before))
print("\n")
}
}
@@ -648,6 +685,28 @@
gp.stkbar[gp.stkbarPos].savedLRVal = pc
}
+// gcLockStackBarriers synchronizes with tracebacks of gp's stack
+// during sigprof for installation or removal of stack barriers. It
+// blocks until any current sigprof is done tracebacking gp's stack
+// and then disallows profiling tracebacks of gp's stack.
+//
+// This is necessary because a sigprof during barrier installation or
+// removal could observe inconsistencies between the stkbar array and
+// the stack itself and crash.
+func gcLockStackBarriers(gp *g) {
+ for !cas(&gp.stackLock, 0, 1) {
+ osyield()
+ }
+}
+
+func gcTryLockStackBarriers(gp *g) bool {
+ return cas(&gp.stackLock, 0, 1)
+}
+
+func gcUnlockStackBarriers(gp *g) {
+ atomicstore(&gp.stackLock, 0)
+}
+
// TODO(austin): Can we consolidate the gcDrain* functions?
// gcDrain scans objects in work buffers, blackening grey
diff -Nru golang-1.5.1/src/runtime/os1_darwin.go golang-1.5.2/src/runtime/os1_darwin.go
--- golang-1.5.1/src/runtime/os1_darwin.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_darwin.go 2015-12-03 13:53:01.000000000 +1300
@@ -130,6 +130,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -138,6 +139,17 @@
sigprocmask(_SIG_SETMASK, nil, smask)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -156,10 +168,8 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
- _g_ := getg()
- smask := (*uint32)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask, nil)
signalstack(nil)
}
@@ -459,6 +469,7 @@
return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
}
+//go:nosplit
func signalstack(s *stack) {
var st stackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_dragonfly.go golang-1.5.2/src/runtime/os1_dragonfly.go
--- golang-1.5.1/src/runtime/os1_dragonfly.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_dragonfly.go 2015-12-03 13:53:01.000000000 +1300
@@ -119,6 +119,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -127,6 +128,17 @@
sigprocmask(_SIG_SETMASK, nil, smask)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -150,9 +162,6 @@
// Called from dropm to undo the effect of an minit.
func unminit() {
- _g_ := getg()
- smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask, nil)
signalstack(nil)
}
@@ -222,6 +231,7 @@
return sa.sa_sigaction
}
+//go:nosplit
func signalstack(s *stack) {
var st sigaltstackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_freebsd.go golang-1.5.2/src/runtime/os1_freebsd.go
--- golang-1.5.1/src/runtime/os1_freebsd.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_freebsd.go 2015-12-03 13:53:01.000000000 +1300
@@ -118,6 +118,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -126,6 +127,17 @@
sigprocmask(_SIG_SETMASK, nil, smask)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -151,10 +163,8 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
- _g_ := getg()
- smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask, nil)
signalstack(nil)
}
@@ -224,6 +234,7 @@
return sa.sa_handler
}
+//go:nosplit
func signalstack(s *stack) {
var st stackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_linux.go golang-1.5.2/src/runtime/os1_linux.go
--- golang-1.5.1/src/runtime/os1_linux.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_linux.go 2015-12-03 13:53:01.000000000 +1300
@@ -198,6 +198,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -206,6 +207,17 @@
rtsigprocmask(_SIG_SETMASK, nil, smask, int32(unsafe.Sizeof(*smask)))
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+ rtsigprocmask(_SIG_SETMASK, smask, nil, int32(unsafe.Sizeof(*smask)))
+}
+
+//go:nosplit
+func sigblock() {
+ rtsigprocmask(_SIG_SETMASK, &sigset_all, nil, int32(unsafe.Sizeof(sigset_all)))
+}
+
func gettid() uint32
// Called to initialize a new m (including the bootstrap m).
@@ -229,10 +241,8 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
- _g_ := getg()
- smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
- rtsigprocmask(_SIG_SETMASK, smask, nil, int32(unsafe.Sizeof(*smask)))
signalstack(nil)
}
@@ -293,7 +303,8 @@
fn = funcPC(sigtramp)
}
sa.sa_handler = fn
- if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
+ // Qemu rejects rt_sigaction of SIGRTMAX (64).
+ if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 && i != 64 {
throw("rt_sigaction failure")
}
}
@@ -325,6 +336,7 @@
return sa.sa_handler
}
+//go:nosplit
func signalstack(s *stack) {
var st sigaltstackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_nacl.go golang-1.5.2/src/runtime/os1_nacl.go
--- golang-1.5.1/src/runtime/os1_nacl.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_nacl.go 2015-12-03 13:53:01.000000000 +1300
@@ -15,9 +15,18 @@
func sigtramp()
+//go:nosplit
func msigsave(mp *m) {
}
+//go:nosplit
+func msigrestore(mp *m) {
+}
+
+//go:nosplit
+func sigblock() {
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
diff -Nru golang-1.5.1/src/runtime/os1_netbsd.go golang-1.5.2/src/runtime/os1_netbsd.go
--- golang-1.5.1/src/runtime/os1_netbsd.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_netbsd.go 2015-12-03 13:53:01.000000000 +1300
@@ -138,6 +138,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -146,6 +147,17 @@
sigprocmask(_SIG_SETMASK, nil, smask)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -166,11 +178,8 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
- _g_ := getg()
- smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask, nil)
-
signalstack(nil)
}
@@ -213,6 +222,7 @@
return sa.sa_sigaction
}
+//go:nosplit
func signalstack(s *stack) {
var st sigaltstackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_openbsd.go golang-1.5.2/src/runtime/os1_openbsd.go
--- golang-1.5.1/src/runtime/os1_openbsd.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_openbsd.go 2015-12-03 13:53:01.000000000 +1300
@@ -148,6 +148,7 @@
mp.gsignal.m = mp
}
+//go:nosplit
func msigsave(mp *m) {
smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -156,6 +157,17 @@
*smask = sigprocmask(_SIG_BLOCK, 0)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := *(*uint32)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, sigset_all)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -178,10 +190,8 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
- _g_ := getg()
- smask := *(*uint32)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask)
signalstack(nil)
}
@@ -224,6 +234,7 @@
return sa.sa_sigaction
}
+//go:nosplit
func signalstack(s *stack) {
var st stackt
if s == nil {
diff -Nru golang-1.5.1/src/runtime/os1_plan9.go golang-1.5.2/src/runtime/os1_plan9.go
--- golang-1.5.1/src/runtime/os1_plan9.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_plan9.go 2015-12-03 13:53:01.000000000 +1300
@@ -21,6 +21,12 @@
func msigsave(mp *m) {
}
+func msigrestore(mp *m) {
+}
+
+func sigblock() {
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
diff -Nru golang-1.5.1/src/runtime/os1_windows.go golang-1.5.2/src/runtime/os1_windows.go
--- golang-1.5.1/src/runtime/os1_windows.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os1_windows.go 2015-12-03 13:53:01.000000000 +1300
@@ -284,9 +284,18 @@
func mpreinit(mp *m) {
}
+//go:nosplit
func msigsave(mp *m) {
}
+//go:nosplit
+func msigrestore(mp *m) {
+}
+
+//go:nosplit
+func sigblock() {
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -296,6 +305,7 @@
}
// Called from dropm to undo the effect of an minit.
+//go:nosplit
func unminit() {
tp := &getg().m.thread
stdcall1(_CloseHandle, *tp)
diff -Nru golang-1.5.1/src/runtime/os3_solaris.go golang-1.5.2/src/runtime/os3_solaris.go
--- golang-1.5.1/src/runtime/os3_solaris.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os3_solaris.go 2015-12-03 13:53:01.000000000 +1300
@@ -192,6 +192,7 @@
func miniterrno()
+//go:nosplit
func msigsave(mp *m) {
smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
@@ -200,6 +201,17 @@
sigprocmask(_SIG_SETMASK, nil, smask)
}
+//go:nosplit
+func msigrestore(mp *m) {
+ smask := (*sigset)(unsafe.Pointer(&mp.sigmask))
+ sigprocmask(_SIG_SETMASK, smask, nil)
+}
+
+//go:nosplit
+func sigblock() {
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
func minit() {
@@ -220,10 +232,6 @@
// Called from dropm to undo the effect of an minit.
func unminit() {
- _g_ := getg()
- smask := (*sigset)(unsafe.Pointer(&_g_.m.sigmask))
- sigprocmask(_SIG_SETMASK, smask, nil)
-
signalstack(nil)
}
@@ -289,6 +297,7 @@
return *((*uintptr)(unsafe.Pointer(&sa._funcptr)))
}
+//go:nosplit
func signalstack(s *stack) {
var st sigaltstackt
if s == nil {
@@ -493,6 +502,7 @@
sysvicall2(&libc_sigaltstack, uintptr(unsafe.Pointer(ss)), uintptr(unsafe.Pointer(oss)))
}
+//go:nosplit
func sigprocmask(how int32, set *sigset, oset *sigset) /* int32 */ {
sysvicall3(&libc_sigprocmask, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oset)))
}
diff -Nru golang-1.5.1/src/runtime/os_solaris.go golang-1.5.2/src/runtime/os_solaris.go
--- golang-1.5.1/src/runtime/os_solaris.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/os_solaris.go 2015-12-03 13:53:01.000000000 +1300
@@ -15,71 +15,71 @@
//go:nosplit
func sysvicall0(fn *libcFunc) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 0
libcall.args = uintptr(unsafe.Pointer(fn)) // it's unused but must be non-nil, otherwise crashes
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 1
// TODO(rsc): Why is noescape necessary here and below?
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 2
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 3
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 4
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 5
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
//go:nosplit
func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
- libcall := &getg().m.libcall
+ var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(fn))
libcall.n = 6
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&libcall))
return libcall.r1
}
diff -Nru golang-1.5.1/src/runtime/proc1.go golang-1.5.2/src/runtime/proc1.go
--- golang-1.5.1/src/runtime/proc1.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/proc1.go 2015-12-03 13:53:01.000000000 +1300
@@ -414,13 +414,7 @@
// the goroutine until we're done.
if castogscanstatus(gp, s, s|_Gscan) {
if !gp.gcscandone {
- // Coordinate with traceback
- // in sigprof.
- for !cas(&gp.stackLock, 0, 1) {
- osyield()
- }
scanstack(gp)
- atomicstore(&gp.stackLock, 0)
gp.gcscandone = true
}
restartg(gp)
@@ -951,6 +945,15 @@
mp.needextram = mp.schedlink == 0
unlockextra(mp.schedlink.ptr())
+ // Save and block signals before installing g.
+ // Once g is installed, any incoming signals will try to execute,
+ // but we won't have the sigaltstack settings and other data
+ // set up appropriately until the end of minit, which will
+ // unblock the signals. This is the same dance as when
+ // starting a new m to run Go code via newosproc.
+ msigsave(mp)
+ sigblock()
+
// Install g (= m->g0) and set the stack bounds
// to match the current stack. We don't actually know
// how big the stack is, like we don't know how big any
@@ -962,7 +965,6 @@
_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
_g_.stackguard0 = _g_.stack.lo + _StackGuard
- msigsave(mp)
// Initialize this thread to use the m.
asminit()
minit()
@@ -1033,9 +1035,6 @@
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
- // Undo whatever initialization minit did during needm.
- unminit()
-
// Clear m and g, and return m to the extra list.
// After the call to setg we can only call nosplit functions
// with no pointer manipulation.
@@ -1043,7 +1042,16 @@
mnext := lockextra(true)
mp.schedlink.set(mnext)
+ // Block signals before unminit.
+ // Unminit unregisters the signal handling stack (but needs g on some systems).
+ // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
+ // It's important not to try to handle a signal between those two steps.
+ sigblock()
+ unminit()
setg(nil)
+ msigrestore(mp)
+
+ // Commit the release of mp.
unlockextra(mp)
}
@@ -2500,11 +2508,6 @@
// Profiling runs concurrently with GC, so it must not allocate.
mp.mallocing++
- // Coordinate with stack barrier insertion in scanstack.
- for !cas(&gp.stackLock, 0, 1) {
- osyield()
- }
-
// Define that a "user g" is a user-created goroutine, and a "system g"
// is one that is m->g0 or m->gsignal.
//
@@ -2571,8 +2574,18 @@
// transition. We simply require that g and SP match and that the PC is not
// in gogo.
traceback := true
+ haveStackLock := false
if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
traceback = false
+ } else if gp.m.curg != nil {
+ if gcTryLockStackBarriers(gp.m.curg) {
+ haveStackLock = true
+ } else {
+ // Stack barriers are being inserted or
+ // removed, so we can't get a consistent
+ // traceback right now.
+ traceback = false
+ }
}
var stk [maxCPUProfStack]uintptr
n := 0
@@ -2582,7 +2595,14 @@
// This is especially important on windows, since all syscalls are cgo calls.
n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
} else if traceback {
- n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+ flags := uint(_TraceTrap | _TraceJumpStack)
+ if gp.m.curg != nil && readgstatus(gp.m.curg) == _Gcopystack {
+ // We can traceback the system stack, but
+ // don't jump to the potentially inconsistent
+ // user stack.
+ flags &^= _TraceJumpStack
+ }
+ n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags)
}
if !traceback || n <= 0 {
// Normal traceback is impossible or has failed.
@@ -2608,7 +2628,9 @@
}
}
}
- atomicstore(&gp.stackLock, 0)
+ if haveStackLock {
+ gcUnlockStackBarriers(gp.m.curg)
+ }
if prof.hz != 0 {
// Simple cas-lock to coordinate with setcpuprofilerate.
diff -Nru golang-1.5.1/src/runtime/race/testdata/issue12225_test.go golang-1.5.2/src/runtime/race/testdata/issue12225_test.go
--- golang-1.5.1/src/runtime/race/testdata/issue12225_test.go 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/src/runtime/race/testdata/issue12225_test.go 2015-12-03 13:53:02.000000000 +1300
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+// golang.org/issue/12225
+// The test is that this compiles at all.
+
+func issue12225() {
+ println(*(*int)(unsafe.Pointer(&convert("")[0])))
+ println(*(*int)(unsafe.Pointer(&[]byte("")[0])))
+}
diff -Nru golang-1.5.1/src/runtime/rt0_darwin_arm.s golang-1.5.2/src/runtime/rt0_darwin_arm.s
--- golang-1.5.1/src/runtime/rt0_darwin_arm.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/rt0_darwin_arm.s 2015-12-03 13:53:02.000000000 +1300
@@ -16,27 +16,34 @@
//
// Note that all currently shipping darwin/arm platforms require
// cgo and do not support c-shared.
-TEXT _rt0_arm_darwin_lib(SB),NOSPLIT,$12
+TEXT _rt0_arm_darwin_lib(SB),NOSPLIT,$0
+ // R11 is REGTMP, reserved for liblink. It is used below to
+ // move R0/R1 into globals. However in the darwin ARMv7 calling
+ // convention, it is a callee-saved register. So we save it to a
+ // temporary register.
+ MOVW R11, R2
MOVW R0, _rt0_arm_darwin_lib_argc<>(SB)
MOVW R1, _rt0_arm_darwin_lib_argv<>(SB)
// Create a new thread to do the runtime initialization and return.
- MOVW _cgo_sys_thread_create(SB), R4
- CMP $0, R4
+ MOVW _cgo_sys_thread_create(SB), R3
+ CMP $0, R3
B.EQ nocgo
MOVW $_rt0_arm_darwin_lib_go(SB), R0
MOVW $0, R1
- BL (R4)
+ MOVW R2, R11
+ BL (R3)
RET
nocgo:
MOVW $0x400000, R0
- MOVW $_rt0_arm_darwin_lib_go(SB), R1
- MOVW $0, R2
- MOVW R0, (R13) // stacksize
- MOVW R1, 4(R13) // fn
- MOVW R2, 8(R13) // fnarg
- MOVW $runtime·newosproc0(SB), R4
- BL (R4)
+ MOVW R0, (R13) // stacksize
+ MOVW $_rt0_arm_darwin_lib_go(SB), R0
+ MOVW R0, 4(R13) // fn
+ MOVW $0, R0
+ MOVW R0, 8(R13) // fnarg
+ MOVW $runtime·newosproc0(SB), R3
+ MOVW R2, R11
+ BL (R3)
RET
TEXT _rt0_arm_darwin_lib_go(SB),NOSPLIT,$0
diff -Nru golang-1.5.1/src/runtime/signal_linux.go golang-1.5.2/src/runtime/signal_linux.go
--- golang-1.5.1/src/runtime/signal_linux.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/signal_linux.go 2015-12-03 13:53:02.000000000 +1300
@@ -44,8 +44,8 @@
/* 29 */ {_SigNotify, "SIGIO: i/o now possible"},
/* 30 */ {_SigNotify, "SIGPWR: power failure restart"},
/* 31 */ {_SigNotify, "SIGSYS: bad system call"},
- /* 32 */ {_SigSetStack, "signal 32"}, /* SIGCANCEL; see issue 6997 */
- /* 33 */ {_SigSetStack, "signal 33"}, /* SIGSETXID; see issue 3871, 9400 */
+ /* 32 */ {_SigSetStack + _SigUnblock, "signal 32"}, /* SIGCANCEL; see issue 6997 */
+ /* 33 */ {_SigSetStack + _SigUnblock, "signal 33"}, /* SIGSETXID; see issues 3871, 9400, 12498 */
/* 34 */ {_SigNotify, "signal 34"},
/* 35 */ {_SigNotify, "signal 35"},
/* 36 */ {_SigNotify, "signal 36"},
diff -Nru golang-1.5.1/src/runtime/stack1.go golang-1.5.2/src/runtime/stack1.go
--- golang-1.5.1/src/runtime/stack1.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/stack1.go 2015-12-03 13:53:02.000000000 +1300
@@ -609,6 +609,10 @@
print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
}
+ // Disallow sigprof scans of this stack and block if there's
+ // one in progress.
+ gcLockStackBarriers(gp)
+
// adjust pointers in the to-be-copied frames
var adjinfo adjustinfo
adjinfo.old = old
@@ -640,6 +644,8 @@
gp.stackAlloc = newsize
gp.stkbar = newstkbar
+ gcUnlockStackBarriers(gp)
+
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
diff -Nru golang-1.5.1/src/runtime/sys_solaris_amd64.s golang-1.5.2/src/runtime/sys_solaris_amd64.s
--- golang-1.5.1/src/runtime/sys_solaris_amd64.s 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/sys_solaris_amd64.s 2015-12-03 13:53:02.000000000 +1300
@@ -80,6 +80,8 @@
get_tls(CX)
MOVQ g(CX), BX
+ CMPQ BX, $0
+ JEQ skiperrno1
MOVQ g_m(BX), BX
MOVQ m_perrno(BX), DX
CMPQ DX, $0
@@ -108,6 +110,8 @@
get_tls(CX)
MOVQ g(CX), BX
+ CMPQ BX, $0
+ JEQ skiperrno2
MOVQ g_m(BX), BX
MOVQ m_perrno(BX), AX
CMPQ AX, $0
diff -Nru golang-1.5.1/src/runtime/traceback.go golang-1.5.2/src/runtime/traceback.go
--- golang-1.5.1/src/runtime/traceback.go 2015-09-09 13:24:03.000000000 +1200
+++ golang-1.5.2/src/runtime/traceback.go 2015-12-03 13:53:02.000000000 +1300
@@ -142,7 +142,8 @@
// Fix up returns to the stack barrier by fetching the
// original return PC from gp.stkbar.
- stkbar := gp.stkbar[gp.stkbarPos:]
+ stkbarG := gp
+ stkbar := stkbarG.stkbar[stkbarG.stkbarPos:]
if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
if gp.syscallsp != 0 {
@@ -188,6 +189,34 @@
}
f := findfunc(frame.pc)
+ if f != nil && f.entry == stackBarrierPC {
+ // We got caught in the middle of a stack barrier
+ // (presumably by a signal), so stkbar may be
+ // inconsistent with the barriers on the stack.
+ // Simulate the completion of the barrier.
+ //
+ // On x86, SP will be exactly one word above
+ // savedLRPtr. On LR machines, SP will be above
+ // savedLRPtr by some frame size.
+ var stkbarPos uintptr
+ if len(stkbar) > 0 && stkbar[0].savedLRPtr < sp0 {
+ // stackBarrier has not incremented stkbarPos.
+ stkbarPos = gp.stkbarPos
+ } else if gp.stkbarPos > 0 && gp.stkbar[gp.stkbarPos-1].savedLRPtr < sp0 {
+ // stackBarrier has incremented stkbarPos.
+ stkbarPos = gp.stkbarPos - 1
+ } else {
+ printlock()
+ print("runtime: failed to unwind through stackBarrier at SP ", hex(sp0), "; ")
+ gcPrintStkbars(gp, int(gp.stkbarPos))
+ print("\n")
+ throw("inconsistent state in stackBarrier")
+ }
+
+ frame.pc = gp.stkbar[stkbarPos].savedLRVal
+ stkbar = gp.stkbar[stkbarPos+1:]
+ f = findfunc(frame.pc)
+ }
if f == nil {
if callback != nil {
print("runtime: unknown pc ", hex(frame.pc), "\n")
@@ -216,7 +245,8 @@
sp := frame.sp
if flags&_TraceJumpStack != 0 && f.entry == systemstackPC && gp == g.m.g0 && gp.m.curg != nil {
sp = gp.m.curg.sched.sp
- stkbar = gp.m.curg.stkbar[gp.m.curg.stkbarPos:]
+ stkbarG = gp.m.curg
+ stkbar = stkbarG.stkbar[stkbarG.stkbarPos:]
}
frame.fp = sp + uintptr(funcspdelta(f, frame.pc))
if !usesLR {
@@ -254,9 +284,9 @@
}
if frame.lr == stackBarrierPC {
// Recover original PC.
- if stkbar[0].savedLRPtr != lrPtr {
+ if len(stkbar) == 0 || stkbar[0].savedLRPtr != lrPtr {
print("found next stack barrier at ", hex(lrPtr), "; expected ")
- gcPrintStkbars(stkbar)
+ gcPrintStkbars(stkbarG, len(stkbarG.stkbar)-len(stkbar))
print("\n")
throw("missed stack barrier")
}
@@ -476,7 +506,7 @@
if callback != nil && n < max && len(stkbar) > 0 {
print("runtime: g", gp.goid, ": leftover stack barriers ")
- gcPrintStkbars(stkbar)
+ gcPrintStkbars(stkbarG, len(stkbarG.stkbar)-len(stkbar))
print("\n")
throw("traceback has leftover stack barriers")
}
diff -Nru golang-1.5.1/test/fixedbugs/issue11987.go golang-1.5.2/test/fixedbugs/issue11987.go
--- golang-1.5.1/test/fixedbugs/issue11987.go 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/test/fixedbugs/issue11987.go 2015-12-03 13:53:02.000000000 +1300
@@ -0,0 +1,23 @@
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 11987. The ppc64 SRADCC instruction was misassembled in a way
+// that lost bit 5 of the immediate so v>>32 was assembled as v>>0. SRADCC
+// is only ever inserted by peep so it's hard to be sure when it will
+// be used. This formulation worked when the bug was fixed.
+
+package main
+
+import "fmt"
+
+var v int64 = 0x80000000
+
+func main() {
+ s := fmt.Sprintf("%v", v>>32 == 0)
+ if s != "true" {
+ fmt.Printf("BUG: v>>32 == 0 evaluated as %q\n", s)
+ }
+}
diff -Nru golang-1.5.1/test/fixedbugs/issue12686.go golang-1.5.2/test/fixedbugs/issue12686.go
--- golang-1.5.1/test/fixedbugs/issue12686.go 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/test/fixedbugs/issue12686.go 2015-12-03 13:53:02.000000000 +1300
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// golang.org/issue/12686.
+// interesting because it's a non-constant but ideal value
+// and we used to incorrectly attach a constant Val to the Node.
+
+package p
+
+func f(i uint) uint {
+ x := []uint{1 << i}
+ return x[0]
+}
diff -Nru golang-1.5.1/test/fixedbugs/issue13160.go golang-1.5.2/test/fixedbugs/issue13160.go
--- golang-1.5.1/test/fixedbugs/issue13160.go 1970-01-01 12:00:00.000000000 +1200
+++ golang-1.5.2/test/fixedbugs/issue13160.go 2015-12-03 13:53:02.000000000 +1300
@@ -0,0 +1,70 @@
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+)
+
+const N = 100000
+
+func main() {
+ // Allocate more Ps than processors. This raises
+ // the chance that we get interrupted by the OS
+ // in exactly the right (wrong!) place.
+ p := runtime.NumCPU()
+ runtime.GOMAXPROCS(2 * p)
+
+ // Allocate some pointers.
+ ptrs := make([]*int, p)
+ for i := 0; i < p; i++ {
+ ptrs[i] = new(int)
+ }
+
+ // Arena where we read and write pointers like crazy.
+ collider := make([]*int, p)
+
+ done := make(chan struct{}, 2*p)
+
+ // Start writers. They alternately write a pointer
+ // and nil to a slot in the collider.
+ for i := 0; i < p; i++ {
+ i := i
+ go func() {
+ for j := 0; j < N; j++ {
+ // Write a pointer using memmove.
+ copy(collider[i:i+1], ptrs[i:i+1])
+ // Write nil using memclr.
+ // (This is a magic loop that gets lowered to memclr.)
+ r := collider[i : i+1]
+ for k := range r {
+ r[k] = nil
+ }
+ }
+ done <- struct{}{}
+ }()
+ }
+ // Start readers. They read pointers from slots
+ // and make sure they are valid.
+ for i := 0; i < p; i++ {
+ i := i
+ go func() {
+ for j := 0; j < N; j++ {
+ var ptr [1]*int
+ copy(ptr[:], collider[i:i+1])
+ if ptr[0] != nil && ptr[0] != ptrs[i] {
+ panic(fmt.Sprintf("bad pointer read %p!", ptr[0]))
+ }
+ }
+ done <- struct{}{}
+ }()
+ }
+ for i := 0; i < 2*p; i++ {
+ <-done
+ }
+}
diff -Nru golang-1.5.1/VERSION golang-1.5.2/VERSION
--- golang-1.5.1/VERSION 2015-09-09 13:24:06.000000000 +1200
+++ golang-1.5.2/VERSION 2015-12-03 13:53:32.000000000 +1300
@@ -1 +1 @@
-go1.5.1
\ No newline at end of file
+go1.5.2
\ No newline at end of file