Commit 8ea89ba8 authored by Chris Broadfoot

all: merge master into release-branch.go1.7

Change-Id: Ifb9647fa9817ed57aa4835a35a05020aba00a24e
parents 8707f31c 28ee1796
......@@ -780,6 +780,64 @@ mode as on the x86, but the only scale allowed is <code>1</code>.
</ul>
<h3 id="s390x">IBM z/Architecture, a.k.a. s390x</h3>
<p>
The registers <code>R10</code> and <code>R11</code> are reserved.
The assembler uses them to hold temporary values when assembling some instructions.
</p>
<p>
<code>R13</code> points to the <code>g</code> (goroutine) structure.
This register must be referred to as <code>g</code>; the name <code>R13</code> is not recognized.
</p>
<p>
<code>R15</code> points to the stack frame and should typically only be accessed using the
virtual registers <code>SP</code> and <code>FP</code>.
</p>
<p>
Load- and store-multiple instructions operate on a range of registers.
The range of registers is specified by a start register and an end register.
For example, <code>LMG</code> <code>(R9),</code> <code>R5,</code> <code>R7</code> would load
<code>R5</code>, <code>R6</code> and <code>R7</code> with the 64-bit values at
<code>0(R9)</code>, <code>8(R9)</code> and <code>16(R9)</code> respectively.
</p>
<p>
Storage-and-storage instructions such as <code>MVC</code> and <code>XC</code> are written
with the length as the first argument.
For example, <code>XC</code> <code>$8,</code> <code>(R9),</code> <code>(R9)</code> would clear
eight bytes at the address specified in <code>R9</code>.
</p>
<p>
If a vector instruction takes a length or an index as an argument then it will be the
first argument.
For example, <code>VLEIF</code> <code>$1,</code> <code>$16,</code> <code>V2</code> will load
the value sixteen into index one of <code>V2</code>.
Care should be taken when using vector instructions to ensure that they are available at
runtime.
To use vector instructions a machine must have both the vector facility (bit 129 in the
facility list) and kernel support.
Without kernel support a vector instruction will have no effect (it will be equivalent
to a <code>NOP</code> instruction).
</p>
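<p>
One common way to honor this at run time is to gate the assembly path behind a capability flag that is set during package initialization. A minimal sketch, inside a package that also provides the s390x assembly (the <code>hasVX</code> flag and the routine names are illustrative, not defined by the toolchain):
</p>
<pre>
// hasVX records whether the vector facility may be used. It is assumed
// to be set at init time, for example from the facility list or an
// auxiliary-vector query provided by the kernel.
var hasVX bool

func xorBytes(dst, a, b []byte) {
	if hasVX {
		xorBytesVX(dst, a, b) // assembly using vector instructions
		return
	}
	for i := range dst { // pure Go fallback
		dst[i] = a[i] ^ b[i]
	}
}

func xorBytesVX(dst, a, b []byte) // implemented in s390x assembly
</pre>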
<p>
Addressing modes:
</p>
<ul>
<li>
<code>(R5)(R6*1)</code>: The location at <code>R5</code> plus <code>R6</code>.
It is a scaled mode as on the x86, but the only scale allowed is <code>1</code>.
</li>
</ul>
<h3 id="unsupported_opcodes">Unsupported opcodes</h3>
<p>
......
......@@ -102,6 +102,17 @@ POWER5 architecture.
The OpenBSD port now requires OpenBSD 5.6 or later, for access to the <a href="http://man.openbsd.org/getentropy.2"><i>getentropy</i>(2)</a> system call.
</p>
<h3 id="known_issues">Known Issues</h3>
<p>
There are some instabilities on FreeBSD that are known but not understood.
These can lead to program crashes in rare cases.
See <a href="https://golang.org/issue/16136">issue 16136</a>,
<a href="https://golang.org/issue/15658">issue 15658</a>,
and <a href="https://golang.org/issue/16396">issue 16396</a>.
Any help in solving these FreeBSD-specific issues would be appreciated.
</p>
<h2 id="tools">Tools</h2>
<h3 id="cmd_asm">Assembler</h3>
......@@ -905,6 +916,12 @@ For example, the address on which a request received is
<code>req.Context().Value(http.LocalAddrContextKey).(net.Addr)</code>.
</p>
<p>
The server's <a href="/pkg/net/http/#Server.Serve"><code>Serve</code></a> method
now only enables HTTP/2 support if the <code>Server.TLSConfig</code> field is <code>nil</code>
or includes <code>"h2"</code> in its <code>TLSConfig.NextProtos</code>.
</p>
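<p>
For example, a program that constructs its own TLS listener and serves it with <code>Serve</code> can opt in to HTTP/2 by advertising <code>"h2"</code> itself. A sketch, assuming the usual <code>crypto/tls</code>, <code>net/http</code>, and <code>log</code> imports and that <code>handler</code> and <code>cert</code> are defined elsewhere:
</p>
<pre>
srv := &amp;http.Server{
	Handler: handler,
	TLSConfig: &amp;tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"h2", "http/1.1"},
	},
}
ln, err := tls.Listen("tcp", ":8443", srv.TLSConfig)
if err != nil {
	log.Fatal(err)
}
log.Fatal(srv.Serve(ln))
</pre>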
<p>
The server implementation now
pads response codes less than 100 to three digits
......
......@@ -156,6 +156,36 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog {
return p
}
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD.
// See runtime/mkduff.go.
func duffStart(size int64) int64 {
x, _ := duff(size)
return x
}
func duffAdj(size int64) int64 {
_, x := duff(size)
return x
}
// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
// required to use the duffzero mechanism for a block of the given size.
func duff(size int64) (int64, int64) {
if size < 32 || size > 1024 || size%dzClearStep != 0 {
panic("bad duffzero size")
}
steps := size / dzClearStep
blocks := steps / dzBlockLen
steps %= dzBlockLen
off := dzBlockSize * (dzBlocks - blocks)
var adj int64
if steps != 0 {
off -= dzAddSize
off -= dzMovSize * steps
adj -= dzClearStep * (dzBlockLen - steps)
}
return off, adj
}
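// A worked example, using the dz* constants listed in rewrite.go below
// (dzClearStep=16, dzBlockLen=4, dzBlockSize=19, dzMovSize=4,
// dzAddSize=4, dzBlocks=16): for size = 496 bytes (a [62]int on AMD64),
// steps = 496/16 = 31, blocks = 31/4 = 7, and 31%4 = 3 steps remain,
// so off = 19*(16-7) - 4 - 4*3 = 155 and adj = -16*(4-3) = -16.
// That is, duff(496) = (155, -16): enter duffzero 155 bytes in, with
// the destination pointer pre-adjusted down by 16 bytes.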
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.SetLineno(v.Line)
switch v.Op {
......@@ -649,10 +679,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
case ssa.OpAMD64DUFFZERO:
p := gc.Prog(obj.ADUFFZERO)
off := duffStart(v.AuxInt)
adj := duffAdj(v.AuxInt)
var p *obj.Prog
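// A non-zero adj means execution enters duffzero partway through a
// 4-MOVUPS block. The stores that do run use offsets that assume the
// skipped MOVs also ran, so DI is pre-adjusted downward (adj < 0)
// before the jump so that the zeroed range starts at the intended
// address.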
if adj != 0 {
p = gc.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = adj
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI
}
p = gc.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
p.To.Offset = v.AuxInt
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
v.Unimplementedf("MOVOconst can only do constant=0")
......
......@@ -389,7 +389,7 @@
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
(DUFFZERO [size] destptr (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
......
......@@ -425,10 +425,10 @@ func init() {
{name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem"}, // store 8 bytes of ... arg1 ...
{name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem"}, // store 8 bytes of ... 8*arg1 ...
// arg0 = (duff-adjusted) pointer to start of memory to zero
// arg0 = pointer to start of memory to zero
// arg1 = value to store (will always be zero)
// arg2 = mem
// auxint = offset into duffzero code to start executing
// auxint = # of bytes to zero
// returns mem
{
name: "DUFFZERO",
......
......@@ -254,52 +254,6 @@ func isSamePtr(p1, p2 *Value) bool {
return false
}
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
// See runtime/mkduff.go.
const (
dzBlocks = 16 // number of MOV/ADD blocks
dzBlockLen = 4 // number of clears per block
dzBlockSize = 19 // size of instructions in a single block
dzMovSize = 4 // size of single MOV instruction w/ offset
dzAddSize = 4 // size of single ADD instruction
dzClearStep = 16 // number of bytes cleared by each MOV instruction
dzTailLen = 4 // number of final STOSQ instructions
dzTailSize = 2 // size of single STOSQ instruction
dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
dzSize = dzBlocks * dzBlockSize
)
func duffStart(size int64) int64 {
x, _ := duff(size)
return x
}
func duffAdj(size int64) int64 {
_, x := duff(size)
return x
}
// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
// required to use the duffzero mechanism for a block of the given size.
func duff(size int64) (int64, int64) {
if size < 32 || size > 1024 || size%dzClearStep != 0 {
panic("bad duffzero size")
}
// TODO: arch-dependent
steps := size / dzClearStep
blocks := steps / dzBlockLen
steps %= dzBlockLen
off := dzBlockSize * (dzBlocks - blocks)
var adj int64
if steps != 0 {
off -= dzAddSize
off -= dzMovSize * steps
adj -= dzClearStep * (dzBlockLen - steps)
}
return off, adj
}
// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
......
......@@ -17175,7 +17175,7 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
}
// match: (Zero [size] destptr mem)
// cond: size <= 1024 && size%16 == 0 && !config.noDuffDevice
// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
// result: (DUFFZERO [size] destptr (MOVOconst [0]) mem)
for {
size := v.AuxInt
destptr := v.Args[0]
......@@ -17184,14 +17184,11 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
break
}
v.reset(OpAMD64DUFFZERO)
v.AuxInt = duffStart(size)
v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
v0.AuxInt = duffAdj(size)
v0.AddArg(destptr)
v.AuxInt = size
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
return true
}
......
......@@ -15,7 +15,17 @@ const (
BestSpeed = 1
BestCompression = 9
DefaultCompression = -1
HuffmanOnly = -2 // Disables match search and only does Huffman entropy reduction.
// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
// entropy encoding. This mode is useful in compressing data that has
// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
// that lacks an entropy encoder. Compression gains are achieved when
// certain bytes in the input stream occur more frequently than others.
//
// Note that HuffmanOnly produces a compressed output that is
// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
// continue to be able to decompress this output.
HuffmanOnly = -2
)
const (
......@@ -644,7 +654,6 @@ func (d *compressor) close() error {
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
......
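A minimal sketch of using the HuffmanOnly level documented above to re-encode data that has already been LZ-compressed (the payload string is a placeholder):

package main

import (
	"bytes"
	"compress/flate"
	"log"
)

func main() {
	var buf bytes.Buffer
	// HuffmanOnly (-2) skips Lempel-Ziv matching; only Huffman coding runs.
	zw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err) // only possible for levels outside [-2, 9]
	}
	if _, err := zw.Write([]byte("payload that was already LZ-compressed elsewhere")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d DEFLATE bytes (decodable by any RFC 1951 inflater)", buf.Len())
}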
......@@ -255,6 +255,12 @@ func TestDeadline(t *testing.T) {
o = otherContext{c}
c, _ = WithDeadline(o, time.Now().Add(4*time.Second))
testDeadline(c, "WithDeadline+otherContext+WithDeadline", 2*time.Second, t)
c, _ = WithDeadline(Background(), time.Now().Add(-time.Millisecond))
testDeadline(c, "WithDeadline+inthepast", time.Second, t)
c, _ = WithDeadline(Background(), time.Now())
testDeadline(c, "WithDeadline+now", time.Second, t)
}
func TestTimeout(t *testing.T) {
......
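The two new deadline cases above pin down that a deadline at or before the current time expires immediately. A small standalone illustration using the standard context package:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Millisecond))
	defer cancel()
	select {
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	default:
		fmt.Println("still live") // not reached: a past deadline is already expired
	}
}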
......@@ -10,9 +10,65 @@ package x509
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <errno.h>
#include <sys/sysctl.h>
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
// FetchPEMRoots_MountainLion is the version of FetchPEMRoots from Go 1.6
// which still works on OS X 10.8 (Mountain Lion).
// It lacks support for admin & user cert domains.
// See golang.org/issue/16473
int FetchPEMRoots_MountainLion(CFDataRef *pemRoots) {
if (pemRoots == NULL) {
return -1;
}
CFArrayRef certs = NULL;
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
if (err != noErr) {
return -1;
}
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
int i, ncerts = CFArrayGetCount(certs);
for (i = 0; i < ncerts; i++) {
CFDataRef data = NULL;
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
if (cert == NULL) {
continue;
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
*pemRoots = combinedData;
return 0;
}
// useOldCode reports whether the running machine is OS X 10.8 Mountain Lion
// or older. We only support Mountain Lion and higher, but we'll at least try our
// best on older machines and continue to use the old code path.
//
// See golang.org/issue/16473
int useOldCode() {
char str[256];
size_t size = sizeof(str);
memset(str, 0, size);
sysctlbyname("kern.osrelease", str, &size, NULL, 0);
// OS X 10.8 is osrelease "12.*", 10.7 is 11.*, 10.6 is 10.*.
// We never supported things before that.
return memcmp(str, "12.", 3) == 0 || memcmp(str, "11.", 3) == 0 || memcmp(str, "10.", 3) == 0;
}
// FetchPEMRoots fetches the system's list of trusted X.509 root certificates.
//
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
......@@ -21,6 +77,10 @@ package x509
// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
// we've consumed its content.
int FetchPEMRoots(CFDataRef *pemRoots) {
if (useOldCode()) {
return FetchPEMRoots_MountainLion(pemRoots);
}
// Get certificates from all domains, not just System, this lets
// the user add CAs to their "login" keychain, and Admins to add
// to the "System" keychain
......
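For reference, the kern.osrelease probe that useOldCode performs in the cgo preamble can be sketched in pure Go as follows (illustrative only; this helper is not part of the package and compiles only on Darwin and the BSDs, where syscall.Sysctl exists):

package main

import (
	"fmt"
	"strings"
	"syscall"
)

// oldDarwin mirrors useOldCode above: osrelease "12.*" is OS X 10.8,
// "11.*" is 10.7, and "10.*" is 10.6.
func oldDarwin() bool {
	rel, err := syscall.Sysctl("kern.osrelease")
	if err != nil {
		return false
	}
	return strings.HasPrefix(rel, "12.") ||
		strings.HasPrefix(rel, "11.") ||
		strings.HasPrefix(rel, "10.")
}

func main() {
	fmt.Println("old Darwin:", oldDarwin())
}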
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package net
import (
"context"
"syscall"
"testing"
"time"
)
// Issue 16523
func TestDialContextCancelRace(t *testing.T) {
oldConnectFunc := connectFunc
oldGetsockoptIntFunc := getsockoptIntFunc
oldTestHookCanceledDial := testHookCanceledDial
defer func() {
connectFunc = oldConnectFunc
getsockoptIntFunc = oldGetsockoptIntFunc
testHookCanceledDial = oldTestHookCanceledDial
}()
ln, err := newLocalListener("tcp")
if err != nil {
t.Fatal(err)
}
listenerDone := make(chan struct{})
go func() {
defer close(listenerDone)
c, err := ln.Accept()
if err == nil {
c.Close()
}
}()
defer func() { <-listenerDone }()
defer ln.Close()
sawCancel := make(chan bool, 1)
testHookCanceledDial = func() {
sawCancel <- true
}
ctx, cancelCtx := context.WithCancel(context.Background())
connectFunc = func(fd int, addr syscall.Sockaddr) error {
err := oldConnectFunc(fd, addr)
t.Logf("connect(%d, addr) = %v", fd, err)
if err == nil {
// On some operating systems, localhost
// connects _sometimes_ succeed immediately.
// Prevent that, so we exercise the code path
// we're interested in testing. This seems
// harmless. It makes FreeBSD 10.10 work when
// run with many iterations. It failed about
// half the time previously.
return syscall.EINPROGRESS
}
return err
}
getsockoptIntFunc = func(fd, level, opt int) (val int, err error) {
val, err = oldGetsockoptIntFunc(fd, level, opt)
t.Logf("getsockoptIntFunc(%d, %d, %d) = (%v, %v)", fd, level, opt, val, err)
if level == syscall.SOL_SOCKET && opt == syscall.SO_ERROR && err == nil && val == 0 {
t.Logf("canceling context")
// Cancel the context at just the moment which
// caused the race in issue 16523.
cancelCtx()
// And wait for the "interrupter" goroutine to
// cancel the dial by messing with its write
// timeout before returning.
select {
case <-sawCancel:
t.Logf("saw cancel")
case <-time.After(5 * time.Second):
t.Errorf("didn't see cancel after 5 seconds")
}
}
return
}
var d Dialer
c, err := d.DialContext(ctx, "tcp", ln.Addr().String())
if err == nil {
c.Close()
t.Fatal("unexpected successful dial; want context canceled error")
}
select {
case <-ctx.Done():
case <-time.After(5 * time.Second):
t.Fatal("expected context to be canceled")
}
oe, ok := err.(*OpError)
if !ok || oe.Op != "dial" {
t.Fatalf("Dial error = %#v; want dial *OpError", err)
}
if oe.Err != ctx.Err() {
t.Errorf("DialContext = (%v, %v); want OpError with error %v", c, err, ctx.Err())
}
}
......@@ -64,7 +64,7 @@ func (fd *netFD) name() string {
return fd.net + ":" + ls + "->" + rs
}
func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret error) {
// Do not need to call fd.writeLock here,
// because fd is not yet accessible to user,
// so no concurrent operations are possible.
......@@ -101,21 +101,44 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
defer fd.setWriteDeadline(noDeadline)
}
// Wait for the goroutine converting context.Done into a write timeout
// to exist, otherwise our caller might cancel the context and
// cause fd.setWriteDeadline(aLongTimeAgo) to cancel a successful dial.
done := make(chan bool) // must be unbuffered
defer func() { done <- true }()
go func() {
select {
case <-ctx.Done():
// Force the runtime's poller to immediately give
// up waiting for writability.
fd.setWriteDeadline(aLongTimeAgo)
<-done
case <-done:
}
}()
// Start the "interrupter" goroutine, if this context might be canceled.
// (The background context cannot be canceled.)
//
// The interrupter goroutine waits for the context to be done and
// interrupts the dial (by altering the fd's write deadline, which
// wakes up waitWrite).
if ctx != context.Background() {
// Wait for the interrupter goroutine to exit before returning
// from connect.
done := make(chan struct{})
interruptRes := make(chan error)
defer func() {
close(done)
if ctxErr := <-interruptRes; ctxErr != nil && ret == nil {
// The interrupter goroutine called setWriteDeadline,
// but the connect code below had returned from
// waitWrite already and did a successful connect (ret
// == nil). Because we've now poisoned the connection
// by making it unwritable, don't return a successful
// dial. This was issue 16523.
ret = ctxErr
fd.Close() // prevent a leak
}
}()
go func() {
select {
case <-ctx.Done():
// Force the runtime's poller to immediately give up
// waiting for writability, unblocking waitWrite
// below.
fd.setWriteDeadline(aLongTimeAgo)
testHookCanceledDial()
interruptRes <- ctx.Err()
case <-done:
interruptRes <- nil
}
}()
}
for {
// Performing multiple connect system calls on a
......
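The interrupter pattern added above generalizes beyond connect. A self-contained sketch of the same shape (the names are illustrative, not from the net package; the original additionally closes the fd when it reports cancellation, and it tests ctx != context.Background() rather than ctx.Done() == nil):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// doWithCancel runs blockingOp and, if ctx is canceled first, calls
// interrupt to unblock it. Because interruption can race with a
// successful completion, a cancellation observed by the watcher
// overrides a nil result, mirroring the fix for issue 16523.
func doWithCancel(ctx context.Context, blockingOp func() error, interrupt func()) (ret error) {
	if ctx.Done() == nil {
		return blockingOp() // this context can never be canceled
	}
	done := make(chan struct{})
	watcherRes := make(chan error)
	defer func() {
		close(done)
		if ctxErr := <-watcherRes; ctxErr != nil && ret == nil {
			// The watcher interrupted us but blockingOp still
			// succeeded; don't hand back a poisoned result.
			ret = ctxErr
		}
	}()
	go func() {
		select {
		case <-ctx.Done():
			interrupt()
			watcherRes <- ctx.Err()
		case <-done:
			watcherRes <- nil
		}
	}()
	return blockingOp()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stop := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); cancel() }()
	err := doWithCancel(ctx,
		func() error { <-stop; return errors.New("interrupted") },
		func() { close(stop) })
	fmt.Println(err) // "interrupted"
}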
......@@ -9,7 +9,8 @@ package net
import "syscall"
var (
testHookDialChannel = func() {} // see golang.org/issue/5349
testHookDialChannel = func() {} // for golang.org/issue/5349
testHookCanceledDial = func() {} // for golang.org/issue/16523
// Placeholders for socket system calls.
socketFunc func(int, int, int) (int, error) = syscall.Socket
......
......@@ -4716,3 +4716,14 @@ func BenchmarkCloseNotifier(b *testing.B) {
}
b.StopTimer()
}
// Verify this doesn't race (Issue 16505)
func TestConcurrentServerServe(t *testing.T) {
for i := 0; i < 100; i++ {
ln1 := &oneConnListener{conn: nil}
ln2 := &oneConnListener{conn: nil}
srv := Server{}
go func() { srv.Serve(ln1) }()
go func() { srv.Serve(ln2) }()
}
}
......@@ -2129,8 +2129,8 @@ type Server struct {
ErrorLog *log.Logger
disableKeepAlives int32 // accessed atomically.
nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve
nextProtoErr error
nextProtoOnce sync.Once // guards setupHTTP2_* init
nextProtoErr error // result of http2.ConfigureServer if used
}
// A ConnState represents the state of a client connection to a server.
......@@ -2260,10 +2260,8 @@ func (srv *Server) Serve(l net.Listener) error {
}
var tempDelay time.Duration // how long to sleep on accept failure
if srv.shouldConfigureHTTP2ForServe() {
if err := srv.setupHTTP2(); err != nil {
return err
}
if err := srv.setupHTTP2_Serve(); err != nil {
return err
}
// TODO: allow changing base context? can't imagine concrete
......@@ -2408,7 +2406,7 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
// before we clone it and create the TLS Listener.
if err := srv.setupHTTP2(); err != nil {
if err := srv.setupHTTP2_ListenAndServeTLS(); err != nil {
return err
}
......@@ -2436,14 +2434,36 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
return srv.Serve(tlsListener)
}
func (srv *Server) setupHTTP2() error {
// setupHTTP2_ListenAndServeTLS conditionally configures HTTP/2 on
// srv and returns any error from doing so. If HTTP/2 is not
// configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ListenAndServeTLS() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
return srv.nextProtoErr
}
// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ListenAndServeTLS because Serve may be called
// concurrently.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
return srv.nextProtoErr
}
func (srv *Server) onceSetNextProtoDefaults_Serve() {
if srv.shouldConfigureHTTP2ForServe() {
srv.onceSetNextProtoDefaults()
}
}
// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise (by setting srv.TLSNextProto non-nil).
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2).
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
return
......
......@@ -383,6 +383,11 @@ func (t *Transport) RoundTrip(req *Request) (*Response, error) {
return resp, nil
}
if !pconn.shouldRetryRequest(req, err) {
// Issue 16465: return underlying net.Conn.Read error from peek,
// as we've historically done.
if e, ok := err.(transportReadFromServerError); ok {
err = e.err
}
return nil, err
}
testHookRoundTripRetried()
......@@ -415,11 +420,19 @@ func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
// first, per golang.org/issue/15723
return false
}
if _, ok := err.(nothingWrittenError); ok {
switch err.(type) {
case nothingWrittenError:
// We never wrote anything, so it's safe to retry.
return true
case transportReadFromServerError:
// We got some non-EOF net.Conn.Read failure reading
// the 1st response byte from the server.
return true
}
if err == errServerClosedIdle || err == errServerClosedConn {
if err == errServerClosedIdle {
// The server replied with io.EOF while we were trying to
// read the response. Probably an unfortunate keep-alive
// timeout, just as the client was writing a request.
return true
}
return false // conservatively
......@@ -566,10 +579,25 @@ var (
errCloseIdleConns = errors.New("http: CloseIdleConnections called")
errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
errServerClosedIdle = errors.New("http: server closed idle connection")
errServerClosedConn = errors.New("http: server closed connection")
errIdleConnTimeout = errors.New("http: idle connection timeout")
)
// transportReadFromServerError is used by Transport.readLoop when the
// 1-byte peek read fails and we're actually anticipating a response.
// Usually this is just due to the inherent keep-alive shutdown race,
// where the server closed the connection at the same time the client
// wrote. The underlying err field is usually io.EOF or some
// ECONNRESET sort of thing which varies by platform. But it might be
// the user's custom net.Conn.Read error too, so we carry it along for
// them to return from Transport.RoundTrip.
type transportReadFromServerError struct {
err error
}
func (e transportReadFromServerError) Error() string {
return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
}
func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
if err := t.tryPutIdleConn(pconn); err != nil {
pconn.close(err)
......@@ -1293,7 +1321,10 @@ func (pc *persistConn) mapRoundTripErrorFromReadLoop(startBytesWritten int64, er
if pc.isCanceled() {
return errRequestCanceled
}
if err == errServerClosedIdle || err == errServerClosedConn {
if err == errServerClosedIdle {
return err
}
if _, ok := err.(transportReadFromServerError); ok {
return err
}
if pc.isBroken() {
......@@ -1314,7 +1345,11 @@ func (pc *persistConn) mapRoundTripErrorAfterClosed(startBytesWritten int64) err
return errRequestCanceled
}
err := pc.closed
if err == errServerClosedIdle || err == errServerClosedConn {
if err == errServerClosedIdle {
// Don't decorate
return err
}
if _, ok := err.(transportReadFromServerError); ok {
// Don't decorate
return err
}
......@@ -1383,7 +1418,7 @@ func (pc *persistConn) readLoop() {
if err == nil {
resp, err = pc.readResponse(rc, trace)
} else {
err = errServerClosedConn
err = transportReadFromServerError{err}
closeErr = err
}
......@@ -1784,6 +1819,7 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
var re responseAndError
var respHeaderTimer <-chan time.Time
cancelChan := req.Request.Cancel
ctxDoneChan := req.Context().Done()
WaitResponse:
for {
testHookWaitResLoop()
......@@ -1815,9 +1851,11 @@ WaitResponse:
case <-cancelChan:
pc.t.CancelRequest(req.Request)
cancelChan = nil
case <-req.Context().Done():
ctxDoneChan = nil
case <-ctxDoneChan:
pc.t.CancelRequest(req.Request)
cancelChan = nil
ctxDoneChan = nil
}
}
......
......@@ -46,17 +46,22 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) {
conn.Close() // simulate the server hanging up on the client
_, err = pc.roundTrip(treq)
if err != errServerClosedConn && err != errServerClosedIdle {
if !isTransportReadFromServerError(err) && err != errServerClosedIdle {
t.Fatalf("roundTrip = %#v, %v; want errServerClosedConn or errServerClosedIdle", err, err)
}
<-pc.closech
err = pc.closed
if err != errServerClosedConn && err != errServerClosedIdle {
if !isTransportReadFromServerError(err) && err != errServerClosedIdle {
t.Fatalf("pc.closed = %#v, %v; want errServerClosedConn or errServerClosedIdle", err, err)
}
}
func isTransportReadFromServerError(err error) bool {
_, ok := err.(transportReadFromServerError)
return ok
}
func newLocalListener(t *testing.T) net.Listener {
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
......
......@@ -3511,6 +3511,45 @@ func TestTransportIdleConnTimeout(t *testing.T) {
}
}
type funcConn struct {
net.Conn
read func([]byte) (int, error)
write func([]byte) (int, error)
}
func (c funcConn) Read(p []byte) (int, error) { return c.read(p) }
func (c funcConn) Write(p []byte) (int, error) { return c.write(p) }
func (c funcConn) Close() error { return nil }
// Issue 16465: Transport.RoundTrip should return the raw net.Conn.Read error from Peek
// back to the caller.
func TestTransportReturnsPeekError(t *testing.T) {
errValue := errors.New("specific error value")
wrote := make(chan struct{})
var wroteOnce sync.Once
tr := &Transport{
Dial: func(network, addr string) (net.Conn, error) {
c := funcConn{
read: func([]byte) (int, error) {
<-wrote
return 0, errValue
},
write: func(p []byte) (int, error) {
wroteOnce.Do(func() { close(wrote) })
return len(p), nil
},
}
return c, nil
},
}
_, err := tr.RoundTrip(httptest.NewRequest("GET", "http://fake.tld/", nil))
if err != errValue {
t.Errorf("error = %#v; want %v", err, errValue)
}
}
var errFakeRoundTrip = errors.New("fake roundtrip")
type funcRoundTripper func()
......
......@@ -741,11 +741,10 @@ const gcCreditSlack = 2000
// can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000
// gcOverAssistBytes determines how many extra allocation bytes of
// assist credit a GC assist builds up when an assist happens. This
// amortizes the cost of an assist by pre-paying for this many bytes
// of future allocations.
const gcOverAssistBytes = 1 << 20
// gcOverAssistWork determines how many extra units of scan work a GC
// assist does when an assist happens. This amortizes the cost of an
// assist by pre-paying for this many bytes of future allocations.
const gcOverAssistWork = 64 << 10
var work struct {
full uint64 // lock-free list of full blocks workbuf
......
......@@ -393,10 +393,15 @@ func gcAssistAlloc(gp *g) {
}
// Compute the amount of scan work we need to do to make the
// balance positive. We over-assist to build up credit for
// future allocations and amortize the cost of assisting.
debtBytes := -gp.gcAssistBytes + gcOverAssistBytes
// balance positive. When the required amount of work is low,
// we over-assist to build up credit for future allocations
// and amortize the cost of assisting.
debtBytes := -gp.gcAssistBytes
scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
if scanWork < gcOverAssistWork {
scanWork = gcOverAssistWork
debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
}
retry:
// Steal as much credit as we can from the background GC's
......
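A sketch of the new over-assist clamp in isolation (the assist ratios below are made-up values; at run time they come from gcController):

package main

import "fmt"

const gcOverAssistWork = 64 << 10 // as defined in mgc.go above

// assistPlan mirrors the logic added to gcAssistAlloc: small debts are
// rounded up to gcOverAssistWork units of scan work, and the goroutine
// is charged for the larger number of bytes that much work pays for.
func assistPlan(debtBytes int64, workPerByte, bytesPerWork float64) (scanWork, chargedBytes int64) {
	scanWork = int64(workPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(bytesPerWork * float64(scanWork))
	}
	return scanWork, debtBytes
}

func main() {
	// With a made-up ratio of 0.5 work units per byte, a 4 KiB debt needs
	// only 2 KiB of scan work, so it is rounded up to 64 KiB of work,
	// pre-paying for 128 KiB of future allocation.
	fmt.Println(assistPlan(4<<10, 0.5, 2.0))
}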
......@@ -4,8 +4,69 @@
// Package pprof writes runtime profiling data in the format expected
// by the pprof visualization tool.
//
// Profiling a Go program
//
// The first step to profiling a Go program is to enable profiling.
// Support for profiling benchmarks built with the standard testing
// package is built into go test. For example, the following command
// runs benchmarks in the current directory and writes the CPU and
// memory profiles to cpu.prof and mem.prof:
//
// go test -cpuprofile cpu.prof -memprofile mem.prof -bench .
//
// To add equivalent profiling support to a standalone program, add
// code like the following to your main function:
//
// var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
// var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
//
// func main() {
// flag.Parse()
// if *cpuprofile != "" {
// f, err := os.Create(*cpuprofile)
// if err != nil {
// log.Fatal("could not create CPU profile: ", err)
// }
// if err := pprof.StartCPUProfile(f); err != nil {
// log.Fatal("could not start CPU profile: ", err)
// }
// defer pprof.StopCPUProfile()
// }
// ...
// if *memprofile != "" {
// f, err := os.Create(*memprofile)
// if err != nil {
// log.Fatal("could not create memory profile: ", err)
// }
// runtime.GC() // get up-to-date statistics
// if err := pprof.WriteHeapProfile(f); err != nil {
// log.Fatal("could not write memory profile: ", err)
// }
// f.Close()
// }
// }
//
// There is also a standard HTTP interface to profiling data. Adding
// the following line will install handlers under the /debug/pprof/
// URL to download live profiles:
//
// import _ "net/http/pprof"
//
// See the net/http/pprof package for more details.
//
// Profiles can then be visualized with the pprof tool:
//
// go tool pprof cpu.prof
//
// There are many commands available from the pprof command line.
// Commonly used commands include "top", which prints a summary of the
// top program hot-spots, and "web", which opens an interactive graph
// of hot-spots and their call graphs. Use "help" for information on
// all pprof commands.
//
// For more information about pprof, see
// http://github.com/google/pprof/.
// https://github.com/google/pprof/blob/master/doc/pprof.md.
package pprof
import (
......
// run
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// issue 16515: spilled Duff-adjusted address may be invalid
package main
import "runtime"
type T [62]int // DUFFZERO with non-zero adjustment on AMD64
var sink interface{}
//go:noinline
func zero(x *T) {
// Two DUFFZEROs on the same address with a function call in between.
// Duff-adjusted address will be spilled and loaded
*x = T{} // DUFFZERO
runtime.GC()
(*x)[0] = 1
g() // call a function with large frame, trigger a stack move
*x = T{} // DUFFZERO again
}
//go:noinline
// a function with large frame
func g() {
var x [1000]int
_ = x
}
func main() {
var s struct { a T; b [8192-62]int } // allocate 64K, hopefully it's in a new span and a few bytes before it is garbage
sink = &s // force heap allocation
s.a[0] = 2
zero(&s.a)
if s.a[0] != 0 {
println("s.a[0] =", s.a[0])
panic("zeroing failed")
}
var a T // on stack
a[0] = 2
zero(&a)
if a[0] != 0 {
println("a[0] =", a[0])
panic("zeroing failed")
}
}