Commit 6afbe6cb authored by Jacob Vosmaer

Use grpc-go 1.9.1

parent cdabb988
......@@ -7,7 +7,7 @@ If you are new to github, please start by reading [Pull Request howto](https://h
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
## Guidelines for Pull Requests
How to get your contributions merged smoothly and quickly.
......
......@@ -23,10 +23,10 @@ proto:
go generate google.golang.org/grpc/...
test: testdeps
go test -cpu 1,4 google.golang.org/grpc/...
go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
testrace: testdeps
go test -race -cpu 1,4 google.golang.org/grpc/...
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
......
# gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
......@@ -16,7 +16,8 @@ $ go get -u google.golang.org/grpc
Prerequisites
-------------
This requires Go 1.7 or later.
This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go
release (1.8).
Constraints
-----------
......
......@@ -25,14 +25,12 @@ import (
// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
var (
DefaultBackoffConfig = BackoffConfig{
MaxDelay: 120 * time.Second,
baseDelay: 1.0 * time.Second,
factor: 1.6,
jitter: 0.2,
}
)
var DefaultBackoffConfig = BackoffConfig{
MaxDelay: 120 * time.Second,
baseDelay: 1.0 * time.Second,
factor: 1.6,
jitter: 0.2,
}
// backoffStrategy defines the methodology for backing off after a grpc
// connection failure.
......
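A minimal sketch of the computation these fields drive, per the connection-backoff spec linked above (the function name is hypothetical; gRPC's actual implementation is unexported):

func backoffFor(retries int) time.Duration {
	if retries == 0 {
		return time.Second // baseDelay
	}
	cur, max := float64(time.Second), float64(120*time.Second) // baseDelay, MaxDelay
	for cur < max && retries > 0 {
		cur *= 1.6 // factor
		retries--
	}
	if cur > max {
		cur = max
	}
	cur *= 1 + 0.2*(rand.Float64()*2-1) // jitter in [-20%, +20%]; rand is math/rand
	return time.Duration(cur)
}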
......@@ -28,6 +28,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/status"
)
// Address represents a server the client connects to.
......@@ -310,7 +311,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
if !opts.BlockingWait {
if len(rr.addrs) == 0 {
rr.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
err = status.Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on rr.addrs for failfast RPCs.
......
......@@ -23,6 +23,7 @@ package balancer
import (
"errors"
"net"
"strings"
"golang.org/x/net/context"
"google.golang.org/grpc/connectivity"
......@@ -33,24 +34,23 @@ import (
var (
// m is a map from name to balancer builder.
m = make(map[string]Builder)
// defaultBuilder is the default balancer to use.
defaultBuilder Builder // TODO(bar) install pickfirst as default.
)
// Register registers the balancer builder to the balancer map.
// b.Name will be used as the name registered with this builder.
// b.Name (lowercased) will be used as the name registered with
// this builder.
func Register(b Builder) {
m[b.Name()] = b
m[strings.ToLower(b.Name())] = b
}
// Get returns the balancer builder registered with the given name.
// If no builder is registered with the name, the default pickfirst will
// be used.
// Note that the comparison is done in a case-insensitive fashion.
// If no builder is registered with the name, nil will be returned.
func Get(name string) Builder {
if b, ok := m[name]; ok {
if b, ok := m[strings.ToLower(name)]; ok {
return b
}
return defaultBuilder
return nil
}
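A hypothetical registration, illustrating the lowercased name handling above (the builder type and name are made up):

type noopBuilder struct{}

func (noopBuilder) Build(cc ClientConn, opts BuildOptions) Balancer { return nil }
func (noopBuilder) Name() string                                    { return "MyBalancer" }

func init() {
	Register(noopBuilder{}) // stored under "mybalancer"
	_ = Get("MYBALANCER")   // found: the lookup is lowercased as well
	_ = Get("unknown")      // nil: there is no default fallback anymore
}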
// SubConn represents a gRPC sub connection.
......@@ -66,6 +66,11 @@ func Get(name string) Builder {
// When the connection encounters an error, it will reconnect immediately.
// When the connection becomes IDLE, it will not reconnect unless Connect is
// called.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
......@@ -83,6 +88,11 @@ type SubConn interface {
type NewSubConnOptions struct{}
// ClientConn represents a gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established.
......@@ -99,6 +109,9 @@ type ClientConn interface {
// on the new picker to pick new SubConn.
UpdateBalancerState(s connectivity.State, p Picker)
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
ResolveNow(resolver.ResolveNowOption)
// Target returns the dial target for this ClientConn.
Target() string
}
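A sketch of the embedding advice above: a test double embeds the interface so it keeps compiling when gRPC adds methods, and overrides only what the test needs (the type and counter are hypothetical):

type testClientConn struct {
	ClientConn // embedded; unoverridden methods dispatch to the nil value and panic

	newSubConnCount int
}

func (t *testClientConn) NewSubConn(addrs []resolver.Address, opts NewSubConnOptions) (SubConn, error) {
	t.newSubConnCount++
	return nil, nil
}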
......@@ -131,6 +144,10 @@ type PickOptions struct{}
type DoneInfo struct {
// Err is the rpc error the RPC finished with. It could be nil.
Err error
// BytesSent indicates if any bytes have been sent to the server.
BytesSent bool
// BytesReceived indicates if any bytes have been received from the server.
BytesReceived bool
}
var (
......@@ -161,7 +178,7 @@ type Picker interface {
// If a SubConn is returned:
// - If it is READY, gRPC will send the RPC on it;
// - If it is not ready, or becomes not ready after it's returned, gRPC will block
// this call until a new picker is updated and will call pick on the new picker.
// until UpdateBalancerState() is called and will call pick on the new picker.
//
// If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
......
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package base
import (
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
)
type baseBuilder struct {
name string
pickerBuilder PickerBuilder
}
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
return &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &connectivityStateEvaluator{},
// Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when the state of a SubConn changes, we
// may call UpdateBalancerState with this picker.
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
}
}
func (bb *baseBuilder) Name() string {
return bb.name
}
type baseBalancer struct {
cc balancer.ClientConn
pickerBuilder PickerBuilder
csEvltr *connectivityStateEvaluator
state connectivity.State
subConns map[resolver.Address]balancer.SubConn
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
}
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
return
}
grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
for _, a := range addrs {
addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
if err != nil {
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue
}
b.subConns[a] = sc
b.scStates[sc] = connectivity.Idle
sc.Connect()
}
}
for a, sc := range b.subConns {
// a was removed by resolver.
if _, ok := addrsSet[a]; !ok {
b.cc.RemoveSubConn(sc)
delete(b.subConns, a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
// The entry will be deleted in HandleSubConnStateChange.
}
}
}
// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(balancer.ErrTransientFailure)
return
}
readySCs := make(map[resolver.Address]balancer.SubConn)
// Filter out all ready SCs from full subConn map.
for addr, sc := range b.subConns {
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[addr] = sc
}
}
b.picker = b.pickerBuilder.Build(readySCs)
}
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
oldS, ok := b.scStates[sc]
if !ok {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
return
}
b.scStates[sc] = s
switch s {
case connectivity.Idle:
sc.Connect()
case connectivity.Shutdown:
// When an address was removed by resolver, b called RemoveSubConn but
// kept the sc's state in scStates. Remove state for this sc here.
delete(b.scStates, sc)
}
oldAggrState := b.state
b.state = b.csEvltr.recordTransition(oldS, s)
// Regenerate picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
b.regeneratePicker()
}
b.cc.UpdateBalancerState(b.state, b.picker)
return
}
// Close is a nop because base balancer doesn't have internal state to clean up,
// and it doesn't need to call RemoveSubConn for the SubConns.
func (b *baseBalancer) Close() {
}
// NewErrPicker returns a picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
return &errPicker{err: err}
}
type errPicker struct {
err error // Pick() always returns this err.
}
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, p.err
}
// connectivityStateEvaluator gets updated by addrConns when their
// states transition, based on which it evaluates the state of
// ClientConn.
type connectivityStateEvaluator struct {
numReady uint64 // Number of addrConns in ready state.
numConnecting uint64 // Number of addrConns in connecting state.
numTransientFailure uint64 // Number of addrConns in transientFailure.
}
// recordTransition records the state change happening in every subConn and,
// based on that, evaluates what the aggregated state should be.
// It can only transition between Ready, Connecting and TransientFailure. The other states,
// Idle and Shutdown, are transitioned into by ClientConn: at the beginning of the connection,
// before any subConn is created, ClientConn is in the Idle state; at the end, when ClientConn
// closes, it is in the Shutdown state.
//
// recordTransition should only be called synchronously from the same goroutine.
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
// Update counters.
for idx, state := range []connectivity.State{oldState, newState} {
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
switch state {
case connectivity.Ready:
cse.numReady += updateVal
case connectivity.Connecting:
cse.numConnecting += updateVal
case connectivity.TransientFailure:
cse.numTransientFailure += updateVal
}
}
// Evaluate.
if cse.numReady > 0 {
return connectivity.Ready
}
if cse.numConnecting > 0 {
return connectivity.Connecting
}
return connectivity.TransientFailure
}
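A short worked example of the aggregation above (any READY SubConn wins, then CONNECTING):

cse := &connectivityStateEvaluator{}
s := cse.recordTransition(connectivity.Idle, connectivity.Connecting) // Connecting
s = cse.recordTransition(connectivity.Idle, connectivity.Connecting)  // still Connecting
s = cse.recordTransition(connectivity.Connecting, connectivity.Ready) // Ready: one READY suffices
_ = s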
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package base defines a balancer base that can be used to build balancers with
// different picking algorithms.
//
// The base balancer creates a new SubConn for each resolved address. The
// provided picker will only be notified about READY SubConns.
//
// This package is the base of the round_robin balancer; its purpose is to be used
// to build round_robin-like balancers with complex picking algorithms.
// Balancers with more complicated logic should implement a balancer
// builder from scratch.
//
// All APIs in this package are experimental.
package base
import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)
// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
// Build takes a slice of ready SubConns, and returns a picker that will be
// used by gRPC to pick a SubConn.
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}
// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
return &baseBuilder{
name: name,
pickerBuilder: pb,
}
}
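For instance, a hypothetical random-pick balancer can reuse all of the SubConn management in baseBalancer and supply only a picker; the package, name, and types below are illustrative:

package random

import (
	"math/rand"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

func init() {
	balancer.Register(base.NewBalancerBuilder("random", &rndPickerBuilder{}))
}

type rndPickerBuilder struct{}

func (*rndPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &rndPicker{subConns: scs}
}

type rndPicker struct {
	subConns []balancer.SubConn // ready SubConns snapshotted at build time
}

func (p *rndPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.subConns[rand.Intn(len(p.subConns))], nil, nil
}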
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
// installed as one of the default balancers in gRPC, so users don't need to
// explicitly install this balancer.
package roundrobin
import (
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
)
// Name is the name of round_robin balancer.
const Name = "round_robin"
// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
}
func init() {
balancer.Register(newBuilder())
}
type rrPickerBuilder struct{}
func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
var scs []balancer.SubConn
for _, sc := range readySCs {
scs = append(scs, sc)
}
return &rrPicker{
subConns: scs,
}
}
type rrPicker struct {
// subConns is the snapshot of the roundrobin balancer when this picker was
// created. The slice is immutable. Each Pick() will do a round-robin
// selection from it and return the selected SubConn.
subConns []balancer.SubConn
mu sync.Mutex
next int
}
func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
if len(p.subConns) <= 0 {
return nil, nil, balancer.ErrNoSubConnAvailable
}
p.mu.Lock()
sc := p.subConns[p.next]
p.next = (p.next + 1) % len(p.subConns)
p.mu.Unlock()
return sc, nil, nil
}
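Clients select this balancer with the WithBalancerName dial option introduced below; a hedged usage sketch (the target is a placeholder):

conn, err := grpc.Dial(
	"dns:///greeter.example.com:50051", // hypothetical target
	grpc.WithInsecure(),
	grpc.WithBalancerName(roundrobin.Name), // registered by this package's init
)
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer conn.Close()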
......@@ -19,6 +19,7 @@
package grpc
import (
"fmt"
"sync"
"google.golang.org/grpc/balancer"
......@@ -73,7 +74,7 @@ func (b *scStateUpdateBuffer) load() {
}
}
// get returns the channel that receives a recvMsg in the buffer.
// get returns the channel that the scStateUpdate will be sent to.
//
// Upon receiving, the caller should call load to send another
// scStateChangeTuple onto the channel if there is any.
......@@ -96,6 +97,9 @@ type ccBalancerWrapper struct {
stateChangeQueue *scStateUpdateBuffer
resolverUpdateCh chan *resolverUpdate
done chan struct{}
mu sync.Mutex
subConns map[*acBalancerWrapper]struct{}
}
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
......@@ -104,6 +108,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
stateChangeQueue: newSCStateUpdateBuffer(),
resolverUpdateCh: make(chan *resolverUpdate, 1),
done: make(chan struct{}),
subConns: make(map[*acBalancerWrapper]struct{}),
}
go ccb.watcher()
ccb.balancer = b.Build(ccb, bopts)
......@@ -117,8 +122,20 @@ func (ccb *ccBalancerWrapper) watcher() {
select {
case t := <-ccb.stateChangeQueue.get():
ccb.stateChangeQueue.load()
select {
case <-ccb.done:
ccb.balancer.Close()
return
default:
}
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
case t := <-ccb.resolverUpdateCh:
select {
case <-ccb.done:
ccb.balancer.Close()
return
default:
}
ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
case <-ccb.done:
}
......@@ -126,6 +143,13 @@ func (ccb *ccBalancerWrapper) watcher() {
select {
case <-ccb.done:
ccb.balancer.Close()
ccb.mu.Lock()
scs := ccb.subConns
ccb.subConns = nil
ccb.mu.Unlock()
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
return
default:
}
......@@ -165,31 +189,54 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs)
if len(addrs) <= 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
}
ac, err := ccb.cc.newAddrConn(addrs)
if err != nil {
return nil, err
}
acbw := &acBalancerWrapper{ac: ac}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
ccb.subConns[acbw] = struct{}{}
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
grpclog.Infof("ccBalancerWrapper: removing subconn")
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return
}
delete(ccb.subConns, acbw)
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p)
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return
}
ccb.cc.csMgr.updateState(s)
ccb.cc.blockingpicker.updatePicker(p)
}
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
ccb.cc.resolveNow(o)
}
func (ccb *ccBalancerWrapper) Target() string {
return ccb.cc.target
}
......@@ -202,9 +249,12 @@ type acBalancerWrapper struct {
}
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs)
acbw.mu.Lock()
defer acbw.mu.Unlock()
if len(addrs) <= 0 {
acbw.ac.tearDown(errConnDrain)
return
}
if !acbw.ac.tryUpdateAddrs(addrs) {
cc := acbw.ac.cc
acbw.ac.mu.Lock()
......@@ -228,9 +278,11 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
return
}
acbw.ac = ac
ac.mu.Lock()
ac.acbw = acbw
ac.mu.Unlock()
if acState != connectivity.Idle {
ac.connect(false)
ac.connect()
}
}
}
......@@ -238,7 +290,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
func (acbw *acBalancerWrapper) Connect() {
acbw.mu.Lock()
defer acbw.mu.Unlock()
acbw.ac.connect(false)
acbw.ac.connect()
}
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
......
......@@ -19,6 +19,7 @@
package grpc
import (
"strings"
"sync"
"golang.org/x/net/context"
......@@ -27,6 +28,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
type balancerWrapperBuilder struct {
......@@ -34,20 +36,27 @@ type balancerWrapperBuilder struct {
}
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
bwb.b.Start(cc.Target(), BalancerConfig{
targetAddr := cc.Target()
targetSplitted := strings.Split(targetAddr, ":///")
if len(targetSplitted) >= 2 {
targetAddr = targetSplitted[1]
}
bwb.b.Start(targetAddr, BalancerConfig{
DialCreds: opts.DialCreds,
Dialer: opts.Dialer,
})
_, pickfirst := bwb.b.(*pickFirst)
bw := &balancerWrapper{
balancer: bwb.b,
pickfirst: pickfirst,
cc: cc,
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
csEvltr: &connectivityStateEvaluator{},
state: connectivity.Idle,
balancer: bwb.b,
pickfirst: pickfirst,
cc: cc,
targetAddr: targetAddr,
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
csEvltr: &connectivityStateEvaluator{},
state: connectivity.Idle,
}
cc.UpdateBalancerState(connectivity.Idle, bw)
go bw.lbWatcher()
......@@ -68,7 +77,8 @@ type balancerWrapper struct {
balancer Balancer // The v1 balancer.
pickfirst bool
cc balancer.ClientConn
cc balancer.ClientConn
targetAddr string // Target without the scheme.
// To aggregate the connectivity state.
csEvltr *connectivityStateEvaluator
......@@ -88,12 +98,11 @@ type balancerWrapper struct {
// connections accordingly.
func (bw *balancerWrapper) lbWatcher() {
<-bw.startCh
grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst)
notifyCh := bw.balancer.Notify()
if notifyCh == nil {
// There's no resolver in the balancer. Connect directly.
a := resolver.Address{
Addr: bw.cc.Target(),
Addr: bw.targetAddr,
Type: resolver.Backend,
}
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
......@@ -103,7 +112,7 @@ func (bw *balancerWrapper) lbWatcher() {
bw.mu.Lock()
bw.conns[a] = sc
bw.connSt[sc] = &scState{
addr: Address{Addr: bw.cc.Target()},
addr: Address{Addr: bw.targetAddr},
s: connectivity.Idle,
}
bw.mu.Unlock()
......@@ -165,10 +174,10 @@ func (bw *balancerWrapper) lbWatcher() {
sc.Connect()
}
} else {
oldSC.UpdateAddresses(newAddrs)
bw.mu.Lock()
bw.connSt[oldSC].addr = addrs[0]
bw.mu.Unlock()
oldSC.UpdateAddresses(newAddrs)
}
} else {
var (
......@@ -221,7 +230,6 @@ func (bw *balancerWrapper) lbWatcher() {
}
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s)
bw.mu.Lock()
defer bw.mu.Unlock()
scSt, ok := bw.connSt[sc]
......@@ -310,12 +318,12 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
Metadata: a.Metadata,
}]
if !ok && failfast {
return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
}
if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
// If the returned sc is not ready and RPC is failfast,
// return error, and this RPC will fail.
return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
}
}
......
......@@ -19,7 +19,6 @@
package grpc
import (
"bytes"
"io"
"time"
......@@ -27,6 +26,7 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
......@@ -60,9 +60,19 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
for {
if c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
// Set dc if it exists and matches the message compression type used,
// otherwise set comp if a registered compressor exists for it.
var comp encoding.Compressor
var dc Decompressor
if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
dc = dopts.dc
} else if rc != "" && rc != encoding.Identity {
comp = encoding.GetCompressor(rc)
}
if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
if err == io.EOF {
break
}
......@@ -89,26 +99,33 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
}
}()
var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if compressor != nil {
cbuf = new(bytes.Buffer)
}
if dopts.copts.StatsHandler != nil {
outPayload = &stats.OutPayload{
Client: true,
}
}
hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
// Set comp and clear compressor if a registered compressor matches the type
// specified via UseCompressor. (And error if a matching compressor is not
// registered.)
var comp encoding.Compressor
if ct := c.compressorType; ct != "" && ct != encoding.Identity {
compressor = nil // Disable the legacy compressor.
comp = encoding.GetCompressor(ct)
if comp == nil {
return status.Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
}
}
hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
if err != nil {
return err
}
if c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(data) > *c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
}
err = t.Write(stream, hdr, data, opts)
if err == nil && outPayload != nil {
......@@ -125,16 +142,23 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
return nil
}
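The compressor lookup above is driven by the new UseCompressor call option; a hedged sketch, assuming a compressor named "gzip" was registered beforehand via encoding.RegisterCompressor (the method path and messages are placeholders):

// Fails with codes.Internal if no "gzip" compressor is registered.
err := conn.Invoke(ctx, "/pkg.Service/Method", req, resp, grpc.UseCompressor("gzip"))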
// Invoke sends the RPC request on the wire and returns after response is received.
// Invoke is called by generated code. Also users can call Invoke directly when it
// is really needed in their use cases.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
// Invoke sends the RPC request on the wire and returns after response is
// received. This is typically called by generated code.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
if cc.dopts.unaryInt != nil {
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
}
return invoke(ctx, method, args, reply, cc, opts...)
}
// Invoke sends the RPC request on the wire and returns after response is
// received. This is typically called by generated code.
//
// DEPRECATED: Use ClientConn.Invoke instead.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
return cc.Invoke(ctx, method, args, reply, opts...)
}
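A minimal sketch of calling the new method directly; the method path and message types stand in for what generated code normally supplies:

in := &pb.HelloRequest{Name: "gopher"} // hypothetical proto message
out := new(pb.HelloReply)
if err := conn.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out); err != nil {
	log.Fatalf("invoke: %v", err)
}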
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo()
mc := cc.GetMethodConfig(method)
......@@ -202,57 +226,45 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
Last: true,
Delay: false,
}
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
}
if c.creds != nil {
callHdr.Creds = c.creds
}
if c.compressorType != "" {
callHdr.SendCompress = c.compressorType
} else if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
firstAttempt := true
for {
var (
err error
t transport.ClientTransport
stream *transport.Stream
// Record the done handler from Balancer.Get(...). It is called once the
// RPC has completed or failed.
done func(balancer.DoneInfo)
)
// TODO(zhaoq): Need a formal spec of fail-fast.
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
}
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
// Check whether the context has expired. This will prevent us from
// looping forever if an error occurs for wait-for-ready RPCs where no data
// is sent on the wire.
select {
case <-ctx.Done():
return toRPCErr(ctx.Err())
default:
}
t, done, err = cc.getTransport(ctx, c.failFast)
// Record the done handler from Balancer.Get(...). It is called once the
// RPC has completed or failed.
t, done, err := cc.getTransport(ctx, c.failFast)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := status.FromError(err); ok {
return err
}
if err == errConnClosing || err == errConnUnavailable {
if c.failFast {
return Errorf(codes.Unavailable, "%v", err)
}
continue
}
// All the other errors are treated as Internal errors.
return Errorf(codes.Internal, "%v", err)
return err
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = t.NewStream(ctx, callHdr)
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
if done != nil {
if _, ok := err.(transport.ConnectionError); ok {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
done(balancer.DoneInfo{Err: err})
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
// In the event of any error from NewStream, we never attempted to write
// anything to the wire, so we can retry indefinitely for non-fail-fast
// RPCs.
if !c.failFast {
continue
}
return toRPCErr(err)
......@@ -260,34 +272,51 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if peer, ok := peer.FromContext(stream.Context()); ok {
c.peer = peer
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
if err != nil {
if done != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
done(balancer.DoneInfo{
Err: err,
BytesSent: true,
BytesReceived: stream.BytesReceived(),
})
done(balancer.DoneInfo{Err: err})
}
// Retry a non-failfast RPC when
// i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
// i) the server started to drain before this RPC was initiated.
// ii) the server refused the stream.
if !c.failFast && stream.Unprocessed() {
// In this case, the server did not receive the data, but we still
// created wire traffic, so we should not retry indefinitely.
if firstAttempt {
// TODO: Add a field to header for grpc-transparent-retry-attempts
firstAttempt = false
continue
}
// Otherwise, give up and return an error anyway.
}
return toRPCErr(err)
}
err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
if err != nil {
if done != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
done(balancer.DoneInfo{
Err: err,
BytesSent: true,
BytesReceived: stream.BytesReceived(),
})
done(balancer.DoneInfo{Err: err})
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
if !c.failFast && stream.Unprocessed() {
// In these cases, the server did not receive the data, but we still
// created wire traffic, so we should not retry indefinitely.
if firstAttempt {
// TODO: Add a field to header for grpc-transparent-retry-attempts
firstAttempt = false
continue
}
// Otherwise, give up and return an error anyway.
}
return toRPCErr(err)
}
......@@ -295,13 +324,23 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
}
t.CloseStream(stream, nil)
err = stream.Status().Err()
if done != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
done(balancer.DoneInfo{
Err: err,
BytesSent: true,
BytesReceived: stream.BytesReceived(),
})
done(balancer.DoneInfo{Err: err})
}
return stream.Status().Err()
if !c.failFast && stream.Unprocessed() {
// In these cases, the server did not receive the data, but we still
// created wire traffic, so we should not retry indefinitely.
if firstAttempt {
// TODO: Add a field to header for grpc-transparent-retry-attempts
firstAttempt = false
continue
}
}
return err
}
}
......@@ -31,11 +31,14 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
......@@ -48,7 +51,20 @@ var (
// underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
// The following errors are returned from Dial and DialContext
var (
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
......@@ -62,16 +78,6 @@ var (
errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
// errNetworkIO indicates that the connection is down due to some network I/O error.
errNetworkIO = errors.New("grpc: failed with network I/O error")
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
// dialOptions configure a Dial call. dialOptions are set by the DialOption
......@@ -89,8 +95,14 @@ type dialOptions struct {
scChan <-chan ServiceConfig
copts transport.ConnectOptions
callOptions []CallOption
// This is to support v1 balancer.
// This is used by v1 balancer dial option WithBalancer to support v1
// balancer, and also by WithBalancerName dial option.
balancerBuilder balancer.Builder
// This is to support grpclb.
resolverBuilder resolver.Builder
// Custom user options for resolver.Build.
resolverBuildUserOptions interface{}
waitForHandshake bool
}
const (
......@@ -101,6 +113,15 @@ const (
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
// WithWaitForHandshake blocks until the initial settings frame is received from the
// server before assigning RPCs to the connection.
// Experimental API.
func WithWaitForHandshake() DialOption {
return func(o *dialOptions) {
o.waitForHandshake = true
}
}
// WithWriteBufferSize lets you set the size of the write buffer; this determines how much data can be batched
// before doing a write on the wire.
func WithWriteBufferSize(s int) DialOption {
......@@ -152,16 +173,26 @@ func WithCodec(c Codec) DialOption {
}
}
// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
// compressor.
// WithCompressor returns a DialOption which sets a Compressor to use for
// message compression. It has lower priority than the compressor set by
// the UseCompressor CallOption.
//
// Deprecated: use UseCompressor instead.
func WithCompressor(cp Compressor) DialOption {
return func(o *dialOptions) {
o.cp = cp
}
}
// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
// message decompressor.
// WithDecompressor returns a DialOption which sets a Decompressor to use for
// incoming message decompression. If incoming response messages are encoded
// using the decompressor's Type(), it will be used. Otherwise, the message
// encoding will be used to look up the compressor registered via
// encoding.RegisterCompressor, which will then be used to decompress the
// message. If no compressor is registered for the encoding, an Unimplemented
// status error will be returned.
//
// Deprecated: use encoding.RegisterCompressor instead.
func WithDecompressor(dc Decompressor) DialOption {
return func(o *dialOptions) {
o.dc = dc
......@@ -170,7 +201,8 @@ func WithDecompressor(dc Decompressor) DialOption {
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
// Name resolver will be ignored if this DialOption is specified.
// Deprecated: use the new balancer APIs in balancer package instead.
//
// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
func WithBalancer(b Balancer) DialOption {
return func(o *dialOptions) {
o.balancerBuilder = &balancerWrapperBuilder{
......@@ -179,16 +211,42 @@ func WithBalancer(b Balancer) DialOption {
}
}
// WithBalancerBuilder is for testing only. Users using custom balancers should
// register their balancer and use service config to choose the balancer to use.
func WithBalancerBuilder(b balancer.Builder) DialOption {
// TODO(bar) remove this when switching balancer is done.
// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
//
// The balancer cannot be overridden by balancer option specified by service
// config.
//
// This is an EXPERIMENTAL API.
func WithBalancerName(balancerName string) DialOption {
builder := balancer.Get(balancerName)
if builder == nil {
panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
}
return func(o *dialOptions) {
o.balancerBuilder = builder
}
}
// withResolverBuilder is only for grpclb.
func withResolverBuilder(b resolver.Builder) DialOption {
return func(o *dialOptions) {
o.balancerBuilder = b
o.resolverBuilder = b
}
}
// WithResolverUserOptions returns a DialOption which sets the UserOptions
// field of resolver's BuildOption.
func WithResolverUserOptions(userOpt interface{}) DialOption {
return func(o *dialOptions) {
o.resolverBuildUserOptions = userOpt
}
}
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
// DEPRECATED: service config should be received through name resolver, as specified here.
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
return func(o *dialOptions) {
o.scChan = c
......@@ -213,7 +271,7 @@ func WithBackoffConfig(b BackoffConfig) DialOption {
return withBackoff(b)
}
// withBackoff sets the backoff strategy used for retries after a
// withBackoff sets the backoff strategy used for connectRetryNum after a
// failed connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
......@@ -265,18 +323,23 @@ func WithTimeout(d time.Duration) DialOption {
}
}
func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
return func(o *dialOptions) {
o.copts.Dialer = f
}
}
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
// Temporary() method to decide if it should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
return func(o *dialOptions) {
o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
return withContextDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return f(addr, deadline.Sub(time.Now()))
}
return f(addr, 0)
}
}
})
}
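A hedged example of supplying a custom dialer through this option (the socket path is a placeholder):

conn, err := grpc.Dial(
	"unused-target", // the dialer below ignores the address
	grpc.WithInsecure(),
	grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", "/var/run/app.sock", timeout)
	}),
)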
// WithStatsHandler returns a DialOption that specifies the stats handler
......@@ -378,7 +441,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
return dialContext(ctx, "tcp", addr)
},
)
}
......@@ -426,51 +489,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if cc.dopts.bs == nil {
cc.dopts.bs = DefaultBackoffConfig
}
cc.parsedTarget = parseTarget(cc.target)
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else {
cc.authority = target
// Use endpoint from "scheme://authority/endpoint" as the default
// authority for ClientConn.
cc.authority = cc.parsedTarget.Endpoint
}
if cc.dopts.balancerBuilder != nil {
var credsClone credentials.TransportCredentials
if creds != nil {
credsClone = creds.Clone()
}
buildOpts := balancer.BuildOptions{
DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
}
// Build should not take long time. So it's ok to not have a goroutine for it.
// TODO(bar) init balancer after first resolver result to support service config balancer.
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts)
} else {
waitC := make(chan error, 1)
go func() {
defer close(waitC)
// No balancer, or no resolver within the balancer. Connect directly.
ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}})
if err != nil {
waitC <- err
return
}
if err := ac.connect(cc.dopts.block); err != nil {
waitC <- err
return
}
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case err := <-waitC:
if err != nil {
return nil, err
}
}
}
if cc.dopts.scChan != nil && !scSet {
// Blocking wait for the initial service config.
select {
......@@ -486,19 +516,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
go cc.scWatcher()
}
var credsClone credentials.TransportCredentials
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
credsClone = creds.Clone()
}
cc.balancerBuildOpts = balancer.BuildOptions{
DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
}
// Build the resolver.
cc.resolverWrapper, err = newCCResolverWrapper(cc)
if err != nil {
return nil, fmt.Errorf("failed to build resolver: %v", err)
}
if cc.balancerWrapper != nil && cc.resolverWrapper == nil {
// TODO(bar) there should always be a resolver (DNS as the default).
// Unblock balancer initialization with a fake resolver update if there's no resolver.
// The balancer wrapper will not read the addresses, so an empty list works.
// TODO(bar) remove this after the real resolver is started.
cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil)
}
// Start the resolver wrapper goroutine after resolverWrapper is created.
//
// If the goroutine is started before resolverWrapper is ready, the
// following may happen: The goroutine sends updates to cc. cc forwards
// those to balancer. Balancer creates new addrConn. addrConn fails to
// connect, and calls resolveNow(). resolveNow() tries to use the non-ready
// resolverWrapper.
cc.resolverWrapper.start()
// A blocking dial blocks until the clientConn is ready.
if cc.dopts.block {
......@@ -565,21 +604,26 @@ type ClientConn struct {
ctx context.Context
cancel context.CancelFunc
target string
authority string
dopts dialOptions
csMgr *connectivityStateManager
balancerWrapper *ccBalancerWrapper
resolverWrapper *ccResolverWrapper
target string
parsedTarget resolver.Target
authority string
dopts dialOptions
csMgr *connectivityStateManager
blockingpicker *pickerWrapper
balancerBuildOpts balancer.BuildOptions
resolverWrapper *ccResolverWrapper
blockingpicker *pickerWrapper
mu sync.RWMutex
sc ServiceConfig
scRaw string
conns map[*addrConn]struct{}
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
mkp keepalive.ClientParameters
curBalancerName string
preBalancerName string // previous balancer name.
curAddresses []resolver.Address
balancerWrapper *ccBalancerWrapper
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
......@@ -615,6 +659,7 @@ func (cc *ClientConn) scWatcher() {
// TODO: load balance policy runtime change is ignored.
// We may revisit this decision in the future.
cc.sc = sc
cc.scRaw = ""
cc.mu.Unlock()
case <-cc.ctx.Done():
return
......@@ -622,7 +667,113 @@ func (cc *ClientConn) scWatcher() {
}
}
func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.conns == nil {
// cc was closed.
return
}
if reflect.DeepEqual(cc.curAddresses, addrs) {
return
}
cc.curAddresses = addrs
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var isGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
var newBalancerName string
if isGRPCLB {
newBalancerName = grpclbName
} else {
// Address list doesn't contain grpclb address. Try to pick a
// non-grpclb balancer.
newBalancerName = cc.curBalancerName
// If current balancer is grpclb, switch to the previous one.
if newBalancerName == grpclbName {
newBalancerName = cc.preBalancerName
}
// The following could be true in two cases:
// - the first time handling resolved addresses
// (curBalancerName="")
// - the first time handling non-grpclb addresses
// (curBalancerName="grpclb", preBalancerName="")
if newBalancerName == "" {
newBalancerName = PickFirstBalancerName
}
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
// Balancer dial option was set, and this is the first time handling
// resolved addresses. Build a balancer with dopts.balancerBuilder.
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
}
// switchBalancer starts the switching from current balancer to the balancer
// with the given name.
//
// It will NOT send the current address list to the new balancer. If needed,
// caller of this function should send address list to the new balancer after
// this function returns.
//
// Caller must hold cc.mu.
func (cc *ClientConn) switchBalancer(name string) {
if cc.conns == nil {
return
}
if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
return
}
grpclog.Infof("ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil {
grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
return
}
// TODO(bar switching) change this to two steps: drain and close.
// Keep track of sc in wrapper.
if cc.balancerWrapper != nil {
cc.balancerWrapper.close()
}
builder := balancer.Get(name)
if builder == nil {
grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
}
cc.preBalancerName = cc.curBalancerName
cc.curBalancerName = builder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
}
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return
}
// TODO(bar switching) send updates to all balancer wrappers when balancer
// gracefully switching is supported.
cc.balancerWrapper.handleSubConnStateChange(sc, s)
cc.mu.Unlock()
}
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
ac := &addrConn{
cc: cc,
......@@ -659,7 +810,7 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
// It does nothing if the ac is not IDLE.
// TODO(bar) Move this to the addrConn section.
// This was part of resetAddrConn, keep it here to make the diff look clean.
func (ac *addrConn) connect(block bool) error {
func (ac *addrConn) connect() error {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
......@@ -670,39 +821,21 @@ func (ac *addrConn) connect(block bool) error {
return nil
}
ac.state = connectivity.Connecting
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
}
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.mu.Unlock()
if block {
// Start a goroutine connecting to the server asynchronously.
go func() {
if err := ac.resetTransport(); err != nil {
grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err)
}
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
return e.Origin()
}
return err
return
}
// Start to monitor the error status of transport.
go ac.transportMonitor()
} else {
// Start a goroutine connecting to the server asynchronously.
go func() {
if err := ac.resetTransport(); err != nil {
grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
if err != errConnClosing {
// Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err)
}
return
}
ac.transportMonitor()
}()
}
ac.transportMonitor()
}()
return nil
}
......@@ -731,6 +864,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
}
return curAddrFound
......@@ -756,31 +890,6 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
if cc.balancerWrapper == nil {
// If balancer is nil, there should be only one addrConn available.
cc.mu.RLock()
if cc.conns == nil {
cc.mu.RUnlock()
// TODO this function returns toRPCErr and non-toRPCErr. Clean up
// the errors in ClientConn.
return nil, nil, toRPCErr(ErrClientConnClosing)
}
var ac *addrConn
for ac = range cc.conns {
// Break after the first iteration to get the first addrConn.
break
}
cc.mu.RUnlock()
if ac == nil {
return nil, nil, errConnClosing
}
t, err := ac.wait(ctx, false /*hasBalancer*/, failfast)
if err != nil {
return nil, nil, err
}
return t, nil, nil
}
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
if err != nil {
return nil, nil, toRPCErr(err)
......@@ -788,6 +897,43 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transpor
return t, done, nil
}
// handleServiceConfig parses the service config string in JSON format into the Go native
// struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
func (cc *ClientConn) handleServiceConfig(js string) error {
sc, err := parseServiceConfig(js)
if err != nil {
return err
}
cc.mu.Lock()
cc.scRaw = js
cc.sc = sc
if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
if cc.curBalancerName == grpclbName {
// If current balancer is grpclb, there's at least one grpclb
// balancer address in the resolved list. Don't switch the balancer,
// but change the previous balancer name, so if a new resolved
// address list doesn't contain grpclb address, balancer will be
// switched to *sc.LB.
cc.preBalancerName = *sc.LB
} else {
cc.switchBalancer(*sc.LB)
cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
}
}
cc.mu.Unlock()
return nil
}
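A hedged illustration of the config that drives the switch above; the JSON key follows the service config spec linked earlier, and any policy except "grpclb" is accepted here:

const js = `{"loadBalancingPolicy": "round_robin"}` // hypothetical resolver-provided config
if err := cc.handleServiceConfig(js); err != nil {
	grpclog.Warningf("invalid service config: %v", err)
}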
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
cc.mu.Lock()
r := cc.resolverWrapper
cc.mu.Unlock()
if r == nil {
return
}
go r.resolveNow(o)
}
// Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error {
cc.cancel()
......@@ -800,13 +946,18 @@ func (cc *ClientConn) Close() error {
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
rWrapper := cc.resolverWrapper
cc.resolverWrapper = nil
bWrapper := cc.balancerWrapper
cc.balancerWrapper = nil
cc.mu.Unlock()
cc.blockingpicker.close()
if cc.resolverWrapper != nil {
cc.resolverWrapper.close()
if rWrapper != nil {
rWrapper.close()
}
if cc.balancerWrapper != nil {
cc.balancerWrapper.close()
if bWrapper != nil {
bWrapper.close()
}
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
......@@ -819,15 +970,16 @@ type addrConn struct {
ctx context.Context
cancel context.CancelFunc
cc *ClientConn
curAddr resolver.Address
addrs []resolver.Address
dopts dialOptions
events trace.EventLog
acbw balancer.SubConn
cc *ClientConn
addrs []resolver.Address
dopts dialOptions
events trace.EventLog
acbw balancer.SubConn
mu sync.Mutex
state connectivity.State
mu sync.Mutex
curAddr resolver.Address
reconnectIdx int // The index in addrs list to start reconnecting from.
state connectivity.State
// ready is closed and becomes nil when a new transport is up or failed
// due to timeout.
ready chan struct{}
......@@ -835,13 +987,21 @@ type addrConn struct {
// The reason this addrConn is torn down.
tearDownErr error
connectRetryNum int
// backoffDeadline is the time until which resetTransport needs to
// wait before increasing connectRetryNum count.
backoffDeadline time.Time
// connectDeadline is the time by which all connection
// negotiations must complete.
connectDeadline time.Time
}
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
case transport.GoAwayTooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
......@@ -869,6 +1029,15 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
// resetTransport recreates a transport to the address for ac. The old
// transport will close itself on error or when the clientconn is closed.
// The created transport must receive the initial settings frame from the
// server. If that doesn't happen, transportMonitor will kill the newly
// created transport after connectDeadline has expired.
// If there was an error on the transport before the settings frame was
// received, resetTransport resumes connecting to backends after the one
// that was previously connected to. If the end of the list is reached,
// resetTransport backs off until the original deadline.
// If the DialOption WithWaitForHandshake was set, resetTransport returns
// successfully only after the server settings are received.
//
// TODO(bar) make sure all state transitions are valid.
func (ac *addrConn) resetTransport() error {
......@@ -882,19 +1051,38 @@ func (ac *addrConn) resetTransport() error {
ac.ready = nil
}
ac.transport = nil
ac.curAddr = resolver.Address{}
ridx := ac.reconnectIdx
ac.mu.Unlock()
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
ac.cc.mu.RUnlock()
for retries := 0; ; retries++ {
sleepTime := ac.dopts.bs.backoff(retries)
timeout := minConnectTimeout
var backoffDeadline, connectDeadline time.Time
for connectRetryNum := 0; ; connectRetryNum++ {
ac.mu.Lock()
if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) {
timeout = time.Duration(int(sleepTime) / len(ac.addrs))
if ac.backoffDeadline.IsZero() {
// This means either a successful HTTP2 connection was established
// or this is the first time this addrConn is trying to establish a
// connection.
backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
// This will be the duration that dial gets to finish.
dialDuration := minConnectTimeout
if backoffFor > dialDuration {
// Give dial more time as we keep failing to connect.
dialDuration = backoffFor
}
start := time.Now()
backoffDeadline = start.Add(backoffFor)
connectDeadline = start.Add(dialDuration)
ridx = 0 // Start connecting from the beginning.
} else {
// Continue trying to connect with the same deadlines.
connectRetryNum = ac.connectRetryNum
backoffDeadline = ac.backoffDeadline
connectDeadline = ac.connectDeadline
ac.backoffDeadline = time.Time{}
ac.connectDeadline = time.Time{}
ac.connectRetryNum = 0
}
connectTime := time.Now()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return errConnClosing
......@@ -902,106 +1090,166 @@ func (ac *addrConn) resetTransport() error {
ac.printf("connecting")
if ac.state != connectivity.Connecting {
ac.state = connectivity.Connecting
// TODO(bar) remove condition once we always have a balancer.
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
}
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
// copy ac.addrs in case of race
addrsIter := make([]resolver.Address, len(ac.addrs))
copy(addrsIter, ac.addrs)
copts := ac.dopts.copts
ac.mu.Unlock()
for _, addr := range addrsIter {
connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
if err != nil {
return err
}
if connected {
return nil
}
}
}
// createTransport creates a connection to one of the backends in addrs.
// It returns true if a connection was established.
func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
for i := ridx; i < len(addrs); i++ {
addr := addrs[i]
target := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
Authority: ac.cc.authority,
}
done := make(chan struct{})
onPrefaceReceipt := func() {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return errConnClosing
close(done)
if !ac.backoffDeadline.IsZero() {
// If we haven't already started reconnecting to
// other backends, clear the reconnect state.
// Note: this can happen when the writer notices an error
// and triggers resetTransport while, at the same time, the
// reader receives the preface and invokes this closure.
ac.backoffDeadline = time.Time{}
ac.connectDeadline = time.Time{}
ac.connectRetryNum = 0
}
ac.mu.Unlock()
sinfo := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
}
newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout)
if err != nil {
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
return err
}
grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr)
}
// Do not cancel in the success path because of
// this issue in Go1.6: https://github.com/golang/go/issues/15078.
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
if err != nil {
cancel()
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
return errConnClosing
if ac.state != connectivity.Shutdown {
ac.state = connectivity.TransientFailure
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
ac.mu.Unlock()
continue
return false, err
}
ac.mu.Lock()
ac.printf("ready")
if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
newTransport.Close()
return errConnClosing
}
ac.state = connectivity.Ready
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
}
t := ac.transport
ac.transport = newTransport
if t != nil {
t.Close()
}
ac.curAddr = addr
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
return false, errConnClosing
}
ac.mu.Unlock()
return nil
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
continue
}
if ac.dopts.waitForHandshake {
select {
case <-done:
case <-connectCtx.Done():
// Didn't receive server preface, must kill this new transport now.
grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
newTr.Close()
break
case <-ac.ctx.Done():
}
}
ac.mu.Lock()
ac.state = connectivity.TransientFailure
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
// ac.tearDown(...) has been invoked.
newTr.Close()
return false, errConnClosing
}
ac.printf("ready")
ac.state = connectivity.Ready
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.transport = newTr
ac.curAddr = addr
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
ac.mu.Unlock()
timer := time.NewTimer(sleepTime - time.Since(connectTime))
select {
case <-timer.C:
case <-ac.ctx.Done():
timer.Stop()
return ac.ctx.Err()
case <-done:
// If the server has already responded with the preface,
// don't set the reconnect parameters.
default:
ac.connectRetryNum = connectRetryNum
ac.backoffDeadline = backoffDeadline
ac.connectDeadline = connectDeadline
ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
}
ac.mu.Unlock()
return true, nil
}
ac.mu.Lock()
ac.state = connectivity.TransientFailure
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.cc.resolveNow(resolver.ResolveNowOption{})
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
ac.mu.Unlock()
timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
select {
case <-timer.C:
case <-ac.ctx.Done():
timer.Stop()
return false, ac.ctx.Err()
}
return false, nil
}
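// Illustrative sketch (not part of this change) of how the two deadlines
// used above relate for a single retry, assuming the backoff strategy bs
// and minConnectTimeout defined elsewhere in this package:
//
//	backoffFor := bs.backoff(retries) // grows with each retry
//	dialDuration := minConnectTimeout
//	if backoffFor > dialDuration {
//		dialDuration = backoffFor // dial gets at least the backoff window
//	}
//	start := time.Now()
//	backoffDeadline := start.Add(backoffFor)   // when the next retry may begin
//	connectDeadline := start.Add(dialDuration) // when the handshake must be done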
// transportMonitor runs in a goroutine to track errors on the transport and
// recreate the transport if an error occurs. It returns when the channel is closing.
func (ac *addrConn) transportMonitor() {
for {
var timer *time.Timer
var cdeadline <-chan time.Time
ac.mu.Lock()
t := ac.transport
if !ac.connectDeadline.IsZero() {
timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
cdeadline = timer.C
}
ac.mu.Unlock()
// Block until we receive a goaway or an error occurs.
select {
case <-t.GoAway():
case <-t.Error():
case <-cdeadline:
ac.mu.Lock()
// This implies that the client received the server preface.
if ac.backoffDeadline.IsZero() {
ac.mu.Unlock()
continue
}
ac.mu.Unlock()
timer = nil
// No server preface received until deadline.
// Kill the connection.
grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
t.Close()
}
if timer != nil {
timer.Stop()
}
// If a GoAway happened, regardless of error, adjust our keepalive
// parameters as appropriate.
......@@ -1011,14 +1259,15 @@ func (ac *addrConn) transportMonitor() {
default:
}
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return
}
// Set connectivity state to TransientFailure before calling
// resetTransport. Transition READY->CONNECTING is not valid.
ac.state = connectivity.TransientFailure
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
}
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.cc.resolveNow(resolver.ResolveNowOption{})
ac.curAddr = resolver.Address{}
ac.mu.Unlock()
if err := ac.resetTransport(); err != nil {
......@@ -1092,7 +1341,7 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
ac.mu.Unlock()
// Trigger idle ac to connect.
if idle {
ac.connect(false)
ac.connect()
}
return nil, false
}
......@@ -1105,8 +1354,11 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
func (ac *addrConn) tearDown(err error) {
ac.cancel()
ac.mu.Lock()
ac.curAddr = resolver.Address{}
defer ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
return
}
ac.curAddr = resolver.Address{}
if err == errConnDrain && ac.transport != nil {
// GracefulClose(...) may be executed multiple times when
// i) receiving multiple GoAway frames from the server; or
......@@ -1114,16 +1366,9 @@ func (ac *addrConn) tearDown(err error) {
// address removal and GoAway.
ac.transport.GracefulClose()
}
if ac.state == connectivity.Shutdown {
return
}
ac.state = connectivity.Shutdown
ac.tearDownErr = err
if ac.cc.balancerWrapper != nil {
ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
} else {
ac.cc.csMgr.updateState(ac.state)
}
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
if ac.events != nil {
ac.events.Finish()
ac.events = nil
......
......@@ -69,6 +69,11 @@ func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error
}
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
if pm, ok := v.(proto.Marshaler); ok {
// object can marshal itself, no need for buffer
return pm.Marshal()
}
cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := p.marshal(v, cb)
......@@ -79,10 +84,17 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
}
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
protoMsg := v.(proto.Message)
protoMsg.Reset()
if pu, ok := protoMsg.(proto.Unmarshaler); ok {
// object can unmarshal itself, no need for buffer
return pu.Unmarshal(data)
}
cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
v.(proto.Message).Reset()
err := cb.Unmarshal(v.(proto.Message))
err := cb.Unmarshal(protoMsg)
cb.SetBuf(nil)
protoBufferPool.Put(cb)
return err
......@@ -92,13 +104,11 @@ func (protoCodec) String() string {
return "proto"
}
var (
protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
)
var protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
},
}
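// Illustrative sketch (not part of this change): the fast paths above are
// taken by any type implementing proto.Marshaler/proto.Unmarshaler, e.g. a
// hypothetical message with hand-written marshaling:
//
//	type fastMsg struct{ payload []byte }
//
//	func (m *fastMsg) Reset()         { m.payload = nil }
//	func (m *fastMsg) String() string { return string(m.payload) }
//	func (m *fastMsg) ProtoMessage()  {}
//
//	func (m *fastMsg) Marshal() ([]byte, error) { return m.payload, nil }
//	func (m *fastMsg) Unmarshal(b []byte) error {
//		m.payload = append([]byte(nil), b...)
//		return nil
//	}
//
// protoCodec.Marshal/Unmarshal would then bypass the buffer pool entirely.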
// Code generated by "stringer -type=Code"; DO NOT EDIT.
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package codes
import "fmt"
import "strconv"
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
func (i Code) String() string {
if i >= Code(len(_Code_index)-1) {
return fmt.Sprintf("Code(%d)", i)
func (c Code) String() string {
switch c {
case OK:
return "OK"
case Canceled:
return "Canceled"
case Unknown:
return "Unknown"
case InvalidArgument:
return "InvalidArgument"
case DeadlineExceeded:
return "DeadlineExceeded"
case NotFound:
return "NotFound"
case AlreadyExists:
return "AlreadyExists"
case PermissionDenied:
return "PermissionDenied"
case ResourceExhausted:
return "ResourceExhausted"
case FailedPrecondition:
return "FailedPrecondition"
case Aborted:
return "Aborted"
case OutOfRange:
return "OutOfRange"
case Unimplemented:
return "Unimplemented"
case Internal:
return "Internal"
case Unavailable:
return "Unavailable"
case DataLoss:
return "DataLoss"
case Unauthenticated:
return "Unauthenticated"
default:
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
return _Code_name[_Code_index[i]:_Code_index[i+1]]
}
......@@ -19,12 +19,13 @@
// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"
import (
"fmt"
)
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32
//go:generate stringer -type=Code
const (
// OK is returned on success.
OK Code = 0
......@@ -142,3 +143,41 @@ const (
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
)
var strToCode = map[string]Code{
`"OK"`: OK,
`"CANCELLED"`:/* [sic] */ Canceled,
`"UNKNOWN"`: Unknown,
`"INVALID_ARGUMENT"`: InvalidArgument,
`"DEADLINE_EXCEEDED"`: DeadlineExceeded,
`"NOT_FOUND"`: NotFound,
`"ALREADY_EXISTS"`: AlreadyExists,
`"PERMISSION_DENIED"`: PermissionDenied,
`"RESOURCE_EXHAUSTED"`: ResourceExhausted,
`"FAILED_PRECONDITION"`: FailedPrecondition,
`"ABORTED"`: Aborted,
`"OUT_OF_RANGE"`: OutOfRange,
`"UNIMPLEMENTED"`: Unimplemented,
`"INTERNAL"`: Internal,
`"UNAVAILABLE"`: Unavailable,
`"DATA_LOSS"`: DataLoss,
`"UNAUTHENTICATED"`: Unauthenticated,
}
// UnmarshalJSON unmarshals b into the Code.
func (c *Code) UnmarshalJSON(b []byte) error {
// From json.Unmarshaler: By convention, to approximate the behavior of
// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
// a no-op.
if string(b) == "null" {
return nil
}
if c == nil {
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
}
if jc, ok := strToCode[string(b)]; ok {
*c = jc
return nil
}
return fmt.Errorf("invalid code: %q", string(b))
}
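// Illustrative usage (not part of this change), assuming encoding/json:
//
//	var c Code
//	_ = json.Unmarshal([]byte(`"UNAVAILABLE"`), &c) // c == Unavailable
//	_ = json.Unmarshal([]byte(`null`), &c)          // no-op by convention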
......@@ -34,10 +34,8 @@ import (
"golang.org/x/net/context"
)
var (
// alpnProtoStr are the specified application level protocols for gRPC.
alpnProtoStr = []string{"h2"}
)
// alpnProtoStr are the specified application level protocols for gRPC.
var alpnProtoStr = []string{"h2"}
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
......@@ -74,11 +72,9 @@ type AuthInfo interface {
AuthType() string
}
var (
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
)
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
......@@ -91,10 +87,14 @@ type TransportCredentials interface {
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
// Info provides the ProtocolInfo of this TransportCredentials.
Info() ProtocolInfo
......@@ -131,15 +131,15 @@ func (c tlsCreds) Info() ProtocolInfo {
}
}
func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := cloneTLSConfig(c.config)
if cfg.ServerName == "" {
colonPos := strings.LastIndex(addr, ":")
colonPos := strings.LastIndex(authority, ":")
if colonPos == -1 {
colonPos = len(addr)
colonPos = len(authority)
}
cfg.ServerName = addr[:colonPos]
cfg.ServerName = authority[:colonPos]
}
conn := tls.Client(rawConn, cfg)
errChannel := make(chan error, 1)
......
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package encoding defines the interface for the compressor and the
// functions to register and retrieve compressors.
// This package is EXPERIMENTAL.
package encoding
import (
"io"
)
var registerCompressor = make(map[string]Compressor)
// Compressor is used for compressing and decompressing when sending or receiving messages.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an error
// occurs while initializing the compressor, that error is returned instead.
Compress(w io.Writer) (io.WriteCloser, error)
// Decompress reads data from r, decompresses it, and provides the uncompressed data
// via the returned io.Reader. If an error occurs while initializing the decompressor, that error
// is returned instead.
Decompress(r io.Reader) (io.Reader, error)
// Name is the name of the compression codec and is used to set the content coding header.
Name() string
}
// RegisterCompressor registers the compressor with gRPC by its name. It can be activated when
// sending an RPC via grpc.UseCompressor(). It will be automatically accessed when receiving a
// message based on the content coding header. Servers also use it to send a response with the
// same encoding as the request.
//
// NOTE: this function must only be called during initialization time (i.e. in an init() function). If
// multiple Compressors are registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
registerCompressor[c.Name()] = c
}
// GetCompressor returns Compressor for the given compressor name.
func GetCompressor(name string) Compressor {
return registerCompressor[name]
}
// Identity specifies the optional encoding for uncompressed streams.
// It is intended for grpc internal use only.
const Identity = "identity"
// +build go1.6,!go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"io"
"net"
"net/http"
"os"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req.Cancel = ctx.Done()
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate errors returned by server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled:
return codes.Canceled
case context.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}
// +build go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"context"
"fmt"
"io"
"net"
"net/http"
"os"
netctx "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
return fmt.Errorf("failed to write the HTTP request: %v", err)
}
return nil
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate errors returned by server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled, netctx.Canceled:
return codes.Canceled
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}
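// For example, an io.EOF returned by a server application maps to
// codes.OutOfRange, while an error satisfying os.IsNotExist maps to
// codes.NotFound; anything unrecognized falls through to codes.Unknown.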
......@@ -19,21 +19,32 @@
package grpc
import (
"errors"
"fmt"
"math/rand"
"net"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/resolver"
)
const (
lbTokeyKey = "lb-token"
defaultFallbackTimeout = 10 * time.Second
grpclbName = "grpclb"
)
func convertDuration(d *lbpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}
// Client API for LoadBalancer service.
// Mostly copied from the generated pb.go file, to avoid a circular dependency.
......@@ -59,646 +70,273 @@ type balanceLoadClientStream struct {
ClientStream
}
func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
m := new(lbmpb.LoadBalanceResponse)
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// NewGRPCLBBalancer creates a grpclb load balancer.
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
return &grpclbBalancer{
r: r,
}
func init() {
balancer.Register(newLBBuilder())
}
type remoteBalancerInfo struct {
addr string
// the server name used for authentication with the remote LB server.
name string
// newLBBuilder creates a builder for grpclb.
func newLBBuilder() balancer.Builder {
return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
}
// grpclbAddrInfo consists of the information of a backend server.
type grpclbAddrInfo struct {
addr Address
connected bool
// dropForRateLimiting indicates whether this particular request should be
// dropped by the client for rate limiting.
dropForRateLimiting bool
// dropForLoadBalancing indicates whether this particular request should be
// dropped by the client for load balancing.
dropForLoadBalancing bool
// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
// fallbackTimeout. If no response is received from the remote balancer within
// fallbackTimeout, the backend addresses from the resolved address list will be
// used.
//
// Only call this function when a non-default fallback timeout is needed.
func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
return &lbBuilder{
fallbackTimeout: fallbackTimeout,
}
}
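// Illustrative usage (not part of this change): installing grpclb with a
// five-second fallback timeout instead of the default, before any Dial:
//
//	balancer.Register(NewLBBuilderWithFallbackTimeout(5 * time.Second))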
type grpclbBalancer struct {
r naming.Resolver
target string
mu sync.Mutex
seq int // a sequence number to make sure addrCh does not get stale addresses.
w naming.Watcher
addrCh chan []Address
rbs []remoteBalancerInfo
addrs []*grpclbAddrInfo
next int
waitCh chan struct{}
done bool
rand *rand.Rand
clientStats lbmpb.ClientStats
type lbBuilder struct {
fallbackTimeout time.Duration
}
func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
updates, err := w.Next()
if err != nil {
grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
return err
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return ErrClientConnClosing
}
for _, update := range updates {
switch update.Op {
case naming.Add:
var exist bool
for _, v := range b.rbs {
// TODO: Is the same addr with different server name a different balancer?
if update.Addr == v.addr {
exist = true
break
}
}
if exist {
continue
}
md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
if !ok {
// TODO: Revisit the handling here and maybe introduce some fallback mechanism.
grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
continue
}
switch md.AddrType {
case naming.Backend:
// TODO: Revisit the handling here and maybe introduce some fallback mechanism.
grpclog.Errorf("The name resolution does not give grpclb addresses")
continue
case naming.GRPCLB:
b.rbs = append(b.rbs, remoteBalancerInfo{
addr: update.Addr,
name: md.ServerName,
})
default:
grpclog.Errorf("Received unknow address type %d", md.AddrType)
continue
}
case naming.Delete:
for i, v := range b.rbs {
if update.Addr == v.addr {
copy(b.rbs[i:], b.rbs[i+1:])
b.rbs = b.rbs[:len(b.rbs)-1]
break
}
}
default:
grpclog.Errorf("Unknown update.Op %v", update.Op)
}
func (b *lbBuilder) Name() string {
return grpclbName
}
func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
// This generates a manual resolver builder with a random scheme. This
// scheme will be used to dial the remote LB, so we can send filtered address
// updates to the remote LB ClientConn using this manual resolver.
scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
r := &lbManualResolver{scheme: scheme, ccb: cc}
var target string
targetSplitted := strings.Split(cc.Target(), ":///")
if len(targetSplitted) < 2 {
target = cc.Target()
} else {
target = targetSplitted[1]
}
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
// not a load balancer.
select {
case <-ch:
default:
lb := &lbBalancer{
cc: cc,
target: target,
opt: opt,
fallbackTimeout: b.fallbackTimeout,
doneCh: make(chan struct{}),
manualResolver: r,
csEvltr: &connectivityStateEvaluator{},
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
clientStats: &rpcStats{},
}
ch <- b.rbs
return nil
return lb
}
func convertDuration(d *lbmpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
type lbBalancer struct {
cc balancer.ClientConn
target string
opt balancer.BuildOptions
fallbackTimeout time.Duration
doneCh chan struct{}
// manualResolver is used in the remote LB ClientConn inside grpclb. When
// resolved address updates are received by grpclb, filtered updates will be
// sent to the remote LB ClientConn through this resolver.
manualResolver *lbManualResolver
// The ClientConn to talk to the remote balancer.
ccRemoteLB *ClientConn
// Support client side load reporting. Each picker gets a reference to this,
// and will update its content.
clientStats *rpcStats
mu sync.Mutex // guards everything following.
// The full server list including drops, used to check if the newly received
// serverList contains anything new. Each generate picker will also have
// reference to this list to do the first layer pick.
fullServerList []*lbpb.Server
// All backend addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in
// serverlist. When generating the picker, a SubConn slice with the same order
// but with only READY SCs will be generated.
backendAddrs []resolver.Address
// Roundrobin functionalities.
csEvltr *connectivityStateEvaluator
state connectivity.State
subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
picker balancer.Picker
// Support fallback to resolved backend addresses if there's no response
// from remote balancer within fallbackTimeout.
fallbackTimerExpired bool
serverListReceived bool
// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
// when resolved address updates are received, and read in the goroutine
// handling fallback.
resolvedBackendAddrs []resolver.Address
}
func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
if l == nil {
// regeneratePicker takes a snapshot of the balancer, and generates a picker from
// it. The picker
// - always returns ErrTransientFailure if the balancer is in TransientFailure,
// - does two layer roundrobin pick otherwise.
// Caller must hold lb.mu.
func (lb *lbBalancer) regeneratePicker() {
if lb.state == connectivity.TransientFailure {
lb.picker = &errPicker{err: balancer.ErrTransientFailure}
return
}
servers := l.GetServers()
var (
sl []*grpclbAddrInfo
addrs []Address
)
for _, s := range servers {
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
ip := net.IP(s.IpAddress)
ipStr := ip.String()
if ip.To4() == nil {
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
// net.SplitHostPort() will return a "too many colons" error.
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr := Address{
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
Metadata: &md,
var readySCs []balancer.SubConn
for _, a := range lb.backendAddrs {
if sc, ok := lb.subConns[a]; ok {
if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
readySCs = append(readySCs, sc)
}
}
sl = append(sl, &grpclbAddrInfo{
addr: addr,
dropForRateLimiting: s.DropForRateLimiting,
dropForLoadBalancing: s.DropForLoadBalancing,
})
addrs = append(addrs, addr)
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
if len(sl) > 0 {
// reset b.next to 0 when replacing the server list.
b.next = 0
b.addrs = sl
b.addrCh <- addrs
}
return
}
func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-done:
return
}
b.mu.Lock()
stats := b.clientStats
b.clientStats = lbmpb.ClientStats{} // Clear the stats.
b.mu.Unlock()
t := time.Now()
stats.Timestamp = &lbmpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbmpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
ClientStats: &stats,
},
}); err != nil {
grpclog.Errorf("grpclb: failed to send load report: %v", err)
if len(lb.fullServerList) <= 0 {
if len(readySCs) <= 0 {
lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
return
}
}
}
func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbc.BalanceLoad(ctx)
if err != nil {
grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
lb.picker = &rrPicker{subConns: readySCs}
return
}
b.mu.Lock()
if b.done {
b.mu.Unlock()
return
}
b.mu.Unlock()
initReq := &lbmpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbmpb.InitialLoadBalanceRequest{
Name: b.target,
},
},
lb.picker = &lbPicker{
serverList: lb.fullServerList,
subConns: readySCs,
stats: lb.clientStats,
}
if err := stream.Send(initReq); err != nil {
grpclog.Errorf("grpclb: failed to send init request: %v", err)
// TODO: backoff on retry?
return true
}
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv init response: %v", err)
// TODO: backoff on retry?
return true
}
initResp := reply.GetInitialResponse()
if initResp == nil {
grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
return
}
// TODO: Support delegation.
if initResp.LoadBalancerDelegate != "" {
// delegation
grpclog.Errorf("TODO: Delegation is not supported yet.")
return
}
streamDone := make(chan struct{})
defer close(streamDone)
b.mu.Lock()
b.clientStats = lbmpb.ClientStats{} // Clear client stats.
b.mu.Unlock()
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
go b.sendLoadReport(stream, d, streamDone)
}
// Retrieve the server list.
for {
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv server list: %v", err)
break
}
b.mu.Lock()
if b.done || seq < b.seq {
b.mu.Unlock()
return
}
b.seq++ // tick when receiving a new list of servers.
seq = b.seq
b.mu.Unlock()
if serverList := reply.GetServerList(); serverList != nil {
b.processServerList(serverList, seq)
}
}
return true
return
}
func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
// TODO: Fall back to the basic direct connection if there is no name resolver.
if b.r == nil {
return errors.New("there is no name resolver installed")
func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
lb.mu.Lock()
defer lb.mu.Unlock()
oldS, ok := lb.scStates[sc]
if !ok {
grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
return
}
b.target = target
b.mu.Lock()
if b.done {
b.mu.Unlock()
return ErrClientConnClosing
lb.scStates[sc] = s
switch s {
case connectivity.Idle:
sc.Connect()
case connectivity.Shutdown:
// When an address was removed by the resolver, the balancer called
// RemoveSubConn but kept the sc's state in scStates. Remove the state for
// this sc here.
delete(lb.scStates, sc)
}
b.addrCh = make(chan []Address)
w, err := b.r.Resolve(target)
if err != nil {
b.mu.Unlock()
grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
return err
}
b.w = w
b.mu.Unlock()
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
// Spawn a goroutine to monitor the name resolution of remote load balancer.
go func() {
for {
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
close(balancerAddrsCh)
return
}
}
}()
// Spawn a goroutine to talk to the remote load balancer.
go func() {
var (
cc *ClientConn
// ccError is closed when there is an error in the current cc.
// A new rb should be picked from rbs and connected.
ccError chan struct{}
rb *remoteBalancerInfo
rbs []remoteBalancerInfo
rbIdx int
)
defer func() {
if ccError != nil {
select {
case <-ccError:
default:
close(ccError)
}
}
if cc != nil {
cc.Close()
}
}()
for {
var ok bool
select {
case rbs, ok = <-balancerAddrsCh:
if !ok {
return
}
foundIdx := -1
if rb != nil {
for i, trb := range rbs {
if trb == *rb {
foundIdx = i
break
}
}
}
if foundIdx >= 0 {
if foundIdx >= 1 {
// Move the address in use to the beginning of the list.
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
rbIdx = 0
}
continue // If found, don't dial new cc.
} else if len(rbs) > 0 {
// Pick a random one from the list, instead of always using the first one.
if l := len(rbs); l > 1 && rb != nil {
tmpIdx := b.rand.Intn(l - 1)
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
}
rbIdx = 0
rb = &rbs[0]
} else {
// foundIdx < 0 && len(rbs) <= 0.
rb = nil
}
case <-ccError:
ccError = nil
if rbIdx < len(rbs)-1 {
rbIdx++
rb = &rbs[rbIdx]
} else {
rb = nil
}
}
if rb == nil {
continue
}
if cc != nil {
cc.Close()
}
// Talk to the remote load balancer to get the server list.
var (
err error
dopts []DialOption
)
if creds := config.DialCreds; creds != nil {
if rb.name != "" {
if err := creds.OverrideServerName(rb.name); err != nil {
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
continue
}
}
dopts = append(dopts, WithTransportCredentials(creds))
} else {
dopts = append(dopts, WithInsecure())
}
if dialer := config.Dialer; dialer != nil {
// WithDialer takes a different type of function, so we instead use a special DialOption here.
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
}
dopts = append(dopts, WithBlock())
ccError = make(chan struct{})
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
cc, err = DialContext(ctx, rb.addr, dopts...)
cancel()
if err != nil {
grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
close(ccError)
continue
}
b.mu.Lock()
b.seq++ // tick when getting a new balancer address
seq := b.seq
b.next = 0
b.mu.Unlock()
go func(cc *ClientConn, ccError chan struct{}) {
lbc := &loadBalancerClient{cc}
b.callRemoteBalancer(lbc, seq)
cc.Close()
select {
case <-ccError:
default:
close(ccError)
}
}(cc, ccError)
}
}()
return nil
}
oldAggrState := lb.state
lb.state = lb.csEvltr.recordTransition(oldS, s)
func (b *grpclbBalancer) down(addr Address, err error) {
b.mu.Lock()
defer b.mu.Unlock()
for _, a := range b.addrs {
if addr == a.addr {
a.connected = false
break
}
// Regenerate picker when one of the following happens:
// - this sc became ready from not-ready
// - this sc became not-ready from ready
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
lb.regeneratePicker()
}
lb.cc.UpdateBalancerState(lb.state, lb.picker)
return
}
func (b *grpclbBalancer) Up(addr Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return nil
}
var cnt int
for _, a := range b.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
cnt++
}
}
// addr is the only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && b.waitCh != nil {
close(b.waitCh)
b.waitCh = nil
// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to using
// resolved backends (backends received from the resolver, not from the remote
// balancer) if no connection to a remote balancer was successful.
func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
timer := time.NewTimer(fallbackTimeout)
defer timer.Stop()
select {
case <-timer.C:
case <-lb.doneCh:
return
}
return func(err error) {
b.down(addr, err)
lb.mu.Lock()
if lb.serverListReceived {
lb.mu.Unlock()
return
}
lb.fallbackTimerExpired = true
lb.refreshSubConns(lb.resolvedBackendAddrs)
lb.mu.Unlock()
}
func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
b.mu.Lock()
if b.done {
b.mu.Unlock()
err = ErrClientConnClosing
// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
// connections.
func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
if len(addrs) <= 0 {
return
}
seq := b.seq
defer func() {
if err != nil {
return
}
put = func() {
s, ok := rpcInfoFromContext(ctx)
if !ok {
return
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
b.clientStats.NumCallsFinished++
if !s.bytesSent {
b.clientStats.NumCallsFinishedWithClientFailedToSend++
} else if s.bytesReceived {
b.clientStats.NumCallsFinishedKnownReceived++
}
var remoteBalancerAddrs, backendAddrs []resolver.Address
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
remoteBalancerAddrs = append(remoteBalancerAddrs, a)
} else {
backendAddrs = append(backendAddrs, a)
}
}()
b.clientStats.NumCallsStarted++
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
if !opts.BlockingWait {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
return
}
// Wait on b.waitCh for non-failfast RPCs.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
for {
select {
case <-ctx.Done():
b.mu.Lock()
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ctx.Err()
return
case <-ch:
b.mu.Lock()
if b.done {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated over all the possible addresses but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
if lb.ccRemoteLB == nil {
if len(remoteBalancerAddrs) <= 0 {
grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
return
}
// First time receiving resolved addresses, create a cc to remote
// balancers.
lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
// Start the fallback goroutine.
go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
}
}
func (b *grpclbBalancer) Notify() <-chan []Address {
return b.addrCh
// The cc to remote balancers uses lb.manualResolver. Send the updated remote
// balancer addresses to it through the manual resolver.
lb.manualResolver.NewAddress(remoteBalancerAddrs)
lb.mu.Lock()
lb.resolvedBackendAddrs = backendAddrs
// If serverListReceived is true, connection to remote balancer was
// successful and there's no need to do fallback anymore.
// If fallbackTimerExpired is false, fallback hasn't happened yet.
if !lb.serverListReceived && lb.fallbackTimerExpired {
// This means we received a new list of resolved backends, and we are
// still in fallback mode. Need to update the list of backends we are
// using to the new list of backends.
lb.refreshSubConns(lb.resolvedBackendAddrs)
}
lb.mu.Unlock()
}
func (b *grpclbBalancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return errBalancerClosed
}
b.done = true
if b.waitCh != nil {
close(b.waitCh)
}
if b.addrCh != nil {
close(b.addrCh)
func (lb *lbBalancer) Close() {
select {
case <-lb.doneCh:
return
default:
}
if b.w != nil {
b.w.Close()
close(lb.doneCh)
if lb.ccRemoteLB != nil {
lb.ccRemoteLB.Close()
}
return nil
}
......@@ -15,7 +15,7 @@
syntax = "proto3";
package grpc.lb.v1;
option go_package = "messages";
option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
......
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"sync"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
"google.golang.org/grpc/status"
)
type rpcStats struct {
NumCallsStarted int64
NumCallsFinished int64
NumCallsFinishedWithDropForRateLimiting int64
NumCallsFinishedWithDropForLoadBalancing int64
NumCallsFinishedWithClientFailedToSend int64
NumCallsFinishedKnownReceived int64
}
// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
func (s *rpcStats) toClientStats() *lbpb.ClientStats {
stats := &lbpb.ClientStats{
NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0),
NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0),
NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
}
return stats
}
func (s *rpcStats) dropForRateLimiting() {
atomic.AddInt64(&s.NumCallsStarted, 1)
atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
atomic.AddInt64(&s.NumCallsFinished, 1)
}
func (s *rpcStats) dropForLoadBalancing() {
atomic.AddInt64(&s.NumCallsStarted, 1)
atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
atomic.AddInt64(&s.NumCallsFinished, 1)
}
func (s *rpcStats) failedToSend() {
atomic.AddInt64(&s.NumCallsStarted, 1)
atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
atomic.AddInt64(&s.NumCallsFinished, 1)
}
func (s *rpcStats) knownReceived() {
atomic.AddInt64(&s.NumCallsStarted, 1)
atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
atomic.AddInt64(&s.NumCallsFinished, 1)
}
type errPicker struct {
// Pick always returns this err.
err error
}
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, p.err
}
// rrPicker does roundrobin on subConns. It's typically used when there's no
// response from remote balancer, and grpclb falls back to the resolved
// backends.
//
// It guaranteed that len(subConns) > 0.
type rrPicker struct {
mu sync.Mutex
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
subConnsNext int
}
func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
p.mu.Lock()
defer p.mu.Unlock()
sc := p.subConns[p.subConnsNext]
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
return sc, nil, nil
}
// lbPicker does two layers of picks:
//
// First layer: roundrobin on all servers in serverList, including drops and backends.
// - If it picks a drop, the RPC will fail as being dropped.
// - If it picks a backend, do a second layer pick to pick the real backend.
//
// Second layer: roundrobin on all READY backends.
//
// It's guaranteed that len(serverList) > 0.
type lbPicker struct {
mu sync.Mutex
serverList []*lbpb.Server
serverListNext int
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
subConnsNext int
stats *rpcStats
}
func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
p.mu.Lock()
defer p.mu.Unlock()
// Layer one roundrobin on serverList.
s := p.serverList[p.serverListNext]
p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
// If it's a drop, return an error and fail the RPC.
if s.DropForRateLimiting {
p.stats.dropForRateLimiting()
return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
}
if s.DropForLoadBalancing {
p.stats.dropForLoadBalancing()
return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
}
// It's not a drop, but there are no ready subConns.
if len(p.subConns) <= 0 {
return nil, nil, balancer.ErrNoSubConnAvailable
}
// Return the next ready subConn in the list, also collect rpc stats.
sc := p.subConns[p.subConnsNext]
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
done := func(info balancer.DoneInfo) {
if !info.BytesSent {
p.stats.failedToSend()
} else if info.BytesReceived {
p.stats.knownReceived()
}
}
return sc, done, nil
}
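// Illustrative walk-through (not part of this change): with serverList
// [drop, B1, B2] and READY subConns [S1, S2], successive Picks yield a
// drop error, then S1, then S2, then a drop error again, and so on. The
// first layer advances through the full serverList; the second layer only
// advances through the READY subConns.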
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"net"
"reflect"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
// processServerList updates the balancer's internal state, creates/removes
// SubConns, and regenerates the picker using the received serverList.
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
grpclog.Infof("lbBalancer: processing server list: %+v", l)
lb.mu.Lock()
defer lb.mu.Unlock()
// Set serverListReceived to true so fallback will not take effect when the
// fallback timer fires later.
lb.serverListReceived = true
// If the new server list == old server list, do nothing.
if reflect.DeepEqual(lb.fullServerList, l.Servers) {
grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
return
}
lb.fullServerList = l.Servers
var backendAddrs []resolver.Address
for _, s := range l.Servers {
if s.DropForLoadBalancing || s.DropForRateLimiting {
continue
}
md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
ip := net.IP(s.IpAddress)
ipStr := ip.String()
if ip.To4() == nil {
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
// net.SplitHostPort() will return a "too many colons" error.
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr := resolver.Address{
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
Metadata: &md,
}
backendAddrs = append(backendAddrs, addr)
}
// Call refreshSubConns to create/remove SubConns.
backendsUpdated := lb.refreshSubConns(backendAddrs)
// If no backend was updated, no SubConn will be created/removed. But since
// the full serverList was different, there might be updates in drops or
// pick weights (different number of duplicates). We need to update the
// picker with the full list.
if !backendsUpdated {
lb.regeneratePicker()
lb.cc.UpdateBalancerState(lb.state, lb.picker)
}
}
// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
// indicating whether the backendAddrs are different from the cached
// backendAddrs (whether any SubConn was created/removed).
// Caller must hold lb.mu.
func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
lb.backendAddrs = nil
var backendsUpdated bool
// addrsSet is the set converted from backendAddrs; it's used for quick
// address lookups.
addrsSet := make(map[resolver.Address]struct{})
// Create new SubConns.
for _, addr := range backendAddrs {
addrWithoutMD := addr
addrWithoutMD.Metadata = nil
addrsSet[addrWithoutMD] = struct{}{}
lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
if _, ok := lb.subConns[addrWithoutMD]; !ok {
backendsUpdated = true
// Use addr (which still carries the metadata) to create the SubConn.
sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
if err != nil {
grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
continue
}
lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
lb.scStates[sc] = connectivity.Idle
sc.Connect()
}
}
for a, sc := range lb.subConns {
// a is no longer in the new server list; remove its SubConn.
if _, ok := addrsSet[a]; !ok {
backendsUpdated = true
lb.cc.RemoveSubConn(sc)
delete(lb.subConns, a)
// Keep the state of this sc in lb.scStates until sc's state becomes Shutdown.
// The entry will be deleted in HandleSubConnStateChange.
}
}
return backendsUpdated
}
func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
for {
reply, err := s.Recv()
if err != nil {
return fmt.Errorf("grpclb: failed to recv server list: %v", err)
}
if serverList := reply.GetServerList(); serverList != nil {
lb.processServerList(serverList)
}
}
}
func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-s.Context().Done():
return
}
stats := lb.clientStats.toClientStats()
t := time.Now()
stats.Timestamp = &lbpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
ClientStats: stats,
},
}); err != nil {
return
}
}
}
func (lb *lbBalancer) callRemoteBalancer() error {
lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
if err != nil {
return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
}
// grpclb handshake on the stream.
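// For reference, the exchange implemented below and in the helpers above is,
// roughly (message types from the grpc_lb_v1 proto imported as lbpb):
//
//	-> LoadBalanceRequest{InitialRequest{Name: target}}
//	<- LoadBalanceResponse{InitialResponse{... ClientStatsReportInterval}}
//	-> LoadBalanceRequest{ClientStats{...}}   (repeated, every report interval)
//	<- LoadBalanceResponse{ServerList{...}}   (repeated, as the list changes)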
initReq := &lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbpb.InitialLoadBalanceRequest{
Name: lb.target,
},
},
}
if err := stream.Send(initReq); err != nil {
return fmt.Errorf("grpclb: failed to send init request: %v", err)
}
reply, err := stream.Recv()
if err != nil {
return fmt.Errorf("grpclb: failed to recv init response: %v", err)
}
initResp := reply.GetInitialResponse()
if initResp == nil {
return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
}
if initResp.LoadBalancerDelegate != "" {
return fmt.Errorf("grpclb: Delegation is not supported")
}
go func() {
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
lb.sendLoadReport(stream, d)
}
}()
return lb.readServerList(stream)
}
func (lb *lbBalancer) watchRemoteBalancer() {
for {
err := lb.callRemoteBalancer()
select {
case <-lb.doneCh:
return
default:
if err != nil {
grpclog.Error(err)
}
}
}
}
func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
var dopts []DialOption
if creds := lb.opt.DialCreds; creds != nil {
if err := creds.OverrideServerName(remoteLBName); err == nil {
dopts = append(dopts, WithTransportCredentials(creds))
} else {
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
dopts = append(dopts, WithInsecure())
}
} else {
dopts = append(dopts, WithInsecure())
}
if lb.opt.Dialer != nil {
// WithDialer takes a different type of function, so we instead use a
// special DialOption here.
dopts = append(dopts, withContextDialer(lb.opt.Dialer))
}
// Explicitly set pickfirst as the balancer.
dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
dopts = append(dopts, withResolverBuilder(lb.manualResolver))
// Dial using manualResolver.Scheme, a random scheme generated when grpclb is
// initialized. The target name is not important.
cc, err := Dial("grpclb:///grpclb.server", dopts...)
if err != nil {
grpclog.Fatalf("failed to dial: %v", err)
}
lb.ccRemoteLB = cc
go lb.watchRemoteBalancer()
}
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)
// The parent ClientConn should re-resolve when grpclb loses connection to the
// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
// ResolveNow, and eventually results in re-resolve happening in parent
// ClientConn's resolver (DNS for example).
//
// parent
// ClientConn
// +-----------------------------------------------------------------+
// | parent +---------------------------------+ |
// | DNS ClientConn | grpclb | |
// | resolver balancerWrapper | | |
// | + + | grpclb grpclb | |
// | | | | ManualResolver ClientConn | |
// | | | | + + | |
// | | | | | | Transient | |
// | | | | | | Failure | |
// | | | | | <--------- | | |
// | | | <--------------- | ResolveNow | | |
// | | <--------- | ResolveNow | | | | |
// | | ResolveNow | | | | | |
// | | | | | | | |
// | + + | + + | |
// | +---------------------------------+ |
// +-----------------------------------------------------------------+
// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
// resolver with a special ResolveNow() function.
//
// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
// so when the grpclb client loses contact with remote balancers, the parent
// ClientConn's resolver will re-resolve.
type lbManualResolver struct {
scheme string
ccr resolver.ClientConn
ccb balancer.ClientConn
}
func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
r.ccr = cc
return r, nil
}
func (r *lbManualResolver) Scheme() string {
return r.scheme
}
// ResolveNow calls ResolveNow on the parent ClientConn.
func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
r.ccb.ResolveNow(o)
}
// Close is a noop for Resolver.
func (*lbManualResolver) Close() {}
// NewAddress calls cc.NewAddress.
func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
r.ccr.NewAddress(addrs)
}
// NewServiceConfig calls cc.NewServiceConfig.
func (r *lbManualResolver) NewServiceConfig(sc string) {
r.ccr.NewServiceConfig(sc)
}
......@@ -19,13 +19,6 @@
// the godoc of the top-level grpc package.
package internal
// TestingCloseConns closes all existing transports but keeps
// grpcServer.lis accepting new connections.
//
// The provided grpcServer must be of type *grpc.Server. It is untyped
// for circular dependency reasons.
var TestingCloseConns func(grpcServer interface{})
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
// It must be called before Serve and requires TLS credentials.
//
......
// +build go1.7, !go1.8
// +build go1.6,!go1.8
/*
*
......
......@@ -97,7 +97,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
p = bp.picker
bp.mu.Unlock()
subConn, put, err := p.Pick(ctx, opts)
subConn, done, err := p.Pick(ctx, opts)
if err != nil {
switch err {
......@@ -120,7 +120,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
continue
}
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
return t, put, nil
return t, done, nil
}
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
......
......@@ -26,6 +26,9 @@ import (
"google.golang.org/grpc/resolver"
)
// PickFirstBalancerName is the name of the pick_first balancer.
const PickFirstBalancerName = "pick_first"
func newPickfirstBuilder() balancer.Builder {
return &pickfirstBuilder{}
}
......@@ -37,7 +40,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions
}
func (*pickfirstBuilder) Name() string {
return "pickfirst"
return PickFirstBalancerName
}
type pickfirstBalancer struct {
......@@ -57,14 +60,20 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
return
}
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
b.sc.Connect()
} else {
b.sc.UpdateAddresses(addrs)
b.sc.Connect()
}
}
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
if b.sc != sc || s == connectivity.Shutdown {
if b.sc != sc {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
return
}
if s == connectivity.Shutdown {
b.sc = nil
return
}
......@@ -93,3 +102,7 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.
}
return p.sc, nil, nil
}
func init() {
balancer.Register(newPickfirstBuilder())
}
......@@ -82,8 +82,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
Header: map[string][]string{"User-Agent": {grpcUA}},
})
req = req.WithContext(ctx)
if err := req.Write(conn); err != nil {
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
......
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package dns implements a dns resolver to be installed as the default resolver
// in grpc.
package dns
import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
)
func init() {
resolver.Register(NewBuilder())
}
const (
defaultPort = "443"
defaultFreq = time.Minute * 30
golang = "GO"
// In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config.
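// For illustration only (the JSON shape mirrors rawChoice below; the values
// are hypothetical), such a TXT record could contain:
//
//	grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]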
txtAttribute = "grpc_config="
)
var errMissingAddr = errors.New("missing address")
// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
func NewBuilder() resolver.Builder {
return &dnsBuilder{freq: defaultFreq}
}
type dnsBuilder struct {
// frequency of polling the DNS server.
freq time.Duration
}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
host, port, err := parseTarget(target.Endpoint)
if err != nil {
return nil, err
}
// IP address.
if net.ParseIP(host) != nil {
host, _ = formatIP(host)
addr := []resolver.Address{{Addr: host + ":" + port}}
i := &ipResolver{
cc: cc,
ip: addr,
rn: make(chan struct{}, 1),
q: make(chan struct{}),
}
cc.NewAddress(addr)
go i.watcher()
return i, nil
}
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
d := &dnsResolver{
freq: b.freq,
host: host,
port: port,
ctx: ctx,
cancel: cancel,
cc: cc,
t: time.NewTimer(0),
rn: make(chan struct{}, 1),
}
d.wg.Add(1)
go d.watcher()
return d, nil
}
// Scheme returns the naming scheme of this resolver builder, which is "dns".
func (b *dnsBuilder) Scheme() string {
return "dns"
}
// ipResolver watches for the name resolution update for an IP address.
type ipResolver struct {
cc resolver.ClientConn
ip []resolver.Address
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
rn chan struct{}
q chan struct{}
}
// ResolveNow resends the address it stores; no resolution is needed.
func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
select {
case i.rn <- struct{}{}:
default:
}
}
// Close closes the ipResolver.
func (i *ipResolver) Close() {
close(i.q)
}
func (i *ipResolver) watcher() {
for {
select {
case <-i.rn:
i.cc.NewAddress(i.ip)
case <-i.q:
return
}
}
}
// dnsResolver watches for the name resolution update for a non-IP target.
type dnsResolver struct {
freq time.Duration
host string
port string
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
rn chan struct{}
t *time.Timer
// wg is used to force Close() to return only after the watcher() goroutine
// has finished; otherwise a data race is possible. [Race example] In
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() didn't wait for the watcher() goroutine to
// finish, the race detector would sometimes report that the lookups inside
// watcher() (which READ the lookup function pointers) race with
// replaceNetFunc (which WRITEs the lookup function pointers).
wg sync.WaitGroup
}
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
select {
case d.rn <- struct{}{}:
default:
}
}
// Close closes the dnsResolver.
func (d *dnsResolver) Close() {
d.cancel()
d.wg.Wait()
d.t.Stop()
}
func (d *dnsResolver) watcher() {
defer d.wg.Done()
for {
select {
case <-d.ctx.Done():
return
case <-d.t.C:
case <-d.rn:
}
result, sc := d.lookup()
// Next lookup should happen after an interval defined by d.freq.
d.t.Reset(d.freq)
d.cc.NewServiceConfig(string(sc))
d.cc.NewAddress(result)
}
}
func (d *dnsResolver) lookupSRV() []resolver.Address {
var newAddrs []resolver.Address
_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
if err != nil {
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
return nil
}
for _, s := range srvs {
lbAddrs, err := lookupHost(d.ctx, s.Target)
if err != nil {
grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
continue
}
for _, a := range lbAddrs {
ip, ok := formatIP(a)
if !ok {
// err is nil here (the lookup succeeded); log the bad address instead.
grpclog.Errorf("grpc: failed to parse IP address %q.\n", a)
continue
}
addr := ip + ":" + strconv.Itoa(int(s.Port))
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
}
}
return newAddrs
}
func (d *dnsResolver) lookupTXT() string {
ss, err := lookupTXT(d.ctx, d.host)
if err != nil {
grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
return ""
}
var res string
for _, s := range ss {
res += s
}
// TXT record must have "grpc_config=" attribute in order to be used as service config.
if !strings.HasPrefix(res, txtAttribute) {
grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
return ""
}
return strings.TrimPrefix(res, txtAttribute)
}
func (d *dnsResolver) lookupHost() []resolver.Address {
var newAddrs []resolver.Address
addrs, err := lookupHost(d.ctx, d.host)
if err != nil {
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
return nil
}
for _, a := range addrs {
ip, ok := formatIP(a)
if !ok {
// err is nil here (the lookup succeeded); log the bad address instead.
grpclog.Errorf("grpc: failed to parse IP address %q.\n", a)
continue
}
addr := ip + ":" + d.port
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
}
return newAddrs
}
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
var newAddrs []resolver.Address
newAddrs = d.lookupSRV()
// Support fallback to non-balancer address.
newAddrs = append(newAddrs, d.lookupHost()...)
sc := d.lookupTXT()
return newAddrs, canaryingSC(sc)
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
// If addr is an IPv4 address, return the addr and ok = true.
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
func formatIP(addr string) (addrIP string, ok bool) {
ip := net.ParseIP(addr)
if ip == nil {
return "", false
}
if ip.To4() != nil {
return addr, true
}
return "[" + addr + "]", true
}
// parseTarget takes the user input target string, returns formatted host and port info.
// If target doesn't specify a port, set the port to be the defaultPort.
// If target is in IPv6 format and the host is enclosed in square brackets,
// the brackets are stripped when setting the host.
// examples:
// target: "www.google.com" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
// target: ":80" returns host: "localhost", port: "80"
// target: ":" returns host: "localhost", port: "443"
func parseTarget(target string) (host, port string, err error) {
if target == "" {
return "", "", errMissingAddr
}
if ip := net.ParseIP(target); ip != nil {
// target is an IPv4 or IPv6 (without brackets) address
return target, defaultPort, nil
}
if host, port, err = net.SplitHostPort(target); err == nil {
// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
host = "localhost"
}
if port == "" {
// If the port field is empty (target ends with a colon), e.g. "[::1]:", defaultPort is used.
port = defaultPort
}
return host, port, nil
}
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
// target doesn't have port
return host, port, nil
}
return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
}
type rawChoice struct {
ClientLanguage *[]string `json:"clientLanguage,omitempty"`
Percentage *int `json:"percentage,omitempty"`
ClientHostName *[]string `json:"clientHostName,omitempty"`
ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
}
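// containsString reports whether b is present in the list a. A nil list is
// treated as a wildcard and matches any value.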
func containsString(a *[]string, b string) bool {
if a == nil {
return true
}
for _, c := range *a {
if c == b {
return true
}
}
return false
}
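// chosenByPercentage reports whether this client is selected by the given
// percentage: a nil percentage always selects; otherwise a draw uniform on
// [1, 100] selects with probability *a/100.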
func chosenByPercentage(a *int) bool {
if a == nil {
return true
}
s := rand.NewSource(time.Now().UnixNano())
r := rand.New(s)
if r.Intn(100)+1 > *a {
return false
}
return true
}
func canaryingSC(js string) string {
if js == "" {
return ""
}
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
return ""
}
var sc string
for _, c := range rcs {
if !containsString(c.ClientLanguage, golang) ||
!chosenByPercentage(c.Percentage) ||
!containsString(c.ClientHostName, cliHostname) ||
c.ServiceConfig == nil {
continue
}
sc = string(*c.ServiceConfig)
break
}
return sc
}
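// For illustration only (field names from rawChoice; the values are
// hypothetical), a choice that canaries a config to 10% of Go clients could
// look like:
//
//	[{
//		"clientLanguage": ["GO"],
//		"percentage": 10,
//		"serviceConfig": {"loadBalancingPolicy": "round_robin"}
//	}]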
// +build go1.6,!go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package dns
import (
"net"
"golang.org/x/net/context"
)
var (
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
return net.LookupSRV(service, proto, name)
}
lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
)
// +build go1.8
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package dns
import "net"
var (
lookupHost = net.DefaultResolver.LookupHost
lookupSRV = net.DefaultResolver.LookupSRV
lookupTXT = net.DefaultResolver.LookupTXT
)
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package passthrough implements a pass-through resolver. It sends the target
// name without scheme back to gRPC as the resolved address.
package passthrough
import "google.golang.org/grpc/resolver"
const scheme = "passthrough"
type passthroughBuilder struct{}
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
r := &passthroughResolver{
target: target,
cc: cc,
}
r.start()
return r, nil
}
func (*passthroughBuilder) Scheme() string {
return scheme
}
type passthroughResolver struct {
target resolver.Target
cc resolver.ClientConn
}
func (r *passthroughResolver) start() {
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
func (*passthroughResolver) Close() {}
func init() {
resolver.Register(&passthroughBuilder{})
}
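// For illustration only (a minimal sketch; grpc.Dial and grpc.WithInsecure
// come from this repository's top-level package): with this builder
// registered, a client can skip name resolution entirely:
//
//	conn, err := grpc.Dial("passthrough:///localhost:50051", grpc.WithInsecure())
//	if err != nil {
//		// handle the dial error
//	}
//	defer conn.Close()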
......@@ -24,7 +24,7 @@ var (
// m is a map from scheme to resolver builder.
m = make(map[string]Builder)
// defaultScheme is the default scheme to use.
defaultScheme string
defaultScheme = "passthrough"
)
// TODO(bar) install dns resolver in init(){}.
......@@ -38,7 +38,7 @@ func Register(b Builder) {
// Get returns the resolver builder registered with the given scheme.
// If no builder is registered with the scheme, the default scheme will
// be used.
// If the default scheme is not modified, "dns" will be the default
// If the default scheme is not modified, "passthrough" will be the default
// scheme, and the preinstalled dns resolver will be used.
// If the default scheme is modified, and a resolver is registered with
// the scheme, that resolver will be returned.
......@@ -55,7 +55,7 @@ func Get(scheme string) Builder {
}
// SetDefaultScheme sets the default scheme that will be used.
// The default default scheme is "dns".
// The default default scheme is "passthrough".
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
}
......@@ -78,7 +78,9 @@ type Address struct {
// Type is the type of this address.
Type AddressType
// ServerName is the name of this address.
// It's the name of the grpc load balancer, which will be used for authentication.
//
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
// balancer, not the name of the backend.
ServerName string
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
......@@ -88,10 +90,18 @@ type Address struct {
// BuildOption includes additional information for the builder to create
// the resolver.
type BuildOption struct {
// UserOptions can be used to pass configuration between DialOptions and the
// resolver.
UserOptions interface{}
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For situations like testing,
// the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
......@@ -128,8 +138,10 @@ type ResolveNowOption struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
// ResolveNow will be called by gRPC to try to resolve the target name again.
// It's just a hint, resolver can ignore this if it's not necessary.
// ResolveNow will be called by gRPC to try to resolve the target name
// again. It's just a hint, resolver can ignore this if it's not necessary.
//
// It could be called multiple times concurrently.
ResolveNow(ResolveNowOption)
// Close closes the resolver.
Close()
......
......@@ -19,6 +19,7 @@
package grpc
import (
"fmt"
"strings"
"google.golang.org/grpc/grpclog"
......@@ -36,20 +37,24 @@ type ccResolverWrapper struct {
}
// split2 returns the values from strings.SplitN(s, sep, 2).
// If sep is not found, it returns "", s instead.
func split2(s, sep string) (string, string) {
// If sep is not found, it returns ("", s, false) instead.
func split2(s, sep string) (string, string, bool) {
spl := strings.SplitN(s, sep, 2)
if len(spl) < 2 {
return "", s
return "", "", false
}
return spl[0], spl[1]
return spl[0], spl[1], true
}
// parseTarget splits target into a struct containing scheme, authority and
// endpoint.
func parseTarget(target string) (ret resolver.Target) {
ret.Scheme, ret.Endpoint = split2(target, "://")
ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
var ok bool
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
if !ok {
return resolver.Target{Endpoint: target}
}
ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
return ret
}
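// For example (illustrative targets):
//
//	parseTarget("dns://8.8.8.8/foo.bar:443")
//		=> Target{Scheme: "dns", Authority: "8.8.8.8", Endpoint: "foo.bar:443"}
//	parseTarget("foo.bar:443")
//		=> Target{Scheme: "", Authority: "", Endpoint: "foo.bar:443"}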
......@@ -57,18 +62,17 @@ func parseTarget(target string) (ret resolver.Target) {
// builder for this scheme. It then builds the resolver and starts the
// monitoring goroutine for it.
//
// This function could return nil, nil, in tests for old behaviors.
// TODO(bar) never return nil, nil when DNS becomes the default resolver.
// If withResolverBuilder dial option is set, the specified resolver will be
// used instead.
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
target := parseTarget(cc.target)
grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
rb := resolver.Get(target.Scheme)
rb := cc.dopts.resolverBuilder
if rb == nil {
// TODO(bar) return error when DNS becomes the default (implemented and
// registered by DNS package).
grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
return nil, nil
rb = resolver.Get(cc.parsedTarget.Scheme)
if rb == nil {
return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
}
}
ccr := &ccResolverWrapper{
......@@ -79,14 +83,19 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
}
var err error
ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{
UserOptions: cc.dopts.resolverBuildUserOptions,
})
if err != nil {
return nil, err
}
go ccr.watcher()
return ccr, nil
}
func (ccr *ccResolverWrapper) start() {
go ccr.watcher()
}
// watcher processes address updates and service config updates sequentially.
// Otherwise, we need to resolve possible races between address and service
// config (e.g. they specify different balancer types).
......@@ -100,20 +109,31 @@ func (ccr *ccResolverWrapper) watcher() {
select {
case addrs := <-ccr.addrCh:
grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
// TODO(bar switching) this should never be nil. Pickfirst should be default.
if ccr.cc.balancerWrapper != nil {
// TODO(bar switching) create balancer if it's nil?
ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
select {
case <-ccr.done:
return
default:
}
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
ccr.cc.handleResolvedAddrs(addrs, nil)
case sc := <-ccr.scCh:
select {
case <-ccr.done:
return
default:
}
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
ccr.cc.handleServiceConfig(sc)
case <-ccr.done:
return
}
}
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
ccr.resolver.ResolveNow(o)
}
func (ccr *ccResolverWrapper) close() {
ccr.resolver.Close()
close(ccr.done)
......
......@@ -21,18 +21,17 @@ package grpc
import (
"bytes"
"compress/gzip"
stdctx "context"
"encoding/binary"
"io"
"io/ioutil"
"math"
"os"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
......@@ -124,6 +123,7 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
compressorType string
failFast bool
headerMD metadata.MD
trailerMD metadata.MD
......@@ -195,12 +195,15 @@ func Peer(peer *peer.Peer) CallOption {
}
// FailFast configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If failfast is true, the RPC will fail
// connections or unreachable servers. If failFast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will retry
// the call if it fails due to a transient error. Please refer to
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
// data was written to the wire unless the server indicates it did not process
// the data. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
// Note: failFast is default to true.
//
// By default, RPCs are "Fail Fast".
func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error {
c.failFast = failFast
......@@ -233,6 +236,18 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
})
}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
// This API is EXPERIMENTAL.
func UseCompressor(name string) CallOption {
return beforeCall(func(c *callInfo) error {
c.compressorType = name
return nil
})
}
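// For illustration only (a sketch; assumes a compressor has been registered
// under the name "gzip" via encoding.RegisterCompressor):
//
//	err := grpc.Invoke(ctx, "/pkg.Service/Method", req, reply, cc,
//		grpc.UseCompressor("gzip"))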
// The format of the payload: compressed or not?
type payloadFormat uint8
......@@ -277,8 +292,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if length == 0 {
return pf, nil, nil
}
if length > uint32(maxReceiveMessageSize) {
return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
if int64(length) > int64(maxInt) {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
}
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message:
......@@ -294,18 +312,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
// encode serializes msg and returns a buffer of message header and a buffer of msg.
// If msg is nil, it generates the message header and an empty msg buffer.
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) {
var b []byte
// TODO(ddyihai): eliminate extra Compressor parameter.
func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
var (
b []byte
cbuf *bytes.Buffer
)
const (
payloadLen = 1
sizeLen = 4
)
if msg != nil {
var err error
b, err = c.Marshal(msg)
if err != nil {
return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if outPayload != nil {
outPayload.Payload = msg
......@@ -313,24 +334,35 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
outPayload.Data = b
outPayload.Length = len(b)
}
if cp != nil {
if err := cp.Do(cbuf, b); err != nil {
return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
if compressor != nil || cp != nil {
cbuf = new(bytes.Buffer)
// A compressor is available; one set via UseCompressor takes precedence.
if compressor != nil {
z, _ := compressor.Compress(cbuf)
if _, err := z.Write(b); err != nil {
return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
z.Close()
} else {
// If Compressor is not set by UseCompressor, use default Compressor
if err := cp.Do(cbuf, b); err != nil {
return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
}
b = cbuf.Bytes()
}
}
if uint(len(b)) > math.MaxUint32 {
return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
bufHeader := make([]byte, payloadLen+sizeLen)
if cp == nil {
bufHeader[0] = byte(compressionNone)
} else {
if compressor != nil || cp != nil {
bufHeader[0] = byte(compressionMade)
} else {
bufHeader[0] = byte(compressionNone)
}
// Write length of b into buf
binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
if outPayload != nil {
......@@ -339,20 +371,26 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
return bufHeader, b, nil
}
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
if dc == nil || recvCompress != dc.Type() {
return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
if recvCompress == "" || recvCompress == encoding.Identity {
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
}
return nil
}
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
......@@ -360,22 +398,37 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
if inPayload != nil {
inPayload.WireLength = len(d)
}
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
return err
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
return st.Err()
}
if pf == compressionMade {
d, err = dc.Do(bytes.NewReader(d))
if err != nil {
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
d, err = dc.Do(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
} else {
dcReader, err := compressor.Decompress(bytes.NewReader(d))
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
d, err = ioutil.ReadAll(dcReader)
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
}
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
}
if inPayload != nil {
inPayload.RecvTime = time.Now()
......@@ -388,9 +441,7 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
}
type rpcInfo struct {
failfast bool
bytesSent bool
bytesReceived bool
failfast bool
}
type rpcInfoContextKey struct{}
......@@ -404,69 +455,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
return
}
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
if ss, ok := rpcInfoFromContext(ctx); ok {
ss.bytesReceived = s.bytesReceived
ss.bytesSent = s.bytesSent
}
return
}
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
case transport.StreamError:
return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
switch err {
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, stdctx.Canceled:
return status.Error(codes.Canceled, err.Error())
case ErrClientConnClosing:
return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
switch err {
case nil:
return codes.OK
case io.EOF:
return codes.OutOfRange
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
return codes.FailedPrecondition
case os.ErrInvalid:
return codes.InvalidArgument
case context.Canceled, stdctx.Canceled:
return codes.Canceled
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
return codes.DeadlineExceeded
}
switch {
case os.IsExist(err):
return codes.AlreadyExists
case os.IsNotExist(err):
return codes.NotFound
case os.IsPermission(err):
return codes.PermissionDenied
}
return codes.Unknown
}
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
// Deprecated; use status.FromError and Code method instead.
// Deprecated: use status.FromError and Code method instead.
func Code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
......@@ -477,7 +469,7 @@ func Code(err error) codes.Code {
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
//
// Deprecated; use status.FromError and Message method instead.
// Deprecated: use status.FromError and Message method instead.
func ErrorDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
......@@ -488,85 +480,26 @@ func ErrorDesc(err error) string {
// Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK.
//
// Deprecated; use status.Errorf instead.
// Deprecated: use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error {
return status.Errorf(c, format, a...)
}
// MethodConfig defines the configuration recommended by the service providers for a
// particular method.
// This is EXPERIMENTAL and subject to change.
type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here.
WaitForReady *bool
// Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline.
Timeout *time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized
// payload after per-message compression (but before stream compression) in bytes.
// The actual value used is the minimum of the value specified here and the value set
// by the application via the gRPC client API. If either one is not set, then the other
// will be used. If neither is set, then the built-in default is used.
MaxReqSize *int
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
MaxRespSize *int
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
// This is EXPERIMENTAL and subject to change.
type ServiceConfig struct {
// LB is the load balancer the service providers recommends. The balancer specified
// via grpc.WithBalancer will override this.
LB Balancer
// Methods contains a map for the methods in this service.
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
// Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig
}
func min(a, b *int) *int {
if *a < *b {
return a
}
return b
}
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
if mcMax == nil && doptMax == nil {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
return min(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
}
return doptMax
}
// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
// The latest support package version is 4.
// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
// next support package version update.
const SupportPackageIsVersion3 = true
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package.
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 5.
//
// This constant may be renamed in the future if a change in the generated code
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
// should not be referenced from any other code.
const SupportPackageIsVersion4 = true
// Older versions are kept for compatibility. They may be removed if
// compatibility cannot be maintained.
//
// These constants should not be referenced from any other code.
const (
SupportPackageIsVersion3 = true
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
)
// Version is the current grpc version.
const Version = "1.7.1"
const Version = "1.9.1"
const grpcUA = "grpc-go/" + Version
......@@ -32,11 +32,14 @@ import (
"sync"
"time"
"io/ioutil"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
......@@ -89,13 +92,15 @@ type Server struct {
conns map[io.Closer]bool
serve bool
drain bool
ctx context.Context
cancel context.CancelFunc
// A CondVar to let GracefulStop() block until all pending RPCs are finished
// and all transports go away.
cv *sync.Cond
cv *sync.Cond // signaled when connections close for GracefulStop
m map[string]*service // service name -> service info
events trace.EventLog
quit chan struct{}
done chan struct{}
quitOnce sync.Once
doneOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
}
type options struct {
......@@ -118,11 +123,13 @@ type options struct {
initialConnWindowSize int32
writeBufferSize int
readBufferSize int
connectionTimeout time.Duration
}
var defaultServerOptions = options{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
}
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
......@@ -181,14 +188,24 @@ func CustomCodec(codec Codec) ServerOption {
}
}
// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
// RPCCompressor returns a ServerOption that sets a compressor for outbound
// messages. For backward compatibility, all outbound messages will be sent
// using this compressor, regardless of incoming message compression. By
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
return func(o *options) {
o.cp = cp
}
}
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
// messages. It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
return func(o *options) {
o.dc = dc
......@@ -291,6 +308,18 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
}
}
// ConnectionTimeout returns a ServerOption that sets the timeout for
// connection establishment (up to and including HTTP/2 handshaking) for all
// new connections. If this is not set, the default is 120 seconds. A zero or
// negative value will result in an immediate timeout.
//
// This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption {
return func(o *options) {
o.connectionTimeout = d
}
}
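// For illustration only (a minimal sketch):
//
//	srv := grpc.NewServer(grpc.ConnectionTimeout(10 * time.Second))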
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
......@@ -307,9 +336,10 @@ func NewServer(opt ...ServerOption) *Server {
opts: opts,
conns: make(map[io.Closer]bool),
m: make(map[string]*service),
quit: make(chan struct{}),
done: make(chan struct{}),
}
s.cv = sync.NewCond(&s.mu)
s.ctx, s.cancel = context.WithCancel(context.Background())
if EnableTracing {
_, file, line, _ := runtime.Caller(1)
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
......@@ -418,11 +448,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
return ret
}
var (
// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
ErrServerStopped = errors.New("grpc: the server has been stopped")
)
// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if s.opts.creds == nil {
......@@ -436,16 +464,29 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
// Serve always returns non-nil error.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
s.serve = true
if s.lis == nil {
// Serve called after Stop or GracefulStop.
s.mu.Unlock()
lis.Close()
return ErrServerStopped
}
s.serveWG.Add(1)
defer func() {
s.serveWG.Done()
select {
// Stop or GracefulStop called; block until done and return nil.
case <-s.quit:
<-s.done
default:
}
}()
s.lis[lis] = true
s.mu.Unlock()
defer func() {
......@@ -479,36 +520,52 @@ func (s *Server) Serve(lis net.Listener) error {
timer := time.NewTimer(tempDelay)
select {
case <-timer.C:
case <-s.ctx.Done():
case <-s.quit:
timer.Stop()
return nil
}
timer.Stop()
continue
}
s.mu.Lock()
s.printf("done serving; Accept = %v", err)
s.mu.Unlock()
select {
case <-s.quit:
return nil
default:
}
return err
}
tempDelay = 0
// Start a new goroutine to deal with rawConn
// so we don't stall this Accept loop goroutine.
go s.handleRawConn(rawConn)
// Start a new goroutine to deal with rawConn so we don't stall this Accept
// loop goroutine.
//
// Make sure we account for the goroutine so GracefulStop doesn't nil out
// s.conns before this conn can be added.
s.serveWG.Add(1)
go func() {
s.handleRawConn(rawConn)
s.serveWG.Done()
}()
}
}
// handleRawConn is run in its own goroutine and handles a just-accepted
// connection that has not had any I/O performed on it yet.
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
if err != nil {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
// If serverHandShake returns ErrConnDispatched, keep rawConn open.
// If serverHandshake returns ErrConnDispatched, keep rawConn open.
if err != credentials.ErrConnDispatched {
rawConn.Close()
}
rawConn.SetDeadline(time.Time{})
return
}
......@@ -520,19 +577,33 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
}
s.mu.Unlock()
var serve func()
c := conn.(io.Closer)
if s.opts.useHandlerImpl {
s.serveUsingHandler(conn)
serve = func() { s.serveUsingHandler(conn) }
} else {
s.serveHTTP2Transport(conn, authInfo)
// Finish handshaking (HTTP2)
st := s.newHTTP2Transport(conn, authInfo)
if st == nil {
return
}
c = st
serve = func() { s.serveStreams(st) }
}
rawConn.SetDeadline(time.Time{})
if !s.addConn(c) {
return
}
go func() {
serve()
s.removeConn(c)
}()
}
// serveHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go) and
// serves streams on it.
// This is run in its own goroutine (it does network I/O in
// transport.NewServerTransport).
func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
AuthInfo: authInfo,
......@@ -552,17 +623,12 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
s.mu.Unlock()
c.Close()
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
return
}
if !s.addConn(st) {
st.Close()
return
return nil
}
s.serveStreams(st)
return st
}
func (s *Server) serveStreams(st transport.ServerTransport) {
defer s.removeConn(st)
defer st.Close()
var wg sync.WaitGroup
st.HandleStreams(func(stream *transport.Stream) {
......@@ -596,11 +662,6 @@ var _ http.Handler = (*Server)(nil)
//
// conn is the *tls.Conn that's already been authenticated.
func (s *Server) serveUsingHandler(conn net.Conn) {
if !s.addConn(conn) {
conn.Close()
return
}
defer s.removeConn(conn)
h2s := &http2.Server{
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
}
......@@ -640,7 +701,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if !s.addConn(st) {
st.Close()
return
}
defer s.removeConn(st)
......@@ -670,9 +730,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
func (s *Server) addConn(c io.Closer) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil || s.drain {
if s.conns == nil {
c.Close()
return false
}
if s.drain {
// Transport added after we drained our existing conns: drain it
// immediately.
c.(transport.ServerTransport).Drain()
}
s.conns[c] = true
return true
}
......@@ -686,18 +752,14 @@ func (s *Server) removeConn(c io.Closer) {
}
}
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if cp != nil {
cbuf = new(bytes.Buffer)
}
if s.opts.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
hdr, data, err := encode(s.opts.codec, msg, cp, outPayload, comp)
if err != nil {
grpclog.Errorln("grpc: server failed to encode response: ", err)
return err
......@@ -741,10 +803,43 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}()
}
// comp and cp are used for compression. decomp and dc are used for
// decompression. If comp and decomp are both set, they are the same;
// however they are kept separate to ensure that at most one of the
// compressor/decompressor variable pairs is set for use later.
var comp, decomp encoding.Compressor
var cp Compressor
var dc Decompressor
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
dc = s.opts.dc
} else if rc != "" && rc != encoding.Identity {
decomp = encoding.GetCompressor(rc)
if decomp == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
t.WriteStatus(stream, st)
return st.Err()
}
}
// If cp is set, use it. Otherwise, attempt to compress the response using
// the incoming message compression method.
//
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
stream.SetSendCompress(s.opts.cp.Type())
cp = s.opts.cp
stream.SetSendCompress(cp.Type())
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
comp = encoding.GetCompressor(rc)
if comp != nil {
stream.SetSendCompress(rc)
}
}
p := &parser{r: stream}
pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
if err == io.EOF {
......@@ -752,7 +847,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
if err != nil {
if st, ok := status.FromError(err); ok {
......@@ -773,19 +868,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
return err
}
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
return err
}
if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
// TODO: checkRecvPayload always returns an RPC error. Add a return here if necessary.
return st.Err()
}
var inPayload *stats.InPayload
if sh != nil {
......@@ -799,9 +886,17 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
if err != nil {
return Errorf(codes.Internal, err.Error())
if dc != nil {
req, err = dc.Do(bytes.NewReader(req))
if err != nil {
return status.Errorf(codes.Internal, err.Error())
}
} else {
tmp, _ := decomp.Decompress(bytes.NewReader(req))
req, err = ioutil.ReadAll(tmp)
if err != nil {
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
}
if len(req) > s.opts.maxReceiveMessageSize {
......@@ -847,7 +942,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Last: true,
Delay: false,
}
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
......@@ -896,21 +992,45 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
sh.HandleRPC(stream.Context(), end)
}()
}
if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type())
}
ss := &serverStream{
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
statsHandler: sh,
}
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
ss.dc = s.opts.dc
} else if rc != "" && rc != encoding.Identity {
ss.decomp = encoding.GetCompressor(rc)
if ss.decomp == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
t.WriteStatus(ss.s, st)
return st.Err()
}
}
// If cp is set, use it. Otherwise, attempt to compress the response using
// the incoming message compression method.
//
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
ss.cp = s.opts.cp
stream.SetSendCompress(s.opts.cp.Type())
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
ss.comp = encoding.GetCompressor(rc)
if ss.comp != nil {
stream.SetSendCompress(rc)
}
}
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
defer func() {
......@@ -1054,6 +1174,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
s.quitOnce.Do(func() {
close(s.quit)
})
defer func() {
s.serveWG.Wait()
s.doneOnce.Do(func() {
close(s.done)
})
}()
s.mu.Lock()
listeners := s.lis
s.lis = nil
......@@ -1071,7 +1202,6 @@ func (s *Server) Stop() {
}
s.mu.Lock()
s.cancel()
if s.events != nil {
s.events.Finish()
s.events = nil
......@@ -1083,22 +1213,38 @@ func (s *Server) Stop() {
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
s.quitOnce.Do(func() {
close(s.quit)
})
defer func() {
s.doneOnce.Do(func() {
close(s.done)
})
}()
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
s.mu.Unlock()
return
}
for lis := range s.lis {
lis.Close()
}
s.lis = nil
s.cancel()
if !s.drain {
for c := range s.conns {
c.(transport.ServerTransport).Drain()
}
s.drain = true
}
// Wait for serving threads to be ready to exit. Only then can we be sure no
// new conns will be created.
s.mu.Unlock()
s.serveWG.Wait()
s.mu.Lock()
for len(s.conns) != 0 {
s.cv.Wait()
}
......@@ -1107,28 +1253,15 @@ func (s *Server) GracefulStop() {
s.events.Finish()
s.events = nil
}
s.mu.Unlock()
}
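// The quit/done choreography shared by Stop and GracefulStop above, reduced
// to a minimal sketch (illustrative; lifecycle and shutdown are hypothetical
// names, not part of this package):
//
//	type lifecycle struct {
//		quit, done         chan struct{}
//		quitOnce, doneOnce sync.Once
//	}
//
//	func (l *lifecycle) shutdown(wait func()) {
//		l.quitOnce.Do(func() { close(l.quit) }) // stop accepting new work
//		wait()                                  // e.g. s.serveWG.Wait()
//		l.doneOnce.Do(func() { close(l.done) }) // safe even if Stop and GracefulStop race
//	}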
func init() {
internal.TestingCloseConns = func(arg interface{}) {
arg.(*Server).testingCloseConns()
}
internal.TestingUseHandlerImpl = func(arg interface{}) {
arg.(*Server).opts.useHandlerImpl = true
}
}
// testingCloseConns closes all existing transports but keeps s.lis
// accepting new connections.
func (s *Server) testingCloseConns() {
s.mu.Lock()
for c := range s.conns {
c.Close()
delete(s.conns, c)
}
s.mu.Unlock()
}
// SetHeader sets the header metadata.
// When called multiple times, all the provided metadata will be merged.
// All the metadata will be sent out when one of the following happens:
......@@ -1141,7 +1274,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
}
stream, ok := transport.StreamFromContext(ctx)
if !ok {
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetHeader(md)
}
......@@ -1151,7 +1284,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
func SendHeader(ctx context.Context, md metadata.MD) error {
stream, ok := transport.StreamFromContext(ctx)
if !ok {
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
t := stream.ServerTransport()
if t == nil {
......@@ -1171,7 +1304,7 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
}
stream, ok := transport.StreamFromContext(ctx)
if !ok {
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetTrailer(md)
}
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"google.golang.org/grpc/grpclog"
)
const maxInt = int(^uint(0) >> 1)
// MethodConfig defines the configuration recommended by the service providers for a
// particular method.
// DEPRECATED: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type MethodConfig struct {
// WaitForReady indicates whether RPCs sent to this method should wait until
// the connection is ready by default (!failfast). The value specified via the
// gRPC client API will override the value set here.
WaitForReady *bool
// Timeout is the default timeout for RPCs sent to this method. The actual
// deadline used will be the minimum of the value specified here and the value
// set by the application via the gRPC client API. If either one is not set,
// then the other will be used. If neither is set, then the RPC has no deadline.
Timeout *time.Duration
// MaxReqSize is the maximum allowed payload size for an individual request in a
// stream (client->server) in bytes. The size which is measured is the serialized
// payload after per-message compression (but before stream compression) in bytes.
// The actual value used is the minimum of the value specified here and the value set
// by the application via the gRPC client API. If either one is not set, then the other
// will be used. If neither is set, then the built-in default is used.
MaxReqSize *int
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
MaxRespSize *int
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
// DEPRECATED: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type ServiceConfig struct {
// LB is the load balancer the service provider recommends. The balancer
// specified via grpc.WithBalancer will override this.
LB *string
// Methods contains a map for the methods in this service.
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
// Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig
}
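// The lookup order described above, as a caller would apply it (illustrative;
// "/foo.Bar/Baz" is a made-up method path):
//
//	mc, ok := sc.Methods["/foo.Bar/Baz"] // exact /service/method match first
//	if !ok {
//		mc, ok = sc.Methods["/foo.Bar/"] // fall back to the service default
//	}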
func parseDuration(s *string) (*time.Duration, error) {
if s == nil {
return nil, nil
}
if !strings.HasSuffix(*s, "s") {
return nil, fmt.Errorf("malformed duration %q", *s)
}
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
if len(ss) > 2 {
return nil, fmt.Errorf("malformed duration %q", *s)
}
// hasDigits is set if either the whole or fractional part of the number is
// present, since both are optional but one is required.
hasDigits := false
var d time.Duration
if len(ss[0]) > 0 {
i, err := strconv.ParseInt(ss[0], 10, 32)
if err != nil {
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
}
d = time.Duration(i) * time.Second
hasDigits = true
}
if len(ss) == 2 && len(ss[1]) > 0 {
if len(ss[1]) > 9 {
return nil, fmt.Errorf("malformed duration %q", *s)
}
f, err := strconv.ParseInt(ss[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
}
for i := 9; i > len(ss[1]); i-- {
f *= 10
}
d += time.Duration(f)
hasDigits = true
}
if !hasDigits {
return nil, fmt.Errorf("malformed duration %q", *s)
}
return &d, nil
}
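// Sanity check of parseDuration semantics (illustrative):
//
//	s := "1.5s"
//	d, err := parseDuration(&s) // *d == 1500*time.Millisecond, err == nil
//
// A missing trailing "s", more than one ".", or a fractional part longer than
// nine digits all produce a "malformed duration" error.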
type jsonName struct {
Service *string
Method *string
}
func (j jsonName) generatePath() (string, bool) {
if j.Service == nil {
return "", false
}
res := "/" + *j.Service + "/"
if j.Method != nil {
res += *j.Method
}
return res, true
}
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonMC struct {
Name *[]jsonName
WaitForReady *bool
Timeout *string
MaxRequestMessageBytes *int64
MaxResponseMessageBytes *int64
}
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
MethodConfig *[]jsonMC
}
func parseServiceConfig(js string) (ServiceConfig, error) {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err
}
sc := ServiceConfig{
LB: rsc.LoadBalancingPolicy,
Methods: make(map[string]MethodConfig),
}
if rsc.MethodConfig == nil {
return sc, nil
}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
}
d, err := parseDuration(m.Timeout)
if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err
}
mc := MethodConfig{
WaitForReady: m.WaitForReady,
Timeout: d,
}
if m.MaxRequestMessageBytes != nil {
if *m.MaxRequestMessageBytes > int64(maxInt) {
mc.MaxReqSize = newInt(maxInt)
} else {
mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
}
}
if m.MaxResponseMessageBytes != nil {
if *m.MaxResponseMessageBytes > int64(maxInt) {
mc.MaxRespSize = newInt(maxInt)
} else {
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
}
}
for _, n := range *m.Name {
if path, valid := n.generatePath(); valid {
sc.Methods[path] = mc
}
}
}
return sc, nil
}
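// Example input accepted by parseServiceConfig (illustrative values):
//
//	sc, err := parseServiceConfig(`{
//	  "loadBalancingPolicy": "round_robin",
//	  "methodConfig": [{
//	    "name": [{"service": "foo.Bar", "method": "Baz"}],
//	    "waitForReady": true,
//	    "timeout": "1.5s"
//	  }]
//	}`)
//	// err == nil; *sc.Methods["/foo.Bar/Baz"].Timeout == 1500*time.Millisecond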
func min(a, b *int) *int {
if *a < *b {
return a
}
return b
}
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
if mcMax == nil && doptMax == nil {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
return min(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
}
return doptMax
}
func newInt(b int) *int {
return &b
}
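// How getMaxSize resolves its three inputs (illustrative): a method-config
// cap wins only when it is tighter than the dial option.
//
//	*getMaxSize(newInt(1024), newInt(4096), 1<<22) // == 1024 (min of the two)
//	*getMaxSize(nil, nil, 1<<22)                   // == 1<<22 (the default)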
......@@ -125,8 +125,8 @@ func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
if s, ok := err.(*statusError); ok {
return s.status(), true
if se, ok := err.(*statusError); ok {
return se.status(), true
}
return nil, false
}
......@@ -166,3 +166,16 @@ func (s *Status) Details() []interface{} {
}
return details
}
// Code returns the Code of the error if it is a Status error, codes.OK if err
// is nil, or codes.Unknown otherwise.
func Code(err error) codes.Code {
// Don't use FromError to avoid allocation of OK status.
if err == nil {
return codes.OK
}
if se, ok := err.(*statusError); ok {
return se.status().Code()
}
return codes.Unknown
}
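// Usage sketch for Code, from a caller's perspective (illustrative):
//
//	err := status.Error(codes.NotFound, "no such user")
//	switch status.Code(err) {
//	case codes.OK:       // err was nil
//	case codes.NotFound: // a status error carrying NotFound
//	case codes.Unknown:  // any non-nil, non-status error
//	}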
......@@ -19,7 +19,6 @@
package grpc
import (
"bytes"
"errors"
"io"
"sync"
......@@ -29,6 +28,7 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
......@@ -94,15 +94,23 @@ type ClientStream interface {
Stream
}
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
// NewStream creates a new Stream for the client side. This is typically
// called by generated code.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
return newClientStream(ctx, desc, cc, method, opts...)
}
// NewClientStream creates a new Stream for the client side. This is typically
// called by generated code.
//
// DEPRECATED: Use ClientConn.NewStream instead.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
return cc.NewStream(ctx, desc, method, opts...)
}
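// cc.dopts.streamInt above is a StreamClientInterceptor, typically installed
// with grpc.WithStreamInterceptor. A minimal pass-through logger (illustrative;
// logStream is a made-up name):
//
//	func logStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,
//		method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
//		grpclog.Infof("opening stream %s", method)
//		return streamer(ctx, desc, cc, method, opts...)
//	}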
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
var (
t transport.ClientTransport
......@@ -116,7 +124,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil {
if mc.Timeout != nil && *mc.Timeout >= 0 {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
defer func() {
if err != nil {
......@@ -143,8 +151,24 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// time soon, so we ask the transport to flush the header.
Flush: desc.ClientStreams,
}
if cc.dopts.cp != nil {
// Set our outgoing compression according to the UseCompressor CallOption, if
// set. In that case, also find the compressor from the encoding package.
// Otherwise, use the compressor configured by the WithCompressor DialOption,
// if set.
var cp Compressor
var comp encoding.Compressor
if ct := c.compressorType; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
comp = encoding.GetCompressor(ct)
if comp == nil {
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
} else if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
cp = cc.dopts.cp
}
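// Call-site view of the precedence above (illustrative, and assuming a
// compressor named "gzip" has been registered with the encoding package):
// the per-call UseCompressor option overrides a WithCompressor dial option.
//
//	err := grpc.Invoke(ctx, "/foo.Bar/Baz", req, reply, cc,
//		grpc.UseCompressor("gzip"))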
if c.creds != nil {
callHdr.Creds = c.creds
......@@ -189,42 +213,46 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
for {
// Check whether the context has expired, and bail out if so. This prevents
// us from looping forever when an error occurs for wait-for-ready RPCs where
// no data is sent on the wire.
select {
case <-ctx.Done():
return nil, toRPCErr(ctx.Err())
default:
}
t, done, err = cc.getTransport(ctx, c.failFast)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := status.FromError(err); ok {
return nil, err
}
if err == errConnClosing || err == errConnUnavailable {
if c.failFast {
return nil, Errorf(codes.Unavailable, "%v", err)
}
continue
}
// All the other errors are treated as Internal errors.
return nil, Errorf(codes.Internal, "%v", err)
return nil, err
}
s, err = t.NewStream(ctx, callHdr)
if err != nil {
if _, ok := err.(transport.ConnectionError); ok && done != nil {
// If the error is a connection error, the transport was sending data on the
// wire, and we are not sure whether anything made it out.
// If it is not a connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
if done != nil {
done(balancer.DoneInfo{Err: err})
doneInfo := balancer.DoneInfo{Err: err}
if _, ok := err.(transport.ConnectionError); ok {
// If the error is a connection error, the transport was sending data on the
// wire, and we are not sure whether anything made it out.
// If it is not a connection error, we are sure nothing has been sent.
doneInfo.BytesSent = true
}
done(doneInfo)
done = nil
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
// In the event of any error from NewStream, we never attempted to write
// anything to the wire, so we can retry indefinitely for non-fail-fast
// RPCs.
if !c.failFast {
continue
}
return nil, toRPCErr(err)
}
break
}
// Set callInfo.peer object from stream's context.
if peer, ok := peer.FromContext(s.Context()); ok {
c.peer = peer
......@@ -234,8 +262,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
c: c,
desc: desc,
codec: cc.dopts.codec,
cp: cc.dopts.cp,
cp: cp,
dc: cc.dopts.dc,
comp: comp,
cancel: cancel,
done: done,
......@@ -249,8 +278,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
statsCtx: ctx,
statsHandler: cc.dopts.copts.StatsHandler,
}
// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
// when there are no pending I/O operations on this stream.
// Listen on s.Context().Done() to detect cancellation and s.Done() to detect
// normal termination when there are no pending I/O operations on this stream.
go func() {
select {
case <-t.Error():
......@@ -277,15 +306,20 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
opts []CallOption
c *callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
dc Decompressor
opts []CallOption
c *callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
desc *StreamDesc
codec Codec
cp Compressor
dc Decompressor
comp encoding.Compressor
decomp encoding.Compressor
decompSet bool
cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
......@@ -361,15 +395,15 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
Client: true,
}
}
hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
if err != nil {
return err
}
if cs.c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(data) > *cs.c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
}
err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
if err == nil && outPayload != nil {
......@@ -387,9 +421,25 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
}
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if !cs.decompSet {
// Block until we receive headers containing received message encoding.
if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
if cs.dc == nil || cs.dc.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
cs.dc = nil
cs.decomp = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
cs.dc = nil
}
// Only initialize this state once per stream.
cs.decompSet = true
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
......@@ -413,9 +463,9 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
// Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload.
if cs.c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
......@@ -486,11 +536,11 @@ func (cs *clientStream) finish(err error) {
o.after(cs.c)
}
if cs.done != nil {
updateRPCInfoInContext(cs.s.Context(), rpcInfo{
bytesSent: cs.s.BytesSent(),
bytesReceived: cs.s.BytesReceived(),
cs.done(balancer.DoneInfo{
Err: err,
BytesSent: true,
BytesReceived: cs.s.BytesReceived(),
})
cs.done(balancer.DoneInfo{Err: err})
cs.done = nil
}
if cs.statsHandler != nil {
......@@ -540,12 +590,16 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
t transport.ServerTransport
s *transport.Stream
p *parser
codec Codec
cp Compressor
dc Decompressor
comp encoding.Compressor
decomp encoding.Compressor
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
......@@ -601,12 +655,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if ss.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
if err != nil {
return err
}
if len(data) > ss.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
......@@ -641,12 +695,12 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
if err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
......@@ -655,3 +709,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
return nil
}
// MethodFromServerStream returns the method string for the input stream.
// The returned string is in the format of "/service/method".
func MethodFromServerStream(stream ServerStream) (string, bool) {
s, ok := transport.StreamFromContext(stream.Context())
if !ok {
return "", ok
}
return s.Method(), ok
}
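// Typical use inside a streaming handler or interceptor (illustrative):
//
//	if m, ok := grpc.MethodFromServerStream(ss); ok {
//		grpclog.Infof("serving %s", m) // e.g. "/helloworld.Greeter/SayHello"
//	}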
......@@ -41,12 +41,9 @@ const (
gamma = 2
)
var (
// Adding arbitrary data to ping so that its ack can be
// identified.
// Easter-egg: what does the ping message say?
bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
)
// Adding arbitrary data to ping so that its ack can be identified.
// Easter-egg: what does the ping message say?
var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
type bdpEstimator struct {
// sentAt is the time when the ping was sent.
......
......@@ -20,9 +20,9 @@ package transport
import (
"fmt"
"io"
"math"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/http2"
......@@ -49,7 +49,7 @@ const (
// defaultLocalSendQuota sets the default value for the number of data
// bytes that each stream can schedule before some of them are
// flushed out.
defaultLocalSendQuota = 64 * 1024
defaultLocalSendQuota = 128 * 1024
)
// The following defines various control items which could flow through
......@@ -89,12 +89,16 @@ type windowUpdate struct {
func (*windowUpdate) item() {}
type settings struct {
ack bool
ss []http2.Setting
ss []http2.Setting
}
func (*settings) item() {}
type settingsAck struct {
}
func (*settingsAck) item() {}
type resetStream struct {
streamID uint32
code http2.ErrCode
......@@ -112,6 +116,7 @@ type goAway struct {
func (*goAway) item() {}
type flushIO struct {
closeTr bool
}
func (*flushIO) item() {}
......@@ -126,9 +131,8 @@ func (*ping) item() {}
// quotaPool is a pool which accumulates the quota and sends it to acquire()
// when it is available.
type quotaPool struct {
c chan int
mu sync.Mutex
c chan struct{}
version uint32
quota int
}
......@@ -136,12 +140,8 @@ type quotaPool struct {
// newQuotaPool creates a quotaPool which has quota q available to consume.
func newQuotaPool(q int) *quotaPool {
qb := &quotaPool{
c: make(chan int, 1),
}
if q > 0 {
qb.c <- q
} else {
qb.quota = q
quota: q,
c: make(chan struct{}, 1),
}
return qb
}
......@@ -155,60 +155,83 @@ func (qb *quotaPool) add(v int) {
}
func (qb *quotaPool) lockedAdd(v int) {
select {
case n := <-qb.c:
qb.quota += n
default:
}
qb.quota += v
var wakeUp bool
if qb.quota <= 0 {
return
wakeUp = true // Wake up potential waiters.
}
// After the pool has been created, this is the only place that sends on
// the channel. Since mu is held at this point and any quota that was sent
// on the channel has been retrieved, we know that this code will always
// place any positive quota value on the channel.
select {
case qb.c <- qb.quota:
qb.quota = 0
default:
qb.quota += v
if wakeUp && qb.quota > 0 {
select {
case qb.c <- struct{}{}:
default:
}
}
}
func (qb *quotaPool) addAndUpdate(v int) {
qb.mu.Lock()
defer qb.mu.Unlock()
qb.lockedAdd(v)
// Update the version only after having added to the quota
// so that if acquireWithVersion sees the new version it is
// guaranteed to have seen the updated quota.
// Also, still keep this inside of the lock, so that when
// compareAndExecute is processing, this function doesn't
// get executed partially (quota gets updated but the version
// doesn't).
atomic.AddUint32(&(qb.version), 1)
qb.version++
qb.mu.Unlock()
}
func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
return qb.c, atomic.LoadUint32(&(qb.version))
func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
qb.mu.Lock()
if qb.quota > 0 {
if v > qb.quota {
v = qb.quota
}
qb.quota -= v
ver := qb.version
qb.mu.Unlock()
return v, ver, nil
}
qb.mu.Unlock()
for {
select {
case <-wc.ctx.Done():
return 0, 0, ContextErr(wc.ctx.Err())
case <-wc.tctx.Done():
return 0, 0, ErrConnClosing
case <-wc.done:
return 0, 0, io.EOF
case <-wc.goAway:
return 0, 0, errStreamDrain
case <-qb.c:
qb.mu.Lock()
if qb.quota > 0 {
if v > qb.quota {
v = qb.quota
}
qb.quota -= v
ver := qb.version
if qb.quota > 0 {
select {
case qb.c <- struct{}{}:
default:
}
}
qb.mu.Unlock()
return v, ver, nil
}
qb.mu.Unlock()
}
}
}
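// Caller's contract for get (illustrative; consume is a stand-in for the
// caller's own bookkeeping): ask for up to v, receive possibly less, and
// return whatever goes unused.
//
//	got, ver, err := qb.get(1024, w) // blocks until quota or a waiter channel fires
//	if err != nil {
//		return err
//	}
//	n := consume(got) // uses n <= got bytes
//	if n < got {
//		qb.add(got - n) // hand unused quota back to the pool
//	}
//	_ = ver // later passed to compareAndExecute to detect concurrent updates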
func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
qb.mu.Lock()
defer qb.mu.Unlock()
if version == atomic.LoadUint32(&(qb.version)) {
if version == qb.version {
success()
qb.mu.Unlock()
return true
}
failure()
qb.mu.Unlock()
return false
}
// acquire returns the channel on which available quota amounts are sent.
func (qb *quotaPool) acquire() <-chan int {
return qb.c
}
// inFlow deals with inbound flow control
type inFlow struct {
mu sync.Mutex
......
// +build go1.6,!go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"net"
"net/http"
"google.golang.org/grpc/codes"
"golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
// ContextErr converts the error from the context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
}
// contextFromRequest returns a background context.
func contextFromRequest(r *http.Request) context.Context {
return context.Background()
}
// +build go1.7
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"context"
"net"
"net/http"
"google.golang.org/grpc/codes"
netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
// ContextErr converts the error from the context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, netctx.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
}
// contextFromRequest returns a context from the HTTP Request.
func contextFromRequest(r *http.Request) context.Context {
return r.Context()
}
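// Behavior shared by both build-tagged variants above (illustrative):
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
//	defer cancel()
//	<-ctx.Done()
//	serr := ContextErr(ctx.Err()) // StreamError with codes.DeadlineExceeded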
......@@ -123,10 +123,9 @@ type serverHandlerTransport struct {
// when WriteStatus is called.
writes chan func()
mu sync.Mutex
// streamDone indicates whether WriteStatus has been called and writes channel
// has been closed.
streamDone bool
// writeStatusMu blocks concurrent WriteStatus calls,
// e.g. from grpc/(*serverStream).SendMsg/RecvMsg.
writeStatusMu sync.Mutex
}
func (ht *serverHandlerTransport) Close() error {
......@@ -177,13 +176,9 @@ func (ht *serverHandlerTransport) do(fn func()) error {
}
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
ht.mu.Lock()
if ht.streamDone {
ht.mu.Unlock()
return nil
}
ht.streamDone = true
ht.mu.Unlock()
ht.writeStatusMu.Lock()
defer ht.writeStatusMu.Unlock()
err := ht.do(func() {
ht.writeCommonHeaders(s)
......@@ -222,7 +217,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
})
close(ht.writes)
if err == nil { // transport has not been closed
ht.Close()
close(ht.writes)
}
return err
}
......@@ -285,12 +284,12 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var ctx context.Context
ctx := contextFromRequest(ht.req)
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
} else {
ctx, cancel = context.WithCancel(context.Background())
ctx, cancel = context.WithCancel(ctx)
}
// requestOver is closed when either the request's context is done
......
......@@ -20,6 +20,7 @@ package transport
import (
"bytes"
"fmt"
"io"
"math"
"net"
......@@ -44,7 +45,6 @@ import (
type http2Client struct {
ctx context.Context
cancel context.CancelFunc
target string // server name/addr
userAgent string
md interface{}
conn net.Conn // underlying communication channel
......@@ -69,6 +69,9 @@ type http2Client struct {
fc *inFlow
// sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool
// localSendQuota limits the amount of data that can be scheduled
// for writing before it is actually written out.
localSendQuota *quotaPool
// streamsQuota limits the max number of concurrent streams.
streamsQuota *quotaPool
......@@ -91,6 +94,11 @@ type http2Client struct {
bdpEst *bdpEstimator
outQuotaVersion uint32
// onSuccess is a callback that the client transport calls upon
// receiving the server preface to signal that a successful HTTP/2
// connection was established.
onSuccess func()
mu sync.Mutex // guard the following variables
state transportState // the state of underlying connection
activeStreams map[uint32]*Stream
......@@ -109,7 +117,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
if fn != nil {
return fn(ctx, addr)
}
return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
return dialContext(ctx, "tcp", addr)
}
func isTemporary(err error) bool {
......@@ -143,12 +151,10 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
defer func() {
connectCancel()
if err != nil {
cancel()
}
......@@ -173,7 +179,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
)
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn)
conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
// Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates.
......@@ -208,7 +214,6 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
t := &http2Client{
ctx: ctx,
cancel: cancel,
target: addr.Addr,
userAgent: opts.UserAgent,
md: addr.Metadata,
conn: conn,
......@@ -225,6 +230,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
controlBuf: newControlBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
localSendQuota: newQuotaPool(defaultLocalSendQuota),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
......@@ -236,6 +242,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
kp: kp,
statsHandler: opts.StatsHandler,
initialWindowSize: initialWindowSize,
onSuccess: onSuccess,
}
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
......@@ -296,7 +303,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
t.framer.writer.Flush()
go func() {
loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
t.Close()
t.conn.Close()
}()
if t.kp.Time != infinity {
go t.keepalive()
......@@ -307,16 +314,15 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &Stream{
id: t.nextID,
done: make(chan struct{}),
goAway: make(chan struct{}),
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
fc: &inFlow{limit: uint32(t.initialWindowSize)},
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
localSendQuota: newQuotaPool(defaultLocalSendQuota),
headerChan: make(chan struct{}),
id: t.nextID,
done: make(chan struct{}),
goAway: make(chan struct{}),
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
fc: &inFlow{limit: uint32(t.initialWindowSize)},
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
headerChan: make(chan struct{}),
}
t.nextID += 2
s.requestRead = func(n int) {
......@@ -336,7 +342,12 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
t.updateWindow(s, uint32(n))
},
}
s.waiters = waiters{
ctx: s.ctx,
tctx: t.ctx,
done: s.done,
goAway: s.goAway,
}
return s
}
......@@ -402,22 +413,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
if t.state == draining {
t.mu.Unlock()
return nil, ErrStreamDrain
return nil, errStreamDrain
}
if t.state != reachable {
t.mu.Unlock()
return nil, ErrConnClosing
}
t.mu.Unlock()
sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
if err != nil {
// Get a quota of 1 from streamsQuota.
if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
return nil, err
}
// Returns the quota balance back.
if sq > 1 {
t.streamsQuota.add(sq - 1)
}
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
// first and create a slice of that exact size.
// Make the slice of a certain predictable size to reduce allocations made by append.
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
......@@ -477,7 +484,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if t.state == draining {
t.mu.Unlock()
t.streamsQuota.add(1)
return nil, ErrStreamDrain
return nil, errStreamDrain
}
if t.state != reachable {
t.mu.Unlock()
......@@ -505,10 +512,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
})
t.mu.Unlock()
s.mu.Lock()
s.bytesSent = true
s.mu.Unlock()
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
......@@ -582,16 +585,16 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
func (t *http2Client) Close() (err error) {
func (t *http2Client) Close() error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
return nil
}
t.state = closing
t.mu.Unlock()
t.cancel()
err = t.conn.Close()
err := t.conn.Close()
t.mu.Lock()
streams := t.activeStreams
t.activeStreams = nil
......@@ -659,44 +662,44 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
var (
streamQuota int
streamQuotaVer uint32
err error
)
for idx, r := range [][]byte{hdr, data} {
for len(r) > 0 {
size := http2MaxFrameLen
// Wait until the stream has some quota to send the data.
quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan)
if err != nil {
return err
if size > len(r) {
size = len(r)
}
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
if streamQuota == 0 { // Used up all the locally cached stream quota.
// Get all the stream quota there is.
streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
if err != nil {
return err
}
}
if size > streamQuota {
size = streamQuota
}
// Get size's worth of quota from the transport.
tq, _, err := t.sendQuotaPool.get(size, s.waiters)
if err != nil {
return err
}
if sq < size {
size = sq
}
if tq < size {
size = tq
}
if size > len(r) {
size = len(r)
}
p := r[:size]
ps := len(p)
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
// Acquire local send quota to be able to write to the controlBuf.
ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
ltq, _, err := t.localSendQuota.get(size, s.waiters)
if err != nil {
if _, ok := err.(ConnectionError); !ok {
t.sendQuotaPool.add(ps)
}
return err
}
s.localSendQuota.add(ltq - ps) // It's ok if we make it negative.
// even if ltq is smaller than size we don't adjust size since
// ltq is only a soft limit.
streamQuota -= size
p := r[:size]
var endStream bool
// See if this is the last frame to be written.
if opts.Last {
......@@ -711,21 +714,25 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
}
}
success := func() {
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }})
if ps < sq {
s.sendQuotaPool.lockedAdd(sq - ps)
}
r = r[ps:]
ltq := ltq
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
r = r[size:]
}
failure := func() {
s.sendQuotaPool.lockedAdd(sq)
failure := func() { // The stream quota version must have changed.
// Our streamQuota cache is invalidated now, so give it back.
s.sendQuotaPool.lockedAdd(streamQuota + size)
}
if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
t.sendQuotaPool.add(ps)
s.localSendQuota.add(ps)
if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
// Couldn't send this chunk out.
t.sendQuotaPool.add(size)
t.localSendQuota.add(ltq)
streamQuota = 0
}
}
}
if streamQuota > 0 { // Add the left over quota back to stream.
s.sendQuotaPool.add(streamQuota)
}
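// Worked example of the chunking loop above (illustrative numbers): sending
// r of 20000 bytes with http2MaxFrameLen == 16384. Pass 1: size starts at
// 16384; if the stream pool hands back streamQuota == 9000, size shrinks to
// 9000, transport and local quota are acquired for those bytes, and the
// chunk is queued via controlBuf. Pass 2 repeats with the remaining 11000
// bytes, refilling the cached stream quota from the pool as needed.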
if !opts.Last {
return nil
}
......@@ -791,7 +798,6 @@ func (t *http2Client) updateFlowControl(n uint32) {
t.mu.Unlock()
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
t.controlBuf.put(&settings{
ack: false,
ss: []http2.Setting{
{
ID: http2.SettingInitialWindowSize,
......@@ -894,7 +900,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
close(s.headerChan)
s.headerDone = true
}
statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
code := http2.ErrCode(f.ErrCode)
if code == http2.ErrCodeRefusedStream {
// The stream was unprocessed by the server.
s.unprocessed = true
}
statusCode, ok := http2ErrConvTab[code]
if !ok {
warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
statusCode = codes.Unknown
......@@ -904,17 +916,48 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
s.write(recvMsg{err: io.EOF})
}
func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
if f.IsAck() {
return
}
var ss []http2.Setting
var rs []http2.Setting
var ps []http2.Setting
isMaxConcurrentStreamsMissing := true
f.ForeachSetting(func(s http2.Setting) error {
ss = append(ss, s)
if s.ID == http2.SettingMaxConcurrentStreams {
isMaxConcurrentStreamsMissing = false
}
if t.isRestrictive(s) {
rs = append(rs, s)
} else {
ps = append(ps, s)
}
return nil
})
// The settings will be applied once the ack is sent.
t.controlBuf.put(&settings{ack: true, ss: ss})
if isFirst && isMaxConcurrentStreamsMissing {
// This means the server is imposing no limit on the
// maximum number of concurrent streams initiated by the client.
// So we must remove our self-imposed limit.
ps = append(ps, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
Val: math.MaxUint32,
})
}
t.applySettings(rs)
t.controlBuf.put(&settingsAck{})
t.applySettings(ps)
}
func (t *http2Client) isRestrictive(s http2.Setting) bool {
switch s.ID {
case http2.SettingMaxConcurrentStreams:
return int(s.Val) < t.maxStreams
case http2.SettingInitialWindowSize:
// Note: we don't acquire a lock here to read streamSendQuota
// because the same goroutine updates it later.
return s.Val < t.streamSendQuota
}
return false
}
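// Concrete case for the restrictive/permissive split (illustrative): a
// restrictive setting must take effect before the ack is written, so we never
// send against a stale, larger window.
//
//	s := http2.Setting{ID: http2.SettingInitialWindowSize, Val: 16 * 1024}
//	t.isRestrictive(s) // true while streamSendQuota still holds the 64KB default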
func (t *http2Client) handlePing(f *http2.PingFrame) {
......@@ -945,12 +988,16 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.Close()
return
}
// A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387).
// The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
// with the ID of the last stream the server will process.
// Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we
// close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server
// was being sent don't get killed.
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
// sent after an RTT delay with the ID of the last stream the server will
// process.
//
// Therefore, when we get the first GoAway we don't necessarily close any
// streams. While in case of second GoAway we close all streams created after
// the GoAwayId. This way streams that were in-flight while the GoAway from
// server was being sent don't get killed.
select {
case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
......@@ -972,6 +1019,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
}
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
stream.mu.Lock()
stream.unprocessed = true
stream.finish(statusGoAway)
stream.mu.Unlock()
close(stream.goAway)
}
}
......@@ -988,11 +1040,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// It expects a lock on the transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = NoReason
t.goAwayReason = GoAwayNoReason
switch f.ErrCode {
case http2.ErrCodeEnhanceYourCalm:
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = TooManyPings
t.goAwayReason = GoAwayTooManyPings
}
}
}
......@@ -1073,7 +1125,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
s.mu.Unlock()
return
}
if len(state.mdata) > 0 {
s.trailer = state.mdata
}
......@@ -1111,7 +1162,8 @@ func (t *http2Client) reader() {
t.Close()
return
}
t.handleSettings(sf)
t.onSuccess()
t.handleSettings(sf, true)
// loop to keep reading incoming messages on this transport.
for {
......@@ -1144,7 +1196,7 @@ func (t *http2Client) reader() {
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
t.handleSettings(frame)
t.handleSettings(frame, false)
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
......@@ -1167,10 +1219,8 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
if s.Val > math.MaxInt32 {
s.Val = math.MaxInt32
}
t.mu.Lock()
ms := t.maxStreams
t.maxStreams = int(s.Val)
t.mu.Unlock()
t.streamsQuota.add(int(s.Val) - ms)
case http2.SettingInitialWindowSize:
t.mu.Lock()
......@@ -1187,14 +1237,19 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
// is duplicated between the client and the server.
// The transport layer needs to be refactored to take care of this.
func (t *http2Client) itemHandler(i item) error {
var err error
func (t *http2Client) itemHandler(i item) (err error) {
defer func() {
if err != nil {
errorf(" error in itemHandler: %v", err)
}
}()
switch i := i.(type) {
case *dataFrame:
err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
if err == nil {
i.f()
if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
return err
}
i.f()
return nil
case *headerFrame:
t.hBuf.Reset()
for _, f := range i.hf {
......@@ -1228,34 +1283,33 @@ func (t *http2Client) itemHandler(i item) error {
return err
}
}
return nil
case *windowUpdate:
err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
case *settings:
if i.ack {
t.applySettings(i.ss)
err = t.framer.fr.WriteSettingsAck()
} else {
err = t.framer.fr.WriteSettings(i.ss...)
}
return t.framer.fr.WriteSettings(i.ss...)
case *settingsAck:
return t.framer.fr.WriteSettingsAck()
case *resetStream:
// If the server needs to be informed about stream closing,
// then we need to make sure the RST_STREAM frame is written to
// the wire before the headers of the next stream waiting on
// streamQuota. We ensure this by adding to the streamsQuota pool
// only after having acquired the writableChan to send RST_STREAM.
err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
t.streamsQuota.add(1)
return err
case *flushIO:
err = t.framer.writer.Flush()
return t.framer.writer.Flush()
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
}
err = t.framer.fr.WritePing(i.ack, i.data)
return t.framer.fr.WritePing(i.ack, i.data)
default:
errorf("transport: http2Client.controller got unexpected item type %v\n", i)
errorf("transport: http2Client.controller got unexpected item type %v", i)
return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
}
return err
}
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
......
......@@ -70,7 +70,10 @@ type http2Server struct {
fc *inFlow
// sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool
stats stats.Handler
// localSendQuota limits the amount of data that can be scheduled
// for writing before it is actually written out.
localSendQuota *quotaPool
stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
......@@ -152,12 +155,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
Val: uint32(iwz)})
}
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(false, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
return nil, connectionErrorf(false, err, "transport: %v", err)
}
}
kp := config.KeepaliveParams
......@@ -199,6 +202,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
controlBuf: newControlBuffer(),
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
localSendQuota: newQuotaPool(defaultLocalSendQuota),
state: reachable,
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
......@@ -223,9 +227,39 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.stats.HandleConn(t.ctx, connBegin)
}
t.framer.writer.Flush()
defer func() {
if err != nil {
t.Close()
}
}()
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
}
frame, err := t.framer.fr.ReadFrame()
if err == io.EOF || err == io.ErrUnexpectedEOF {
return nil, err
}
if err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
}
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
}
t.handleSettings(sf)
go func() {
loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
t.Close()
t.conn.Close()
}()
go t.keepalive()
return t, nil
......@@ -316,7 +350,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.maxStreamID = streamID
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
s.localSendQuota = newQuotaPool(defaultLocalSendQuota)
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
......@@ -346,6 +379,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
t.updateWindow(s, uint32(n))
},
}
s.waiters = waiters{
ctx: s.ctx,
tctx: t.ctx,
}
handle(s)
return
}
......@@ -354,41 +391,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
// Only log if it isn't a simple tcp accept check (ie: tcp balancer doing open/close socket)
if err != io.EOF {
errorf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
t.Close()
return
}
if !bytes.Equal(preface, clientPreface) {
errorf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
t.Close()
return
}
frame, err := t.framer.fr.ReadFrame()
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
return
}
if err != nil {
errorf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
t.Close()
return
}
atomic.StoreUint32(&t.activity, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
errorf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
t.Close()
return
}
t.handleSettings(sf)
for {
frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1)
......@@ -496,7 +498,6 @@ func (t *http2Server) updateFlowControl(n uint32) {
t.mu.Unlock()
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
t.controlBuf.put(&settings{
ack: false,
ss: []http2.Setting{
{
ID: http2.SettingInitialWindowSize,
......@@ -594,12 +595,29 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
var ss []http2.Setting
var rs []http2.Setting
var ps []http2.Setting
f.ForeachSetting(func(s http2.Setting) error {
ss = append(ss, s)
if t.isRestrictive(s) {
rs = append(rs, s)
} else {
ps = append(ps, s)
}
return nil
})
t.controlBuf.put(&settings{ack: true, ss: ss})
t.applySettings(rs)
t.controlBuf.put(&settingsAck{})
t.applySettings(ps)
}
func (t *http2Server) isRestrictive(s http2.Setting) bool {
switch s.ID {
case http2.SettingInitialWindowSize:
// Note: we don't acquire a lock here to read streamSendQuota
// because the same goroutine updates it later.
return s.Val < t.streamSendQuota
}
return false
}
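The split above is about ordering: a restrictive setting (e.g. a lower SETTINGS_INITIAL_WINDOW_SIZE) must take effect locally before the ACK goes out, while a permissive one may be applied after, so the server never acknowledges limits it has not yet enforced. A hypothetical sketch of that ordering, with apply and ack standing in for applySettings and the settingsAck control item:
package settingsorder // illustrative; not part of grpc-go
import "golang.org/x/net/http2"
// applyThenAck partitions settings by restrictiveness, applies the
// restrictive ones before acknowledging, and the permissive ones after.
func applyThenAck(all []http2.Setting, isRestrictive func(http2.Setting) bool, apply func([]http2.Setting), ack func()) {
	var rs, ps []http2.Setting
	for _, s := range all {
		if isRestrictive(s) {
			rs = append(rs, s)
		} else {
			ps = append(ps, s)
		}
	}
	apply(rs) // tighten limits first...
	ack()     // ...then tell the peer the frame was seen...
	apply(ps) // ...and only then loosen anything.
}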
func (t *http2Server) applySettings(ss []http2.Setting) {
......@@ -666,7 +684,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
errorf("transport: Got to too many pings from the client, closing the connection.")
errorf("transport: Got too many pings from the client, closing the connection.")
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
......@@ -708,7 +726,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
md = s.header
s.mu.Unlock()
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
......@@ -769,7 +787,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headersSent = true
}
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
if !headersSent {
......@@ -813,7 +831,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
select {
case <-s.ctx.Done():
return ContextErr(s.ctx.Err())
......@@ -842,66 +860,69 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
var (
streamQuota int
streamQuotaVer uint32
err error
)
for _, r := range [][]byte{hdr, data} {
for len(r) > 0 {
size := http2MaxFrameLen
// Wait until the stream has some quota to send the data.
quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan)
if err != nil {
return err
if size > len(r) {
size = len(r)
}
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
if streamQuota == 0 { // Used up all the locally cached stream quota.
// Get all the stream quota there is.
streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
if err != nil {
return err
}
}
if size > streamQuota {
size = streamQuota
}
// Get size worth quota from transport.
tq, _, err := t.sendQuotaPool.get(size, s.waiters)
if err != nil {
return err
}
if sq < size {
size = sq
}
if tq < size {
size = tq
}
if size > len(r) {
size = len(r)
}
p := r[:size]
ps := len(p)
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
// Acquire local send quota to be able to write to the controlBuf.
ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
ltq, _, err := t.localSendQuota.get(size, s.waiters)
if err != nil {
if _, ok := err.(ConnectionError); !ok {
t.sendQuotaPool.add(ps)
}
return err
}
s.localSendQuota.add(ltq - ps) // It's ok we make this negative.
// Even if ltq is smaller than size, we don't adjust size, since
// ltq is only a soft limit.
streamQuota -= size
p := r[:size]
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
success := func() {
ltq := ltq
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
s.localSendQuota.add(ps)
t.localSendQuota.add(ltq)
}})
if ps < sq {
// Overbooked stream quota. Return it back.
s.sendQuotaPool.lockedAdd(sq - ps)
}
r = r[ps:]
r = r[size:]
}
failure := func() {
s.sendQuotaPool.lockedAdd(sq)
failure := func() { // The stream quota version must have changed.
// Our streamQuota cache is invalidated now, so give it back.
s.sendQuotaPool.lockedAdd(streamQuota + size)
}
if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
t.sendQuotaPool.add(ps)
s.localSendQuota.add(ps)
if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
// Couldn't send this chunk out.
t.sendQuotaPool.add(size)
t.localSendQuota.add(ltq)
streamQuota = 0
}
}
}
if streamQuota > 0 {
// Add the leftover quota back to the stream.
s.sendQuotaPool.add(streamQuota)
}
return nil
}
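The rewritten loop depends on the versioned quota pool: get(math.MaxInt32, ...) drains all currently available stream quota along with a version number, the loop spends it chunk by chunk, and compareAndExecute commits a chunk only if the version is unchanged; otherwise the cached quota is stale and is handed back. A minimal, hypothetical sketch of that primitive follows; it is a stand-in for, not a copy of, the transport's quotaPool.
package quota // illustrative; not part of grpc-go
import "sync"
// versionedPool is an illustrative stand-in for the transport's quotaPool.
type versionedPool struct {
	mu      sync.Mutex
	version uint32
	quota   int
}
// add returns quota to the pool and bumps the version, so any outstanding
// compareAndExecute based on older state will take the failure path.
func (p *versionedPool) add(n int) {
	p.mu.Lock()
	p.quota += n
	p.version++
	p.mu.Unlock()
}
// get hands out up to max quota together with the current version.
// (The real pool also blocks on waiters when no quota is available.)
func (p *versionedPool) get(max int) (int, uint32) {
	p.mu.Lock()
	defer p.mu.Unlock()
	n := p.quota
	if n > max {
		n = max
	}
	p.quota -= n
	return n, p.version
}
// compareAndExecute runs success only if the version still matches ver;
// otherwise it runs failure, signalling that the cached quota is stale.
func (p *versionedPool) compareAndExecute(ver uint32, success, failure func()) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.version == ver {
		success()
		return true
	}
	failure()
	return false
}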
......@@ -1037,11 +1058,9 @@ func (t *http2Server) itemHandler(i item) error {
case *windowUpdate:
return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
case *settings:
if i.ack {
t.applySettings(i.ss)
return t.framer.fr.WriteSettingsAck()
}
return t.framer.fr.WriteSettings(i.ss...)
case *settingsAck:
return t.framer.fr.WriteSettingsAck()
case *resetStream:
return t.framer.fr.WriteRSTStream(i.streamID, i.code)
case *goAway:
......@@ -1055,6 +1074,9 @@ func (t *http2Server) itemHandler(i item) error {
if !i.headsUp {
// Stop accepting more streams now.
t.state = draining
if len(t.activeStreams) == 0 {
i.closeConn = true
}
t.mu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
return err
......@@ -1062,8 +1084,7 @@ func (t *http2Server) itemHandler(i item) error {
if i.closeConn {
// Abruptly close the connection following the GoAway (via
// loopyWriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return fmt.Errorf("transport: Connection closing")
t.controlBuf.put(&flushIO{closeTr: true})
}
return nil
}
......@@ -1093,7 +1114,13 @@ func (t *http2Server) itemHandler(i item) error {
}()
return nil
case *flushIO:
return t.framer.writer.Flush()
if err := t.framer.writer.Flush(); err != nil {
return err
}
if i.closeTr {
return ErrConnClosing
}
return nil
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
......@@ -1141,7 +1168,7 @@ func (t *http2Server) closeStream(s *Stream) {
t.idle = time.Now()
}
if t.state == draining && len(t.activeStreams) == 0 {
defer t.Close()
defer t.controlBuf.put(&flushIO{closeTr: true})
}
t.mu.Unlock()
// In case stream sending and receiving are invoked in separate
......
......@@ -17,16 +17,15 @@
*/
// Package transport defines and implements a message-oriented communication
// channel to complete various transactions (e.g., an RPC).
// channel to complete various transactions (e.g., an RPC). It is meant for
// grpc-internal usage and is not intended to be imported directly by users.
package transport // import "google.golang.org/grpc/transport"
import (
stdctx "context"
"fmt"
"io"
"net"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/http2"
......@@ -134,7 +133,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
case <-r.ctx.Done():
return 0, ContextErr(r.ctx.Err())
case <-r.goAway:
return 0, ErrStreamDrain
return 0, errStreamDrain
case m := <-r.recv.get():
r.recv.load()
if m.err != nil {
......@@ -211,66 +210,67 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
id uint32
// nil for client side Stream.
st ServerTransport
// ctx is the associated context of the stream.
ctx context.Context
// cancel is always nil for client side Stream.
cancel context.CancelFunc
// done is closed when the final status arrives.
done chan struct{}
// goAway is closed when the server sent a GOAWAY signal before this stream was initiated.
goAway chan struct{}
// method records the associated RPC method of the stream.
method string
id uint32
st ServerTransport // nil for client side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed when the final status arrives
goAway chan struct{} // closed when a GOAWAY control message is received
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
trReader io.Reader
fc *inFlow
recvQuota uint32
// TODO: Remove this unused variable.
// The accumulated inbound quota pending for window update.
updateQuota uint32
waiters waiters
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if need be.
// is used to adjust flow control, if needed.
requestRead func(int)
sendQuotaPool *quotaPool
localSendQuota *quotaPool
// Close headerChan to indicate the end of reception of header metadata.
headerChan chan struct{}
// header caches the received header metadata.
header metadata.MD
// The key-value map of trailer metadata.
trailer metadata.MD
mu sync.RWMutex // guard the following
// headerOk becomes true when the first header is about to be sent.
headerOk bool
sendQuotaPool *quotaPool
headerChan chan struct{} // closed to indicate the end of header metadata.
headerDone bool // set when headerChan is closed. Used to avoid closing headerChan multiple times.
header metadata.MD // the received header metadata.
trailer metadata.MD // the key-value map of trailer metadata.
mu sync.RWMutex // guard the following
headerOk bool // becomes true when the first header is about to be sent
state streamState
// true iff headerChan is closed. Used to avoid closing headerChan
// multiple times.
headerDone bool
// the status error received from the server.
status *status.Status
// rstStream indicates whether a RST_STREAM frame needs to be sent
// to the server to signify that this stream is closing.
rstStream bool
// rstError is the error that needs to be sent along with the RST_STREAM frame.
rstError http2.ErrCode
// bytesSent and bytesReceived indicates whether any bytes have been sent or
// received on this stream.
bytesSent bool
bytesReceived bool
status *status.Status // the status error received from the server
rstStream bool // indicates whether a RST_STREAM frame needs to be sent
rstError http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
bytesReceived bool // indicates whether any bytes have been received on this stream
unprocessed bool // set if the server sends a refused stream or GOAWAY including this stream
}
func (s *Stream) waitOnHeader() error {
if s.headerChan == nil {
// On the server headerChan is always nil since a stream originates
// only after having received headers.
return nil
}
wc := s.waiters
select {
case <-wc.ctx.Done():
return ContextErr(wc.ctx.Err())
case <-wc.goAway:
return errStreamDrain
case <-s.headerChan:
return nil
}
}
// RecvCompress returns the compression algorithm applied to the inbound
// message. It is an empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
if err := s.waitOnHeader(); err != nil {
return ""
}
return s.recvCompress
}
......@@ -295,15 +295,7 @@ func (s *Stream) GoAway() <-chan struct{} {
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is canceled/expired.
func (s *Stream) Header() (metadata.MD, error) {
var err error
select {
case <-s.ctx.Done():
err = ContextErr(s.ctx.Err())
case <-s.goAway:
err = ErrStreamDrain
case <-s.headerChan:
return s.header.Copy(), nil
}
err := s.waitOnHeader()
// Even if the stream is closed, header is returned if available.
select {
case <-s.headerChan:
......@@ -417,18 +409,19 @@ func (s *Stream) finish(st *status.Status) {
close(s.done)
}
// BytesSent indicates whether any bytes have been sent on this stream.
func (s *Stream) BytesSent() bool {
// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
s.mu.Lock()
bs := s.bytesSent
br := s.bytesReceived
s.mu.Unlock()
return bs
return br
}
// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
// Unprocessed indicates whether the server did not process this stream --
// i.e. it sent a refused stream or GOAWAY including this stream ID.
func (s *Stream) Unprocessed() bool {
s.mu.Lock()
br := s.bytesReceived
br := s.unprocessed
s.mu.Unlock()
return br
}
......@@ -514,14 +507,15 @@ type ConnectOptions struct {
// TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct {
Addr string
Metadata interface{}
Addr string
Metadata interface{}
Authority string
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
return newHTTP2Client(ctx, target, opts, timeout)
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
}
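The new signature separates the dial deadline (connectCtx) from the context that governs the transport's whole lifetime (ctx), and onSuccess replaces the old bare timeout for signalling an established connection. A hypothetical call site, with the address and the 20-second bound made up for illustration:
package dialsketch // illustrative; not part of grpc-go
import (
	"time"
	"golang.org/x/net/context"
	"google.golang.org/grpc/transport"
)
func dial(ctx context.Context) (transport.ClientTransport, error) {
	// connectCtx bounds only the handshake; ctx governs the transport's lifetime.
	connectCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
	return transport.NewClientTransport(connectCtx, ctx,
		transport.TargetInfo{Addr: "localhost:50051"}, transport.ConnectOptions{},
		func() {
			// onSuccess fires once the connection is established,
			// e.g. to reset the caller's backoff state.
		})
}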
// Options provides additional hints and information for message
......@@ -545,10 +539,6 @@ type CallHdr struct {
// Method specifies the operation to perform.
Method string
// RecvCompress specifies the compression algorithm applied on
// inbound messages.
RecvCompress string
// SendCompress specifies the compression algorithm applied on
// outbound message.
SendCompress string
......@@ -686,9 +676,13 @@ func (e ConnectionError) Origin() error {
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
// ErrStreamDrain indicates that the stream is rejected by the server because
// errStreamDrain indicates that the stream is rejected by the server because
// the server stops accepting new RPCs.
ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
// TODO: delete this error; it is no longer necessary.
errStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
// statusGoAway indicates that the server sent a GOAWAY that included this
// stream's ID in unprocessed RPCs.
statusGoAway = status.New(codes.Unavailable, "the server stopped accepting new RPCs")
)
// TODO: See if we can replace StreamError with status package errors.
......@@ -703,44 +697,27 @@ func (e StreamError) Error() string {
return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
}
// wait blocks until it can receive from one of the provided contexts or channels
func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
select {
case <-ctx.Done():
return 0, ContextErr(ctx.Err())
case <-done:
return 0, io.EOF
case <-goAway:
return 0, ErrStreamDrain
case <-tctx.Done():
return 0, ErrConnClosing
case i := <-proceed:
return i, nil
}
}
// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, stdctx.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
// waiters are passed to quotaPool get methods to
// wait on in addition to waiting on quota.
type waiters struct {
ctx context.Context
tctx context.Context
done chan struct{}
goAway chan struct{}
}
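Bundling these channels lets quotaPool's get methods select on quota and on every cancellation signal in one place, which is what the deleted wait() helper used to do at each call site. A sketch of the cancellation half of that select, assuming it lives in this package; note that nil channels (done and goAway are unset on server-side streams) simply never fire in a select:
// blockOnWaiters blocks until one of the cancellation signals in wc fires and
// returns the matching error. In the real get() this select carries an extra
// case that receives from the pool's internal quota channel.
func blockOnWaiters(wc waiters) error {
	select {
	case <-wc.ctx.Done():
		return ContextErr(wc.ctx.Err())
	case <-wc.tctx.Done():
		return ErrConnClosing
	case <-wc.done:
		return io.EOF
	case <-wc.goAway:
		return errStreamDrain
	}
}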
// GoAwayReason contains the reason for the GoAway frame received.
type GoAwayReason uint8
const (
// Invalid indicates that no GoAway frame is received.
Invalid GoAwayReason = 0
// NoReason is the default value when GoAway frame is received.
NoReason GoAwayReason = 1
// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
// was received and that the debug data said "too_many_pings".
TooManyPings GoAwayReason = 2
// GoAwayInvalid indicates that no GoAway frame is received.
GoAwayInvalid GoAwayReason = 0
// GoAwayNoReason is the default value when GoAway frame is received.
GoAwayNoReason GoAwayReason = 1
// GoAwayTooManyPings indicates that a GoAway frame with
// ErrCodeEnhanceYourCalm was received and that the debug data said
// "too_many_pings".
GoAwayTooManyPings GoAwayReason = 2
)
// loopyWriter is run in a separate goroutine. It is the single code path that will
......@@ -751,6 +728,7 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
case i := <-cbuf.get():
cbuf.load()
if err := handler(i); err != nil {
errorf("transport: Error while handling item. Err: %v", err)
return
}
case <-ctx.Done():
......@@ -762,12 +740,14 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
case i := <-cbuf.get():
cbuf.load()
if err := handler(i); err != nil {
errorf("transport: Error while handling item. Err: %v", err)
return
}
case <-ctx.Done():
return
default:
if err := handler(&flushIO{}); err != nil {
errorf("transport: Error while flushing. Err: %v", err)
return
}
break hasData
......
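loopyWriter's shape is worth spelling out: it blocks for the first item, keeps handling items without flushing while more are queued, and only flushes the framer once the buffer drains, so many frames share one write. A skeleton of that loop, with queue, handle, and flush as illustrative stand-ins for cbuf, the item handler, and the flushIO case:
// loopySketch mirrors the batching structure of loopyWriter above.
func loopySketch(ctx context.Context, queue <-chan item, handle func(item) error, flush func() error) {
	for {
		select {
		case i := <-queue: // block until there is work
			if err := handle(i); err != nil {
				return
			}
		case <-ctx.Done():
			return
		}
	hasData:
		for {
			select {
			case i := <-queue: // more queued work: handle it without flushing
				if err := handle(i); err != nil {
					return
				}
			case <-ctx.Done():
				return
			default: // queue drained: flush once, then go back to blocking
				if err := flush(); err != nil {
					return
				}
				break hasData
			}
		}
	}
}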
......@@ -8,12 +8,6 @@ die() {
exit 1
}
# TODO: Remove this check and the mangling below once "context" is imported
# directly.
if git status --porcelain | read; then
die "Uncommitted or untracked files found; commit changes first"
fi
PATH="$GOPATH/bin:$GOROOT/bin:$PATH"
# Check proto in manual runs or cron runs.
......@@ -28,8 +22,8 @@ if [ "$1" = "-install" ]; then
github.com/golang/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/golang/protobuf/protoc-gen-go \
golang.org/x/tools/cmd/stringer
github.com/client9/misspell/cmd/misspell \
github.com/golang/protobuf/protoc-gen-go
if [[ "$check_proto" = "true" ]]; then
if [[ "$TRAVIS" = "true" ]]; then
PROTOBUF_VERSION=3.3.0
......@@ -48,10 +42,16 @@ elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi
# TODO: Remove this check and the mangling below once "context" is imported
# directly.
if git status --porcelain | read; then
die "Uncommitted or untracked files found; commit changes first"
fi
git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|_string|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
# Undo any edits made by this script.
cleanup() {
......@@ -64,7 +64,7 @@ trap cleanup EXIT
git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
set +o pipefail
# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
set -o pipefail
git reset --hard HEAD
......@@ -75,4 +75,10 @@ if [[ "$check_proto" = "true" ]]; then
fi
# TODO(menghanl): fix errors in transport_test.
staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./...
staticcheck -ignore '
google.golang.org/grpc/transport/transport_test.go:SA2002
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019
' ./...
misspell -error .
......@@ -267,140 +267,180 @@
"revisionTime": "2017-10-02T23:26:14Z"
},
{
"checksumSHA1": "y6h+XSUljIi7QBgKWGNl9cRtdZA=",
"checksumSHA1": "LXTQppZOmpZb8/zNBzfXmq3GDEg=",
"path": "google.golang.org/grpc",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "OCBWpefHJ05ZkENccs7COJjWIvk=",
"checksumSHA1": "xBhmO0Vn4kzbmySioX+2gBImrkk=",
"path": "google.golang.org/grpc/balancer",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "Dkjgw1HasWvqct0IuiZdjbD7O0c=",
"checksumSHA1": "CPWX/IgaQSR3+78j4sPrvHNkW+U=",
"path": "google.golang.org/grpc/balancer/base",
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=",
"path": "google.golang.org/grpc/balancer/roundrobin",
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "bfmh2m3qW8bb6qpfS/D4Wcl4hZE=",
"path": "google.golang.org/grpc/codes",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=",
"path": "google.golang.org/grpc/connectivity",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "5ylThBvJnIcyWhL17AC9+Sdbw2E=",
"checksumSHA1": "4DnDX81AOSyVP3UJ5tQmlNcG1MI=",
"path": "google.golang.org/grpc/credentials",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "9DImIDqmAMPO24loHJ77UVJTDxQ=",
"path": "google.golang.org/grpc/encoding",
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "WxP3QV0Y4fIx5NsT0dwBp6JsrJE=",
"checksumSHA1": "H7SuPUqbPcdbNqgl+k3ohuwMAwE=",
"path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=",
"path": "google.golang.org/grpc/grpclog",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "U9vDe05/tQrvFBojOQX8Xk12W9I=",
"checksumSHA1": "Qvf3zdmRCSsiM/VoBv0qB/naHtU=",
"path": "google.golang.org/grpc/internal",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=",
"path": "google.golang.org/grpc/keepalive",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "KeUmTZV+2X46C49cKyjp+xM7fvw=",
"path": "google.golang.org/grpc/metadata",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "556Vl75S7EVxgScPckfELwn6+xo=",
"checksumSHA1": "5dwF592DPvhF2Wcex3m7iV6aGRQ=",
"path": "google.golang.org/grpc/naming",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=",
"path": "google.golang.org/grpc/peer",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "ifLyU1wZH521mt8htJZpGB/XVgQ=",
"checksumSHA1": "y8Ta+ctMP9CUTiPyPyxiD154d8w=",
"path": "google.golang.org/grpc/resolver",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "WpWF+bDzObsHf+bjoGpb/abeFxo=",
"path": "google.golang.org/grpc/resolver/dns",
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=",
"path": "google.golang.org/grpc/resolver/passthrough",
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "G9lgXNi7qClo5sM2s6TbTHLFR3g=",
"path": "google.golang.org/grpc/stats",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "3Dwz4RLstDHMPyDA7BUsYe+JP4w=",
"checksumSHA1": "tUo+M0Cb0W9ZEIt5BH30wJz/Kjc=",
"path": "google.golang.org/grpc/status",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=",
"path": "google.golang.org/grpc/tap",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
},
{
"checksumSHA1": "Bxregh/v5pH6fVa1rIo2zLCb5NI=",
"checksumSHA1": "4PldZ/0JjX6SpJYaMByY1ozywnY=",
"path": "google.golang.org/grpc/transport",
"revision": "61d37c5d657a47e4404fd6823bd598341a2595de",
"revisionTime": "2017-10-25T22:03:47Z",
"version": "v1.7.1",
"versionExact": "v1.7.1"
"revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
"revisionTime": "2018-01-08T22:01:35Z",
"version": "v1.9.1",
"versionExact": "v1.9.1"
}
],
"rootPath": "gitlab.com/gitlab-org/gitlab-workhorse"
......