path: root/vendor/golang.org/x/net/http2
author    Christopher Speller <crspeller@gmail.com>  2017-03-13 12:54:22 -0400
committer GitHub <noreply@github.com>                2017-03-13 12:54:22 -0400
commit    c281ee3b61e8ab53ff118866d72618ae8cce582b (patch)
tree      776e7bdf6c8bfbb9a1dee5976496ab065959991f /vendor/golang.org/x/net/http2
parent    3ada7a41a7fb13abef19dd63dc56b720900dbaa9 (diff)
Updating server dependencies. Also adding github.com/jaytaylor/html2text and gopkg.in/gomail.v2 (#5748)
Diffstat (limited to 'vendor/golang.org/x/net/http2')
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go    |    2
-rw-r--r--  vendor/golang.org/x/net/http2/databuffer.go          |  146
-rw-r--r--  vendor/golang.org/x/net/http2/databuffer_test.go     |  155
-rw-r--r--  vendor/golang.org/x/net/http2/fixed_buffer.go        |   60
-rw-r--r--  vendor/golang.org/x/net/http2/fixed_buffer_test.go   |  128
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go               |   81
-rw-r--r--  vendor/golang.org/x/net/http2/frame_test.go          |   91
-rw-r--r--  vendor/golang.org/x/net/http2/go18.go                |    6
-rw-r--r--  vendor/golang.org/x/net/http2/go18_test.go           |   13
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode.go        |   29
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode_test.go   |   70
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack.go         |  104
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack_test.go    |  146
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/tables.go        |  250
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/tables_test.go   |  188
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go                |    2
-rw-r--r--  vendor/golang.org/x/net/http2/server.go              |  199
-rw-r--r--  vendor/golang.org/x/net/http2/server_test.go         |  155
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go           |    9
-rw-r--r--  vendor/golang.org/x/net/http2/transport_test.go      |    2
20 files changed, 1193 insertions(+), 643 deletions(-)
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index b13941258..bdf5652b0 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -247,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
-// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }
diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go
new file mode 100644
index 000000000..a3067f8de
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/databuffer.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+ dataChunkSizeClasses = []int{
+ 1 << 10,
+ 2 << 10,
+ 4 << 10,
+ 8 << 10,
+ 16 << 10,
+ }
+ dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return make([]byte, 1<<10) }},
+ {New: func() interface{} { return make([]byte, 2<<10) }},
+ {New: func() interface{} { return make([]byte, 4<<10) }},
+ {New: func() interface{} { return make([]byte, 8<<10) }},
+ {New: func() interface{} { return make([]byte, 16<<10) }},
+ }
+)
+
+func getDataBufferChunk(size int64) []byte {
+ i := 0
+ for ; i < len(dataChunkSizeClasses)-1; i++ {
+ if size <= int64(dataChunkSizeClasses[i]) {
+ break
+ }
+ }
+ return dataChunkPools[i].Get().([]byte)
+}
+
+func putDataBufferChunk(p []byte) {
+ for i, n := range dataChunkSizeClasses {
+ if len(p) == n {
+ dataChunkPools[i].Put(p)
+ return
+ }
+ }
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+ // If the last chunk is empty, allocate a new chunk. Try to allocate
+ // enough to fully copy p plus any additional bytes we expect to
+ // receive. However, this may allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
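
For reference, the size-class selection used by getDataBufferChunk above can be reduced to the following standalone sketch. The identifiers (sizeClasses, getChunk, putChunk) are illustrative rather than the vendored names; the pool sizes mirror dataChunkSizeClasses.

package main

import (
	"fmt"
	"sync"
)

// Size classes mirroring dataChunkSizeClasses in the vendored code above.
var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// One pool per size class, each allocating chunks of that class's size.
var pools = func() []sync.Pool {
	ps := make([]sync.Pool, len(sizeClasses))
	for i, n := range sizeClasses {
		n := n // capture for the closure
		ps[i].New = func() interface{} { return make([]byte, n) }
	}
	return ps
}()

// getChunk returns a chunk from the smallest class that fits size,
// falling back to the largest class for big requests.
func getChunk(size int64) []byte {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return pools[i].Get().([]byte)
}

// putChunk hands a chunk back to its pool so a later getChunk can reuse it.
func putChunk(p []byte) {
	for i, n := range sizeClasses {
		if len(p) == n {
			pools[i].Put(p)
			return
		}
	}
}

func main() {
	c := getChunk(3000) // 3000 bytes falls into the 4KB class
	fmt.Println(len(c)) // 4096
	putChunk(c)
}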
diff --git a/vendor/golang.org/x/net/http2/databuffer_test.go b/vendor/golang.org/x/net/http2/databuffer_test.go
new file mode 100644
index 000000000..ca227b528
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/databuffer_test.go
@@ -0,0 +1,155 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func fmtDataChunk(chunk []byte) string {
+ out := ""
+ var last byte
+ var count int
+ for _, c := range chunk {
+ if c != last {
+ if count > 0 {
+ out += fmt.Sprintf(" x %d ", count)
+ count = 0
+ }
+ out += string([]byte{c})
+ last = c
+ }
+ count++
+ }
+ if count > 0 {
+ out += fmt.Sprintf(" x %d", count)
+ }
+ return out
+}
+
+func fmtDataChunks(chunks [][]byte) string {
+ var out string
+ for _, chunk := range chunks {
+ out += fmt.Sprintf("{%q}", fmtDataChunk(chunk))
+ }
+ return out
+}
+
+func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) {
+ // Run setup, then read the remaining bytes from the dataBuffer and check
+ // that they match wantBytes. We use different read sizes to check corner
+ // cases in Read.
+ for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} {
+ t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) {
+ b := setup(t)
+ buf := make([]byte, readSize)
+ var gotRead bytes.Buffer
+ for {
+ n, err := b.Read(buf)
+ gotRead.Write(buf[:n])
+ if err == errReadEmpty {
+ break
+ }
+ if err != nil {
+ t.Fatalf("error after %v bytes: %v", gotRead.Len(), err)
+ }
+ }
+ if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) {
+ t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want))
+ }
+ })
+ }
+}
+
+func TestDataBufferAllocation(t *testing.T) {
+ writes := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024-1),
+ []byte{'a'},
+ bytes.Repeat([]byte("b"), 4*1024-1),
+ []byte{'b'},
+ bytes.Repeat([]byte("c"), 8*1024-1),
+ []byte{'c'},
+ bytes.Repeat([]byte("d"), 16*1024-1),
+ []byte{'d'},
+ bytes.Repeat([]byte("e"), 32*1024),
+ }
+ var wantRead bytes.Buffer
+ for _, p := range writes {
+ wantRead.Write(p)
+ }
+
+ testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{}
+ for _, p := range writes {
+ if n, err := b.Write(p); n != len(p) || err != nil {
+ t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
+ }
+ }
+ want := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024),
+ bytes.Repeat([]byte("b"), 4*1024),
+ bytes.Repeat([]byte("c"), 8*1024),
+ bytes.Repeat([]byte("d"), 16*1024),
+ bytes.Repeat([]byte("e"), 16*1024),
+ bytes.Repeat([]byte("e"), 16*1024),
+ }
+ if !reflect.DeepEqual(b.chunks, want) {
+ t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
+ }
+ return b
+ })
+}
+
+func TestDataBufferAllocationWithExpected(t *testing.T) {
+ writes := [][]byte{
+ bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB
+ bytes.Repeat([]byte("b"), 14*1024),
+ bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more
+ bytes.Repeat([]byte("d"), 2*1024),
+ bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB
+ }
+ var wantRead bytes.Buffer
+ for _, p := range writes {
+ wantRead.Write(p)
+ }
+
+ testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{expected: 32 * 1024}
+ for _, p := range writes {
+ if n, err := b.Write(p); n != len(p) || err != nil {
+ t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
+ }
+ }
+ want := [][]byte{
+ append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...),
+ append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...),
+ bytes.Repeat([]byte("e"), 1*1024),
+ }
+ if !reflect.DeepEqual(b.chunks, want) {
+ t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
+ }
+ return b
+ })
+}
+
+func TestDataBufferWriteAfterPartialRead(t *testing.T) {
+ testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer {
+ b := &dataBuffer{}
+ if n, err := b.Write([]byte("abcd")); n != 4 || err != nil {
+ t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err)
+ }
+ p := make([]byte, 2)
+ if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) {
+ t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err)
+ }
+ if n, err := b.Write([]byte("xyz")); n != 3 || err != nil {
+ t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err)
+ }
+ return b
+ })
+}
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go
deleted file mode 100644
index 47da0f0bf..000000000
--- a/vendor/golang.org/x/net/http2/fixed_buffer.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
-)
-
-// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
-// It never allocates, but moves old data as new data is written.
-type fixedBuffer struct {
- buf []byte
- r, w int
-}
-
-var (
- errReadEmpty = errors.New("read from empty fixedBuffer")
- errWriteFull = errors.New("write on full fixedBuffer")
-)
-
-// Read copies bytes from the buffer into p.
-// It is an error to read when no data is available.
-func (b *fixedBuffer) Read(p []byte) (n int, err error) {
- if b.r == b.w {
- return 0, errReadEmpty
- }
- n = copy(p, b.buf[b.r:b.w])
- b.r += n
- if b.r == b.w {
- b.r = 0
- b.w = 0
- }
- return n, nil
-}
-
-// Len returns the number of bytes of the unread portion of the buffer.
-func (b *fixedBuffer) Len() int {
- return b.w - b.r
-}
-
-// Write copies bytes from p into the buffer.
-// It is an error to write more data than the buffer can hold.
-func (b *fixedBuffer) Write(p []byte) (n int, err error) {
- // Slide existing data to beginning.
- if b.r > 0 && len(p) > len(b.buf)-b.w {
- copy(b.buf, b.buf[b.r:b.w])
- b.w -= b.r
- b.r = 0
- }
-
- // Write new data.
- n = copy(b.buf[b.w:], p)
- b.w += n
- if n < len(p) {
- err = errWriteFull
- }
- return n, err
-}
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer_test.go b/vendor/golang.org/x/net/http2/fixed_buffer_test.go
deleted file mode 100644
index f5432f8d8..000000000
--- a/vendor/golang.org/x/net/http2/fixed_buffer_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "reflect"
- "testing"
-)
-
-var bufferReadTests = []struct {
- buf fixedBuffer
- read, wn int
- werr error
- wp []byte
- wbuf fixedBuffer
-}{
- {
- fixedBuffer{[]byte{'a', 0}, 0, 1},
- 5, 1, nil, []byte{'a'},
- fixedBuffer{[]byte{'a', 0}, 0, 0},
- },
- {
- fixedBuffer{[]byte{0, 'a'}, 1, 2},
- 5, 1, nil, []byte{'a'},
- fixedBuffer{[]byte{0, 'a'}, 0, 0},
- },
- {
- fixedBuffer{[]byte{'a', 'b'}, 0, 2},
- 1, 1, nil, []byte{'a'},
- fixedBuffer{[]byte{'a', 'b'}, 1, 2},
- },
- {
- fixedBuffer{[]byte{}, 0, 0},
- 5, 0, errReadEmpty, []byte{},
- fixedBuffer{[]byte{}, 0, 0},
- },
-}
-
-func TestBufferRead(t *testing.T) {
- for i, tt := range bufferReadTests {
- read := make([]byte, tt.read)
- n, err := tt.buf.Read(read)
- if n != tt.wn {
- t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
- continue
- }
- if err != tt.werr {
- t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
- continue
- }
- read = read[:n]
- if !reflect.DeepEqual(read, tt.wp) {
- t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
- }
- if !reflect.DeepEqual(tt.buf, tt.wbuf) {
- t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
- }
- }
-}
-
-var bufferWriteTests = []struct {
- buf fixedBuffer
- write, wn int
- werr error
- wbuf fixedBuffer
-}{
- {
- buf: fixedBuffer{
- buf: []byte{},
- },
- wbuf: fixedBuffer{
- buf: []byte{},
- },
- },
- {
- buf: fixedBuffer{
- buf: []byte{1, 'a'},
- },
- write: 1,
- wn: 1,
- wbuf: fixedBuffer{
- buf: []byte{0, 'a'},
- w: 1,
- },
- },
- {
- buf: fixedBuffer{
- buf: []byte{'a', 1},
- r: 1,
- w: 1,
- },
- write: 2,
- wn: 2,
- wbuf: fixedBuffer{
- buf: []byte{0, 0},
- w: 2,
- },
- },
- {
- buf: fixedBuffer{
- buf: []byte{},
- },
- write: 5,
- werr: errWriteFull,
- wbuf: fixedBuffer{
- buf: []byte{},
- },
- },
-}
-
-func TestBufferWrite(t *testing.T) {
- for i, tt := range bufferWriteTests {
- n, err := tt.buf.Write(make([]byte, tt.write))
- if n != tt.wn {
- t.Errorf("#%d: wrote %d bytes; want %d", i, n, tt.wn)
- continue
- }
- if err != tt.werr {
- t.Errorf("#%d: error = %v; want %v", i, err, tt.werr)
- continue
- }
- if !reflect.DeepEqual(tt.buf, tt.wbuf) {
- t.Errorf("#%d: buf = %+v; want %+v", i, tt.buf, tt.wbuf)
- }
- }
-}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 358833fed..3b1489072 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@@ -312,7 +312,7 @@ type Framer struct {
MaxHeaderListSize uint32
// TODO: track which type of frame & with which flags was sent
- // last. Then return an error (unless AllowIllegalWrites) if
+ // last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
@@ -323,6 +323,8 @@ type Framer struct {
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *frameCache // nil if frames aren't reused (default)
}
func (fr *Framer) maxHeaderListSize() uint32 {
@@ -398,6 +400,27 @@ const (
maxFrameSize = 1<<24 - 1
)
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+ dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+ if fc == nil {
+ return &DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
@@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
- f, err := typeFrameParser(fh.Type)(fh, payload)
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte {
return f.data
}
-func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
@@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// PROTOCOL_ERROR.
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
- f := &DataFrame{
- FrameHeader: fh,
- }
+ f := fc.getDataFrame()
+ f.FrameHeader = fh
+
var padSize byte
if fh.Flags.Has(FlagDataPadded) {
var err error
@@ -600,6 +623,7 @@ var (
errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large")
+ errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
func validStreamIDOrZero(streamID uint32) bool {
@@ -623,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@@ -631,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
- if len(pad) > 255 {
- return errPadLength
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return errPadBytes
+ }
+ }
+ }
}
var flags Flags
if endStream {
@@ -660,10 +695,10 @@ type SettingsFrame struct {
p []byte
}
-func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
- // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame MUST be empty. Receipt of a
// SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
@@ -672,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
}
if fh.StreamID != 0 {
// SETTINGS frames always apply to a connection,
- // never a single stream. The stream identifier for a
+ // never a single stream. The stream identifier for a
// SETTINGS frame MUST be zero (0x0). If an endpoint
// receives a SETTINGS frame whose stream identifier
// field is anything other than 0x0, the endpoint MUST
@@ -762,7 +797,7 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -802,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}
-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID != 0 {
return nil, ConnectionError(ErrCodeProtocol)
}
@@ -842,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}
-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@@ -853,7 +888,7 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -918,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
-func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
if fh.StreamID == 0 {
- // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
@@ -1045,7 +1080,7 @@ type PriorityParam struct {
Exclusive bool
// Weight is the stream's zero-indexed weight. It should be
- // set together with StreamDep, or neither should be set. Per
+ // set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
@@ -1055,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}
-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
@@ -1102,7 +1137,7 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}
-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -1132,7 +1167,7 @@ type ContinuationFrame struct {
headerFragBuf []byte
}
-func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
@@ -1182,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
-func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}
diff --git a/vendor/golang.org/x/net/http2/frame_test.go b/vendor/golang.org/x/net/http2/frame_test.go
index 7b1933d96..37266bc58 100644
--- a/vendor/golang.org/x/net/http2/frame_test.go
+++ b/vendor/golang.org/x/net/http2/frame_test.go
@@ -141,7 +141,7 @@ func TestWriteDataPadded(t *testing.T) {
streamID: 1,
endStream: false,
data: []byte("foo"),
- pad: []byte("bar"),
+ pad: []byte{0, 0, 0},
wantHeader: FrameHeader{
Type: FrameData,
Flags: FlagDataPadded,
@@ -1096,6 +1096,95 @@ func TestMetaFrameHeader(t *testing.T) {
}
}
+func TestSetReuseFrames(t *testing.T) {
+ fr, buf := testFramer()
+ fr.SetReuseFrames()
+
+ // Check that DataFrames are reused. Note that
+ // SetReuseFrames only currently implements reuse of DataFrames.
+ firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t)
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("", 0, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("HHH", 3, fr, buf, t)
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+}
+
+func TestSetReuseFramesMoreThanOnce(t *testing.T) {
+ fr, buf := testFramer()
+ fr.SetReuseFrames()
+
+ firstDf := readAndVerifyDataFrame("ABC", 3, fr, buf, t)
+ fr.SetReuseFrames()
+
+ for i := 0; i < 10; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ // SetReuseFrames should be idempotent
+ fr.SetReuseFrames()
+ if df != firstDf {
+ t.Errorf("Expected Framer to return references to the same DataFrame. Have %v and %v", &df, &firstDf)
+ }
+ }
+}
+
+func TestNoSetReuseFrames(t *testing.T) {
+ fr, buf := testFramer()
+ const numNewDataFrames = 10
+ dfSoFar := make([]interface{}, numNewDataFrames)
+
+ // Check that DataFrames are not reused if SetReuseFrames wasn't called.
+ // SetReuseFrames only currently implements reuse of DataFrames.
+ for i := 0; i < numNewDataFrames; i++ {
+ df := readAndVerifyDataFrame("XYZ", 3, fr, buf, t)
+ for _, item := range dfSoFar {
+ if df == item {
+ t.Errorf("Expected Framer to return new DataFrames since SetNoReuseFrames not set.")
+ }
+ }
+ dfSoFar[i] = df
+ }
+}
+
+func readAndVerifyDataFrame(data string, length byte, fr *Framer, buf *bytes.Buffer, t *testing.T) *DataFrame {
+ var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+ fr.WriteData(streamID, true, []byte(data))
+ wantEnc := "\x00\x00" + string(length) + "\x00\x01\x01\x02\x03\x04" + data
+ if buf.String() != wantEnc {
+ t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+ }
+ f, err := fr.ReadFrame()
+ if err != nil {
+ t.Fatal(err)
+ }
+ df, ok := f.(*DataFrame)
+ if !ok {
+ t.Fatalf("got %T; want *DataFrame", f)
+ }
+ if !bytes.Equal(df.Data(), []byte(data)) {
+ t.Errorf("got %q; want %q", df.Data(), []byte(data))
+ }
+ if f.Header().Flags&1 == 0 {
+ t.Errorf("didn't see END_STREAM flag")
+ }
+ return df
+}
+
func encodeHeaderRaw(t *testing.T, pairs ...string) []byte {
var he hpackEncoder
return he.encodeHeaderRaw(t, pairs...)
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
index 633202c39..73cc2381f 100644
--- a/vendor/golang.org/x/net/http2/go18.go
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -12,7 +12,11 @@ import (
"net/http"
)
-func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ c2 := c.Clone()
+ c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
+ return c2
+}
var _ http.Pusher = (*responseWriter)(nil)
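
The cloneTLSConfig change above works around golang.org/issue/19264: Go 1.8.0's tls.Config.Clone does not copy the GetClientCertificate callback, so it is restored by hand after cloning. Application code on that toolchain could apply the same pattern; cloneWithClientCert below is an illustrative name, and once Clone copies the field itself the extra assignment is harmless.

package main

import "crypto/tls"

// cloneWithClientCert clones a tls.Config and re-attaches GetClientCertificate,
// mirroring the workaround for golang.org/issue/19264 shown above.
func cloneWithClientCert(c *tls.Config) *tls.Config {
	c2 := c.Clone()
	c2.GetClientCertificate = c.GetClientCertificate
	return c2
}

func main() {
	base := &tls.Config{
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			return &tls.Certificate{}, nil
		},
	}
	if cloneWithClientCert(base).GetClientCertificate == nil {
		panic("callback was lost in the clone")
	}
}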
diff --git a/vendor/golang.org/x/net/http2/go18_test.go b/vendor/golang.org/x/net/http2/go18_test.go
index 836550597..30e3b038b 100644
--- a/vendor/golang.org/x/net/http2/go18_test.go
+++ b/vendor/golang.org/x/net/http2/go18_test.go
@@ -7,6 +7,7 @@
package http2
import (
+ "crypto/tls"
"net/http"
"testing"
"time"
@@ -64,3 +65,15 @@ func TestConfigureServerIdleTimeout_Go18(t *testing.T) {
}
}
}
+
+func TestCertClone(t *testing.T) {
+ c := &tls.Config{
+ GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ panic("shouldn't be called")
+ },
+ }
+ c2 := cloneTLSConfig(c)
+ if c2.GetClientCertificate == nil {
+ t.Error("GetClientCertificate is nil")
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index f9bb03398..54726c2a3 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder {
tableSizeUpdate: false,
w: w,
}
+ e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize)
return e
}
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
-// if necessary. If produced, it is done before encoding f.
+// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
e.buf = e.buf[:0]
@@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
- for idx, hf := range staticTable {
- if !constantTimeStringCompare(hf.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(idx + 1)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(hf.Value, f.Value) {
- continue
- }
- i = uint64(idx + 1)
- nameValueMatch = true
- return
+ i, nameValueMatch = staticTable.search(f)
+ if nameValueMatch {
+ return i, true
}
- j, nameValueMatch := e.dynTab.search(f)
+ j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) {
- i = j + uint64(len(staticTable))
+ return j + uint64(staticTable.len()), nameValueMatch
}
- return
+
+ return i, false
}
// SetMaxDynamicTableSize changes the dynamic header table size to v.
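
The rewritten searchTable above first asks the static table for a full name/value match and otherwise falls back to the dynamic table, offsetting dynamic indices by staticTable.len(). The effect is observable through the exported encoder API; in the sketch below (the x-request-id header is made up), the static-table hit encodes to a single indexed byte while the unknown header is written as a literal and inserted into the dynamic table.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// ":method: GET" is a full name/value match in the HPACK static table
	// (index 2), so it encodes as one indexed-field byte (0x82).
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	fmt.Printf("after static hit: % x\n", buf.Bytes())

	// A header with no table entry is emitted as a literal with incremental
	// indexing, so repeating it later on the same connection can be encoded
	// as a dynamic-table index instead.
	enc.WriteField(hpack.HeaderField{Name: "x-request-id", Value: "abc123"})
	fmt.Printf("after literal:    % x\n", buf.Bytes())
}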
diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go
index 92286f3ba..05f12db9c 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode_test.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go
@@ -7,6 +7,8 @@ package hpack
import (
"bytes"
"encoding/hex"
+ "fmt"
+ "math/rand"
"reflect"
"strings"
"testing"
@@ -101,17 +103,20 @@ func TestEncoderSearchTable(t *testing.T) {
wantMatch bool
}{
// Name and Value match
- {pair("foo", "bar"), uint64(len(staticTable) + 3), true},
- {pair("blake", "miz"), uint64(len(staticTable) + 2), true},
+ {pair("foo", "bar"), uint64(staticTable.len()) + 3, true},
+ {pair("blake", "miz"), uint64(staticTable.len()) + 2, true},
{pair(":method", "GET"), 2, true},
- // Only name match because Sensitive == true
- {HeaderField{":method", "GET", true}, 2, false},
+ // Only name match because Sensitive == true. This is allowed to match
+ // any ":method" entry. The current implementation uses the last entry
+ // added in newStaticTable.
+ {HeaderField{":method", "GET", true}, 3, false},
// Only Name matches
- {pair("foo", "..."), uint64(len(staticTable) + 3), false},
- {pair("blake", "..."), uint64(len(staticTable) + 2), false},
- {pair(":method", "..."), 2, false},
+ {pair("foo", "..."), uint64(staticTable.len()) + 3, false},
+ {pair("blake", "..."), uint64(staticTable.len()) + 2, false},
+ // As before, this is allowed to match any ":method" entry.
+ {pair(":method", "..."), 3, false},
// None match
{pair("foo-", "bar"), 0, false},
@@ -328,3 +333,54 @@ func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
func removeSpace(s string) string {
return strings.Replace(s, " ", "", -1)
}
+
+func BenchmarkEncoderSearchTable(b *testing.B) {
+ e := NewEncoder(nil)
+
+ // A sample of possible header fields.
+ // This is not based on any actual data from HTTP/2 traces.
+ var possible []HeaderField
+ for _, f := range staticTable.ents {
+ if f.Value == "" {
+ possible = append(possible, f)
+ continue
+ }
+ // Generate 5 random values, except for cookie and set-cookie,
+ // which we know can have many values in practice.
+ num := 5
+ if f.Name == "cookie" || f.Name == "set-cookie" {
+ num = 25
+ }
+ for i := 0; i < num; i++ {
+ f.Value = fmt.Sprintf("%s-%d", f.Name, i)
+ possible = append(possible, f)
+ }
+ }
+ for k := 0; k < 10; k++ {
+ f := HeaderField{
+ Name: fmt.Sprintf("x-header-%d", k),
+ Sensitive: rand.Int()%2 == 0,
+ }
+ for i := 0; i < 5; i++ {
+ f.Value = fmt.Sprintf("%s-%d", f.Name, i)
+ possible = append(possible, f)
+ }
+ }
+
+ // Add a random sample to the dynamic table. This very loosely simulates
+ // a history of 100 requests with 20 header fields per request.
+ for r := 0; r < 100*20; r++ {
+ f := possible[rand.Int31n(int32(len(possible)))]
+ // Skip if this is in the staticTable verbatim.
+ if _, has := staticTable.search(f); !has {
+ e.dynTab.add(f)
+ }
+ }
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ for _, f := range possible {
+ e.searchTable(f)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
index 135b9f62c..176644acd 100644
--- a/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -61,7 +61,7 @@ func (hf HeaderField) String() string {
func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
- // its entries. The size of an entry is the sum of its name's
+ // its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
// length in octets (see Section 5.2), plus 32. The size of
// an entry is calculated using the length of the name and
@@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
emit: emitFunc,
emitEnabled: true,
}
+ d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize)
return d
@@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
}
type dynamicTable struct {
- // ents is the FIFO described at
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
- // The newest (low index) is append at the end, and items are
- // evicted from the front.
- ents []HeaderField
- size uint32
+ table headerFieldTable
+ size uint32 // in bytes
maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
@@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
dt.evict()
}
-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.
-
func (dt *dynamicTable) add(f HeaderField) {
- dt.ents = append(dt.ents, f)
+ dt.table.addEntry(f)
dt.size += f.Size()
dt.evict()
}
-// If we're too big, evict old stuff (front of the slice)
+// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
- base := dt.ents // keep base pointer of slice
- for dt.size > dt.maxSize {
- dt.size -= dt.ents[0].Size()
- dt.ents = dt.ents[1:]
- }
-
- // Shift slice contents down if we evicted things.
- if len(dt.ents) != len(base) {
- copy(base, dt.ents)
- dt.ents = base[:len(dt.ents)]
+ var n int
+ for dt.size > dt.maxSize && n < dt.table.len() {
+ dt.size -= dt.table.ents[n].Size()
+ n++
}
-}
-
-// constantTimeStringCompare compares string a and b in a constant
-// time manner.
-func constantTimeStringCompare(a, b string) bool {
- if len(a) != len(b) {
- return false
- }
-
- c := byte(0)
-
- for i := 0; i < len(a); i++ {
- c |= a[i] ^ b[i]
- }
-
- return c == 0
-}
-
-// Search searches f in the table. The return value i is 0 if there is
-// no name match. If there is name match or name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
- l := len(dt.ents)
- for j := l - 1; j >= 0; j-- {
- ent := dt.ents[j]
- if !constantTimeStringCompare(ent.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(l - j)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(ent.Value, f.Value) {
- continue
- }
- i = uint64(l - j)
- nameValueMatch = true
- return
- }
- return
+ dt.table.evictOldest(n)
}
func (d *Decoder) maxTableIndex() int {
- return len(d.dynTab.ents) + len(staticTable)
+ // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
+ // the dynamic table to 2^32 bytes, where each entry will occupy more than
+ // one byte. Further, the staticTable has a fixed, small length.
+ return d.dynTab.table.len() + staticTable.len()
}
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
- if i < 1 {
+ // See Section 2.3.3.
+ if i == 0 {
return
}
+ if i <= uint64(staticTable.len()) {
+ return staticTable.ents[i-1], true
+ }
if i > uint64(d.maxTableIndex()) {
return
}
- if i <= uint64(len(staticTable)) {
- return staticTable[i-1], true
- }
- dents := d.dynTab.ents
- return dents[len(dents)-(int(i)-len(staticTable))], true
+ // In the dynamic table, newer entries have lower indices.
+ // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+ // the reversed dynamic table.
+ dt := d.dynTab.table
+ return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
// Decode decodes an entire block.
@@ -307,7 +255,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
err = d.parseHeaderFieldRepr()
if err == errNeedMore {
// Extra paranoia, making sure saveBuf won't
- // get too large. All the varint and string
+ // get too large. All the varint and string
// reading code earlier should already catch
// overlong things and return ErrStringLength,
// but keep this as a last resort.
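
On the decoding side, Decoder.at above now resolves an index in three steps: 0 is invalid, 1 through staticTable.len() address the static table directly, and anything larger addresses the dynamic table with the newest entry at the lowest index. A round-trip sketch using only the exported hpack API (the x-trace header is made up):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode two fields, then decode the block back. The decoder keeps its own
	// dynamic table and applies the indexing rules described above.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "x-trace", Value: "abc"})

	dec := hpack.NewDecoder(4096, nil) // 4096 bytes allowed for the dynamic table
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}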
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
index 4c7b17bfb..c2f8fd102 100644
--- a/vendor/golang.org/x/net/http2/hpack/hpack_test.go
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -5,117 +5,16 @@
package hpack
import (
- "bufio"
"bytes"
"encoding/hex"
"fmt"
"math/rand"
"reflect"
- "regexp"
- "strconv"
"strings"
"testing"
"time"
)
-func TestStaticTable(t *testing.T) {
- fromSpec := `
- +-------+-----------------------------+---------------+
- | 1 | :authority | |
- | 2 | :method | GET |
- | 3 | :method | POST |
- | 4 | :path | / |
- | 5 | :path | /index.html |
- | 6 | :scheme | http |
- | 7 | :scheme | https |
- | 8 | :status | 200 |
- | 9 | :status | 204 |
- | 10 | :status | 206 |
- | 11 | :status | 304 |
- | 12 | :status | 400 |
- | 13 | :status | 404 |
- | 14 | :status | 500 |
- | 15 | accept-charset | |
- | 16 | accept-encoding | gzip, deflate |
- | 17 | accept-language | |
- | 18 | accept-ranges | |
- | 19 | accept | |
- | 20 | access-control-allow-origin | |
- | 21 | age | |
- | 22 | allow | |
- | 23 | authorization | |
- | 24 | cache-control | |
- | 25 | content-disposition | |
- | 26 | content-encoding | |
- | 27 | content-language | |
- | 28 | content-length | |
- | 29 | content-location | |
- | 30 | content-range | |
- | 31 | content-type | |
- | 32 | cookie | |
- | 33 | date | |
- | 34 | etag | |
- | 35 | expect | |
- | 36 | expires | |
- | 37 | from | |
- | 38 | host | |
- | 39 | if-match | |
- | 40 | if-modified-since | |
- | 41 | if-none-match | |
- | 42 | if-range | |
- | 43 | if-unmodified-since | |
- | 44 | last-modified | |
- | 45 | link | |
- | 46 | location | |
- | 47 | max-forwards | |
- | 48 | proxy-authenticate | |
- | 49 | proxy-authorization | |
- | 50 | range | |
- | 51 | referer | |
- | 52 | refresh | |
- | 53 | retry-after | |
- | 54 | server | |
- | 55 | set-cookie | |
- | 56 | strict-transport-security | |
- | 57 | transfer-encoding | |
- | 58 | user-agent | |
- | 59 | vary | |
- | 60 | via | |
- | 61 | www-authenticate | |
- +-------+-----------------------------+---------------+
-`
- bs := bufio.NewScanner(strings.NewReader(fromSpec))
- re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
- for bs.Scan() {
- l := bs.Text()
- if !strings.Contains(l, "|") {
- continue
- }
- m := re.FindStringSubmatch(l)
- if m == nil {
- continue
- }
- i, err := strconv.Atoi(m[1])
- if err != nil {
- t.Errorf("Bogus integer on line %q", l)
- continue
- }
- if i < 1 || i > len(staticTable) {
- t.Errorf("Bogus index %d on line %q", i, l)
- continue
- }
- if got, want := staticTable[i-1].Name, m[2]; got != want {
- t.Errorf("header index %d name = %q; want %q", i, got, want)
- }
- if got, want := staticTable[i-1].Value, m[3]; got != want {
- t.Errorf("header index %d value = %q; want %q", i, got, want)
- }
- }
- if err := bs.Err(); err != nil {
- t.Error(err)
- }
-}
-
func (d *Decoder) mustAt(idx int) HeaderField {
if hf, ok := d.at(uint64(idx)); !ok {
panic(fmt.Sprintf("bogus index %d", idx))
@@ -132,10 +31,10 @@ func TestDynamicTableAt(t *testing.T) {
}
d.dynTab.add(pair("foo", "bar"))
d.dynTab.add(pair("blake", "miz"))
- if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
+ if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
- if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
+ if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 2) = %v; want %v", got, want)
}
if got, want := at(3), (pair(":method", "POST")); got != want {
@@ -143,41 +42,6 @@ func TestDynamicTableAt(t *testing.T) {
}
}
-func TestDynamicTableSearch(t *testing.T) {
- dt := dynamicTable{}
- dt.setMaxSize(4096)
-
- dt.add(pair("foo", "bar"))
- dt.add(pair("blake", "miz"))
- dt.add(pair(":method", "GET"))
-
- tests := []struct {
- hf HeaderField
- wantI uint64
- wantMatch bool
- }{
- // Name and Value match
- {pair("foo", "bar"), 3, true},
- {pair(":method", "GET"), 1, true},
-
- // Only name match because of Sensitive == true
- {HeaderField{"blake", "miz", true}, 2, false},
-
- // Only Name matches
- {pair("foo", "..."), 3, false},
- {pair("blake", "..."), 2, false},
- {pair(":method", "..."), 1, false},
-
- // None match
- {pair("foo-", "bar"), 0, false},
- }
- for _, tt := range tests {
- if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
- t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
- }
- }
-}
-
func TestDynamicTableSizeEvict(t *testing.T) {
d := NewDecoder(4096, nil)
if want := uint32(0); d.dynTab.size != want {
@@ -196,7 +60,7 @@ func TestDynamicTableSizeEvict(t *testing.T) {
if want := uint32(6 + 32); d.dynTab.size != want {
t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
}
- if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
+ if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
add(pair("long", strings.Repeat("x", 500)))
@@ -255,9 +119,9 @@ func TestDecoderDecode(t *testing.T) {
}
func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
- hf = make([]HeaderField, len(dt.ents))
+ hf = make([]HeaderField, len(dt.table.ents))
for i := range hf {
- hf[i] = dt.ents[len(dt.ents)-1-i]
+ hf[i] = dt.table.ents[len(dt.table.ents)-1-i]
}
return
}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
index b9283a023..870159244 100644
--- a/vendor/golang.org/x/net/http2/hpack/tables.go
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -4,73 +4,199 @@
package hpack
+import (
+ "fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+ // For static tables, entries are never evicted.
+ //
+ // For dynamic tables, entries are evicted from ents[0] and added to the end.
+ // Each entry has a unique id that starts at one and increments for each
+ // entry that is added. This unique id is stable across evictions, meaning
+ // it can be used as a pointer to a specific entry. As in hpack, unique ids
+ // are 1-based. The unique id for ents[k] is k + evictCount + 1.
+ //
+ // Zero is not a valid unique id.
+ //
+ // evictCount should not overflow in any remotely practical situation. In
+ // practice, we will have one dynamic table per HTTP/2 connection. If we
+ // assume a very powerful server that handles 1M QPS per connection and each
+ // request adds (then evicts) 100 entries from the table, it would still take
+ // 2M years for evictCount to overflow.
+ ents []HeaderField
+ evictCount uint64
+
+ // byName maps a HeaderField name to the unique id of the newest entry with
+ // the same name. See above for a definition of "unique id".
+ byName map[string]uint64
+
+ // byNameValue maps a HeaderField name/value pair to the unique id of the newest
+ // entry with the same name and value. See above for a definition of "unique id".
+ byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+ name, value string
+}
+
+func (t *headerFieldTable) init() {
+ t.byName = make(map[string]uint64)
+ t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+ return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+ id := uint64(t.len()) + t.evictCount + 1
+ t.byName[f.Name] = id
+ t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+ t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+ if n > t.len() {
+ panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+ }
+ for k := 0; k < n; k++ {
+ f := t.ents[k]
+ id := t.evictCount + uint64(k) + 1
+ if t.byName[f.Name] == id {
+ t.byName[f.Name] = 0
+ }
+ if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+ t.byNameValue[p] = 0
+ }
+ }
+ copy(t.ents, t.ents[n:])
+ for k := t.len() - n; k < t.len(); k++ {
+ t.ents[k] = HeaderField{} // so strings can be garbage collected
+ }
+ t.ents = t.ents[:t.len()-n]
+ if t.evictCount+uint64(n) < t.evictCount {
+ panic("evictCount overflow")
+ }
+ t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be a dynamic tables except for the global
+// staticTable pointer.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ if !f.Sensitive {
+ if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+ return t.idToIndex(id), true
+ }
+ }
+ if id := t.byName[f.Name]; id != 0 {
+ return t.idToIndex(id), false
+ }
+ return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
+func (t *headerFieldTable) idToIndex(id uint64) uint64 {
+ if id <= t.evictCount {
+ panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
+ }
+ k := id - t.evictCount - 1 // convert id to an index t.ents[k]
+ if t != staticTable {
+ return uint64(t.len()) - k // dynamic table
+ }
+ return k + 1
+}
+
func pair(name, value string) HeaderField {
return HeaderField{Name: name, Value: value}
}
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
- pair(":authority", ""), // index 1 (1-based)
- pair(":method", "GET"),
- pair(":method", "POST"),
- pair(":path", "/"),
- pair(":path", "/index.html"),
- pair(":scheme", "http"),
- pair(":scheme", "https"),
- pair(":status", "200"),
- pair(":status", "204"),
- pair(":status", "206"),
- pair(":status", "304"),
- pair(":status", "400"),
- pair(":status", "404"),
- pair(":status", "500"),
- pair("accept-charset", ""),
- pair("accept-encoding", "gzip, deflate"),
- pair("accept-language", ""),
- pair("accept-ranges", ""),
- pair("accept", ""),
- pair("access-control-allow-origin", ""),
- pair("age", ""),
- pair("allow", ""),
- pair("authorization", ""),
- pair("cache-control", ""),
- pair("content-disposition", ""),
- pair("content-encoding", ""),
- pair("content-language", ""),
- pair("content-length", ""),
- pair("content-location", ""),
- pair("content-range", ""),
- pair("content-type", ""),
- pair("cookie", ""),
- pair("date", ""),
- pair("etag", ""),
- pair("expect", ""),
- pair("expires", ""),
- pair("from", ""),
- pair("host", ""),
- pair("if-match", ""),
- pair("if-modified-since", ""),
- pair("if-none-match", ""),
- pair("if-range", ""),
- pair("if-unmodified-since", ""),
- pair("last-modified", ""),
- pair("link", ""),
- pair("location", ""),
- pair("max-forwards", ""),
- pair("proxy-authenticate", ""),
- pair("proxy-authorization", ""),
- pair("range", ""),
- pair("referer", ""),
- pair("refresh", ""),
- pair("retry-after", ""),
- pair("server", ""),
- pair("set-cookie", ""),
- pair("strict-transport-security", ""),
- pair("transfer-encoding", ""),
- pair("user-agent", ""),
- pair("vary", ""),
- pair("via", ""),
- pair("www-authenticate", ""),
+var staticTable = newStaticTable()
+
+func newStaticTable() *headerFieldTable {
+ t := &headerFieldTable{}
+ t.init()
+ t.addEntry(pair(":authority", ""))
+ t.addEntry(pair(":method", "GET"))
+ t.addEntry(pair(":method", "POST"))
+ t.addEntry(pair(":path", "/"))
+ t.addEntry(pair(":path", "/index.html"))
+ t.addEntry(pair(":scheme", "http"))
+ t.addEntry(pair(":scheme", "https"))
+ t.addEntry(pair(":status", "200"))
+ t.addEntry(pair(":status", "204"))
+ t.addEntry(pair(":status", "206"))
+ t.addEntry(pair(":status", "304"))
+ t.addEntry(pair(":status", "400"))
+ t.addEntry(pair(":status", "404"))
+ t.addEntry(pair(":status", "500"))
+ t.addEntry(pair("accept-charset", ""))
+ t.addEntry(pair("accept-encoding", "gzip, deflate"))
+ t.addEntry(pair("accept-language", ""))
+ t.addEntry(pair("accept-ranges", ""))
+ t.addEntry(pair("accept", ""))
+ t.addEntry(pair("access-control-allow-origin", ""))
+ t.addEntry(pair("age", ""))
+ t.addEntry(pair("allow", ""))
+ t.addEntry(pair("authorization", ""))
+ t.addEntry(pair("cache-control", ""))
+ t.addEntry(pair("content-disposition", ""))
+ t.addEntry(pair("content-encoding", ""))
+ t.addEntry(pair("content-language", ""))
+ t.addEntry(pair("content-length", ""))
+ t.addEntry(pair("content-location", ""))
+ t.addEntry(pair("content-range", ""))
+ t.addEntry(pair("content-type", ""))
+ t.addEntry(pair("cookie", ""))
+ t.addEntry(pair("date", ""))
+ t.addEntry(pair("etag", ""))
+ t.addEntry(pair("expect", ""))
+ t.addEntry(pair("expires", ""))
+ t.addEntry(pair("from", ""))
+ t.addEntry(pair("host", ""))
+ t.addEntry(pair("if-match", ""))
+ t.addEntry(pair("if-modified-since", ""))
+ t.addEntry(pair("if-none-match", ""))
+ t.addEntry(pair("if-range", ""))
+ t.addEntry(pair("if-unmodified-since", ""))
+ t.addEntry(pair("last-modified", ""))
+ t.addEntry(pair("link", ""))
+ t.addEntry(pair("location", ""))
+ t.addEntry(pair("max-forwards", ""))
+ t.addEntry(pair("proxy-authenticate", ""))
+ t.addEntry(pair("proxy-authorization", ""))
+ t.addEntry(pair("range", ""))
+ t.addEntry(pair("referer", ""))
+ t.addEntry(pair("refresh", ""))
+ t.addEntry(pair("retry-after", ""))
+ t.addEntry(pair("server", ""))
+ t.addEntry(pair("set-cookie", ""))
+ t.addEntry(pair("strict-transport-security", ""))
+ t.addEntry(pair("transfer-encoding", ""))
+ t.addEntry(pair("user-agent", ""))
+ t.addEntry(pair("vary", ""))
+ t.addEntry(pair("via", ""))
+ t.addEntry(pair("www-authenticate", ""))
+ return t
}
var huffmanCodes = [256]uint32{
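
The unique-id scheme documented in headerFieldTable above is what lets the byName/byNameValue maps survive evictions: an entry's id is assigned once and never changes, and idToIndex recovers the HPACK index from the id, evictCount, and the current length. Since the type is unexported, the sketch below re-derives just that arithmetic for a dynamic table with illustrative names (dynTable, add, evictOldest, index); it is not the vendored implementation.

package main

import "fmt"

// dynTable is a minimal sketch of the unique-id bookkeeping described above.
// An entry added when the table holds len(ents) items gets id
// len(ents)+evictCount+1 and keeps it forever; after evictions, the 1-based
// HPACK index (1 = newest) of a surviving entry is
// len(ents) - (id - evictCount - 1).
type dynTable struct {
	ents       []string
	evictCount uint64
	byName     map[string]uint64 // name -> id of the newest entry with that name
}

func (t *dynTable) add(name string) {
	if t.byName == nil {
		t.byName = make(map[string]uint64)
	}
	id := uint64(len(t.ents)) + t.evictCount + 1
	t.byName[name] = id
	t.ents = append(t.ents, name)
}

// evictOldest drops the n oldest entries; it assumes n <= len(t.ents).
func (t *dynTable) evictOldest(n int) {
	copy(t.ents, t.ents[n:])
	t.ents = t.ents[:len(t.ents)-n]
	t.evictCount += uint64(n)
}

// index returns the 1-based HPACK index for name, or 0 if the newest entry
// with that name was evicted (its id is <= evictCount) or never existed.
func (t *dynTable) index(name string) uint64 {
	id := t.byName[name]
	if id == 0 || id <= t.evictCount {
		return 0
	}
	k := id - t.evictCount - 1     // position within t.ents
	return uint64(len(t.ents)) - k // newest entry gets index 1
}

func main() {
	t := &dynTable{}
	t.add("a") // id 1
	t.add("b") // id 2
	t.add("c") // id 3
	fmt.Println(t.index("a"), t.index("c")) // 3 1
	t.evictOldest(1)                        // evicts "a"
	fmt.Println(t.index("a"), t.index("c")) // 0 1
}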
diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go
new file mode 100644
index 000000000..7f40d9a42
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go
@@ -0,0 +1,188 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bufio"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestHeaderFieldTable(t *testing.T) {
+ table := &headerFieldTable{}
+ table.init()
+ table.addEntry(pair("key1", "value1-1"))
+ table.addEntry(pair("key2", "value2-1"))
+ table.addEntry(pair("key1", "value1-2"))
+ table.addEntry(pair("key3", "value3-1"))
+ table.addEntry(pair("key4", "value4-1"))
+ table.addEntry(pair("key2", "value2-2"))
+
+ // Tests will be run twice: once before evicting anything, and
+ // again after evicting the three oldest entries.
+ tests := []struct {
+ f HeaderField
+ beforeWantStaticI uint64
+ beforeWantMatch bool
+ afterWantStaticI uint64
+ afterWantMatch bool
+ }{
+ {HeaderField{"key1", "value1-1", false}, 1, true, 0, false},
+ {HeaderField{"key1", "value1-2", false}, 3, true, 0, false},
+ {HeaderField{"key1", "value1-3", false}, 3, false, 0, false},
+ {HeaderField{"key2", "value2-1", false}, 2, true, 3, false},
+ {HeaderField{"key2", "value2-2", false}, 6, true, 3, true},
+ {HeaderField{"key2", "value2-3", false}, 6, false, 3, false},
+ {HeaderField{"key4", "value4-1", false}, 5, true, 2, true},
+ // Name match only, because sensitive.
+ {HeaderField{"key4", "value4-1", true}, 5, false, 2, false},
+ // Key not found.
+ {HeaderField{"key5", "value5-x", false}, 0, false, 0, false},
+ }
+
+ staticToDynamic := func(i uint64) uint64 {
+ if i == 0 {
+ return 0
+ }
+ return uint64(table.len()) - i + 1 // dynamic is the reversed table
+ }
+
+ searchStatic := func(f HeaderField) (uint64, bool) {
+ old := staticTable
+ staticTable = table
+ defer func() { staticTable = old }()
+ return staticTable.search(f)
+ }
+
+ searchDynamic := func(f HeaderField) (uint64, bool) {
+ return table.search(f)
+ }
+
+ for _, test := range tests {
+ gotI, gotMatch := searchStatic(test.f)
+ if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ gotI, gotMatch = searchDynamic(test.f)
+ wantDynamicI := staticToDynamic(test.beforeWantStaticI)
+ if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ }
+
+ table.evictOldest(3)
+
+ for _, test := range tests {
+ gotI, gotMatch := searchStatic(test.f)
+ if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ gotI, gotMatch = searchDynamic(test.f)
+ wantDynamicI := staticToDynamic(test.afterWantStaticI)
+ if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
+ t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
+ }
+ }
+}
+
+func TestStaticTable(t *testing.T) {
+ fromSpec := `
+ +-------+-----------------------------+---------------+
+ | 1 | :authority | |
+ | 2 | :method | GET |
+ | 3 | :method | POST |
+ | 4 | :path | / |
+ | 5 | :path | /index.html |
+ | 6 | :scheme | http |
+ | 7 | :scheme | https |
+ | 8 | :status | 200 |
+ | 9 | :status | 204 |
+ | 10 | :status | 206 |
+ | 11 | :status | 304 |
+ | 12 | :status | 400 |
+ | 13 | :status | 404 |
+ | 14 | :status | 500 |
+ | 15 | accept-charset | |
+ | 16 | accept-encoding | gzip, deflate |
+ | 17 | accept-language | |
+ | 18 | accept-ranges | |
+ | 19 | accept | |
+ | 20 | access-control-allow-origin | |
+ | 21 | age | |
+ | 22 | allow | |
+ | 23 | authorization | |
+ | 24 | cache-control | |
+ | 25 | content-disposition | |
+ | 26 | content-encoding | |
+ | 27 | content-language | |
+ | 28 | content-length | |
+ | 29 | content-location | |
+ | 30 | content-range | |
+ | 31 | content-type | |
+ | 32 | cookie | |
+ | 33 | date | |
+ | 34 | etag | |
+ | 35 | expect | |
+ | 36 | expires | |
+ | 37 | from | |
+ | 38 | host | |
+ | 39 | if-match | |
+ | 40 | if-modified-since | |
+ | 41 | if-none-match | |
+ | 42 | if-range | |
+ | 43 | if-unmodified-since | |
+ | 44 | last-modified | |
+ | 45 | link | |
+ | 46 | location | |
+ | 47 | max-forwards | |
+ | 48 | proxy-authenticate | |
+ | 49 | proxy-authorization | |
+ | 50 | range | |
+ | 51 | referer | |
+ | 52 | refresh | |
+ | 53 | retry-after | |
+ | 54 | server | |
+ | 55 | set-cookie | |
+ | 56 | strict-transport-security | |
+ | 57 | transfer-encoding | |
+ | 58 | user-agent | |
+ | 59 | vary | |
+ | 60 | via | |
+ | 61 | www-authenticate | |
+ +-------+-----------------------------+---------------+
+`
+ bs := bufio.NewScanner(strings.NewReader(fromSpec))
+ re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
+ for bs.Scan() {
+ l := bs.Text()
+ if !strings.Contains(l, "|") {
+ continue
+ }
+ m := re.FindStringSubmatch(l)
+ if m == nil {
+ continue
+ }
+ i, err := strconv.Atoi(m[1])
+ if err != nil {
+ t.Errorf("Bogus integer on line %q", l)
+ continue
+ }
+ if i < 1 || i > staticTable.len() {
+ t.Errorf("Bogus index %d on line %q", i, l)
+ continue
+ }
+ if got, want := staticTable.ents[i-1].Name, m[2]; got != want {
+ t.Errorf("header index %d name = %q; want %q", i, got, want)
+ }
+ if got, want := staticTable.ents[i-1].Value, m[3]; got != want {
+ t.Errorf("header index %d value = %q; want %q", i, got, want)
+ }
+ }
+ if err := bs.Err(); err != nil {
+ t.Error(err)
+ }
+}
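
The staticToDynamic helper in TestHeaderFieldTable encodes the RFC 7541 rule that dynamic-table indices count from the most recently inserted entry, so an entry's index shifts as later entries are added. A small stand-alone sketch of that arithmetic (the package and function names here are illustrative, not part of the hpack package):

package hpackindex

// dynamicIndex converts an insertion-order position (1 = oldest surviving
// entry) into the HPACK dynamic-table index (1 = newest entry), the same
// reversal the test's staticToDynamic helper performs.
func dynamicIndex(insertionOrder, tableLen uint64) uint64 {
	if insertionOrder == 0 {
		return 0 // no match
	}
	return tableLen - insertionOrder + 1
}

With the six entries added in the test, the first entry ("key1", "value1-1") ends up at dynamic index 6 while the most recently added entry sits at index 1, which is why the expected static and dynamic indices in the test cases mirror each other.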
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index 53b7a1daf..914aaf8a7 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -10,7 +10,7 @@ import (
"sync"
)
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 3c6b90ccd..550427dda 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -110,11 +110,38 @@ type Server struct {
// activity for the purposes of IdleTimeout.
IdleTimeout time.Duration
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connection. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
// NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
}
+func (s *Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection > initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
+}
+
func (s *Server) maxReadFrameSize() uint32 {
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
return v
@@ -255,27 +282,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
defer cancel()
sc := &serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- baseCtx: baseCtx,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*stream),
- readFrameCh: make(chan readFrameResult),
- wantWriteFrameCh: make(chan FrameWriteRequest, 8),
- wantStartPushCh: make(chan startPushRequest, 8),
- wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
- initialWindowSize: initialWindowSize,
- maxFrameSize: initialMaxFrameSize,
- headerTableSize: initialHeaderTableSize,
- serveG: newGoroutineLock(),
- pushEnabled: true,
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan FrameWriteRequest, 8),
+ wantStartPushCh: make(chan startPushRequest, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: initialWindowSize,
+ maxFrameSize: initialMaxFrameSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
}
// The net/http package sets the write deadline from the
@@ -294,6 +321,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.writeSched = NewRandomWriteScheduler()
}
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@@ -387,34 +417,34 @@ type serverConn struct {
writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
- curClientStreams uint32 // number of open streams initiated by the client
- curPushedStreams uint32 // number of open streams initiated by server push
- maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
- maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
- streams map[uint32]*stream
- initialWindowSize int32
- maxFrameSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started writing a frame (on serve goroutine or separate)
- writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- inGoAway bool // we've started to or sent GOAWAY
- inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
- idleTimerCh <-chan time.Time // nil if unused
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*stream
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
+ idleTimerCh <-chan time.Time // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -463,10 +493,9 @@ type stream struct {
numTrailerValues int64
weight uint8
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -696,21 +725,23 @@ func (sc *serverConn) serve() {
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
-
- // TODO: more actual settings, notably
- // SettingInitialWindowSize, but then we also
- // want to bump up the conn window size the
- // same amount here right after the settings
+ {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
- // "StateNew" state. We can't go directly to idle, though.
+ // "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame.
sc.setConnState(http.StateActive)
@@ -1395,9 +1426,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the
// old value."
- old := sc.initialWindowSize
- sc.initialWindowSize = int32(val)
- growth := sc.initialWindowSize - old // may be negative
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size
@@ -1719,9 +1750,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
}
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(sc.srv.initialStreamRecvWindowSize())
sc.streams[id] = st
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
@@ -1785,16 +1816,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, err
}
if bodyOpen {
- st.reqBuf = getRequestBodyBuf()
- req.Body.(*requestBody).pipe = &pipe{
- b: &fixedBuffer{buf: st.reqBuf},
- }
-
if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
}
+ req.Body.(*requestBody).pipe = &pipe{
+ b: &dataBuffer{expected: req.ContentLength},
+ }
}
return rw, req, nil
}
@@ -1890,24 +1919,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
return rw, req, nil
}
-var reqBodyCache = make(chan []byte, 8)
-
-func getRequestBodyBuf() []byte {
- select {
- case b := <-reqBodyCache:
- return b
- default:
- return make([]byte, initialWindowSize)
- }
-}
-
-func putRequestBodyBuf(b []byte) {
- select {
- case reqBodyCache <- b:
- default:
- }
-}
-
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true
@@ -2003,12 +2014,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
case <-sc.doneServing:
}
}
- if err == io.EOF {
- if buf := st.reqBuf; buf != nil {
- st.reqBuf = nil // shouldn't matter; field unused by other
- putRequestBodyBuf(buf)
- }
- }
}
func (sc *serverConn) noteBodyRead(st *stream, n int) {
@@ -2103,8 +2108,8 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
return
}
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
@@ -2278,7 +2283,7 @@ const TrailerPrefix = "Trailer:"
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
+// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
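
MaxUploadBufferPerConnection and MaxUploadBufferPerStream are plain fields on http2.Server, so callers opt in to the larger receive windows when they wire the HTTP/2 server up. A minimal sketch, assuming the usual http2.ConfigureServer entry point and placeholder certificate paths:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok\n"))
		}),
	}

	// Raise the connection- and stream-level receive windows. Values outside
	// the ranges described in the doc comments fall back to the defaults
	// chosen by initialConnRecvWindowSize / initialStreamRecvWindowSize.
	h2 := &http2.Server{
		MaxUploadBufferPerConnection: 1 << 20, // 1 MiB shared across the connection
		MaxUploadBufferPerStream:     1 << 19, // 512 KiB per stream
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}

	// cert.pem and key.pem are placeholders for whatever TLS material the
	// deployment actually uses.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}

Because the server now advertises the per-stream value in SETTINGS_INITIAL_WINDOW_SIZE and tops up the connection window with a WINDOW_UPDATE right after SETTINGS, a client should see both frames early in the handshake, which is what the greetAndCheckSettings helper below checks for.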
diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go
index dfa4cff2e..407fafc6d 100644
--- a/vendor/golang.org/x/net/http2/server_test.go
+++ b/vendor/golang.org/x/net/http2/server_test.go
@@ -80,6 +80,7 @@ type serverTesterOpt string
var optOnlyServer = serverTesterOpt("only_server")
var optQuiet = serverTesterOpt("quiet_logging")
+var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames")
func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
resetHooks()
@@ -91,7 +92,7 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
NextProtos: []string{NextProtoTLS},
}
- var onlyServer, quiet bool
+ var onlyServer, quiet, framerReuseFrames bool
h2server := new(Server)
for _, opt := range opts {
switch v := opt.(type) {
@@ -107,6 +108,8 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
onlyServer = true
case optQuiet:
quiet = true
+ case optFramerReuseFrames:
+ framerReuseFrames = true
}
case func(net.Conn, http.ConnState):
ts.Config.ConnState = v
@@ -149,6 +152,9 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
}
st.cc = cc
st.fr = NewFramer(cc, cc)
+ if framerReuseFrames {
+ st.fr.SetReuseFrames()
+ }
if !logFrameReads && !logFrameWrites {
st.fr.debugReadLoggerf = func(m string, v ...interface{}) {
m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
@@ -254,11 +260,52 @@ func (st *serverTester) Close() {
// greet initiates the client's HTTP/2 connection into a state where
// frames may be sent.
func (st *serverTester) greet() {
+ st.greetAndCheckSettings(func(Setting) error { return nil })
+}
+
+func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) {
st.writePreface()
st.writeInitialSettings()
- st.wantSettings()
+ st.wantSettings().ForeachSetting(checkSetting)
st.writeSettingsAck()
- st.wantSettingsAck()
+
+ // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.
+ var gotSettingsAck bool
+ var gotWindowUpdate bool
+
+ for i := 0; i < 2; i++ {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatal(err)
+ }
+ switch f := f.(type) {
+ case *SettingsFrame:
+ if !f.Header().Flags.Has(FlagSettingsAck) {
+ st.t.Fatal("Settings Frame didn't have ACK set")
+ }
+ gotSettingsAck = true
+
+ case *WindowUpdateFrame:
+ if f.FrameHeader.StreamID != 0 {
+ st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID, 0)
+ }
+ incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize)
+ if f.Increment != incr {
+ st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr)
+ }
+ gotWindowUpdate = true
+
+ default:
+ st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f)
+ }
+ }
+
+ if !gotSettingsAck {
+ st.t.Fatalf("Didn't get a settings ACK")
+ }
+ if !gotWindowUpdate {
+ st.t.Fatalf("Didn't get a window update")
+ }
}
func (st *serverTester) writePreface() {
@@ -318,7 +365,7 @@ func (st *serverTester) encodeHeaderRaw(headers ...string) []byte {
}
// encodeHeader encodes headers and returns their HPACK bytes. headers
-// must contain an even number of key/value pairs. There may be
+// must contain an even number of key/value pairs. There may be
// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
// :scheme headers default to GET, / and https. The :authority header
// defaults to st.ts.Listener.Addr().
@@ -578,12 +625,7 @@ func TestServer(t *testing.T) {
server sends in the HTTP/2 connection.
`)
- st.writePreface()
- st.writeInitialSettings()
- st.wantSettings()
- st.writeSettingsAck()
- st.wantSettingsAck()
-
+ st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(),
@@ -656,7 +698,7 @@ func TestServer_Request_Get_PathSlashes(t *testing.T) {
}
// TODO: add a test with EndStream=true on the HEADERS but setting a
-// Content-Length anyway. Should we just omit it and force it to
+// Content-Length anyway. Should we just omit it and force it to
// zero?
func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
@@ -1192,7 +1234,7 @@ func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
EndStream: false,
EndHeaders: true,
})
- st.writeDataPadded(1, false, []byte("abcdef"), []byte("1234"))
+ st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0})
// Expect to immediately get our 5 bytes of padding back for
// both the connection and stream (4 bytes of padding + 1 byte of length)
@@ -2595,11 +2637,9 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) {
defer st.Close()
// shake hands
- st.writePreface()
- st.writeInitialSettings()
frameSize := defaultMaxReadFrameSize
var advHeaderListSize *uint32
- st.wantSettings().ForeachSetting(func(s Setting) error {
+ st.greetAndCheckSettings(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
if s.Val < minMaxFrameSize {
@@ -2614,8 +2654,6 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) {
}
return nil
})
- st.writeSettingsAck()
- st.wantSettingsAck()
if advHeaderListSize == nil {
t.Errorf("server didn't advertise a max header list size")
@@ -2994,6 +3032,89 @@ func BenchmarkServerPosts(b *testing.B) {
}
}
+// Send a stream of messages from server to client in separate DATA frames.
+// Exercises the performance problems seen with long streams.
+// Created to demonstrate the problem reported in Go issue #18502.
+func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {
+ benchmarkServerToClientStream(b)
+}
+
+// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8
+// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.
+func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
+ benchmarkServerToClientStream(b, optFramerReuseFrames)
+}
+
+func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
+ defer disableGoroutineTracking()()
+ b.ReportAllocs()
+ const msgLen = 1
+ // default window size
+ const windowSize = 1<<16 - 1
+
+ // next message to send from the server and for the client to expect
+ nextMsg := func(i int) []byte {
+ msg := make([]byte, msgLen)
+ msg[0] = byte(i)
+ if len(msg) != msgLen {
+ panic("invalid test setup msg length")
+ }
+ return msg
+ }
+
+ st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+ // Consume the (empty) body from the peer before replying, otherwise
+ // the server will sometimes (depending on scheduling) send the peer
+ // a RST_STREAM with the CANCEL error code.
+ if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+ b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+ }
+ for i := 0; i < b.N; i += 1 {
+ w.Write(nextMsg(i))
+ w.(http.Flusher).Flush()
+ }
+ }, newServerOpts...)
+ defer st.Close()
+ st.greet()
+
+ const id = uint32(1)
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: id,
+ BlockFragment: st.encodeHeader(":method", "POST"),
+ EndStream: false,
+ EndHeaders: true,
+ })
+
+ st.writeData(id, true, nil)
+ st.wantHeaders()
+
+ var pendingWindowUpdate = uint32(0)
+
+ for i := 0; i < b.N; i += 1 {
+ expected := nextMsg(i)
+ df := st.wantData()
+ if !bytes.Equal(expected, df.data) {
+ b.Fatalf("Bad message received; want %v; got %v", expected, df.data)
+ }
+ // try to send infrequent but large window updates so they don't overwhelm the test
+ pendingWindowUpdate += uint32(len(df.data))
+ if pendingWindowUpdate >= windowSize/2 {
+ if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {
+ b.Fatal(err)
+ }
+ if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil {
+ b.Fatal(err)
+ }
+ pendingWindowUpdate = 0
+ }
+ }
+ df := st.wantData()
+ if !df.StreamEnded() {
+ b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+ }
+}
+
// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
// Verify we don't hang.
func TestIssue53(t *testing.T) {
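
The ReuseFrames benchmark above exercises Framer.SetReuseFrames, which this update exposes so callers can trade frame immutability for fewer allocations. A minimal read-loop sketch (the package name and connection handling are illustrative):

package framedump

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

// readFrames drains frames from an already-established HTTP/2 connection.
// With SetReuseFrames enabled, each Frame returned by ReadFrame is only
// valid until the next ReadFrame call, so nothing here may retain it.
func readFrames(c net.Conn) error {
	fr := http2.NewFramer(c, c)
	fr.SetReuseFrames()
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return err
		}
		log.Printf("frame %T on stream %d", f, f.Header().StreamID)
	}
}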
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 0c7e859db..84d042d46 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -575,7 +575,7 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
cc.nextStreamID < math.MaxInt32
}
-// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
@@ -809,8 +809,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
@@ -1528,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil
}
- buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
- cs.bufPipe = pipe{b: buf}
+ cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
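
On the transport side the response body pipe is now backed by a dataBuffer seeded with the response's declared Content-Length instead of a throwaway bytes.Buffer. The type is unexported, so the following is only an illustrative stand-in for the pre-sizing idea, not the package's actual allocation strategy:

package bodybuf

// initialAlloc suggests a first allocation for a response body whose
// declared Content-Length may be known (or -1 when it is not), capped so a
// hostile or mistaken header cannot force a huge up-front allocation.
func initialAlloc(contentLength int64, maxChunk int) int {
	if contentLength < 0 {
		return 512 // unknown length: start small and grow on demand
	}
	if contentLength > int64(maxChunk) {
		return maxChunk
	}
	return int(contentLength)
}

Passing res.ContentLength through as the expected size gives the buffer an up-front hint for its first allocation.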
diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go
index 8ef4f3388..7ae8ff787 100644
--- a/vendor/golang.org/x/net/http2/transport_test.go
+++ b/vendor/golang.org/x/net/http2/transport_test.go
@@ -2406,7 +2406,7 @@ func TestTransportReturnsDataPaddingFlowControl(t *testing.T) {
EndStream: false,
BlockFragment: buf.Bytes(),
})
- pad := []byte("12345")
+ pad := make([]byte, 5)
ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream
f, err := ct.readNonSettingsFrame()