Diffstat (limited to 'libgo/go/net/http/h2_bundle.go')
-rw-r--r--  libgo/go/net/http/h2_bundle.go  104
1 file changed, 90 insertions(+), 14 deletions(-)
diff --git a/libgo/go/net/http/h2_bundle.go b/libgo/go/net/http/h2_bundle.go
index 21921aba5fd..f03dbba729b 100644
--- a/libgo/go/net/http/h2_bundle.go
+++ b/libgo/go/net/http/h2_bundle.go
@@ -1,5 +1,7 @@
+// +build !nethttpomithttp2
+
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-//go:generate bundle -o h2_bundle.go -prefix http2 golang.org/x/net/http2
+// $ bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
// Package http2 implements the HTTP/2 protocol.
//
@@ -3465,6 +3467,7 @@ type http2pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b http2pipeBuffer // nil when done reading
+ unread int // bytes unread when done
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
@@ -3481,7 +3484,7 @@ func (p *http2pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
- return 0
+ return p.unread
}
return p.b.Len()
}
@@ -3528,6 +3531,7 @@ func (p *http2pipe) Write(d []byte) (n int, err error) {
return 0, http2errClosedPipeWrite
}
if p.breakErr != nil {
+ p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
@@ -3565,6 +3569,9 @@ func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
}
p.readFn = fn
if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
p.b = nil
}
*dst = err
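
The hunks above give the pipe an "unread" tally: once the read side breaks the pipe, further writes are discarded but still counted, and whatever was buffered at the break is folded into the same counter, so Len() keeps reporting how many body bytes went unconsumed. A minimal standalone sketch of that accounting (countingPipe and Break are illustrative names, not the bundled http2pipe API):

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    // countingPipe is a toy stand-in for http2pipe, showing only the new unread
    // accounting: bytes buffered when the reader gives up, plus bytes written
    // afterwards, stay visible through Len even though they will never be read.
    type countingPipe struct {
        mu     sync.Mutex
        b      *bytes.Buffer // nil once the reader has broken the pipe
        unread int           // bytes unread when done
    }

    func (p *countingPipe) Write(d []byte) (int, error) {
        p.mu.Lock()
        defer p.mu.Unlock()
        if p.b == nil {
            p.unread += len(d) // reader is gone: discard, but keep counting
            return len(d), nil
        }
        return p.b.Write(d)
    }

    // Break plays the role of closeWithError(&p.breakErr, ...): the buffer is
    // dropped, and whatever it still held becomes permanently unread.
    func (p *countingPipe) Break() {
        p.mu.Lock()
        defer p.mu.Unlock()
        if p.b != nil {
            p.unread += p.b.Len()
            p.b = nil
        }
    }

    func (p *countingPipe) Len() int {
        p.mu.Lock()
        defer p.mu.Unlock()
        if p.b == nil {
            return p.unread
        }
        return p.b.Len()
    }

    func main() {
        p := &countingPipe{b: new(bytes.Buffer)}
        p.Write([]byte("hello")) // buffered, never read
        p.Break()                // reader breaks the pipe with 5 bytes pending
        p.Write([]byte("world")) // discarded, but still counted
        fmt.Println(p.Len())     // 10
    }
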
@@ -3811,7 +3818,7 @@ func http2ConfigureServer(s *Server, conf *http2Server) error {
}
}
if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).")
}
}
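
The improved error message names the two cipher suites that satisfy the HTTP/2 requirement. A sketch of a tls.Config that passes this check when CipherSuites is set explicitly (h2TLSConfig is an illustrative helper, not part of the bundle):

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    // h2TLSConfig builds a config whose explicit CipherSuites list contains at
    // least one of the AES-128-GCM suites that the check above requires.
    func h2TLSConfig() *tls.Config {
        return &tls.Config{
            MinVersion: tls.VersionTLS12,
            CipherSuites: []uint16{
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,   // either of these two
                tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // satisfies the requirement
            },
            NextProtos: []string{"h2", "http/1.1"},
        }
    }

    func main() {
        fmt.Println(len(h2TLSConfig().CipherSuites)) // 2
    }

Leaving CipherSuites nil also avoids the error, since the check only applies to an explicitly configured list.
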
@@ -3881,7 +3888,7 @@ type http2ServeConnOpts struct {
}
func (o *http2ServeConnOpts) context() context.Context {
- if o.Context != nil {
+ if o != nil && o.Context != nil {
return o.Context
}
return context.Background()
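
The extra `o != nil` guard makes the accessor safe when the caller passes nil options. A small sketch of the same nil-receiver pattern, using illustrative names (serveOpts/ctx, not the bundled type):

    package main

    import (
        "context"
        "fmt"
    )

    // serveOpts is an illustrative stand-in for http2ServeConnOpts.
    type serveOpts struct {
        Context context.Context
    }

    // ctx mirrors the guarded accessor above: it is safe to call on a nil
    // *serveOpts because the receiver is checked before any field access.
    func (o *serveOpts) ctx() context.Context {
        if o != nil && o.Context != nil {
            return o.Context
        }
        return context.Background()
    }

    func main() {
        var o *serveOpts            // callers may pass nil options
        fmt.Println(o.ctx() != nil) // true: falls back to context.Background()
    }
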
@@ -5979,7 +5986,11 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
- if !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
+ // If the Content-Encoding is non-blank, we shouldn't
+ // sniff the body. See Issue golang.org/issue/31753.
+ ce := rws.snapHeader.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
ctype = DetectContentType(p)
}
var date string
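
The new hasCE guard stops the server from content-sniffing a body it knows is encoded, since DetectContentType on, say, gzip bytes would describe the compression format rather than the real content type. A standalone sketch of the same decision (sniffedContentType is an illustrative helper; the bundle's http2bodyAllowedForStatus check is omitted):

    package main

    import (
        "fmt"
        "net/http"
    )

    // sniffedContentType mirrors the decision above: only sniff when the handler
    // has set neither Content-Type nor Content-Encoding, because sniffing encoded
    // bytes describes the encoding (e.g. application/x-gzip) rather than the
    // underlying content.
    func sniffedContentType(h http.Header, body []byte) (ctype string, ok bool) {
        if h.Get("Content-Encoding") != "" {
            return "", false // encoded payload: trust the handler's headers
        }
        if _, have := h["Content-Type"]; have {
            return "", false // already set explicitly
        }
        if len(body) == 0 {
            return "", false
        }
        return http.DetectContentType(body), true
    }

    func main() {
        h := http.Header{"Content-Encoding": {"gzip"}}
        _, ok := sniffedContentType(h, []byte{0x1f, 0x8b, 0x08})
        fmt.Println(ok) // false: an encoded body is never sniffed
    }
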
@@ -6088,7 +6099,7 @@ const http2TrailerPrefix = "Trailer:"
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
// and gRPC libraries permit setting trailers mid-stream without
-// predeclarnig them. So: change of plans. We still permit the old
+// predeclaring them. So: change of plans. We still permit the old
// way, but we also permit this hack: if a Header() key begins with
// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
// invalid token byte anyway, there is no ambiguity. (And it's already
@@ -6388,7 +6399,7 @@ func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
// is in either the "open" or "half-closed (remote)" state.
if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
- // responseWriter.Push checks that the stream is peer-initiaed.
+ // responseWriter.Push checks that the stream is peer-initiated.
msg.done <- http2errStreamClosed
return
}
@@ -6715,6 +6726,7 @@ type http2ClientConn struct {
br *bufio.Reader
fr *http2Framer
lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
@@ -7092,7 +7104,7 @@ func (t *http2Transport) expectContinueTimeout() time.Duration {
}
func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) {
- return t.newClientConn(c, false)
+ return t.newClientConn(c, t.disableKeepAlives())
}
func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) {
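
With this change, NewClientConn marks the connection single-use whenever keep-alives are disabled, which in the upstream transport reflects DisableKeepAlives on the wrapped net/http Transport; such connections are not pooled for reuse after their first request. A hedged usage sketch against the un-bundled golang.org/x/net/http2 API:

    package main

    import (
        "net/http"

        "golang.org/x/net/http2"
    )

    // newClient builds a client whose HTTP/2 connections will be treated as
    // single-use, because DisableKeepAlives is set on the wrapped transport.
    func newClient() (*http.Client, error) {
        t1 := &http.Transport{DisableKeepAlives: true}
        if err := http2.ConfigureTransport(t1); err != nil {
            return nil, err
        }
        return &http.Client{Transport: t1}, nil
    }

    func main() {
        if _, err := newClient(); err != nil {
            panic(err)
        }
    }
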
@@ -7225,7 +7237,8 @@ func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) {
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
@@ -7235,6 +7248,16 @@ func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest
}
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *http2ClientConn) tooIdleLocked() bool {
+ // The Round(0) strips the monotonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
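
tooIdleLocked compares wall-clock times rather than monotonic readings: Round(0) drops the monotonic component of lastIdle, so a connection that sat through a laptop or VM suspend (where the monotonic clock may effectively freeze) still looks stale. A standalone sketch of the check (tooIdle is an illustrative free function, not the method itself):

    package main

    import (
        "fmt"
        "time"
    )

    // tooIdle sketches the method above: Round(0) strips the monotonic reading
    // from lastIdle, so the comparison uses wall time.
    func tooIdle(lastIdle time.Time, idleTimeout time.Duration) bool {
        return idleTimeout != 0 && !lastIdle.IsZero() &&
            time.Since(lastIdle.Round(0)) > idleTimeout
    }

    func main() {
        fmt.Println(tooIdle(time.Now().Add(-2*time.Minute), time.Minute)) // true: idle past the timeout
        fmt.Println(tooIdle(time.Time{}, time.Minute))                    // false: never went idle
    }
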
@@ -7654,6 +7677,7 @@ func (cc *http2ClientConn) awaitOpenSlotForRequest(req *Request) error {
}
return http2errClientConnUnusable
}
+ cc.lastIdle = time.Time{}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
@@ -7720,6 +7744,8 @@ var (
// abort request body write, but send stream reset of cancel.
http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+ http2errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)
func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
@@ -7742,10 +7768,32 @@ func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Clos
req := cs.req
hasTrailers := req.Trailer != nil
+ remainLen := http2actualContentLength(req)
+ hasContentLen := remainLen != -1
var sawEOF bool
for !sawEOF {
- n, err := body.Read(buf)
+ n, err := body.Read(buf[:len(buf)-1])
+ if hasContentLen {
+ remainLen -= int64(n)
+ if remainLen == 0 && err == nil {
+ // The request body's Content-Length was predeclared and
+ // we just finished reading it all, but the underlying io.Reader
+ // returned the final chunk with a nil error (which is one of
+ // the two valid things a Reader can do at EOF). Because we'd prefer
+ // to send the END_STREAM bit early, double-check that we're actually
+ // at EOF. Subsequent reads should return (0, EOF) at this point.
+ // If either value is different, we return an error in one of two ways below.
+ var n1 int
+ n1, err = body.Read(buf[n:])
+ remainLen -= int64(n1)
+ }
+ if remainLen < 0 {
+ err = http2errReqBodyTooLong
+ cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err)
+ return err
+ }
+ }
if err == io.EOF {
sawEOF = true
err = nil
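
This hunk enforces a predeclared Content-Length while the body streams out: it reads into one byte less than the full buffer so a spare byte is available for an EOF double-check once the declared length is consumed, and it resets the stream with http2errReqBodyTooLong if the reader produces more than it promised. A simplified, self-contained sketch of that loop (copyWithDeclaredLen and errBodyTooLong are illustrative names; the real code also handles trailers, flow control, and stream resets):

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    var errBodyTooLong = errors.New("body larger than declared content length")

    // copyWithDeclaredLen reads into buf[:len(buf)-1] so one spare byte is left
    // for the EOF double-check once the declared length has been consumed, and
    // it fails if the reader yields more bytes than were declared.
    func copyWithDeclaredLen(dst io.Writer, body io.Reader, declared int64) error {
        buf := make([]byte, 32)
        remain := declared
        for {
            n, err := body.Read(buf[:len(buf)-1])
            remain -= int64(n)
            if remain == 0 && err == nil {
                // Declared length consumed exactly, but the reader returned nil
                // instead of io.EOF. Peek one more byte to confirm we really are
                // at EOF before treating the body as complete.
                var n1 int
                n1, err = body.Read(buf[n:])
                remain -= int64(n1)
            }
            if remain < 0 {
                return errBodyTooLong // reader produced more than it declared
            }
            if n > 0 {
                if _, werr := dst.Write(buf[:n]); werr != nil {
                    return werr
                }
            }
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
        }
    }

    func main() {
        var sb strings.Builder
        fmt.Println(copyWithDeclaredLen(&sb, strings.NewReader("hello world"), 5))
        // Output: body larger than declared content length
    }
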
@@ -7958,7 +8006,29 @@ func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trail
if vv[0] == "" {
continue
}
-
+ } else if strings.EqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
}
for _, v := range vv {
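
Per RFC 7540, section 8.1.2.5, a client MAY split the Cookie header into one field per cookie-pair so HPACK can index the pairs separately; that is what the loop above does before falling through to the generic per-value emit. A standalone sketch of the splitting (splitCookiePairs is an illustrative name for the inlined logic):

    package main

    import (
        "fmt"
        "strings"
    )

    // splitCookiePairs emits each ";"-separated cookie-pair as its own "cookie"
    // field so HPACK can index the pairs individually.
    func splitCookiePairs(v string, emit func(pair string)) {
        for {
            p := strings.IndexByte(v, ';')
            if p < 0 {
                break
            }
            emit(v[:p])
            p++
            // Strip any spaces after the semicolon.
            for p+1 <= len(v) && v[p] == ' ' {
                p++
            }
            v = v[p:]
        }
        if len(v) > 0 {
            emit(v)
        }
    }

    func main() {
        splitCookiePairs("a=1; b=2; c=3", func(pair string) {
            fmt.Printf("cookie: %q\n", pair) // a=1, b=2, c=3 on separate lines
        })
    }
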
@@ -8096,6 +8166,7 @@ func (cc *http2ClientConn) streamByID(id uint32, andRemove bool) *http2clientStr
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
}
close(cs.done)
// Wake up checkResetOrDone via clientStream.awaitFlowControl and
@@ -9846,7 +9917,7 @@ func (n *http2priorityNode) addBytes(b int64) {
}
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this funcion returns true and the
+// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
@@ -10163,7 +10234,8 @@ type http2randomWriteScheduler struct {
zero http2writeQueue
// sq contains the stream-specific queues, keyed by stream ID.
- // When a stream is idle or closed, it's deleted from the map.
+ // When a stream is idle, closed, or emptied, it's deleted
+ // from the map.
sq map[uint32]*http2writeQueue
// pool of empty queues for reuse.
@@ -10207,8 +10279,12 @@ func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
return ws.zero.shift(), true
}
// Iterate over all non-idle streams until finding one that can be consumed.
- for _, q := range ws.sq {
+ for streamID, q := range ws.sq {
if wr, ok := q.consume(math.MaxInt32); ok {
+ if q.empty() {
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+ }
return wr, true
}
}
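
Previously a queue for a still-open stream lingered in ws.sq even after it was drained, so every later Pop had to iterate past empty entries; the hunk deletes the queue from the map as soon as it empties and returns it to the pool for reuse. A toy sketch of that map-plus-pool cleanup (queue and scheduler are illustrative stand-ins for http2writeQueue and http2randomWriteScheduler):

    package main

    import "fmt"

    // queue is a toy stand-in for http2writeQueue: a FIFO of pending writes.
    type queue struct{ items []string }

    func (q *queue) push(s string) { q.items = append(q.items, s) }
    func (q *queue) empty() bool   { return len(q.items) == 0 }
    func (q *queue) shift() string { s := q.items[0]; q.items = q.items[1:]; return s }

    // scheduler sketches the cleanup added above: a per-stream queue is removed
    // from the map and recycled as soon as it drains, instead of lingering as an
    // empty entry that later Pops must skip over.
    type scheduler struct {
        sq   map[uint32]*queue
        pool []*queue // stand-in for the bundle's queue pool
    }

    func (ws *scheduler) pop() (string, bool) {
        for id, q := range ws.sq {
            if !q.empty() {
                wr := q.shift()
                if q.empty() {
                    delete(ws.sq, id)            // drop the drained queue...
                    ws.pool = append(ws.pool, q) // ...and keep it for reuse
                }
                return wr, true
            }
        }
        return "", false
    }

    func main() {
        ws := &scheduler{sq: map[uint32]*queue{}}
        q := &queue{}
        q.push("HEADERS stream=1")
        ws.sq[1] = q
        fmt.Println(ws.pop())   // HEADERS stream=1 true
        fmt.Println(len(ws.sq)) // 0: the drained queue was removed and pooled
    }
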