gtsocial-umbx

server.go (102508B)


      1 // Copyright 2014 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 // TODO: turn off the serve goroutine when idle, so
      6 // an idle conn only has the readFrames goroutine active. (which could
      7 // also be optimized probably to pin less memory in crypto/tls). This
      8 // would involve tracking when the serve goroutine is active (atomic
      9 // int32 read/CAS probably?) and starting it up when frames arrive,
     10 // and shutting it down when all handlers exit. the occasional PING
     11 // packets could use time.AfterFunc to call sc.wakeStartServeLoop()
     12 // (which is a no-op if already running) and then queue the PING write
     13 // as normal. The serve loop would then exit in most cases (if no
     14 // Handlers running) and not be woken up again until the PING packet
     15 // returns.
     16 
     17 // TODO (maybe): add a mechanism for Handlers to go into
     18 // half-closed-local mode (rw.(io.Closer) test?) but not exit their
     19 // handler, and continue to be able to read from the
     20 // Request.Body. This would be a somewhat semantic change from HTTP/1
     21 // (or at least what we expose in net/http), so I'd probably want to
     22 // add it there too. For now, this package says that returning from
     23 // the Handler ServeHTTP function means you're both done reading and
     24 // done writing, without a way to stop just one or the other.
     25 
     26 package http2
     27 
     28 import (
     29 	"bufio"
     30 	"bytes"
     31 	"context"
     32 	"crypto/tls"
     33 	"errors"
     34 	"fmt"
     35 	"io"
     36 	"log"
     37 	"math"
     38 	"net"
     39 	"net/http"
     40 	"net/textproto"
     41 	"net/url"
     42 	"os"
     43 	"reflect"
     44 	"runtime"
     45 	"strconv"
     46 	"strings"
     47 	"sync"
     48 	"time"
     49 
     50 	"golang.org/x/net/http/httpguts"
     51 	"golang.org/x/net/http2/hpack"
     52 )
     53 
     54 const (
     55 	prefaceTimeout         = 10 * time.Second
     56 	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
     57 	handlerChunkWriteSize  = 4 << 10
     58 	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
     59 	maxQueuedControlFrames = 10000
     60 )
     61 
     62 var (
     63 	errClientDisconnected = errors.New("client disconnected")
     64 	errClosedBody         = errors.New("body closed by handler")
     65 	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
     66 	errStreamClosed       = errors.New("http2: stream closed")
     67 )
     68 
     69 var responseWriterStatePool = sync.Pool{
     70 	New: func() interface{} {
     71 		rws := &responseWriterState{}
     72 		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
     73 		return rws
     74 	},
     75 }
     76 
     77 // Test hooks.
     78 var (
     79 	testHookOnConn        func()
     80 	testHookGetServerConn func(*serverConn)
     81 	testHookOnPanicMu     *sync.Mutex // nil except in tests
     82 	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
     83 )
     84 
     85 // Server is an HTTP/2 server.
     86 type Server struct {
     87 	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
     88 	// which may run at a time over all connections.
     89 	// Negative or zero means no limit.
     90 	// TODO: implement
     91 	MaxHandlers int
     92 
     93 	// MaxConcurrentStreams optionally specifies the number of
     94 	// concurrent streams that each client may have open at a
     95 	// time. This is unrelated to the number of http.Handler goroutines
     96 	// which may be active globally, which is MaxHandlers.
     97 	// If zero, MaxConcurrentStreams defaults to at least 100, per
     98 	// the HTTP/2 spec's recommendations.
     99 	MaxConcurrentStreams uint32
    100 
    101 	// MaxDecoderHeaderTableSize optionally specifies the http2
    102 	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
    103 	// informs the remote endpoint of the maximum size of the header compression
    104 	// table used to decode header blocks, in octets. If zero, the default value
    105 	// of 4096 is used.
    106 	MaxDecoderHeaderTableSize uint32
    107 
    108 	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
    109 	// header compression table used for encoding request headers. Received
    110 	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
    111 	// the default value of 4096 is used.
    112 	MaxEncoderHeaderTableSize uint32
    113 
    114 	// MaxReadFrameSize optionally specifies the largest frame
    115 	// this server is willing to read. A valid value is between
    116 	// 16k and 16M, inclusive. If zero or otherwise invalid, a
    117 	// default value is used.
    118 	MaxReadFrameSize uint32
    119 
    120 	// PermitProhibitedCipherSuites, if true, permits the use of
    121 	// cipher suites prohibited by the HTTP/2 spec.
    122 	PermitProhibitedCipherSuites bool
    123 
    124 	// IdleTimeout specifies how long until idle clients should be
    125 	// closed with a GOAWAY frame. PING frames are not considered
    126 	// activity for the purposes of IdleTimeout.
    127 	IdleTimeout time.Duration
    128 
    129 	// MaxUploadBufferPerConnection is the size of the initial flow
    130 	// control window for each connection. The HTTP/2 spec does not
    131 	// allow this to be smaller than 65535 or larger than 2^32-1.
    132 	// If the value is outside this range, a default value will be
    133 	// used instead.
    134 	MaxUploadBufferPerConnection int32
    135 
    136 	// MaxUploadBufferPerStream is the size of the initial flow control
    137 	// window for each stream. The HTTP/2 spec does not allow this to
    138 	// be larger than 2^32-1. If the value is zero or larger than the
    139 	// maximum, a default value will be used instead.
    140 	MaxUploadBufferPerStream int32
    141 
    142 	// NewWriteScheduler constructs a write scheduler for a connection.
    143 	// If nil, a default scheduler is chosen.
    144 	NewWriteScheduler func() WriteScheduler
    145 
    146 	// CountError, if non-nil, is called on HTTP/2 server errors.
    147 	// It's intended to increment a metric for monitoring, such
    148 	// as an expvar or Prometheus metric.
    149 	// The errType consists of only ASCII word characters.
    150 	CountError func(errType string)
    151 
    152 	// Internal state. This is a pointer (rather than embedded directly)
    153 	// so that we don't embed a Mutex in this struct, which would make the
    154 	// struct non-copyable and might break some callers.
    155 	state *serverInternalState
    156 }
    157 
    158 func (s *Server) initialConnRecvWindowSize() int32 {
    159 	if s.MaxUploadBufferPerConnection >= initialWindowSize {
    160 		return s.MaxUploadBufferPerConnection
    161 	}
    162 	return 1 << 20
    163 }
    164 
    165 func (s *Server) initialStreamRecvWindowSize() int32 {
    166 	if s.MaxUploadBufferPerStream > 0 {
    167 		return s.MaxUploadBufferPerStream
    168 	}
    169 	return 1 << 20
    170 }
    171 
    172 func (s *Server) maxReadFrameSize() uint32 {
    173 	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
    174 		return v
    175 	}
    176 	return defaultMaxReadFrameSize
    177 }
    178 
    179 func (s *Server) maxConcurrentStreams() uint32 {
    180 	if v := s.MaxConcurrentStreams; v > 0 {
    181 		return v
    182 	}
    183 	return defaultMaxStreams
    184 }
    185 
    186 func (s *Server) maxDecoderHeaderTableSize() uint32 {
    187 	if v := s.MaxDecoderHeaderTableSize; v > 0 {
    188 		return v
    189 	}
    190 	return initialHeaderTableSize
    191 }
    192 
    193 func (s *Server) maxEncoderHeaderTableSize() uint32 {
    194 	if v := s.MaxEncoderHeaderTableSize; v > 0 {
    195 		return v
    196 	}
    197 	return initialHeaderTableSize
    198 }
    199 
    200 // maxQueuedControlFrames is the maximum number of control frames like
    201 // SETTINGS, PING and RST_STREAM that will be queued for writing before
    202 // the connection is closed to prevent memory exhaustion attacks.
    203 func (s *Server) maxQueuedControlFrames() int {
    204 	// TODO: if anybody asks, add a Server field, and remember to define the
    205 	// behavior of negative values.
    206 	return maxQueuedControlFrames
    207 }
    208 
    209 type serverInternalState struct {
    210 	mu          sync.Mutex
    211 	activeConns map[*serverConn]struct{}
    212 }
    213 
    214 func (s *serverInternalState) registerConn(sc *serverConn) {
    215 	if s == nil {
    216 		return // if the Server was used without calling ConfigureServer
    217 	}
    218 	s.mu.Lock()
    219 	s.activeConns[sc] = struct{}{}
    220 	s.mu.Unlock()
    221 }
    222 
    223 func (s *serverInternalState) unregisterConn(sc *serverConn) {
    224 	if s == nil {
    225 		return // if the Server was used without calling ConfigureServer
    226 	}
    227 	s.mu.Lock()
    228 	delete(s.activeConns, sc)
    229 	s.mu.Unlock()
    230 }
    231 
    232 func (s *serverInternalState) startGracefulShutdown() {
    233 	if s == nil {
    234 		return // if the Server was used without calling ConfigureServer
    235 	}
    236 	s.mu.Lock()
    237 	for sc := range s.activeConns {
    238 		sc.startGracefulShutdown()
    239 	}
    240 	s.mu.Unlock()
    241 }
    242 
    243 // ConfigureServer adds HTTP/2 support to a net/http Server.
    244 //
    245 // The configuration conf may be nil.
    246 //
    247 // ConfigureServer must be called before s begins serving.
    248 func ConfigureServer(s *http.Server, conf *Server) error {
    249 	if s == nil {
    250 		panic("nil *http.Server")
    251 	}
    252 	if conf == nil {
    253 		conf = new(Server)
    254 	}
    255 	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
    256 	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
    257 		if h1.IdleTimeout != 0 {
    258 			h2.IdleTimeout = h1.IdleTimeout
    259 		} else {
    260 			h2.IdleTimeout = h1.ReadTimeout
    261 		}
    262 	}
    263 	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
    264 
    265 	if s.TLSConfig == nil {
    266 		s.TLSConfig = new(tls.Config)
    267 	} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
    268 		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
    269 		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
    270 		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
    271 		haveRequired := false
    272 		for _, cs := range s.TLSConfig.CipherSuites {
    273 			switch cs {
    274 			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    275 				// Alternative MTI cipher to not discourage ECDSA-only servers.
    276 				// See http://golang.org/cl/30721 for further information.
    277 				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
    278 				haveRequired = true
    279 			}
    280 		}
    281 		if !haveRequired {
    282 			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
    283 		}
    284 	}
    285 
    286 	// Note: not setting MinVersion to tls.VersionTLS12,
    287 	// as we don't want to interfere with HTTP/1.1 traffic
    288 	// on the user's server. We enforce TLS 1.2 later once
    289 	// we accept a connection. Ideally this should be done
    290 	// during next-proto selection, but using TLS <1.2 with
    291 	// HTTP/2 is still the client's bug.
    292 
    293 	s.TLSConfig.PreferServerCipherSuites = true
    294 
    295 	if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
    296 		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
    297 	}
    298 	if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
    299 		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
    300 	}
    301 
    302 	if s.TLSNextProto == nil {
    303 		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
    304 	}
    305 	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
    306 		if testHookOnConn != nil {
    307 			testHookOnConn()
    308 		}
    309 		// The TLSNextProto interface predates contexts, so
    310 		// the net/http package passes down its per-connection
    311 		// base context via an exported but unadvertised
    312 		// method on the Handler. This is for internal
    313 		// net/http<=>http2 use only.
    314 		var ctx context.Context
    315 		type baseContexter interface {
    316 			BaseContext() context.Context
    317 		}
    318 		if bc, ok := h.(baseContexter); ok {
    319 			ctx = bc.BaseContext()
    320 		}
    321 		conf.ServeConn(c, &ServeConnOpts{
    322 			Context:    ctx,
    323 			Handler:    h,
    324 			BaseConfig: hs,
    325 		})
    326 	}
    327 	s.TLSNextProto[NextProtoTLS] = protoHandler
    328 	return nil
    329 }
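
        // Editorial note: a minimal usage sketch (not part of the upstream
        // file) showing how a caller typically enables HTTP/2 on a net/http
        // server via ConfigureServer; the mux, address, limits, and
        // certificate paths below are assumed placeholders.
        //
        //	mux := http.NewServeMux()
        //	srv := &http.Server{Addr: ":8443", Handler: mux}
        //	if err := http2.ConfigureServer(srv, &http2.Server{
        //		MaxConcurrentStreams: 128,
        //	}); err != nil {
        //		log.Fatal(err)
        //	}
        //	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))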
    330 
    331 // ServeConnOpts are options for the Server.ServeConn method.
    332 type ServeConnOpts struct {
    333 	// Context is the base context to use.
    334 	// If nil, context.Background is used.
    335 	Context context.Context
    336 
    337 	// BaseConfig optionally sets the base configuration
    338 	// for values. If nil, defaults are used.
    339 	BaseConfig *http.Server
    340 
    341 	// Handler specifies which handler to use for processing
    342 	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
    343 	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
    344 	Handler http.Handler
    345 
    346 	// UpgradeRequest is an initial request received on a connection
    347 	// undergoing an h2c upgrade. The request body must have been
    348 	// completely read from the connection before calling ServeConn,
    349 	// and the 101 Switching Protocols response written.
    350 	UpgradeRequest *http.Request
    351 
    352 	// Settings is the decoded contents of the HTTP2-Settings header
    353 	// in an h2c upgrade request.
    354 	Settings []byte
    355 
    356 	// SawClientPreface is set if the HTTP/2 connection preface
    357 	// has already been read from the connection.
    358 	SawClientPreface bool
    359 }
    360 
    361 func (o *ServeConnOpts) context() context.Context {
    362 	if o != nil && o.Context != nil {
    363 		return o.Context
    364 	}
    365 	return context.Background()
    366 }
    367 
    368 func (o *ServeConnOpts) baseConfig() *http.Server {
    369 	if o != nil && o.BaseConfig != nil {
    370 		return o.BaseConfig
    371 	}
    372 	return new(http.Server)
    373 }
    374 
    375 func (o *ServeConnOpts) handler() http.Handler {
    376 	if o != nil {
    377 		if o.Handler != nil {
    378 			return o.Handler
    379 		}
    380 		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
    381 			return o.BaseConfig.Handler
    382 		}
    383 	}
    384 	return http.DefaultServeMux
    385 }
    386 
    387 // ServeConn serves HTTP/2 requests on the provided connection and
    388 // blocks until the connection is no longer readable.
    389 //
    390 // ServeConn starts speaking HTTP/2 assuming that c has not had any
    391 // reads or writes. It writes its initial settings frame and expects
    392 // to be able to read the preface and settings frame from the
    393 // client. If c has a ConnectionState method like a *tls.Conn, the
    394 // ConnectionState is used to verify the TLS ciphersuite and to set
    395 // the Request.TLS field in Handlers.
    396 //
    397 // ServeConn does not support h2c by itself. Any h2c support must be
    398 // implemented in terms of providing a suitably-behaving net.Conn.
    399 //
    400 // The opts parameter is optional. If nil, default values are used.
    401 func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
    402 	baseCtx, cancel := serverConnBaseContext(c, opts)
    403 	defer cancel()
    404 
    405 	sc := &serverConn{
    406 		srv:                         s,
    407 		hs:                          opts.baseConfig(),
    408 		conn:                        c,
    409 		baseCtx:                     baseCtx,
    410 		remoteAddrStr:               c.RemoteAddr().String(),
    411 		bw:                          newBufferedWriter(c),
    412 		handler:                     opts.handler(),
    413 		streams:                     make(map[uint32]*stream),
    414 		readFrameCh:                 make(chan readFrameResult),
    415 		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
    416 		serveMsgCh:                  make(chan interface{}, 8),
    417 		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
    418 		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
    419 		doneServing:                 make(chan struct{}),
    420 		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
    421 		advMaxStreams:               s.maxConcurrentStreams(),
    422 		initialStreamSendWindowSize: initialWindowSize,
    423 		maxFrameSize:                initialMaxFrameSize,
    424 		serveG:                      newGoroutineLock(),
    425 		pushEnabled:                 true,
    426 		sawClientPreface:            opts.SawClientPreface,
    427 	}
    428 
    429 	s.state.registerConn(sc)
    430 	defer s.state.unregisterConn(sc)
    431 
    432 	// The net/http package sets the write deadline from the
    433 	// http.Server.WriteTimeout during the TLS handshake, but then
    434 	// passes the connection off to us with the deadline already set.
    435 	// Write deadlines are set per stream in serverConn.newStream.
    436 	// Disarm the net.Conn write deadline here.
    437 	if sc.hs.WriteTimeout != 0 {
    438 		sc.conn.SetWriteDeadline(time.Time{})
    439 	}
    440 
    441 	if s.NewWriteScheduler != nil {
    442 		sc.writeSched = s.NewWriteScheduler()
    443 	} else {
    444 		sc.writeSched = newRoundRobinWriteScheduler()
    445 	}
    446 
    447 	// These start at the RFC-specified defaults. If there is a higher
    448 	// configured value for inflow, that will be updated when we send a
    449 	// WINDOW_UPDATE shortly after sending SETTINGS.
    450 	sc.flow.add(initialWindowSize)
    451 	sc.inflow.init(initialWindowSize)
    452 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
    453 	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
    454 
    455 	fr := NewFramer(sc.bw, c)
    456 	if s.CountError != nil {
    457 		fr.countError = s.CountError
    458 	}
    459 	fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
    460 	fr.MaxHeaderListSize = sc.maxHeaderListSize()
    461 	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
    462 	sc.framer = fr
    463 
    464 	if tc, ok := c.(connectionStater); ok {
    465 		sc.tlsState = new(tls.ConnectionState)
    466 		*sc.tlsState = tc.ConnectionState()
    467 		// 9.2 Use of TLS Features
    468 		// An implementation of HTTP/2 over TLS MUST use TLS
    469 		// 1.2 or higher with the restrictions on feature set
    470 		// and cipher suite described in this section. Due to
    471 		// implementation limitations, it might not be
    472 		// possible to fail TLS negotiation. An endpoint MUST
    473 		// immediately terminate an HTTP/2 connection that
    474 		// does not meet the TLS requirements described in
    475 		// this section with a connection error (Section
    476 		// 5.4.1) of type INADEQUATE_SECURITY.
    477 		if sc.tlsState.Version < tls.VersionTLS12 {
    478 			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
    479 			return
    480 		}
    481 
    482 		if sc.tlsState.ServerName == "" {
    483 			// Client must use SNI, but we don't enforce that anymore,
    484 			// since it was causing problems when connecting to bare IP
    485 			// addresses during development.
    486 			//
    487 			// TODO: optionally enforce? Or enforce at the time we receive
    488 			// a new request, and verify the ServerName matches the :authority?
    489 			// But that precludes proxy situations, perhaps.
    490 			//
    491 			// So for now, do nothing here again.
    492 		}
    493 
    494 		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
    495 			// "Endpoints MAY choose to generate a connection error
    496 			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
    497 			// the prohibited cipher suites are negotiated."
    498 			//
    499 			// We choose that. In my opinion, the spec is weak
    500 			// here. It also says both parties must support at least
    501 			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
    502 			// excuses here. If we really must, we could allow an
    503 			// "AllowInsecureWeakCiphers" option on the server later.
    504 			// Let's see how it plays out first.
    505 			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
    506 			return
    507 		}
    508 	}
    509 
    510 	if opts.Settings != nil {
    511 		fr := &SettingsFrame{
    512 			FrameHeader: FrameHeader{valid: true},
    513 			p:           opts.Settings,
    514 		}
    515 		if err := fr.ForeachSetting(sc.processSetting); err != nil {
    516 			sc.rejectConn(ErrCodeProtocol, "invalid settings")
    517 			return
    518 		}
    519 		opts.Settings = nil
    520 	}
    521 
    522 	if hook := testHookGetServerConn; hook != nil {
    523 		hook(sc)
    524 	}
    525 
    526 	if opts.UpgradeRequest != nil {
    527 		sc.upgradeRequest(opts.UpgradeRequest)
    528 		opts.UpgradeRequest = nil
    529 	}
    530 
    531 	sc.serve()
    532 }
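
        // Editorial note: a hedged sketch (not part of the upstream file) of
        // driving ServeConn directly with connections accepted from a
        // caller-provided listener ln and an http.Handler named handler;
        // both names are assumptions for illustration only.
        //
        //	s := &http2.Server{}
        //	for {
        //		c, err := ln.Accept()
        //		if err != nil {
        //			log.Fatal(err)
        //		}
        //		go s.ServeConn(c, &http2.ServeConnOpts{Handler: handler})
        //	}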
    533 
    534 func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
    535 	ctx, cancel = context.WithCancel(opts.context())
    536 	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
    537 	if hs := opts.baseConfig(); hs != nil {
    538 		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
    539 	}
    540 	return
    541 }
    542 
    543 func (sc *serverConn) rejectConn(err ErrCode, debug string) {
    544 	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
    545 	// ignoring errors. hanging up anyway.
    546 	sc.framer.WriteGoAway(0, err, []byte(debug))
    547 	sc.bw.Flush()
    548 	sc.conn.Close()
    549 }
    550 
    551 type serverConn struct {
    552 	// Immutable:
    553 	srv              *Server
    554 	hs               *http.Server
    555 	conn             net.Conn
    556 	bw               *bufferedWriter // writing to conn
    557 	handler          http.Handler
    558 	baseCtx          context.Context
    559 	framer           *Framer
    560 	doneServing      chan struct{}          // closed when serverConn.serve ends
    561 	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
    562 	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
    563 	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
    564 	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
    565 	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
    566 	flow             outflow                // conn-wide (not stream-specific) outbound flow control
    567 	inflow           inflow                 // conn-wide inbound flow control
    568 	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
    569 	remoteAddrStr    string
    570 	writeSched       WriteScheduler
    571 
    572 	// Everything following is owned by the serve loop; use serveG.check():
    573 	serveG                      goroutineLock // used to verify funcs are on serve()
    574 	pushEnabled                 bool
    575 	sawClientPreface            bool // preface has already been read, used in h2c upgrade
    576 	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
    577 	needToSendSettingsAck       bool
    578 	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
    579 	queuedControlFrames         int    // control frames in the writeSched queue
    580 	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
    581 	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
    582 	curClientStreams            uint32 // number of open streams initiated by the client
    583 	curPushedStreams            uint32 // number of open streams initiated by server push
    584 	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
    585 	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
    586 	streams                     map[uint32]*stream
    587 	initialStreamSendWindowSize int32
    588 	maxFrameSize                int32
    589 	peerMaxHeaderListSize       uint32            // zero means unknown (default)
    590 	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
    591 	canonHeaderKeysSize         int               // canonHeader keys size in bytes
    592 	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
    593 	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
    594 	needsFrameFlush             bool              // last frame write wasn't a flush
    595 	inGoAway                    bool              // we've started to or sent GOAWAY
    596 	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
    597 	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
    598 	goAwayCode                  ErrCode
    599 	shutdownTimer               *time.Timer // nil until used
    600 	idleTimer                   *time.Timer // nil if unused
    601 
    602 	// Owned by the writeFrameAsync goroutine:
    603 	headerWriteBuf bytes.Buffer
    604 	hpackEncoder   *hpack.Encoder
    605 
    606 	// Used by startGracefulShutdown.
    607 	shutdownOnce sync.Once
    608 }
    609 
    610 func (sc *serverConn) maxHeaderListSize() uint32 {
    611 	n := sc.hs.MaxHeaderBytes
    612 	if n <= 0 {
    613 		n = http.DefaultMaxHeaderBytes
    614 	}
    615 	// http2's count is in a slightly different unit and includes 32 bytes per pair.
    616 	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
    617 	const perFieldOverhead = 32 // per http2 spec
    618 	const typicalHeaders = 10   // conservative
    619 	return uint32(n + typicalHeaders*perFieldOverhead)
    620 }
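
        // Editorial note: a worked example of the padding above, assuming the
        // net/http default of http.DefaultMaxHeaderBytes (1 << 20 bytes):
        //
        //	1<<20 + 10*32 = 1048896 // advertised as SETTINGS_MAX_HEADER_LIST_SIZE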
    621 
    622 func (sc *serverConn) curOpenStreams() uint32 {
    623 	sc.serveG.check()
    624 	return sc.curClientStreams + sc.curPushedStreams
    625 }
    626 
    627 // stream represents a stream. This is the minimal metadata needed by
    628 // the serve goroutine. Most of the actual stream state is owned by
    629 // the http.Handler's goroutine in the responseWriter. Because the
    630 // responseWriter's responseWriterState is recycled at the end of a
    631 // handler, this struct intentionally has no pointer to the
    632 // *responseWriter{,State} itself, as the Handler ending nils out the
    633 // responseWriter's state field.
    634 type stream struct {
    635 	// immutable:
    636 	sc        *serverConn
    637 	id        uint32
    638 	body      *pipe       // non-nil if expecting DATA frames
    639 	cw        closeWaiter // closed once the stream transitions to the closed state
    640 	ctx       context.Context
    641 	cancelCtx func()
    642 
    643 	// owned by serverConn's serve loop:
    644 	bodyBytes        int64   // body bytes seen so far
    645 	declBodyBytes    int64   // or -1 if undeclared
    646 	flow             outflow // limits writing from Handler to client
    647 	inflow           inflow  // what the client is allowed to POST/etc to us
    648 	state            streamState
    649 	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
    650 	gotTrailerHeader bool        // HEADER frame for trailers was seen
    651 	wroteHeaders     bool        // whether we wrote headers (not status 100)
    652 	readDeadline     *time.Timer // nil if unused
    653 	writeDeadline    *time.Timer // nil if unused
    654 	closeErr         error       // set before cw is closed
    655 
    656 	trailer    http.Header // accumulated trailers
    657 	reqTrailer http.Header // handler's Request.Trailer
    658 }
    659 
    660 func (sc *serverConn) Framer() *Framer  { return sc.framer }
    661 func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
    662 func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
    663 func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
    664 	return sc.hpackEncoder, &sc.headerWriteBuf
    665 }
    666 
    667 func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
    668 	sc.serveG.check()
    669 	// http://tools.ietf.org/html/rfc7540#section-5.1
    670 	if st, ok := sc.streams[streamID]; ok {
    671 		return st.state, st
    672 	}
    673 	// "The first use of a new stream identifier implicitly closes all
    674 	// streams in the "idle" state that might have been initiated by
    675 	// that peer with a lower-valued stream identifier. For example, if
    676 	// a client sends a HEADERS frame on stream 7 without ever sending a
    677 	// frame on stream 5, then stream 5 transitions to the "closed"
    678 	// state when the first frame for stream 7 is sent or received."
    679 	if streamID%2 == 1 {
    680 		if streamID <= sc.maxClientStreamID {
    681 			return stateClosed, nil
    682 		}
    683 	} else {
    684 		if streamID <= sc.maxPushPromiseID {
    685 			return stateClosed, nil
    686 		}
    687 	}
    688 	return stateIdle, nil
    689 }
    690 
    691 // setConnState calls the net/http ConnState hook for this connection, if configured.
    692 // Note that the net/http package does StateNew and StateClosed for us.
    693 // There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
    694 func (sc *serverConn) setConnState(state http.ConnState) {
    695 	if sc.hs.ConnState != nil {
    696 		sc.hs.ConnState(sc.conn, state)
    697 	}
    698 }
    699 
    700 func (sc *serverConn) vlogf(format string, args ...interface{}) {
    701 	if VerboseLogs {
    702 		sc.logf(format, args...)
    703 	}
    704 }
    705 
    706 func (sc *serverConn) logf(format string, args ...interface{}) {
    707 	if lg := sc.hs.ErrorLog; lg != nil {
    708 		lg.Printf(format, args...)
    709 	} else {
    710 		log.Printf(format, args...)
    711 	}
    712 }
    713 
    714 // errno returns v's underlying uintptr, else 0.
    715 //
    716 // TODO: remove this helper function once http2 can use build
    717 // tags. See comment in isClosedConnError.
    718 func errno(v error) uintptr {
    719 	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
    720 		return uintptr(rv.Uint())
    721 	}
    722 	return 0
    723 }
    724 
    725 // isClosedConnError reports whether err is an error from use of a closed
    726 // network connection.
    727 func isClosedConnError(err error) bool {
    728 	if err == nil {
    729 		return false
    730 	}
    731 
    732 	// TODO: remove this string search and be more like the Windows
    733 	// case below. That might involve modifying the standard library
    734 	// to return better error types.
    735 	str := err.Error()
    736 	if strings.Contains(str, "use of closed network connection") {
    737 		return true
    738 	}
    739 
    740 	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
    741 	// build tags, so I can't make an http2_windows.go file with
    742 	// Windows-specific stuff. Fix that and move this, once we
    743 	// have a way to bundle this into std's net/http somehow.
    744 	if runtime.GOOS == "windows" {
    745 		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
    746 			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
    747 				const WSAECONNABORTED = 10053
    748 				const WSAECONNRESET = 10054
    749 				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
    750 					return true
    751 				}
    752 			}
    753 		}
    754 	}
    755 	return false
    756 }
    757 
    758 func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
    759 	if err == nil {
    760 		return
    761 	}
    762 	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
    763 		// Boring, expected errors.
    764 		sc.vlogf(format, args...)
    765 	} else {
    766 		sc.logf(format, args...)
    767 	}
    768 }
    769 
    770 // maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
    771 // of the entries in the canonHeader cache.
    772 // This should be larger than the size of unique, uncommon header keys likely to
    773 // be sent by the peer, while not so high as to permit unreasonable memory usage
    774 // if the peer sends an unbounded number of unique header keys.
    775 const maxCachedCanonicalHeadersKeysSize = 2048
    776 
    777 func (sc *serverConn) canonicalHeader(v string) string {
    778 	sc.serveG.check()
    779 	buildCommonHeaderMapsOnce()
    780 	cv, ok := commonCanonHeader[v]
    781 	if ok {
    782 		return cv
    783 	}
    784 	cv, ok = sc.canonHeader[v]
    785 	if ok {
    786 		return cv
    787 	}
    788 	if sc.canonHeader == nil {
    789 		sc.canonHeader = make(map[string]string)
    790 	}
    791 	cv = http.CanonicalHeaderKey(v)
    792 	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
    793 	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
    794 		sc.canonHeader[v] = cv
    795 		sc.canonHeaderKeysSize += size
    796 	}
    797 	return cv
    798 }
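
        // Editorial note: a rough worked example of the cache accounting
        // above, using a hypothetical 12-byte header key:
        //
        //	100 + len("X-Custom-Key")*2 = 100 + 12*2 = 124 bytes
        //	2048 / 124 ≈ 16 cacheable uncommon keys per connection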
    799 
    800 type readFrameResult struct {
    801 	f   Frame // valid until readMore is called
    802 	err error
    803 
    804 	// readMore should be called once the consumer no longer needs or
    805 	// retains f. After readMore, f is invalid and more frames can be
    806 	// read.
    807 	readMore func()
    808 }
    809 
    810 // readFrames is the loop that reads incoming frames.
    811 // It takes care to only read one frame at a time, blocking until the
    812 // consumer is done with the frame.
    813 // It's run on its own goroutine.
    814 func (sc *serverConn) readFrames() {
    815 	gate := make(gate)
    816 	gateDone := gate.Done
    817 	for {
    818 		f, err := sc.framer.ReadFrame()
    819 		select {
    820 		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
    821 		case <-sc.doneServing:
    822 			return
    823 		}
    824 		select {
    825 		case <-gate:
    826 		case <-sc.doneServing:
    827 			return
    828 		}
    829 		if terminalReadFrameError(err) {
    830 			return
    831 		}
    832 	}
    833 }
    834 
    835 // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
    836 type frameWriteResult struct {
    837 	_   incomparable
    838 	wr  FrameWriteRequest // what was written (or attempted)
    839 	err error             // result of the writeFrame call
    840 }
    841 
    842 // writeFrameAsync runs in its own goroutine and writes a single frame
    843 // and then reports when it's done.
    844 // At most one goroutine can be running writeFrameAsync at a time per
    845 // serverConn.
    846 func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
    847 	var err error
    848 	if wd == nil {
    849 		err = wr.write.writeFrame(sc)
    850 	} else {
    851 		err = sc.framer.endWrite()
    852 	}
    853 	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
    854 }
    855 
    856 func (sc *serverConn) closeAllStreamsOnConnClose() {
    857 	sc.serveG.check()
    858 	for _, st := range sc.streams {
    859 		sc.closeStream(st, errClientDisconnected)
    860 	}
    861 }
    862 
    863 func (sc *serverConn) stopShutdownTimer() {
    864 	sc.serveG.check()
    865 	if t := sc.shutdownTimer; t != nil {
    866 		t.Stop()
    867 	}
    868 }
    869 
    870 func (sc *serverConn) notePanic() {
    871 	// Note: this is for serverConn.serve panicking, not http.Handler code.
    872 	if testHookOnPanicMu != nil {
    873 		testHookOnPanicMu.Lock()
    874 		defer testHookOnPanicMu.Unlock()
    875 	}
    876 	if testHookOnPanic != nil {
    877 		if e := recover(); e != nil {
    878 			if testHookOnPanic(sc, e) {
    879 				panic(e)
    880 			}
    881 		}
    882 	}
    883 }
    884 
    885 func (sc *serverConn) serve() {
    886 	sc.serveG.check()
    887 	defer sc.notePanic()
    888 	defer sc.conn.Close()
    889 	defer sc.closeAllStreamsOnConnClose()
    890 	defer sc.stopShutdownTimer()
    891 	defer close(sc.doneServing) // unblocks handlers trying to send
    892 
    893 	if VerboseLogs {
    894 		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
    895 	}
    896 
    897 	sc.writeFrame(FrameWriteRequest{
    898 		write: writeSettings{
    899 			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
    900 			{SettingMaxConcurrentStreams, sc.advMaxStreams},
    901 			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
    902 			{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
    903 			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
    904 		},
    905 	})
    906 	sc.unackedSettings++
    907 
    908 	// Each connection starts with initialWindowSize inflow tokens.
    909 	// If a higher value is configured, we add more tokens.
    910 	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
    911 		sc.sendWindowUpdate(nil, int(diff))
    912 	}
    913 
    914 	if err := sc.readPreface(); err != nil {
    915 		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
    916 		return
    917 	}
    918 	// Now that we've got the preface, get us out of the
    919 	// "StateNew" state. We can't go directly to idle, though.
    920 	// Active means we read some data and anticipate a request. We'll
    921 	// do another Active when we get a HEADERS frame.
    922 	sc.setConnState(http.StateActive)
    923 	sc.setConnState(http.StateIdle)
    924 
    925 	if sc.srv.IdleTimeout != 0 {
    926 		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
    927 		defer sc.idleTimer.Stop()
    928 	}
    929 
    930 	go sc.readFrames() // closed by defer sc.conn.Close above
    931 
    932 	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
    933 	defer settingsTimer.Stop()
    934 
    935 	loopNum := 0
    936 	for {
    937 		loopNum++
    938 		select {
    939 		case wr := <-sc.wantWriteFrameCh:
    940 			if se, ok := wr.write.(StreamError); ok {
    941 				sc.resetStream(se)
    942 				break
    943 			}
    944 			sc.writeFrame(wr)
    945 		case res := <-sc.wroteFrameCh:
    946 			sc.wroteFrame(res)
    947 		case res := <-sc.readFrameCh:
    948 			// Process any written frames before reading new frames from the client since a
    949 			// written frame could have triggered a new stream to be started.
    950 			if sc.writingFrameAsync {
    951 				select {
    952 				case wroteRes := <-sc.wroteFrameCh:
    953 					sc.wroteFrame(wroteRes)
    954 				default:
    955 				}
    956 			}
    957 			if !sc.processFrameFromReader(res) {
    958 				return
    959 			}
    960 			res.readMore()
    961 			if settingsTimer != nil {
    962 				settingsTimer.Stop()
    963 				settingsTimer = nil
    964 			}
    965 		case m := <-sc.bodyReadCh:
    966 			sc.noteBodyRead(m.st, m.n)
    967 		case msg := <-sc.serveMsgCh:
    968 			switch v := msg.(type) {
    969 			case func(int):
    970 				v(loopNum) // for testing
    971 			case *serverMessage:
    972 				switch v {
    973 				case settingsTimerMsg:
    974 					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
    975 					return
    976 				case idleTimerMsg:
    977 					sc.vlogf("connection is idle")
    978 					sc.goAway(ErrCodeNo)
    979 				case shutdownTimerMsg:
    980 					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
    981 					return
    982 				case gracefulShutdownMsg:
    983 					sc.startGracefulShutdownInternal()
    984 				default:
    985 					panic("unknown timer")
    986 				}
    987 			case *startPushRequest:
    988 				sc.startPush(v)
    989 			case func(*serverConn):
    990 				v(sc)
    991 			default:
    992 				panic(fmt.Sprintf("unexpected type %T", v))
    993 			}
    994 		}
    995 
    996 		// If the peer is causing us to generate a lot of control frames,
    997 		// but not reading them from us, assume they are trying to make us
    998 		// run out of memory.
    999 		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
   1000 			sc.vlogf("http2: too many control frames in send queue, closing connection")
   1001 			return
   1002 		}
   1003 
   1004 		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
   1005 		// with no error code (graceful shutdown), don't start the timer until
   1006 		// all open streams have been completed.
   1007 		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
   1008 		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
   1009 		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
   1010 			sc.shutDownIn(goAwayTimeout)
   1011 		}
   1012 	}
   1013 }
   1014 
   1015 func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
   1016 	select {
   1017 	case <-sc.doneServing:
   1018 	case <-sharedCh:
   1019 		close(privateCh)
   1020 	}
   1021 }
   1022 
   1023 type serverMessage int
   1024 
   1025 // Message values sent to serveMsgCh.
   1026 var (
   1027 	settingsTimerMsg    = new(serverMessage)
   1028 	idleTimerMsg        = new(serverMessage)
   1029 	shutdownTimerMsg    = new(serverMessage)
   1030 	gracefulShutdownMsg = new(serverMessage)
   1031 )
   1032 
   1033 func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
   1034 func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
   1035 func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
   1036 
   1037 func (sc *serverConn) sendServeMsg(msg interface{}) {
   1038 	sc.serveG.checkNotOn() // NOT
   1039 	select {
   1040 	case sc.serveMsgCh <- msg:
   1041 	case <-sc.doneServing:
   1042 	}
   1043 }
   1044 
   1045 var errPrefaceTimeout = errors.New("timeout waiting for client preface")
   1046 
   1047 // readPreface reads the ClientPreface greeting from the peer or
   1048 // returns errPrefaceTimeout on timeout, or an error if the greeting
   1049 // is invalid.
   1050 func (sc *serverConn) readPreface() error {
   1051 	if sc.sawClientPreface {
   1052 		return nil
   1053 	}
   1054 	errc := make(chan error, 1)
   1055 	go func() {
   1056 		// Read the client preface
   1057 		buf := make([]byte, len(ClientPreface))
   1058 		if _, err := io.ReadFull(sc.conn, buf); err != nil {
   1059 			errc <- err
   1060 		} else if !bytes.Equal(buf, clientPreface) {
   1061 			errc <- fmt.Errorf("bogus greeting %q", buf)
   1062 		} else {
   1063 			errc <- nil
   1064 		}
   1065 	}()
   1066 	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
   1067 	defer timer.Stop()
   1068 	select {
   1069 	case <-timer.C:
   1070 		return errPrefaceTimeout
   1071 	case err := <-errc:
   1072 		if err == nil {
   1073 			if VerboseLogs {
   1074 				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
   1075 			}
   1076 		}
   1077 		return err
   1078 	}
   1079 }
   1080 
   1081 var errChanPool = sync.Pool{
   1082 	New: func() interface{} { return make(chan error, 1) },
   1083 }
   1084 
   1085 var writeDataPool = sync.Pool{
   1086 	New: func() interface{} { return new(writeData) },
   1087 }
   1088 
   1089 // writeDataFromHandler writes DATA response frames from a handler on
   1090 // the given stream.
   1091 func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
   1092 	ch := errChanPool.Get().(chan error)
   1093 	writeArg := writeDataPool.Get().(*writeData)
   1094 	*writeArg = writeData{stream.id, data, endStream}
   1095 	err := sc.writeFrameFromHandler(FrameWriteRequest{
   1096 		write:  writeArg,
   1097 		stream: stream,
   1098 		done:   ch,
   1099 	})
   1100 	if err != nil {
   1101 		return err
   1102 	}
   1103 	var frameWriteDone bool // the frame write is done (successfully or not)
   1104 	select {
   1105 	case err = <-ch:
   1106 		frameWriteDone = true
   1107 	case <-sc.doneServing:
   1108 		return errClientDisconnected
   1109 	case <-stream.cw:
   1110 		// If both ch and stream.cw were ready (as might
   1111 		// happen on the final Write after an http.Handler
   1112 		// ends), prefer the write result. Otherwise this
   1113 		// might just be us successfully closing the stream.
   1114 		// The writeFrameAsync and serve goroutines guarantee
   1115 		// that the ch send will happen before the stream.cw
   1116 		// close.
   1117 		select {
   1118 		case err = <-ch:
   1119 			frameWriteDone = true
   1120 		default:
   1121 			return errStreamClosed
   1122 		}
   1123 	}
   1124 	errChanPool.Put(ch)
   1125 	if frameWriteDone {
   1126 		writeDataPool.Put(writeArg)
   1127 	}
   1128 	return err
   1129 }
   1130 
   1131 // writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
   1132 // if the connection has gone away.
   1133 //
   1134 // This must not be run from the serve goroutine itself, else it might
   1135 // deadlock writing to sc.wantWriteFrameCh (which is only mildly
   1136 // buffered and is read by serve itself). If you're on the serve
   1137 // goroutine, call writeFrame instead.
   1138 func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
   1139 	sc.serveG.checkNotOn() // NOT
   1140 	select {
   1141 	case sc.wantWriteFrameCh <- wr:
   1142 		return nil
   1143 	case <-sc.doneServing:
   1144 		// Serve loop is gone.
   1145 		// Client has closed their connection to the server.
   1146 		return errClientDisconnected
   1147 	}
   1148 }
   1149 
   1150 // writeFrame schedules a frame to write and sends it if there's nothing
   1151 // already being written.
   1152 //
   1153 // There is no pushback here (the serve goroutine never blocks). It's
   1154 // the http.Handlers that block, waiting for their previous frames to
   1155 // make it onto the wire.
   1156 //
   1157 // If you're not on the serve goroutine, use writeFrameFromHandler instead.
   1158 func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
   1159 	sc.serveG.check()
   1160 
   1161 	// If true, wr will not be written and wr.done will not be signaled.
   1162 	var ignoreWrite bool
   1163 
   1164 	// We are not allowed to write frames on closed streams. RFC 7540 Section
   1165 	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
   1166 	// a closed stream." Our server never sends PRIORITY, so that exception
   1167 	// does not apply.
   1168 	//
   1169 	// The serverConn might close an open stream while the stream's handler
   1170 	// is still running. For example, the server might close a stream when it
   1171 	// receives bad data from the client. If this happens, the handler might
   1172 	// attempt to write a frame after the stream has been closed (since the
   1173 	// handler hasn't yet been notified of the close). In this case, we simply
   1174 	// ignore the frame. The handler will notice that the stream is closed when
   1175 	// it waits for the frame to be written.
   1176 	//
   1177 	// As an exception to this rule, we allow sending RST_STREAM after close.
   1178 	// This allows us to immediately reject new streams without tracking any
   1179 	// state for those streams (except for the queued RST_STREAM frame). This
   1180 	// may result in duplicate RST_STREAMs in some cases, but the client should
   1181 	// ignore those.
   1182 	if wr.StreamID() != 0 {
   1183 		_, isReset := wr.write.(StreamError)
   1184 		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
   1185 			ignoreWrite = true
   1186 		}
   1187 	}
   1188 
   1189 	// Don't send a 100-continue response if we've already sent headers.
   1190 	// See golang.org/issue/14030.
   1191 	switch wr.write.(type) {
   1192 	case *writeResHeaders:
   1193 		wr.stream.wroteHeaders = true
   1194 	case write100ContinueHeadersFrame:
   1195 		if wr.stream.wroteHeaders {
   1196 			// We do not need to notify wr.done because this frame is
   1197 			// never written with wr.done != nil.
   1198 			if wr.done != nil {
   1199 				panic("wr.done != nil for write100ContinueHeadersFrame")
   1200 			}
   1201 			ignoreWrite = true
   1202 		}
   1203 	}
   1204 
   1205 	if !ignoreWrite {
   1206 		if wr.isControl() {
   1207 			sc.queuedControlFrames++
   1208 			// For extra safety, detect wraparounds, which should not happen,
   1209 			// and pull the plug.
   1210 			if sc.queuedControlFrames < 0 {
   1211 				sc.conn.Close()
   1212 			}
   1213 		}
   1214 		sc.writeSched.Push(wr)
   1215 	}
   1216 	sc.scheduleFrameWrite()
   1217 }
   1218 
   1219 // startFrameWrite starts writing wr, in a separate goroutine when the
   1220 // write might block on the network, and updates the serve goroutine's
   1221 // view of the world from the info in wr.
   1222 func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
   1223 	sc.serveG.check()
   1224 	if sc.writingFrame {
   1225 		panic("internal error: can only be writing one frame at a time")
   1226 	}
   1227 
   1228 	st := wr.stream
   1229 	if st != nil {
   1230 		switch st.state {
   1231 		case stateHalfClosedLocal:
   1232 			switch wr.write.(type) {
   1233 			case StreamError, handlerPanicRST, writeWindowUpdate:
   1234 				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
   1235 				// in this state. (We never send PRIORITY from the server, so that is not checked.)
   1236 			default:
   1237 				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
   1238 			}
   1239 		case stateClosed:
   1240 			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
   1241 		}
   1242 	}
   1243 	if wpp, ok := wr.write.(*writePushPromise); ok {
   1244 		var err error
   1245 		wpp.promisedID, err = wpp.allocatePromisedID()
   1246 		if err != nil {
   1247 			sc.writingFrameAsync = false
   1248 			wr.replyToWriter(err)
   1249 			return
   1250 		}
   1251 	}
   1252 
   1253 	sc.writingFrame = true
   1254 	sc.needsFrameFlush = true
   1255 	if wr.write.staysWithinBuffer(sc.bw.Available()) {
   1256 		sc.writingFrameAsync = false
   1257 		err := wr.write.writeFrame(sc)
   1258 		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
   1259 	} else if wd, ok := wr.write.(*writeData); ok {
   1260 		// Encode the frame in the serve goroutine, to ensure we don't have
   1261 		// any lingering asynchronous references to data passed to Write.
   1262 		// See https://go.dev/issue/58446.
   1263 		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
   1264 		sc.writingFrameAsync = true
   1265 		go sc.writeFrameAsync(wr, wd)
   1266 	} else {
   1267 		sc.writingFrameAsync = true
   1268 		go sc.writeFrameAsync(wr, nil)
   1269 	}
   1270 }
   1271 
   1272 // errHandlerPanicked is the error given to any callers blocked in a read from
   1273 // Request.Body when the main goroutine panics. Since most handlers read in the
   1274 // main ServeHTTP goroutine, this will show up rarely.
   1275 var errHandlerPanicked = errors.New("http2: handler panicked")
   1276 
   1277 // wroteFrame is called on the serve goroutine with the result of
   1278 // whatever happened on writeFrameAsync.
   1279 func (sc *serverConn) wroteFrame(res frameWriteResult) {
   1280 	sc.serveG.check()
   1281 	if !sc.writingFrame {
   1282 		panic("internal error: expected to be already writing a frame")
   1283 	}
   1284 	sc.writingFrame = false
   1285 	sc.writingFrameAsync = false
   1286 
   1287 	wr := res.wr
   1288 
   1289 	if writeEndsStream(wr.write) {
   1290 		st := wr.stream
   1291 		if st == nil {
   1292 			panic("internal error: expecting non-nil stream")
   1293 		}
   1294 		switch st.state {
   1295 		case stateOpen:
   1296 			// Here we would go to stateHalfClosedLocal in
   1297 			// theory, but since our handler is done and
   1298 			// the net/http package provides no mechanism
   1299 			// for closing a ResponseWriter while still
   1300 			// reading data (see possible TODO at top of
   1301 			// this file), we go into closed state here
   1302 			// anyway, after telling the peer we're
   1303 			// hanging up on them. We'll transition to
   1304 			// stateClosed after the RST_STREAM frame is
   1305 			// written.
   1306 			st.state = stateHalfClosedLocal
   1307 			// Section 8.1: a server MAY request that the client abort
   1308 			// transmission of a request without error by sending a
   1309 			// RST_STREAM with an error code of NO_ERROR after sending
   1310 			// a complete response.
   1311 			sc.resetStream(streamError(st.id, ErrCodeNo))
   1312 		case stateHalfClosedRemote:
   1313 			sc.closeStream(st, errHandlerComplete)
   1314 		}
   1315 	} else {
   1316 		switch v := wr.write.(type) {
   1317 		case StreamError:
   1318 			// st may be unknown if the RST_STREAM was generated to reject bad input.
   1319 			if st, ok := sc.streams[v.StreamID]; ok {
   1320 				sc.closeStream(st, v)
   1321 			}
   1322 		case handlerPanicRST:
   1323 			sc.closeStream(wr.stream, errHandlerPanicked)
   1324 		}
   1325 	}
   1326 
   1327 	// Reply (if requested) to unblock the ServeHTTP goroutine.
   1328 	wr.replyToWriter(res.err)
   1329 
   1330 	sc.scheduleFrameWrite()
   1331 }
   1332 
   1333 // scheduleFrameWrite tickles the frame writing scheduler.
   1334 //
   1335 // If a frame is already being written, nothing happens. This will be called again
   1336 // when the frame is done being written.
   1337 //
   1338 // If a frame isn't being written and we need to send one, the best frame
   1339 // to send is selected by writeSched.
   1340 //
   1341 // If a frame isn't being written and there's nothing else to send, we
   1342 // flush the write buffer.
   1343 func (sc *serverConn) scheduleFrameWrite() {
   1344 	sc.serveG.check()
   1345 	if sc.writingFrame || sc.inFrameScheduleLoop {
   1346 		return
   1347 	}
   1348 	sc.inFrameScheduleLoop = true
   1349 	for !sc.writingFrameAsync {
   1350 		if sc.needToSendGoAway {
   1351 			sc.needToSendGoAway = false
   1352 			sc.startFrameWrite(FrameWriteRequest{
   1353 				write: &writeGoAway{
   1354 					maxStreamID: sc.maxClientStreamID,
   1355 					code:        sc.goAwayCode,
   1356 				},
   1357 			})
   1358 			continue
   1359 		}
   1360 		if sc.needToSendSettingsAck {
   1361 			sc.needToSendSettingsAck = false
   1362 			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
   1363 			continue
   1364 		}
   1365 		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
   1366 			if wr, ok := sc.writeSched.Pop(); ok {
   1367 				if wr.isControl() {
   1368 					sc.queuedControlFrames--
   1369 				}
   1370 				sc.startFrameWrite(wr)
   1371 				continue
   1372 			}
   1373 		}
   1374 		if sc.needsFrameFlush {
   1375 			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
   1376 			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
   1377 			continue
   1378 		}
   1379 		break
   1380 	}
   1381 	sc.inFrameScheduleLoop = false
   1382 }
   1383 
   1384 // startGracefulShutdown gracefully shuts down a connection. This
   1385 // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
   1386 // shutting down. The connection isn't closed until all current
   1387 // streams are done.
   1388 //
   1389 // startGracefulShutdown returns immediately; it does not wait until
   1390 // the connection has shut down.
   1391 func (sc *serverConn) startGracefulShutdown() {
   1392 	sc.serveG.checkNotOn() // NOT
   1393 	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
   1394 }
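
// Illustrative sketch (not from this file): from application code the
// graceful path above is normally reached through net/http's own
// shutdown machinery rather than by calling into this package. Names
// such as mux and the certificate paths are placeholders.
//
//	srv := &http.Server{Addr: ":8443", Handler: mux}
//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
//		log.Fatal(err)
//	}
//	go srv.ListenAndServeTLS("cert.pem", "key.pem")
//
//	// Later, e.g. on SIGTERM: Shutdown stops accepting new work and
//	// lets in-flight streams finish; HTTP/2 connections are asked to
//	// wind down (GOAWAY with NO_ERROR) instead of being cut off.
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	if err := srv.Shutdown(ctx); err != nil {
//		log.Printf("shutdown: %v", err)
//	}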
   1395 
   1396 // After sending GOAWAY with an error code (non-graceful shutdown), the
   1397 // connection will close after goAwayTimeout.
   1398 //
   1399 // If we close the connection immediately after sending GOAWAY, there may
   1400 // be unsent data in our kernel receive buffer, which will cause the kernel
   1401 // to send a TCP RST on close() instead of a FIN. This RST will abort the
   1402 // connection immediately, whether or not the client had received the GOAWAY.
   1403 //
   1404 // Ideally we should delay for at least 1 RTT + epsilon so the client has
   1405 // a chance to read the GOAWAY and stop sending messages. Measuring RTT
   1406 // is hard, so we approximate with 1 second. See golang.org/issue/18701.
   1407 //
   1408 // This is a var so it can be shorter in tests, where all requests use the
   1409 // loopback interface, making the expected RTT very small.
   1410 //
   1411 // TODO: configurable?
   1412 var goAwayTimeout = 1 * time.Second
   1413 
   1414 func (sc *serverConn) startGracefulShutdownInternal() {
   1415 	sc.goAway(ErrCodeNo)
   1416 }
   1417 
   1418 func (sc *serverConn) goAway(code ErrCode) {
   1419 	sc.serveG.check()
   1420 	if sc.inGoAway {
   1421 		if sc.goAwayCode == ErrCodeNo {
   1422 			sc.goAwayCode = code
   1423 		}
   1424 		return
   1425 	}
   1426 	sc.inGoAway = true
   1427 	sc.needToSendGoAway = true
   1428 	sc.goAwayCode = code
   1429 	sc.scheduleFrameWrite()
   1430 }
   1431 
   1432 func (sc *serverConn) shutDownIn(d time.Duration) {
   1433 	sc.serveG.check()
   1434 	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
   1435 }
   1436 
   1437 func (sc *serverConn) resetStream(se StreamError) {
   1438 	sc.serveG.check()
   1439 	sc.writeFrame(FrameWriteRequest{write: se})
   1440 	if st, ok := sc.streams[se.StreamID]; ok {
   1441 		st.resetQueued = true
   1442 	}
   1443 }
   1444 
   1445 // processFrameFromReader processes the serve loop's read from readFrameCh from the
   1446 // frame-reading goroutine.
   1447 // processFrameFromReader returns whether the connection should be kept open.
   1448 func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
   1449 	sc.serveG.check()
   1450 	err := res.err
   1451 	if err != nil {
   1452 		if err == ErrFrameTooLarge {
   1453 			sc.goAway(ErrCodeFrameSize)
   1454 			return true // goAway will close the loop
   1455 		}
   1456 		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
   1457 		if clientGone {
   1458 			// TODO: could we also get into this state if
   1459 			// the peer does a half close
   1460 			// (e.g. CloseWrite) because they're done
   1461 			// sending frames but they're still wanting
   1462 			// our open replies?  Investigate.
   1463 			// TODO: add CloseWrite to crypto/tls.Conn first
   1464 			// so we have a way to test this? I suppose
   1465 			// just for testing we could have a non-TLS mode.
   1466 			return false
   1467 		}
   1468 	} else {
   1469 		f := res.f
   1470 		if VerboseLogs {
   1471 			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
   1472 		}
   1473 		err = sc.processFrame(f)
   1474 		if err == nil {
   1475 			return true
   1476 		}
   1477 	}
   1478 
   1479 	switch ev := err.(type) {
   1480 	case StreamError:
   1481 		sc.resetStream(ev)
   1482 		return true
   1483 	case goAwayFlowError:
   1484 		sc.goAway(ErrCodeFlowControl)
   1485 		return true
   1486 	case ConnectionError:
   1487 		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
   1488 		sc.goAway(ErrCode(ev))
   1489 		return true // goAway will handle shutdown
   1490 	default:
   1491 		if res.err != nil {
   1492 			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
   1493 		} else {
   1494 			sc.logf("http2: server closing client connection: %v", err)
   1495 		}
   1496 		return false
   1497 	}
   1498 }
   1499 
   1500 func (sc *serverConn) processFrame(f Frame) error {
   1501 	sc.serveG.check()
   1502 
   1503 	// First frame received must be SETTINGS.
   1504 	if !sc.sawFirstSettings {
   1505 		if _, ok := f.(*SettingsFrame); !ok {
   1506 			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
   1507 		}
   1508 		sc.sawFirstSettings = true
   1509 	}
   1510 
   1511 	// Discard frames for streams initiated after the identified last
   1512 	// stream sent in a GOAWAY, or all frames after sending an error.
   1513 	// We still need to return connection-level flow control for DATA frames.
   1514 	// RFC 9113 Section 6.8.
   1515 	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
   1516 
   1517 		if f, ok := f.(*DataFrame); ok {
   1518 			if !sc.inflow.take(f.Length) {
   1519 				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
   1520 			}
   1521 			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
   1522 		}
   1523 		return nil
   1524 	}
   1525 
   1526 	switch f := f.(type) {
   1527 	case *SettingsFrame:
   1528 		return sc.processSettings(f)
   1529 	case *MetaHeadersFrame:
   1530 		return sc.processHeaders(f)
   1531 	case *WindowUpdateFrame:
   1532 		return sc.processWindowUpdate(f)
   1533 	case *PingFrame:
   1534 		return sc.processPing(f)
   1535 	case *DataFrame:
   1536 		return sc.processData(f)
   1537 	case *RSTStreamFrame:
   1538 		return sc.processResetStream(f)
   1539 	case *PriorityFrame:
   1540 		return sc.processPriority(f)
   1541 	case *GoAwayFrame:
   1542 		return sc.processGoAway(f)
   1543 	case *PushPromiseFrame:
   1544 		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
   1545 		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
   1546 		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
   1547 	default:
   1548 		sc.vlogf("http2: server ignoring frame: %v", f.Header())
   1549 		return nil
   1550 	}
   1551 }
   1552 
   1553 func (sc *serverConn) processPing(f *PingFrame) error {
   1554 	sc.serveG.check()
   1555 	if f.IsAck() {
   1556 		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
   1557 		// containing this flag."
   1558 		return nil
   1559 	}
   1560 	if f.StreamID != 0 {
   1561 		// "PING frames are not associated with any individual
   1562 		// stream. If a PING frame is received with a stream
   1563 		// identifier field value other than 0x0, the recipient MUST
   1564 		// respond with a connection error (Section 5.4.1) of type
   1565 		// PROTOCOL_ERROR."
   1566 		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
   1567 	}
   1568 	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
   1569 	return nil
   1570 }
   1571 
   1572 func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
   1573 	sc.serveG.check()
   1574 	switch {
   1575 	case f.StreamID != 0: // stream-level flow control
   1576 		state, st := sc.state(f.StreamID)
   1577 		if state == stateIdle {
   1578 			// Section 5.1: "Receiving any frame other than HEADERS
   1579 			// or PRIORITY on a stream in this state MUST be
   1580 			// treated as a connection error (Section 5.4.1) of
   1581 			// type PROTOCOL_ERROR."
   1582 			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
   1583 		}
   1584 		if st == nil {
   1585 			// "WINDOW_UPDATE can be sent by a peer that has sent a
   1586 			// frame bearing the END_STREAM flag. This means that a
   1587 			// receiver could receive a WINDOW_UPDATE frame on a "half
   1588 			// closed (remote)" or "closed" stream. A receiver MUST
   1589 			// NOT treat this as an error, see Section 5.1."
   1590 			return nil
   1591 		}
   1592 		if !st.flow.add(int32(f.Increment)) {
   1593 			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
   1594 		}
   1595 	default: // connection-level flow control
   1596 		if !sc.flow.add(int32(f.Increment)) {
   1597 			return goAwayFlowError{}
   1598 		}
   1599 	}
   1600 	sc.scheduleFrameWrite()
   1601 	return nil
   1602 }
   1603 
   1604 func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
   1605 	sc.serveG.check()
   1606 
   1607 	state, st := sc.state(f.StreamID)
   1608 	if state == stateIdle {
   1609 		// 6.4 "RST_STREAM frames MUST NOT be sent for a
   1610 		// stream in the "idle" state. If a RST_STREAM frame
   1611 		// identifying an idle stream is received, the
   1612 		// recipient MUST treat this as a connection error
   1613 		// (Section 5.4.1) of type PROTOCOL_ERROR."
   1614 		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
   1615 	}
   1616 	if st != nil {
   1617 		st.cancelCtx()
   1618 		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
   1619 	}
   1620 	return nil
   1621 }
   1622 
   1623 func (sc *serverConn) closeStream(st *stream, err error) {
   1624 	sc.serveG.check()
   1625 	if st.state == stateIdle || st.state == stateClosed {
   1626 		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
   1627 	}
   1628 	st.state = stateClosed
   1629 	if st.readDeadline != nil {
   1630 		st.readDeadline.Stop()
   1631 	}
   1632 	if st.writeDeadline != nil {
   1633 		st.writeDeadline.Stop()
   1634 	}
   1635 	if st.isPushed() {
   1636 		sc.curPushedStreams--
   1637 	} else {
   1638 		sc.curClientStreams--
   1639 	}
   1640 	delete(sc.streams, st.id)
   1641 	if len(sc.streams) == 0 {
   1642 		sc.setConnState(http.StateIdle)
   1643 		if sc.srv.IdleTimeout != 0 {
   1644 			sc.idleTimer.Reset(sc.srv.IdleTimeout)
   1645 		}
   1646 		if h1ServerKeepAlivesDisabled(sc.hs) {
   1647 			sc.startGracefulShutdownInternal()
   1648 		}
   1649 	}
   1650 	if p := st.body; p != nil {
   1651 		// Return any buffered unread bytes worth of conn-level flow control.
   1652 		// See golang.org/issue/16481
   1653 		sc.sendWindowUpdate(nil, p.Len())
   1654 
   1655 		p.CloseWithError(err)
   1656 	}
   1657 	if e, ok := err.(StreamError); ok {
   1658 		if e.Cause != nil {
   1659 			err = e.Cause
   1660 		} else {
   1661 			err = errStreamClosed
   1662 		}
   1663 	}
   1664 	st.closeErr = err
   1665 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
   1666 	sc.writeSched.CloseStream(st.id)
   1667 }
   1668 
   1669 func (sc *serverConn) processSettings(f *SettingsFrame) error {
   1670 	sc.serveG.check()
   1671 	if f.IsAck() {
   1672 		sc.unackedSettings--
   1673 		if sc.unackedSettings < 0 {
   1674 			// Why is the peer ACKing settings we never sent?
   1675 			// The spec doesn't mention this case, but
   1676 			// hang up on them anyway.
   1677 			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
   1678 		}
   1679 		return nil
   1680 	}
   1681 	if f.NumSettings() > 100 || f.HasDuplicates() {
   1682 		// This isn't actually in the spec, but hang up on
   1683 		// suspiciously large settings frames or those with
   1684 		// duplicate entries.
   1685 		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
   1686 	}
   1687 	if err := f.ForeachSetting(sc.processSetting); err != nil {
   1688 		return err
   1689 	}
   1690 	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
   1691 	// acknowledged individually, even if multiple are received before the ACK.
   1692 	sc.needToSendSettingsAck = true
   1693 	sc.scheduleFrameWrite()
   1694 	return nil
   1695 }
   1696 
   1697 func (sc *serverConn) processSetting(s Setting) error {
   1698 	sc.serveG.check()
   1699 	if err := s.Valid(); err != nil {
   1700 		return err
   1701 	}
   1702 	if VerboseLogs {
   1703 		sc.vlogf("http2: server processing setting %v", s)
   1704 	}
   1705 	switch s.ID {
   1706 	case SettingHeaderTableSize:
   1707 		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
   1708 	case SettingEnablePush:
   1709 		sc.pushEnabled = s.Val != 0
   1710 	case SettingMaxConcurrentStreams:
   1711 		sc.clientMaxStreams = s.Val
   1712 	case SettingInitialWindowSize:
   1713 		return sc.processSettingInitialWindowSize(s.Val)
   1714 	case SettingMaxFrameSize:
   1715 		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
   1716 	case SettingMaxHeaderListSize:
   1717 		sc.peerMaxHeaderListSize = s.Val
   1718 	default:
   1719 		// Unknown setting: "An endpoint that receives a SETTINGS
   1720 		// frame with any unknown or unsupported identifier MUST
   1721 		// ignore that setting."
   1722 		if VerboseLogs {
   1723 			sc.vlogf("http2: server ignoring unknown setting %v", s)
   1724 		}
   1725 	}
   1726 	return nil
   1727 }
   1728 
   1729 func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
   1730 	sc.serveG.check()
   1731 	// Note: val already validated to be within range by
   1732 	// processSetting's Valid call.
   1733 
   1734 	// "A SETTINGS frame can alter the initial flow control window
   1735 	// size for all current streams. When the value of
   1736 	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
   1737 	// adjust the size of all stream flow control windows that it
   1738 	// maintains by the difference between the new value and the
   1739 	// old value."
   1740 	old := sc.initialStreamSendWindowSize
   1741 	sc.initialStreamSendWindowSize = int32(val)
   1742 	growth := int32(val) - old // may be negative
   1743 	for _, st := range sc.streams {
   1744 		if !st.flow.add(growth) {
   1745 			// 6.9.2 Initial Flow Control Window Size
   1746 			// "An endpoint MUST treat a change to
   1747 			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
   1748 			// control window to exceed the maximum size as a
   1749 			// connection error (Section 5.4.1) of type
   1750 			// FLOW_CONTROL_ERROR."
   1751 			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
   1752 		}
   1753 	}
   1754 	return nil
   1755 }
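
// Worked example with illustrative numbers: if the old value was the
// default 65535 and the peer now advertises 131072, growth is +65537
// and every stream's send window grows by that amount, so a stream
// with 10000 bytes of quota left may now send 75537. If the peer
// instead shrinks the setting to 16384, growth is -49151 and a
// stream's window can go negative; writes on that stream stall until
// enough WINDOW_UPDATE frames arrive to make it positive again.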
   1756 
   1757 func (sc *serverConn) processData(f *DataFrame) error {
   1758 	sc.serveG.check()
   1759 	id := f.Header().StreamID
   1760 
   1761 	data := f.Data()
   1762 	state, st := sc.state(id)
   1763 	if id == 0 || state == stateIdle {
   1764 		// Section 6.1: "DATA frames MUST be associated with a
   1765 		// stream. If a DATA frame is received whose stream
   1766 		// identifier field is 0x0, the recipient MUST respond
   1767 		// with a connection error (Section 5.4.1) of type
   1768 		// PROTOCOL_ERROR."
   1769 		//
   1770 		// Section 5.1: "Receiving any frame other than HEADERS
   1771 		// or PRIORITY on a stream in this state MUST be
   1772 		// treated as a connection error (Section 5.4.1) of
   1773 		// type PROTOCOL_ERROR."
   1774 		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
   1775 	}
   1776 
   1777 	// "If a DATA frame is received whose stream is not in "open"
   1778 	// or "half closed (local)" state, the recipient MUST respond
   1779 	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
   1780 	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
   1781 		// This includes sending a RST_STREAM if the stream is
   1782 		// in stateHalfClosedLocal (which currently means that
   1783 		// the http.Handler returned, so it's done reading &
   1784 		// done writing). Try to stop the client from sending
   1785 		// more DATA.
   1786 
   1787 		// But still enforce their connection-level flow control,
   1788 		// and return any flow control bytes since we're not going
   1789 		// to consume them.
   1790 		if !sc.inflow.take(f.Length) {
   1791 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
   1792 		}
   1793 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
   1794 
   1795 		if st != nil && st.resetQueued {
   1796 			// Already have a stream error in flight. Don't send another.
   1797 			return nil
   1798 		}
   1799 		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
   1800 	}
   1801 	if st.body == nil {
   1802 		panic("internal error: should have a body in this state")
   1803 	}
   1804 
   1805 	// Sender sending more than they'd declared?
   1806 	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
   1807 		if !sc.inflow.take(f.Length) {
   1808 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
   1809 		}
   1810 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
   1811 
   1812 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
   1813 		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
   1814 		// value of a content-length header field does not equal the sum of the
   1815 		// DATA frame payload lengths that form the body.
   1816 		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
   1817 	}
   1818 	if f.Length > 0 {
   1819 		// Check whether the client has flow control quota.
   1820 		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
   1821 			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
   1822 		}
   1823 
   1824 		if len(data) > 0 {
   1825 			st.bodyBytes += int64(len(data))
   1826 			wrote, err := st.body.Write(data)
   1827 			if err != nil {
   1828 				// The handler has closed the request body.
   1829 				// Return the connection-level flow control for the discarded data,
   1830 				// but not the stream-level flow control.
   1831 				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
   1832 				return nil
   1833 			}
   1834 			if wrote != len(data) {
   1835 				panic("internal error: bad Writer")
   1836 			}
   1837 		}
   1838 
   1839 		// Return any padded flow control now, since we won't
   1840 		// refund it later on body reads.
   1841 		// Call sendWindowUpdate even if there is no padding,
   1842 		// to return buffered flow control credit if the sent
   1843 		// window has shrunk.
   1844 		pad := int32(f.Length) - int32(len(data))
   1845 		sc.sendWindowUpdate32(nil, pad)
   1846 		sc.sendWindowUpdate32(st, pad)
   1847 	}
   1848 	if f.StreamEnded() {
   1849 		st.endStream()
   1850 	}
   1851 	return nil
   1852 }
   1853 
   1854 func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
   1855 	sc.serveG.check()
   1856 	if f.ErrCode != ErrCodeNo {
   1857 		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
   1858 	} else {
   1859 		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
   1860 	}
   1861 	sc.startGracefulShutdownInternal()
   1862 	// http://tools.ietf.org/html/rfc7540#section-6.8
   1863 	// We should not create any new streams, which means we should disable push.
   1864 	sc.pushEnabled = false
   1865 	return nil
   1866 }
   1867 
   1868 // isPushed reports whether the stream is server-initiated.
   1869 func (st *stream) isPushed() bool {
   1870 	return st.id%2 == 0
   1871 }
   1872 
   1873 // endStream closes a Request.Body's pipe. It is called when a DATA
   1874 // frame says a request body is over (or after trailers).
   1875 func (st *stream) endStream() {
   1876 	sc := st.sc
   1877 	sc.serveG.check()
   1878 
   1879 	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
   1880 		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
   1881 			st.declBodyBytes, st.bodyBytes))
   1882 	} else {
   1883 		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
   1884 		st.body.CloseWithError(io.EOF)
   1885 	}
   1886 	st.state = stateHalfClosedRemote
   1887 }
   1888 
   1889 // copyTrailersToHandlerRequest is run in the Handler's goroutine in
   1890 // its Request.Body.Read just before it gets io.EOF.
   1891 func (st *stream) copyTrailersToHandlerRequest() {
   1892 	for k, vv := range st.trailer {
   1893 		if _, ok := st.reqTrailer[k]; ok {
   1894 			// Only copy it over if it was pre-declared.
   1895 			st.reqTrailer[k] = vv
   1896 		}
   1897 	}
   1898 }
   1899 
   1900 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
   1901 // when the stream's ReadTimeout has fired.
   1902 func (st *stream) onReadTimeout() {
   1903 	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
   1904 	// returning the bare error.
   1905 	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
   1906 }
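
// Illustrative handler sketch (not from this file): because the error
// above wraps os.ErrDeadlineExceeded, application code can detect an
// expired per-stream read deadline with errors.Is.
//
//	func upload(w http.ResponseWriter, r *http.Request) {
//		body, err := io.ReadAll(r.Body)
//		if errors.Is(err, os.ErrDeadlineExceeded) {
//			// ReadTimeout (or SetReadDeadline) fired while the
//			// client was still sending the request body.
//			http.Error(w, "request body timed out", http.StatusRequestTimeout)
//			return
//		}
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusBadRequest)
//			return
//		}
//		w.Write(body)
//	}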
   1907 
   1908 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
   1909 // when the stream's WriteTimeout has fired.
   1910 func (st *stream) onWriteTimeout() {
   1911 	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
   1912 		StreamID: st.id,
   1913 		Code:     ErrCodeInternal,
   1914 		Cause:    os.ErrDeadlineExceeded,
   1915 	}})
   1916 }
   1917 
   1918 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
   1919 	sc.serveG.check()
   1920 	id := f.StreamID
   1921 	// http://tools.ietf.org/html/rfc7540#section-5.1.1
   1922 	// Streams initiated by a client MUST use odd-numbered stream
   1923 	// identifiers. [...] An endpoint that receives an unexpected
   1924 	// stream identifier MUST respond with a connection error
   1925 	// (Section 5.4.1) of type PROTOCOL_ERROR.
   1926 	if id%2 != 1 {
   1927 		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
   1928 	}
   1929 	// A HEADERS frame can be used to create a new stream or
   1930 	// send a trailer for an open one. If we already have a stream
   1931 	// open, let it process its own HEADERS frame (trailers at this
   1932 	// point, if it's valid).
   1933 	if st := sc.streams[f.StreamID]; st != nil {
   1934 		if st.resetQueued {
   1935 			// We're sending RST_STREAM to close the stream, so don't bother
   1936 			// processing this frame.
   1937 			return nil
   1938 		}
   1939 		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
   1940 		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
   1941 		// this state, it MUST respond with a stream error (Section 5.4.2) of
   1942 		// type STREAM_CLOSED.
   1943 		if st.state == stateHalfClosedRemote {
   1944 			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
   1945 		}
   1946 		return st.processTrailerHeaders(f)
   1947 	}
   1948 
   1949 	// [...] The identifier of a newly established stream MUST be
   1950 	// numerically greater than all streams that the initiating
   1951 	// endpoint has opened or reserved. [...]  An endpoint that
   1952 	// receives an unexpected stream identifier MUST respond with
   1953 	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
   1954 	if id <= sc.maxClientStreamID {
   1955 		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
   1956 	}
   1957 	sc.maxClientStreamID = id
   1958 
   1959 	if sc.idleTimer != nil {
   1960 		sc.idleTimer.Stop()
   1961 	}
   1962 
   1963 	// http://tools.ietf.org/html/rfc7540#section-5.1.2
   1964 	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
   1965 	// endpoint that receives a HEADERS frame that causes their
   1966 	// advertised concurrent stream limit to be exceeded MUST treat
   1967 	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
   1968 	// or REFUSED_STREAM.
   1969 	if sc.curClientStreams+1 > sc.advMaxStreams {
   1970 		if sc.unackedSettings == 0 {
   1971 			// They should know better.
   1972 			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
   1973 		}
   1974 		// Assume it's a network race, where they just haven't
   1975 		// received our last SETTINGS update. But actually
   1976 		// this can't happen yet, because we don't yet provide
   1977 		// a way for users to adjust server parameters at
   1978 		// runtime.
   1979 		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
   1980 	}
   1981 
   1982 	initialState := stateOpen
   1983 	if f.StreamEnded() {
   1984 		initialState = stateHalfClosedRemote
   1985 	}
   1986 	st := sc.newStream(id, 0, initialState)
   1987 
   1988 	if f.HasPriority() {
   1989 		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
   1990 			return err
   1991 		}
   1992 		sc.writeSched.AdjustStream(st.id, f.Priority)
   1993 	}
   1994 
   1995 	rw, req, err := sc.newWriterAndRequest(st, f)
   1996 	if err != nil {
   1997 		return err
   1998 	}
   1999 	st.reqTrailer = req.Trailer
   2000 	if st.reqTrailer != nil {
   2001 		st.trailer = make(http.Header)
   2002 	}
   2003 	st.body = req.Body.(*requestBody).pipe // may be nil
   2004 	st.declBodyBytes = req.ContentLength
   2005 
   2006 	handler := sc.handler.ServeHTTP
   2007 	if f.Truncated {
   2008 		// Their header list was too long. Send a 431 error.
   2009 		handler = handleHeaderListTooLong
   2010 	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
   2011 		handler = new400Handler(err)
   2012 	}
   2013 
   2014 	// The net/http package sets the read deadline from the
   2015 	// http.Server.ReadTimeout during the TLS handshake, but then
   2016 	// passes the connection off to us with the deadline already
   2017 	// set. Disarm it here after the request headers are read,
   2018 	// similar to how the http1 server works. Here it's
   2019 	// technically more like the http1 Server's ReadHeaderTimeout
   2020 	// (in Go 1.8), though. That's a more sane option anyway.
   2021 	if sc.hs.ReadTimeout != 0 {
   2022 		sc.conn.SetReadDeadline(time.Time{})
   2023 		if st.body != nil {
   2024 			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
   2025 		}
   2026 	}
   2027 
   2028 	go sc.runHandler(rw, req, handler)
   2029 	return nil
   2030 }
   2031 
   2032 func (sc *serverConn) upgradeRequest(req *http.Request) {
   2033 	sc.serveG.check()
   2034 	id := uint32(1)
   2035 	sc.maxClientStreamID = id
   2036 	st := sc.newStream(id, 0, stateHalfClosedRemote)
   2037 	st.reqTrailer = req.Trailer
   2038 	if st.reqTrailer != nil {
   2039 		st.trailer = make(http.Header)
   2040 	}
   2041 	rw := sc.newResponseWriter(st, req)
   2042 
   2043 	// Disable any read deadline set by the net/http package
   2044 	// prior to the upgrade.
   2045 	if sc.hs.ReadTimeout != 0 {
   2046 		sc.conn.SetReadDeadline(time.Time{})
   2047 	}
   2048 
   2049 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
   2050 }
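
// Illustrative sketch (not from this file): the upgrade path above is
// usually reached through the golang.org/x/net/http2/h2c wrapper,
// which accepts both the "Upgrade: h2c" handshake and the direct
// prior-knowledge preface on a plaintext listener.
//
//	h2s := &http2.Server{}
//	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "proto: %s\n", r.Proto) // "HTTP/2.0" once upgraded
//	})
//	srv := &http.Server{
//		Addr:    ":8080",
//		Handler: h2c.NewHandler(handler, h2s),
//	}
//	log.Fatal(srv.ListenAndServe())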
   2051 
   2052 func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
   2053 	sc := st.sc
   2054 	sc.serveG.check()
   2055 	if st.gotTrailerHeader {
   2056 		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
   2057 	}
   2058 	st.gotTrailerHeader = true
   2059 	if !f.StreamEnded() {
   2060 		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
   2061 	}
   2062 
   2063 	if len(f.PseudoFields()) > 0 {
   2064 		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
   2065 	}
   2066 	if st.trailer != nil {
   2067 		for _, hf := range f.RegularFields() {
   2068 			key := sc.canonicalHeader(hf.Name)
   2069 			if !httpguts.ValidTrailerHeader(key) {
   2070 				// TODO: send more details to the peer somehow. But http2 has
   2071 				// no way to send debug data at a stream level. Discuss with
   2072 				// HTTP folk.
   2073 				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
   2074 			}
   2075 			st.trailer[key] = append(st.trailer[key], hf.Value)
   2076 		}
   2077 	}
   2078 	st.endStream()
   2079 	return nil
   2080 }
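
// Illustrative client-side sketch (not from this file): the trailers
// parsed above arrive when a client declares them in Request.Trailer.
// Shown over HTTP/2, where they are carried by the final HEADERS
// frame; the URL and values are placeholders.
//
//	req, err := http.NewRequest("PUT", "https://example.test/upload",
//		strings.NewReader("payload"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	req.Trailer = http.Header{}
//	// The key must be present before Do; the value may be set (or
//	// updated) any time before the request body is fully consumed.
//	req.Trailer.Set("Checksum", "abc123")
//	resp, err := http.DefaultClient.Do(req)
//	// ... handle resp and err as usual.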
   2081 
   2082 func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
   2083 	if streamID == p.StreamDep {
   2084 		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
   2085 		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
   2086 		// Section 5.3.3 says that a stream can depend on one of its dependencies,
   2087 		// so it's only self-dependencies that are forbidden.
   2088 		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
   2089 	}
   2090 	return nil
   2091 }
   2092 
   2093 func (sc *serverConn) processPriority(f *PriorityFrame) error {
   2094 	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
   2095 		return err
   2096 	}
   2097 	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
   2098 	return nil
   2099 }
   2100 
   2101 func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
   2102 	sc.serveG.check()
   2103 	if id == 0 {
   2104 		panic("internal error: cannot create stream with id 0")
   2105 	}
   2106 
   2107 	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
   2108 	st := &stream{
   2109 		sc:        sc,
   2110 		id:        id,
   2111 		state:     state,
   2112 		ctx:       ctx,
   2113 		cancelCtx: cancelCtx,
   2114 	}
   2115 	st.cw.Init()
   2116 	st.flow.conn = &sc.flow // link to conn-level counter
   2117 	st.flow.add(sc.initialStreamSendWindowSize)
   2118 	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
   2119 	if sc.hs.WriteTimeout != 0 {
   2120 		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
   2121 	}
   2122 
   2123 	sc.streams[id] = st
   2124 	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
   2125 	if st.isPushed() {
   2126 		sc.curPushedStreams++
   2127 	} else {
   2128 		sc.curClientStreams++
   2129 	}
   2130 	if sc.curOpenStreams() == 1 {
   2131 		sc.setConnState(http.StateActive)
   2132 	}
   2133 
   2134 	return st
   2135 }
   2136 
   2137 func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
   2138 	sc.serveG.check()
   2139 
   2140 	rp := requestParam{
   2141 		method:    f.PseudoValue("method"),
   2142 		scheme:    f.PseudoValue("scheme"),
   2143 		authority: f.PseudoValue("authority"),
   2144 		path:      f.PseudoValue("path"),
   2145 	}
   2146 
   2147 	isConnect := rp.method == "CONNECT"
   2148 	if isConnect {
   2149 		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
   2150 			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
   2151 		}
   2152 	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
   2153 		// See 8.1.2.6 Malformed Requests and Responses:
   2154 		//
   2155 		// Malformed requests or responses that are detected
   2156 		// MUST be treated as a stream error (Section 5.4.2)
   2157 		// of type PROTOCOL_ERROR."
   2158 		//
   2159 		// 8.1.2.3 Request Pseudo-Header Fields
   2160 		// "All HTTP/2 requests MUST include exactly one valid
   2161 		// value for the :method, :scheme, and :path
   2162 		// pseudo-header fields"
   2163 		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
   2164 	}
   2165 
   2166 	rp.header = make(http.Header)
   2167 	for _, hf := range f.RegularFields() {
   2168 		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
   2169 	}
   2170 	if rp.authority == "" {
   2171 		rp.authority = rp.header.Get("Host")
   2172 	}
   2173 
   2174 	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
   2175 	if err != nil {
   2176 		return nil, nil, err
   2177 	}
   2178 	bodyOpen := !f.StreamEnded()
   2179 	if bodyOpen {
   2180 		if vv, ok := rp.header["Content-Length"]; ok {
   2181 			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
   2182 				req.ContentLength = int64(cl)
   2183 			} else {
   2184 				req.ContentLength = 0
   2185 			}
   2186 		} else {
   2187 			req.ContentLength = -1
   2188 		}
   2189 		req.Body.(*requestBody).pipe = &pipe{
   2190 			b: &dataBuffer{expected: req.ContentLength},
   2191 		}
   2192 	}
   2193 	return rw, req, nil
   2194 }
   2195 
   2196 type requestParam struct {
   2197 	method                  string
   2198 	scheme, authority, path string
   2199 	header                  http.Header
   2200 }
   2201 
   2202 func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
   2203 	sc.serveG.check()
   2204 
   2205 	var tlsState *tls.ConnectionState // nil if not scheme https
   2206 	if rp.scheme == "https" {
   2207 		tlsState = sc.tlsState
   2208 	}
   2209 
   2210 	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
   2211 	if needsContinue {
   2212 		rp.header.Del("Expect")
   2213 	}
   2214 	// Merge Cookie headers into one "; "-delimited value.
   2215 	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
   2216 		rp.header.Set("Cookie", strings.Join(cookies, "; "))
   2217 	}
   2218 
   2219 	// Setup Trailers
   2220 	var trailer http.Header
   2221 	for _, v := range rp.header["Trailer"] {
   2222 		for _, key := range strings.Split(v, ",") {
   2223 			key = http.CanonicalHeaderKey(textproto.TrimString(key))
   2224 			switch key {
   2225 			case "Transfer-Encoding", "Trailer", "Content-Length":
   2226 				// Bogus. (copy of http1 rules)
   2227 				// Ignore.
   2228 			default:
   2229 				if trailer == nil {
   2230 					trailer = make(http.Header)
   2231 				}
   2232 				trailer[key] = nil
   2233 			}
   2234 		}
   2235 	}
   2236 	delete(rp.header, "Trailer")
   2237 
   2238 	var url_ *url.URL
   2239 	var requestURI string
   2240 	if rp.method == "CONNECT" {
   2241 		url_ = &url.URL{Host: rp.authority}
   2242 		requestURI = rp.authority // mimic HTTP/1 server behavior
   2243 	} else {
   2244 		var err error
   2245 		url_, err = url.ParseRequestURI(rp.path)
   2246 		if err != nil {
   2247 			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
   2248 		}
   2249 		requestURI = rp.path
   2250 	}
   2251 
   2252 	body := &requestBody{
   2253 		conn:          sc,
   2254 		stream:        st,
   2255 		needsContinue: needsContinue,
   2256 	}
   2257 	req := &http.Request{
   2258 		Method:     rp.method,
   2259 		URL:        url_,
   2260 		RemoteAddr: sc.remoteAddrStr,
   2261 		Header:     rp.header,
   2262 		RequestURI: requestURI,
   2263 		Proto:      "HTTP/2.0",
   2264 		ProtoMajor: 2,
   2265 		ProtoMinor: 0,
   2266 		TLS:        tlsState,
   2267 		Host:       rp.authority,
   2268 		Body:       body,
   2269 		Trailer:    trailer,
   2270 	}
   2271 	req = req.WithContext(st.ctx)
   2272 
   2273 	rw := sc.newResponseWriter(st, req)
   2274 	return rw, req, nil
   2275 }
   2276 
   2277 func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
   2278 	rws := responseWriterStatePool.Get().(*responseWriterState)
   2279 	bwSave := rws.bw
   2280 	*rws = responseWriterState{} // zero all the fields
   2281 	rws.conn = sc
   2282 	rws.bw = bwSave
   2283 	rws.bw.Reset(chunkWriter{rws})
   2284 	rws.stream = st
   2285 	rws.req = req
   2286 	return &responseWriter{rws: rws}
   2287 }
   2288 
   2289 // Run on its own goroutine.
   2290 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
   2291 	didPanic := true
   2292 	defer func() {
   2293 		rw.rws.stream.cancelCtx()
   2294 		if req.MultipartForm != nil {
   2295 			req.MultipartForm.RemoveAll()
   2296 		}
   2297 		if didPanic {
   2298 			e := recover()
   2299 			sc.writeFrameFromHandler(FrameWriteRequest{
   2300 				write:  handlerPanicRST{rw.rws.stream.id},
   2301 				stream: rw.rws.stream,
   2302 			})
   2303 			// Same as net/http:
   2304 			if e != nil && e != http.ErrAbortHandler {
   2305 				const size = 64 << 10
   2306 				buf := make([]byte, size)
   2307 				buf = buf[:runtime.Stack(buf, false)]
   2308 				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
   2309 			}
   2310 			return
   2311 		}
   2312 		rw.handlerDone()
   2313 	}()
   2314 	handler(rw, req)
   2315 	didPanic = false
   2316 }
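
// Illustrative sketch (not from this file): a handler can abort its
// response without the stack-trace log above by panicking with the
// sentinel http.ErrAbortHandler; the backend URL is a placeholder.
//
//	func proxy(w http.ResponseWriter, r *http.Request) {
//		resp, err := http.Get("http://backend.internal/data")
//		if err != nil {
//			// The client sees the stream reset (RST_STREAM), and
//			// the recover above skips logging for this sentinel.
//			panic(http.ErrAbortHandler)
//		}
//		defer resp.Body.Close()
//		io.Copy(w, resp.Body)
//	}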
   2317 
   2318 func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
   2319 	// 10.5.1 Limits on Header Block Size:
   2320 	// .. "A server that receives a larger header block than it is
   2321 	// willing to handle can send an HTTP 431 (Request Header Fields Too
   2322 	// Large) status code"
   2323 	const statusRequestHeaderFieldsTooLarge = 431 // http.StatusRequestHeaderFieldsTooLarge is only in Go 1.6+
   2324 	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
   2325 	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
   2326 }
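
// Illustrative sketch (not from this file): the limit that leads to
// f.Truncated (and therefore this 431 handler) is advertised to the
// peer as SETTINGS_MAX_HEADER_LIST_SIZE and is derived, with some
// per-field overhead, from http.Server.MaxHeaderBytes, falling back
// to the net/http default when unset; mux is a placeholder.
//
//	srv := &http.Server{
//		Addr:           ":8443",
//		Handler:        mux,
//		MaxHeaderBytes: 16 << 10, // ~16 KB of request header data
//	}
//	http2.ConfigureServer(srv, &http2.Server{})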
   2327 
   2328 // called from handler goroutines.
   2329 // h may be nil.
   2330 func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
   2331 	sc.serveG.checkNotOn() // NOT on
   2332 	var errc chan error
   2333 	if headerData.h != nil {
   2334 		// If there's a header map (which we don't own), we have to block on
   2335 		// this frame being written so that an http.Flush mid-handler
   2336 		// writes out the correct value of keys before a handler later
   2337 		// potentially mutates it.
   2338 		errc = errChanPool.Get().(chan error)
   2339 	}
   2340 	if err := sc.writeFrameFromHandler(FrameWriteRequest{
   2341 		write:  headerData,
   2342 		stream: st,
   2343 		done:   errc,
   2344 	}); err != nil {
   2345 		return err
   2346 	}
   2347 	if errc != nil {
   2348 		select {
   2349 		case err := <-errc:
   2350 			errChanPool.Put(errc)
   2351 			return err
   2352 		case <-sc.doneServing:
   2353 			return errClientDisconnected
   2354 		case <-st.cw:
   2355 			return errStreamClosed
   2356 		}
   2357 	}
   2358 	return nil
   2359 }
   2360 
   2361 // called from handler goroutines.
   2362 func (sc *serverConn) write100ContinueHeaders(st *stream) {
   2363 	sc.writeFrameFromHandler(FrameWriteRequest{
   2364 		write:  write100ContinueHeadersFrame{st.id},
   2365 		stream: st,
   2366 	})
   2367 }
   2368 
   2369 // A bodyReadMsg tells the server loop that the http.Handler read n
   2370 // bytes of the DATA from the client on the given stream.
   2371 type bodyReadMsg struct {
   2372 	st *stream
   2373 	n  int
   2374 }
   2375 
   2376 // called from handler goroutines.
   2377 // Notes that the handler for the given stream ID read n bytes of its body
   2378 // and schedules flow control tokens to be sent.
   2379 func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
   2380 	sc.serveG.checkNotOn() // NOT on
   2381 	if n > 0 {
   2382 		select {
   2383 		case sc.bodyReadCh <- bodyReadMsg{st, n}:
   2384 		case <-sc.doneServing:
   2385 		}
   2386 	}
   2387 }
   2388 
   2389 func (sc *serverConn) noteBodyRead(st *stream, n int) {
   2390 	sc.serveG.check()
   2391 	sc.sendWindowUpdate(nil, n) // conn-level
   2392 	if st.state != stateHalfClosedRemote && st.state != stateClosed {
   2393 		// Don't send this WINDOW_UPDATE if the stream is closed
   2394 		// remotely.
   2395 		sc.sendWindowUpdate(st, n)
   2396 	}
   2397 }
   2398 
   2399 // st may be nil for conn-level
   2400 func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
   2401 	sc.sendWindowUpdate(st, int(n))
   2402 }
   2403 
   2404 // st may be nil for conn-level
   2405 func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
   2406 	sc.serveG.check()
   2407 	var streamID uint32
   2408 	var send int32
   2409 	if st == nil {
   2410 		send = sc.inflow.add(n)
   2411 	} else {
   2412 		streamID = st.id
   2413 		send = st.inflow.add(n)
   2414 	}
   2415 	if send == 0 {
   2416 		return
   2417 	}
   2418 	sc.writeFrame(FrameWriteRequest{
   2419 		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
   2420 		stream: st,
   2421 	})
   2422 }
   2423 
   2424 // requestBody is the Handler's Request.Body type.
   2425 // Read and Close may be called concurrently.
   2426 type requestBody struct {
   2427 	_             incomparable
   2428 	stream        *stream
   2429 	conn          *serverConn
   2430 	closeOnce     sync.Once // for use by Close only
   2431 	sawEOF        bool      // for use by Read only
   2432 	pipe          *pipe     // non-nil if we have an HTTP entity message body
   2433 	needsContinue bool      // need to send a 100-continue
   2434 }
   2435 
   2436 func (b *requestBody) Close() error {
   2437 	b.closeOnce.Do(func() {
   2438 		if b.pipe != nil {
   2439 			b.pipe.BreakWithError(errClosedBody)
   2440 		}
   2441 	})
   2442 	return nil
   2443 }
   2444 
   2445 func (b *requestBody) Read(p []byte) (n int, err error) {
   2446 	if b.needsContinue {
   2447 		b.needsContinue = false
   2448 		b.conn.write100ContinueHeaders(b.stream)
   2449 	}
   2450 	if b.pipe == nil || b.sawEOF {
   2451 		return 0, io.EOF
   2452 	}
   2453 	n, err = b.pipe.Read(p)
   2454 	if err == io.EOF {
   2455 		b.sawEOF = true
   2456 	}
   2457 	if b.conn == nil && inTests {
   2458 		return
   2459 	}
   2460 	b.conn.noteBodyReadFromHandler(b.stream, n, err)
   2461 	return
   2462 }
   2463 
   2464 // responseWriter is the http.ResponseWriter implementation. It's
   2465 // intentionally small (1 pointer wide) to minimize garbage. The
   2466 // responseWriterState pointer inside is zeroed at the end of a
   2467 // request (in handlerDone) and calls on the responseWriter thereafter
   2468 // simply crash (caller's mistake), but the much larger responseWriterState
   2469 // and buffers are reused between multiple requests.
   2470 type responseWriter struct {
   2471 	rws *responseWriterState
   2472 }
   2473 
   2474 // Optional http.ResponseWriter interfaces implemented.
   2475 var (
   2476 	_ http.CloseNotifier = (*responseWriter)(nil)
   2477 	_ http.Flusher       = (*responseWriter)(nil)
   2478 	_ stringWriter       = (*responseWriter)(nil)
   2479 )
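
// Illustrative handler sketch (not from this file): Flusher is reached
// through the usual type assertion; the request context is the modern
// replacement for CloseNotifier.
//
//	func events(w http.ResponseWriter, r *http.Request) {
//		fl, ok := w.(http.Flusher)
//		if !ok {
//			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
//			return
//		}
//		for i := 0; i < 5; i++ {
//			select {
//			case <-r.Context().Done(): // client went away
//				return
//			case <-time.After(time.Second):
//			}
//			fmt.Fprintf(w, "tick %d\n", i)
//			fl.Flush() // push the buffered bytes out as a DATA frame
//		}
//	}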
   2480 
   2481 type responseWriterState struct {
   2482 	// immutable within a request:
   2483 	stream *stream
   2484 	req    *http.Request
   2485 	conn   *serverConn
   2486 
   2487 	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
   2488 	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
   2489 
   2490 	// mutated by http.Handler goroutine:
   2491 	handlerHeader http.Header // nil until Header() is called
   2492 	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
   2493 	trailers      []string    // set in writeChunk
   2494 	status        int         // status code passed to WriteHeader
   2495 	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
   2496 	sentHeader    bool        // have we sent the header frame?
   2497 	handlerDone   bool        // handler has finished
   2498 	dirty         bool        // a Write failed; don't reuse this responseWriterState
   2499 
   2500 	sentContentLen int64 // non-zero if handler set a Content-Length header
   2501 	wroteBytes     int64
   2502 
   2503 	closeNotifierMu sync.Mutex // guards closeNotifierCh
   2504 	closeNotifierCh chan bool  // nil until first used
   2505 }
   2506 
   2507 type chunkWriter struct{ rws *responseWriterState }
   2508 
   2509 func (cw chunkWriter) Write(p []byte) (n int, err error) {
   2510 	n, err = cw.rws.writeChunk(p)
   2511 	if err == errStreamClosed {
   2512 		// If writing failed because the stream has been closed,
   2513 		// return the reason it was closed.
   2514 		err = cw.rws.stream.closeErr
   2515 	}
   2516 	return n, err
   2517 }
   2518 
   2519 func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
   2520 
   2521 func (rws *responseWriterState) hasNonemptyTrailers() bool {
   2522 	for _, trailer := range rws.trailers {
   2523 		if _, ok := rws.handlerHeader[trailer]; ok {
   2524 			return true
   2525 		}
   2526 	}
   2527 	return false
   2528 }
   2529 
   2530 // declareTrailer is called for each Trailer header when the
   2531 // response header is written. It notes that a header will need to be
   2532 // written in the trailers at the end of the response.
   2533 func (rws *responseWriterState) declareTrailer(k string) {
   2534 	k = http.CanonicalHeaderKey(k)
   2535 	if !httpguts.ValidTrailerHeader(k) {
   2536 		// Forbidden by RFC 7230, section 4.1.2.
   2537 		rws.conn.logf("ignoring invalid trailer %q", k)
   2538 		return
   2539 	}
   2540 	if !strSliceContains(rws.trailers, k) {
   2541 		rws.trailers = append(rws.trailers, k)
   2542 	}
   2543 }
   2544 
   2545 // writeChunk writes chunks from the bufio.Writer. But because
   2546 // bufio.Writer may bypass its chunking, sometimes p may be
   2547 // arbitrarily large.
   2548 //
   2549 // writeChunk is also responsible (on the first chunk) for sending the
   2550 // response HEADERS frame.
   2551 func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
   2552 	if !rws.wroteHeader {
   2553 		rws.writeHeader(200)
   2554 	}
   2555 
   2556 	if rws.handlerDone {
   2557 		rws.promoteUndeclaredTrailers()
   2558 	}
   2559 
   2560 	isHeadResp := rws.req.Method == "HEAD"
   2561 	if !rws.sentHeader {
   2562 		rws.sentHeader = true
   2563 		var ctype, clen string
   2564 		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
   2565 			rws.snapHeader.Del("Content-Length")
   2566 			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
   2567 				rws.sentContentLen = int64(cl)
   2568 			} else {
   2569 				clen = ""
   2570 			}
   2571 		}
   2572 		_, hasContentLength := rws.snapHeader["Content-Length"]
   2573 		if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
   2574 			clen = strconv.Itoa(len(p))
   2575 		}
   2576 		_, hasContentType := rws.snapHeader["Content-Type"]
   2577 		// If the Content-Encoding is non-blank, we shouldn't
   2578 		// sniff the body. See Issue golang.org/issue/31753.
   2579 		ce := rws.snapHeader.Get("Content-Encoding")
   2580 		hasCE := len(ce) > 0
   2581 		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
   2582 			ctype = http.DetectContentType(p)
   2583 		}
   2584 		var date string
   2585 		if _, ok := rws.snapHeader["Date"]; !ok {
   2586 			// TODO(bradfitz): be faster here, like net/http? measure.
   2587 			date = time.Now().UTC().Format(http.TimeFormat)
   2588 		}
   2589 
   2590 		for _, v := range rws.snapHeader["Trailer"] {
   2591 			foreachHeaderElement(v, rws.declareTrailer)
   2592 		}
   2593 
   2594 		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
   2595 		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
   2596 		// down the TCP connection when idle, like we do for HTTP/1.
   2597 		// TODO: remove more Connection-specific header fields here, in addition
   2598 		// to "Connection".
   2599 		if _, ok := rws.snapHeader["Connection"]; ok {
   2600 			v := rws.snapHeader.Get("Connection")
   2601 			delete(rws.snapHeader, "Connection")
   2602 			if v == "close" {
   2603 				rws.conn.startGracefulShutdown()
   2604 			}
   2605 		}
   2606 
   2607 		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
   2608 		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
   2609 			streamID:      rws.stream.id,
   2610 			httpResCode:   rws.status,
   2611 			h:             rws.snapHeader,
   2612 			endStream:     endStream,
   2613 			contentType:   ctype,
   2614 			contentLength: clen,
   2615 			date:          date,
   2616 		})
   2617 		if err != nil {
   2618 			rws.dirty = true
   2619 			return 0, err
   2620 		}
   2621 		if endStream {
   2622 			return 0, nil
   2623 		}
   2624 	}
   2625 	if isHeadResp {
   2626 		return len(p), nil
   2627 	}
   2628 	if len(p) == 0 && !rws.handlerDone {
   2629 		return 0, nil
   2630 	}
   2631 
   2632 	// only send trailers if they have actually been defined by the
   2633 	// server handler.
   2634 	hasNonemptyTrailers := rws.hasNonemptyTrailers()
   2635 	endStream := rws.handlerDone && !hasNonemptyTrailers
   2636 	if len(p) > 0 || endStream {
   2637 		// only send a 0 byte DATA frame if we're ending the stream.
   2638 		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
   2639 			rws.dirty = true
   2640 			return 0, err
   2641 		}
   2642 	}
   2643 
   2644 	if rws.handlerDone && hasNonemptyTrailers {
   2645 		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
   2646 			streamID:  rws.stream.id,
   2647 			h:         rws.handlerHeader,
   2648 			trailers:  rws.trailers,
   2649 			endStream: true,
   2650 		})
   2651 		if err != nil {
   2652 			rws.dirty = true
   2653 		}
   2654 		return len(p), err
   2655 	}
   2656 	return len(p), nil
   2657 }
   2658 
   2659 // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
   2660 // that, if present, signals that the map entry is actually for
   2661 // the response trailers, and not the response headers. The prefix
   2662 // is stripped after the ServeHTTP call finishes and the values are
   2663 // sent in the trailers.
   2664 //
   2665 // This mechanism is intended only for trailers that are not known
   2666 // prior to the headers being written. If the set of trailers is fixed
   2667 // or known before the header is written, the normal Go trailers mechanism
   2668 // is preferred:
   2669 //
   2670 //	https://golang.org/pkg/net/http/#ResponseWriter
   2671 //	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
   2672 const TrailerPrefix = "Trailer:"
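
// Illustrative handler sketch (not from this file) showing both the
// predeclared mechanism and the TrailerPrefix escape hatch; the
// trailer names and values are placeholders.
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		// Predeclared trailer (the preferred net/http mechanism):
//		w.Header().Set("Trailer", "Checksum")
//
//		h := sha256.New()
//		io.Copy(io.MultiWriter(w, h), strings.NewReader("hello, trailers\n"))
//
//		// Value filled in after the body has been written:
//		w.Header().Set("Checksum", hex.EncodeToString(h.Sum(nil)))
//
//		// Trailer not known until mid-stream, via the prefix:
//		w.Header().Set(http2.TrailerPrefix+"X-Elapsed", "42ms")
//	}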
   2673 
   2674 // promoteUndeclaredTrailers permits http.Handlers to set trailers
   2675 // after the header has already been flushed. Because the Go
   2676 // ResponseWriter interface has no way to set Trailers (only the
   2677 // Header), and because we didn't want to expand the ResponseWriter
   2678 // interface, and because nobody used trailers, and because RFC 7230
   2679 // says you SHOULD (but not must) predeclare any trailers in the
   2680 // header, the official ResponseWriter rules said trailers in Go must
   2681 // be predeclared, and then we reuse the same ResponseWriter.Header()
   2682 // map to mean both Headers and Trailers. When it's time to write the
   2683 // Trailers, we pick out the fields of Headers that were declared as
   2684 // trailers. That worked for a while, until we found the first major
   2685 // user of Trailers in the wild: gRPC (using them only over http2),
   2686 // and gRPC libraries permit setting trailers mid-stream without
   2687 // predeclaring them. So: change of plans. We still permit the old
   2688 // way, but we also permit this hack: if a Header() key begins with
   2689 // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
   2690 // invalid token byte anyway, there is no ambiguity. (And it's already
   2691 // filtered out.) It's mildly hacky, but not terrible.
   2692 //
   2693 // This method runs after the Handler is done and promotes any Header
   2694 // fields to be trailers.
   2695 func (rws *responseWriterState) promoteUndeclaredTrailers() {
   2696 	for k, vv := range rws.handlerHeader {
   2697 		if !strings.HasPrefix(k, TrailerPrefix) {
   2698 			continue
   2699 		}
   2700 		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
   2701 		rws.declareTrailer(trailerKey)
   2702 		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
   2703 	}
   2704 
   2705 	if len(rws.trailers) > 1 {
   2706 		sorter := sorterPool.Get().(*sorter)
   2707 		sorter.SortStrings(rws.trailers)
   2708 		sorterPool.Put(sorter)
   2709 	}
   2710 }
   2711 
   2712 func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
   2713 	st := w.rws.stream
   2714 	if !deadline.IsZero() && deadline.Before(time.Now()) {
   2715 		// If we're setting a deadline in the past, reset the stream immediately
   2716 		// so reads after SetReadDeadline returns will fail.
   2717 		st.onReadTimeout()
   2718 		return nil
   2719 	}
   2720 	w.rws.conn.sendServeMsg(func(sc *serverConn) {
   2721 		if st.readDeadline != nil {
   2722 			if !st.readDeadline.Stop() {
   2723 				// Deadline already exceeded, or stream has been closed.
   2724 				return
   2725 			}
   2726 		}
   2727 		if deadline.IsZero() {
   2728 			st.readDeadline = nil
   2729 		} else if st.readDeadline == nil {
   2730 			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
   2731 		} else {
   2732 			st.readDeadline.Reset(deadline.Sub(time.Now()))
   2733 		}
   2734 	})
   2735 	return nil
   2736 }
   2737 
   2738 func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
   2739 	st := w.rws.stream
   2740 	if !deadline.IsZero() && deadline.Before(time.Now()) {
   2741 		// If we're setting a deadline in the past, reset the stream immediately
   2742 		// so writes after SetWriteDeadline returns will fail.
   2743 		st.onWriteTimeout()
   2744 		return nil
   2745 	}
   2746 	w.rws.conn.sendServeMsg(func(sc *serverConn) {
   2747 		if st.writeDeadline != nil {
   2748 			if !st.writeDeadline.Stop() {
   2749 				// Deadline already exceeded, or stream has been closed.
   2750 				return
   2751 			}
   2752 		}
   2753 		if deadline.IsZero() {
   2754 			st.writeDeadline = nil
   2755 		} else if st.writeDeadline == nil {
   2756 			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
   2757 		} else {
   2758 			st.writeDeadline.Reset(deadline.Sub(time.Now()))
   2759 		}
   2760 	})
   2761 	return nil
   2762 }
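
// Illustrative sketch (not from this file), assuming Go 1.20+: handlers
// normally reach SetReadDeadline and SetWriteDeadline through
// http.ResponseController rather than by asserting on the concrete
// ResponseWriter type.
//
//	func slowUpload(w http.ResponseWriter, r *http.Request) {
//		rc := http.NewResponseController(w)
//		// Allow 30s for the client to deliver the request body.
//		if err := rc.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {
//			log.Printf("SetReadDeadline: %v", err)
//		}
//		body, err := io.ReadAll(r.Body)
//		if err != nil {
//			http.Error(w, "read: "+err.Error(), http.StatusRequestTimeout)
//			return
//		}
//		// Allow 10s for the client to accept the response.
//		if err := rc.SetWriteDeadline(time.Now().Add(10 * time.Second)); err != nil {
//			log.Printf("SetWriteDeadline: %v", err)
//		}
//		w.Write(body)
//	}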
   2763 
   2764 func (w *responseWriter) Flush() {
   2765 	w.FlushError()
   2766 }
   2767 
   2768 func (w *responseWriter) FlushError() error {
   2769 	rws := w.rws
   2770 	if rws == nil {
   2771 		panic("Header called after Handler finished")
   2772 	}
   2773 	var err error
   2774 	if rws.bw.Buffered() > 0 {
   2775 		err = rws.bw.Flush()
   2776 	} else {
   2777 		// The bufio.Writer won't call chunkWriter.Write
   2778 		// (writeChunk with zero bytes), so we have to do it
   2779 		// ourselves to force the HTTP response header and/or
   2780 		// final DATA frame (with END_STREAM) to be sent.
   2781 		_, err = chunkWriter{rws}.Write(nil)
   2782 		if err == nil {
   2783 			select {
   2784 			case <-rws.stream.cw:
   2785 				err = rws.stream.closeErr
   2786 			default:
   2787 			}
   2788 		}
   2789 	}
   2790 	return err
   2791 }
   2792 
   2793 func (w *responseWriter) CloseNotify() <-chan bool {
   2794 	rws := w.rws
   2795 	if rws == nil {
   2796 		panic("CloseNotify called after Handler finished")
   2797 	}
   2798 	rws.closeNotifierMu.Lock()
   2799 	ch := rws.closeNotifierCh
   2800 	if ch == nil {
   2801 		ch = make(chan bool, 1)
   2802 		rws.closeNotifierCh = ch
   2803 		cw := rws.stream.cw
   2804 		go func() {
   2805 			cw.Wait() // wait for close
   2806 			ch <- true
   2807 		}()
   2808 	}
   2809 	rws.closeNotifierMu.Unlock()
   2810 	return ch
   2811 }
   2812 
   2813 func (w *responseWriter) Header() http.Header {
   2814 	rws := w.rws
   2815 	if rws == nil {
   2816 		panic("Header called after Handler finished")
   2817 	}
   2818 	if rws.handlerHeader == nil {
   2819 		rws.handlerHeader = make(http.Header)
   2820 	}
   2821 	return rws.handlerHeader
   2822 }
   2823 
   2824 // checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
   2825 func checkWriteHeaderCode(code int) {
   2826 	// Issue 22880: require valid WriteHeader status codes.
   2827 	// For now we only enforce that it's three digits.
   2828 	// In the future we might block things over 599 (600 and above aren't defined
   2829 	// at http://httpwg.org/specs/rfc7231.html#status.codes).
   2830 	// But for now we accept any three-digit code.
   2831 	//
   2832 	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
   2833 	// no equivalent bogus thing we can realistically send in HTTP/2,
   2834 	// so we'll consistently panic instead and help people find their bugs
   2835 	// early. (We can't return an error from WriteHeader even if we wanted to.)
   2836 	if code < 100 || code > 999 {
   2837 		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
   2838 	}
   2839 }
   2840 
   2841 func (w *responseWriter) WriteHeader(code int) {
   2842 	rws := w.rws
   2843 	if rws == nil {
   2844 		panic("WriteHeader called after Handler finished")
   2845 	}
   2846 	rws.writeHeader(code)
   2847 }
   2848 
   2849 func (rws *responseWriterState) writeHeader(code int) {
   2850 	if rws.wroteHeader {
   2851 		return
   2852 	}
   2853 
   2854 	checkWriteHeaderCode(code)
   2855 
   2856 	// Handle informational headers
   2857 	if code >= 100 && code <= 199 {
   2858 		// Per RFC 8297 we must not clear the current header map
   2859 		h := rws.handlerHeader
   2860 
   2861 		_, cl := h["Content-Length"]
   2862 		_, te := h["Transfer-Encoding"]
   2863 		if cl || te {
   2864 			h = h.Clone()
   2865 			h.Del("Content-Length")
   2866 			h.Del("Transfer-Encoding")
   2867 		}
   2868 
   2869 		if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
   2870 			streamID:    rws.stream.id,
   2871 			httpResCode: code,
   2872 			h:           h,
   2873 			endStream:   rws.handlerDone && !rws.hasTrailers(),
   2874 		}) != nil {
   2875 			rws.dirty = true
   2876 		}
   2877 
   2878 		return
   2879 	}
   2880 
   2881 	rws.wroteHeader = true
   2882 	rws.status = code
   2883 	if len(rws.handlerHeader) > 0 {
   2884 		rws.snapHeader = cloneHeader(rws.handlerHeader)
   2885 	}
   2886 }
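
// Hypothetical usage sketch: per the informational-header branch above, a 1xx
// status is written to the wire immediately and does not count as the final
// response, so the Handler can still send a real status afterwards. The handler
// name and the Link value are assumptions; http.StatusEarlyHints is the
// standard 103 constant.
func exampleEarlyHintsHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Link", "</style.css>; rel=preload; as=style")
	w.WriteHeader(http.StatusEarlyHints) // 103: sent now, header map kept intact
	// ... prepare the real response ...
	w.WriteHeader(http.StatusOK) // final status; headers are snapshotted here
	io.WriteString(w, "<html>...</html>")
}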
   2887 
   2888 func cloneHeader(h http.Header) http.Header {
   2889 	h2 := make(http.Header, len(h))
   2890 	for k, vv := range h {
   2891 		vv2 := make([]string, len(vv))
   2892 		copy(vv2, vv)
   2893 		h2[k] = vv2
   2894 	}
   2895 	return h2
   2896 }
   2897 
   2898 // The Life Of A Write is like this:
   2899 //
   2900 // * Handler calls w.Write or w.WriteString ->
   2901 // * -> rws.bw (*bufio.Writer) ->
   2902 // * (Handler might call Flush)
   2903 // * -> chunkWriter{rws}
   2904 // * -> responseWriterState.writeChunk(p []byte)
   2905 // * -> responseWriterState.writeChunk (most of the magic; see comment there)
   2906 func (w *responseWriter) Write(p []byte) (n int, err error) {
   2907 	return w.write(len(p), p, "")
   2908 }
   2909 
   2910 func (w *responseWriter) WriteString(s string) (n int, err error) {
   2911 	return w.write(len(s), nil, s)
   2912 }
   2913 
   2914 // Only one of dataB or dataS is set; the other is left zero.
   2915 func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
   2916 	rws := w.rws
   2917 	if rws == nil {
   2918 		panic("Write called after Handler finished")
   2919 	}
   2920 	if !rws.wroteHeader {
   2921 		w.WriteHeader(200)
   2922 	}
   2923 	if !bodyAllowedForStatus(rws.status) {
   2924 		return 0, http.ErrBodyNotAllowed
   2925 	}
   2926 	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
   2927 	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
   2928 		// TODO: send a RST_STREAM
   2929 		return 0, errors.New("http2: handler wrote more than declared Content-Length")
   2930 	}
   2931 
   2932 	if dataB != nil {
   2933 		return rws.bw.Write(dataB)
   2934 	} else {
   2935 		return rws.bw.WriteString(dataS)
   2936 	}
   2937 }
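
// Hypothetical usage sketch: the sentContentLen check in write above rejects
// bytes beyond a Content-Length the Handler declared, once the headers (and with
// them the declared length) have actually been sent; the Flush below forces that
// to happen early. The handler name is an assumption, as is the exact timing of
// when writeChunk records the declared length.
func exampleContentLengthHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Length", "5")
	io.WriteString(w, "hello")
	w.(http.Flusher).Flush() // headers go out; the declared length is now in effect
	if _, err := io.WriteString(w, "!"); err != nil {
		log.Printf("write beyond declared Content-Length: %v", err)
	}
}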
   2938 
   2939 func (w *responseWriter) handlerDone() {
   2940 	rws := w.rws
   2941 	dirty := rws.dirty
   2942 	rws.handlerDone = true
   2943 	w.Flush()
   2944 	w.rws = nil
   2945 	if !dirty {
   2946 		// Only recycle the pool if all prior Write calls to
   2947 		// the serverConn goroutine completed successfully. If
   2948 		// they returned earlier due to resets from the peer
   2949 		// there might still be write goroutines outstanding
   2950 		// from the serverConn referencing the rws memory. See
   2951 		// issue 20704.
   2952 		responseWriterStatePool.Put(rws)
   2953 	}
   2954 }
   2955 
   2956 // Push errors.
   2957 var (
   2958 	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
   2959 	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
   2960 )
   2961 
   2962 var _ http.Pusher = (*responseWriter)(nil)
   2963 
   2964 func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
   2965 	st := w.rws.stream
   2966 	sc := st.sc
   2967 	sc.serveG.checkNotOn() // Push is called from a Handler goroutine, not the serve goroutine
   2968 
   2969 	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
   2970 	// http://tools.ietf.org/html/rfc7540#section-6.6
   2971 	if st.isPushed() {
   2972 		return ErrRecursivePush
   2973 	}
   2974 
   2975 	if opts == nil {
   2976 		opts = new(http.PushOptions)
   2977 	}
   2978 
   2979 	// Default options.
   2980 	if opts.Method == "" {
   2981 		opts.Method = "GET"
   2982 	}
   2983 	if opts.Header == nil {
   2984 		opts.Header = http.Header{}
   2985 	}
   2986 	wantScheme := "http"
   2987 	if w.rws.req.TLS != nil {
   2988 		wantScheme = "https"
   2989 	}
   2990 
   2991 	// Validate the request.
   2992 	u, err := url.Parse(target)
   2993 	if err != nil {
   2994 		return err
   2995 	}
   2996 	if u.Scheme == "" {
   2997 		if !strings.HasPrefix(target, "/") {
   2998 			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
   2999 		}
   3000 		u.Scheme = wantScheme
   3001 		u.Host = w.rws.req.Host
   3002 	} else {
   3003 		if u.Scheme != wantScheme {
   3004 			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
   3005 		}
   3006 		if u.Host == "" {
   3007 			return errors.New("URL must have a host")
   3008 		}
   3009 	}
   3010 	for k := range opts.Header {
   3011 		if strings.HasPrefix(k, ":") {
   3012 			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
   3013 		}
   3014 		// These headers are meaningful only if the request has a body,
   3015 		// but PUSH_PROMISE requests cannot have a body.
   3016 		// http://tools.ietf.org/html/rfc7540#section-8.2
   3017 		// Also disallow Host, since the promised URL must be absolute.
   3018 		if asciiEqualFold(k, "content-length") ||
   3019 			asciiEqualFold(k, "content-encoding") ||
   3020 			asciiEqualFold(k, "trailer") ||
   3021 			asciiEqualFold(k, "te") ||
   3022 			asciiEqualFold(k, "expect") ||
   3023 			asciiEqualFold(k, "host") {
   3024 			return fmt.Errorf("promised request headers cannot include %q", k)
   3025 		}
   3026 	}
   3027 	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
   3028 		return err
   3029 	}
   3030 
   3031 	// The RFC effectively limits promised requests to GET and HEAD:
   3032 	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
   3033 	// http://tools.ietf.org/html/rfc7540#section-8.2
   3034 	if opts.Method != "GET" && opts.Method != "HEAD" {
   3035 		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
   3036 	}
   3037 
   3038 	msg := &startPushRequest{
   3039 		parent: st,
   3040 		method: opts.Method,
   3041 		url:    u,
   3042 		header: cloneHeader(opts.Header),
   3043 		done:   errChanPool.Get().(chan error),
   3044 	}
   3045 
   3046 	select {
   3047 	case <-sc.doneServing:
   3048 		return errClientDisconnected
   3049 	case <-st.cw:
   3050 		return errStreamClosed
   3051 	case sc.serveMsgCh <- msg:
   3052 	}
   3053 
   3054 	select {
   3055 	case <-sc.doneServing:
   3056 		return errClientDisconnected
   3057 	case <-st.cw:
   3058 		return errStreamClosed
   3059 	case err := <-msg.done:
   3060 		errChanPool.Put(msg.done)
   3061 		return err
   3062 	}
   3063 }
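
// Hypothetical usage sketch: Push above is reached through the standard
// http.Pusher interface. It returns http.ErrNotSupported when the client has
// disabled push and ErrPushLimitReached when the peer's stream limit is hit.
// The handler name and asset path below are assumptions.
func examplePushHandler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		if err := p.Push("/static/app.css", nil); err != nil {
			log.Printf("server push failed: %v", err)
		}
	}
	io.WriteString(w, "<html>...</html>")
}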
   3064 
   3065 type startPushRequest struct {
   3066 	parent *stream
   3067 	method string
   3068 	url    *url.URL
   3069 	header http.Header
   3070 	done   chan error
   3071 }
   3072 
   3073 func (sc *serverConn) startPush(msg *startPushRequest) {
   3074 	sc.serveG.check()
   3075 
   3076 	// http://tools.ietf.org/html/rfc7540#section-6.6.
   3077 	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
   3078 	// is in either the "open" or "half-closed (remote)" state.
   3079 	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
   3080 		// responseWriter.Push checks that the stream is peer-initiated.
   3081 		msg.done <- errStreamClosed
   3082 		return
   3083 	}
   3084 
   3085 	// http://tools.ietf.org/html/rfc7540#section-6.6.
   3086 	if !sc.pushEnabled {
   3087 		msg.done <- http.ErrNotSupported
   3088 		return
   3089 	}
   3090 
   3091 	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
   3092 	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
   3093 	// is written. Once the ID is allocated, we start the request handler.
   3094 	allocatePromisedID := func() (uint32, error) {
   3095 		sc.serveG.check()
   3096 
   3097 		// Check this again, just in case. Technically, we might have received
   3098 		// an updated SETTINGS by the time we got around to writing this frame.
   3099 		if !sc.pushEnabled {
   3100 			return 0, http.ErrNotSupported
   3101 		}
   3102 		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
   3103 		if sc.curPushedStreams+1 > sc.clientMaxStreams {
   3104 			return 0, ErrPushLimitReached
   3105 		}
   3106 
   3107 		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
   3108 		// Streams initiated by the server MUST use even-numbered identifiers.
   3109 		// A server that is unable to establish a new stream identifier can send a GOAWAY
   3110 		// frame so that the client is forced to open a new connection for new streams.
   3111 		if sc.maxPushPromiseID+2 >= 1<<31 {
   3112 			sc.startGracefulShutdownInternal()
   3113 			return 0, ErrPushLimitReached
   3114 		}
   3115 		sc.maxPushPromiseID += 2
   3116 		promisedID := sc.maxPushPromiseID
   3117 
   3118 		// http://tools.ietf.org/html/rfc7540#section-8.2.
   3119 		// Strictly speaking, the new stream should start in "reserved (local)", then
   3120 		// transition to "half closed (remote)" after sending the initial HEADERS, but
   3121 		// we start in "half closed (remote)" for simplicity.
   3122 		// See further comments at the definition of stateHalfClosedRemote.
   3123 		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
   3124 		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
   3125 			method:    msg.method,
   3126 			scheme:    msg.url.Scheme,
   3127 			authority: msg.url.Host,
   3128 			path:      msg.url.RequestURI(),
   3129 			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
   3130 		})
   3131 		if err != nil {
   3132 			// Should not happen, since we've already validated msg.url.
   3133 			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
   3134 		}
   3135 
   3136 		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
   3137 		return promisedID, nil
   3138 	}
   3139 
   3140 	sc.writeFrame(FrameWriteRequest{
   3141 		write: &writePushPromise{
   3142 			streamID:           msg.parent.id,
   3143 			method:             msg.method,
   3144 			url:                msg.url,
   3145 			h:                  msg.header,
   3146 			allocatePromisedID: allocatePromisedID,
   3147 		},
   3148 		stream: msg.parent,
   3149 		done:   msg.done,
   3150 	})
   3151 }
   3152 
   3153 // foreachHeaderElement splits v according to the "#rule" construction
   3154 // in RFC 7230 section 7 and calls fn for each non-empty element.
   3155 func foreachHeaderElement(v string, fn func(string)) {
   3156 	v = textproto.TrimString(v)
   3157 	if v == "" {
   3158 		return
   3159 	}
   3160 	if !strings.Contains(v, ",") {
   3161 		fn(v)
   3162 		return
   3163 	}
   3164 	for _, f := range strings.Split(v, ",") {
   3165 		if f = textproto.TrimString(f); f != "" {
   3166 			fn(f)
   3167 		}
   3168 	}
   3169 }
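
// Hypothetical demonstration of the helper above: empty elements are dropped and
// surrounding whitespace is trimmed, so a Connection-style value like
// "close, TE , , Upgrade" yields "close", "TE", "Upgrade" in order.
func exampleForeachHeaderElement() []string {
	var got []string
	foreachHeaderElement("close, TE , , Upgrade", func(v string) {
		got = append(got, v)
	})
	return got // ["close" "TE" "Upgrade"]
}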
   3170 
   3171 // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
   3172 var connHeaders = []string{
   3173 	"Connection",
   3174 	"Keep-Alive",
   3175 	"Proxy-Connection",
   3176 	"Transfer-Encoding",
   3177 	"Upgrade",
   3178 }
   3179 
   3180 // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
   3181 // per RFC 7540 Section 8.1.2.2.
   3182 // The returned error is reported to users.
   3183 func checkValidHTTP2RequestHeaders(h http.Header) error {
   3184 	for _, k := range connHeaders {
   3185 		if _, ok := h[k]; ok {
   3186 			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
   3187 		}
   3188 	}
   3189 	te := h["Te"]
   3190 	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
   3191 		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
   3192 	}
   3193 	return nil
   3194 }
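
// Hypothetical demonstration of the check above: any connection-specific header
// is rejected, while "TE: trailers" is the single TE value allowed.
func exampleCheckValidHTTP2RequestHeaders() {
	bad := http.Header{"Transfer-Encoding": {"chunked"}}
	good := http.Header{"Te": {"trailers"}}
	fmt.Println(checkValidHTTP2RequestHeaders(bad))  // request header "Transfer-Encoding" is not valid in HTTP/2
	fmt.Println(checkValidHTTP2RequestHeaders(good)) // <nil>
}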
   3195 
   3196 func new400Handler(err error) http.HandlerFunc {
   3197 	return func(w http.ResponseWriter, r *http.Request) {
   3198 		http.Error(w, err.Error(), http.StatusBadRequest)
   3199 	}
   3200 }
   3201 
   3202 // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
   3203 // disabled. See comments on h1ServerShutdownChan above for why
   3204 // the code is written this way.
   3205 func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
   3206 	var x interface{} = hs
   3207 	type I interface {
   3208 		doKeepAlives() bool
   3209 	}
   3210 	if hs, ok := x.(I); ok {
   3211 		return !hs.doKeepAlives()
   3212 	}
   3213 	return false
   3214 }
   3215 
   3216 func (sc *serverConn) countError(name string, err error) error {
   3217 	if sc == nil || sc.srv == nil {
   3218 		return err
   3219 	}
   3220 	f := sc.srv.CountError
   3221 	if f == nil {
   3222 		return err
   3223 	}
   3224 	var typ string
   3225 	var code ErrCode
   3226 	switch e := err.(type) {
   3227 	case ConnectionError:
   3228 		typ = "conn"
   3229 		code = ErrCode(e)
   3230 	case StreamError:
   3231 		typ = "stream"
   3232 		code = ErrCode(e.Code)
   3233 	default:
   3234 		return err
   3235 	}
   3236 	codeStr := errCodeName[code]
   3237 	if codeStr == "" {
   3238 		codeStr = strconv.Itoa(int(code))
   3239 	}
   3240 	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
   3241 	return err
   3242 }
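
// Hypothetical configuration sketch: countError above invokes Server.CountError
// with strings shaped like "conn_<code>_<name>" or "stream_<code>_<name>", so a
// caller can route them into whatever error accounting it already has. The
// log-based sink below is an assumption used only for illustration.
func exampleCountErrorServer() *Server {
	return &Server{
		CountError: func(errType string) {
			log.Printf("http2 error counted: %s", errType)
		},
	}
}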