Vendor cleanup

Signed-off-by: Madhu Rajanna <mrajanna@redhat.com>
Madhu Rajanna
2019-01-16 18:11:54 +05:30
parent 661818bd79
commit 0f836c62fa
16816 changed files with 20 additions and 4611100 deletions


@@ -1,2 +0,0 @@
*~
h2i/h2i


@@ -1,51 +0,0 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image are found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]


@@ -1,3 +0,0 @@
curlimage:
docker build -t gohttp2/curl .
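The Dockerfile comment above notes that, when Docker and the gohttp2/curl image are present, the Go tests use this curl binary for integration tests. The actual test helper is not part of this hunk, so the sketch below is only illustrative of how a test could shell out to the image built by the Makefile target above; the helper name and curl flags are assumptions.

package http2_test

import (
	"os/exec"
	"strings"
	"testing"
)

// dockerCurl runs curl from the gohttp2/curl image built by the Makefile
// target above. This is an illustrative helper, not the vendored test code.
func dockerCurl(t *testing.T, args ...string) string {
	t.Helper()
	cmd := exec.Command("docker", append([]string{"run", "--rm", "gohttp2/curl"}, args...)...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Skipf("gohttp2/curl image not usable, skipping: %v", err)
	}
	return string(out)
}

func TestDockerCurlSupportsHTTP2(t *testing.T) {
	// A curl built against nghttp2 lists HTTP2 among its features.
	if out := dockerCurl(t, "--version"); !strings.Contains(out, "HTTP2") {
		t.Errorf("curl --version does not report HTTP2 support:\n%s", out)
	}
}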

vendor/golang.org/x/net/http2/README (20 lines; generated, vendored)

@@ -1,20 +0,0 @@
This is a work-in-progress HTTP/2 implementation for Go.
It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.
Status:
* The server support is pretty good. A few things are missing
but are being worked on.
* The client work has just started but, since it shares a lot of code
with the server, it is coming along much quicker.
Docs are at https://godoc.org/golang.org/x/net/http2
Demo test server at https://http2.golang.org/
Help & bug reports welcome!
Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+


@@ -1,309 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "testing"
func TestIsBadCipherBad(t *testing.T) {
for _, c := range badCiphers {
if !isBadCipher(c) {
t.Errorf("Wrong result for isBadCipher(%d), want true", c)
}
}
}
// verify we don't give false positives on ciphers not on blacklist
func TestIsBadCipherGood(t *testing.T) {
goodCiphers := map[uint16]string{
cipher_TLS_DHE_RSA_WITH_AES_256_CCM: "cipher_TLS_DHE_RSA_WITH_AES_256_CCM",
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM: "cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256: "cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256",
}
for c, name := range goodCiphers {
if isBadCipher(c) {
t.Errorf("Wrong result for isBadCipher(%d) %s, want false", c, name)
}
}
}
// copied from https://http2.github.io/http2-spec/#BadCipherSuites,
var badCiphers = []uint16{
cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8,
}


@@ -1,155 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"fmt"
"reflect"
"testing"
)
func fmtDataChunk(chunk []byte) string {
out := ""
var last byte
var count int
for _, c := range chunk {
if c != last {
if count > 0 {
out += fmt.Sprintf(" x %d ", count)
count = 0
}
out += string([]byte{c})
last = c
}
count++
}
if count > 0 {
out += fmt.Sprintf(" x %d", count)
}
return out
}
func fmtDataChunks(chunks [][]byte) string {
var out string
for _, chunk := range chunks {
out += fmt.Sprintf("{%q}", fmtDataChunk(chunk))
}
return out
}
func testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) {
// Run setup, then read the remaining bytes from the dataBuffer and check
// that they match wantBytes. We use different read sizes to check corner
// cases in Read.
for _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} {
t.Run(fmt.Sprintf("ReadSize=%d", readSize), func(t *testing.T) {
b := setup(t)
buf := make([]byte, readSize)
var gotRead bytes.Buffer
for {
n, err := b.Read(buf)
gotRead.Write(buf[:n])
if err == errReadEmpty {
break
}
if err != nil {
t.Fatalf("error after %v bytes: %v", gotRead.Len(), err)
}
}
if got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) {
t.Errorf("FinalRead=%q, want %q", fmtDataChunk(got), fmtDataChunk(want))
}
})
}
}
func TestDataBufferAllocation(t *testing.T) {
writes := [][]byte{
bytes.Repeat([]byte("a"), 1*1024-1),
[]byte("a"),
bytes.Repeat([]byte("b"), 4*1024-1),
[]byte("b"),
bytes.Repeat([]byte("c"), 8*1024-1),
[]byte("c"),
bytes.Repeat([]byte("d"), 16*1024-1),
[]byte("d"),
bytes.Repeat([]byte("e"), 32*1024),
}
var wantRead bytes.Buffer
for _, p := range writes {
wantRead.Write(p)
}
testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
b := &dataBuffer{}
for _, p := range writes {
if n, err := b.Write(p); n != len(p) || err != nil {
t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
}
}
want := [][]byte{
bytes.Repeat([]byte("a"), 1*1024),
bytes.Repeat([]byte("b"), 4*1024),
bytes.Repeat([]byte("c"), 8*1024),
bytes.Repeat([]byte("d"), 16*1024),
bytes.Repeat([]byte("e"), 16*1024),
bytes.Repeat([]byte("e"), 16*1024),
}
if !reflect.DeepEqual(b.chunks, want) {
t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
}
return b
})
}
func TestDataBufferAllocationWithExpected(t *testing.T) {
writes := [][]byte{
bytes.Repeat([]byte("a"), 1*1024), // allocates 16KB
bytes.Repeat([]byte("b"), 14*1024),
bytes.Repeat([]byte("c"), 15*1024), // allocates 16KB more
bytes.Repeat([]byte("d"), 2*1024),
bytes.Repeat([]byte("e"), 1*1024), // overflows 32KB expectation, allocates just 1KB
}
var wantRead bytes.Buffer
for _, p := range writes {
wantRead.Write(p)
}
testDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {
b := &dataBuffer{expected: 32 * 1024}
for _, p := range writes {
if n, err := b.Write(p); n != len(p) || err != nil {
t.Fatalf("Write(%q x %d)=%v,%v want %v,nil", p[:1], len(p), n, err, len(p))
}
}
want := [][]byte{
append(bytes.Repeat([]byte("a"), 1*1024), append(bytes.Repeat([]byte("b"), 14*1024), bytes.Repeat([]byte("c"), 1*1024)...)...),
append(bytes.Repeat([]byte("c"), 14*1024), bytes.Repeat([]byte("d"), 2*1024)...),
bytes.Repeat([]byte("e"), 1*1024),
}
if !reflect.DeepEqual(b.chunks, want) {
t.Errorf("dataBuffer.chunks\ngot: %s\nwant: %s", fmtDataChunks(b.chunks), fmtDataChunks(want))
}
return b
})
}
func TestDataBufferWriteAfterPartialRead(t *testing.T) {
testDataBuffer(t, []byte("cdxyz"), func(t *testing.T) *dataBuffer {
b := &dataBuffer{}
if n, err := b.Write([]byte("abcd")); n != 4 || err != nil {
t.Fatalf("Write(\"abcd\")=%v,%v want 4,nil", n, err)
}
p := make([]byte, 2)
if n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte("ab")) {
t.Fatalf("Read()=%q,%v,%v want \"ab\",2,nil", p, n, err)
}
if n, err := b.Write([]byte("xyz")); n != 3 || err != nil {
t.Fatalf("Write(\"xyz\")=%v,%v want 3,nil", n, err)
}
return b
})
}
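The comments in TestDataBufferAllocationWithExpected capture the allocation policy these tests pin down: without a hint, writes are rounded up to fixed size classes capped at 16KB; with an "expected" hint, chunks are sized to the expected remaining bytes (also capped at 16KB), falling back to small chunks once the expectation is exhausted. A standalone sketch of that size-class choice follows; it is illustrative only, not the vendored dataBuffer code, and the names are assumptions.

package databuffersketch

// pickChunkSize mirrors the policy the tests above pin down: round a write up
// to the next size class (1KB, 4KB, 8KB, 16KB), or size the chunk to the
// expected remaining bytes when that is larger, never exceeding the 16KB cap.
func pickChunkSize(need, expectedRemaining int64) int64 {
	if expectedRemaining > need {
		need = expectedRemaining
	}
	switch {
	case need <= 1<<10:
		return 1 << 10
	case need <= 4<<10:
		return 4 << 10
	case need <= 8<<10:
		return 8 << 10
	default:
		return 16 << 10
	}
}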


@@ -1,24 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "testing"
func TestErrCodeString(t *testing.T) {
tests := []struct {
err ErrCode
want string
}{
{ErrCodeProtocol, "PROTOCOL_ERROR"},
{0xd, "HTTP_1_1_REQUIRED"},
{0xf, "unknown error code 0xf"},
}
for i, tt := range tests {
got := tt.err.String()
if got != tt.want {
t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
}
}
}


@@ -1,87 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "testing"
func TestFlow(t *testing.T) {
var st flow
var conn flow
st.add(3)
conn.add(2)
if got, want := st.available(), int32(3); got != want {
t.Errorf("available = %d; want %d", got, want)
}
st.setConnFlow(&conn)
if got, want := st.available(), int32(2); got != want {
t.Errorf("after parent setup, available = %d; want %d", got, want)
}
st.take(2)
if got, want := conn.available(), int32(0); got != want {
t.Errorf("after taking 2, conn = %d; want %d", got, want)
}
if got, want := st.available(), int32(0); got != want {
t.Errorf("after taking 2, stream = %d; want %d", got, want)
}
}
func TestFlowAdd(t *testing.T) {
var f flow
if !f.add(1) {
t.Fatal("failed to add 1")
}
if !f.add(-1) {
t.Fatal("failed to add -1")
}
if got, want := f.available(), int32(0); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
if !f.add(1<<31 - 1) {
t.Fatal("failed to add 2^31-1")
}
if got, want := f.available(), int32(1<<31-1); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
if f.add(1) {
t.Fatal("adding 1 to max shouldn't be allowed")
}
}
func TestFlowAddOverflow(t *testing.T) {
var f flow
if !f.add(0) {
t.Fatal("failed to add 0")
}
if !f.add(-1) {
t.Fatal("failed to add -1")
}
if !f.add(0) {
t.Fatal("failed to add 0")
}
if !f.add(1) {
t.Fatal("failed to add 1")
}
if !f.add(1) {
t.Fatal("failed to add 1")
}
if !f.add(0) {
t.Fatal("failed to add 0")
}
if !f.add(-3) {
t.Fatal("failed to add -3")
}
if got, want := f.available(), int32(-2); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
if !f.add(1<<31 - 1) {
t.Fatal("failed to add 2^31-1")
}
if got, want := f.available(), int32(1+-3+(1<<31-1)); got != want {
t.Fatalf("size = %d; want %d", got, want)
}
}
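The flow type under test is internal to the package and not shown in this hunk. As a rough, self-contained sketch of the semantics these tests assert (an int32 send window that rejects additions overflowing the 2^31-1 HTTP/2 maximum, optionally capped by a connection-level window), under illustrative names:

package flowsketch

// flowBudget is an illustrative stand-in for the package's internal flow type.
type flowBudget struct {
	n    int32
	conn *flowBudget // optional connection-level window
}

func (f *flowBudget) setConnFlow(cf *flowBudget) { f.conn = cf }

// available reports the stream window, further capped by the connection
// window when one is attached.
func (f *flowBudget) available() int32 {
	n := f.n
	if f.conn != nil && f.conn.n < n {
		n = f.conn.n
	}
	return n
}

// take consumes n bytes from both the stream and the connection windows.
func (f *flowBudget) take(n int32) {
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

// add grows the window by n (which may be negative), reporting false when the
// result would overflow the 2^31-1 maximum HTTP/2 window size.
func (f *flowBudget) add(n int32) bool {
	sum := f.n + n
	if (sum > n) == (f.n > 0) {
		f.n = sum
		return true
	}
	return false
}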

File diff suppressed because it is too large.


@@ -1,33 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"strings"
"testing"
)
func TestGoroutineLock(t *testing.T) {
oldDebug := DebugGoroutines
DebugGoroutines = true
defer func() { DebugGoroutines = oldDebug }()
g := newGoroutineLock()
g.check()
sawPanic := make(chan interface{})
go func() {
defer func() { sawPanic <- recover() }()
g.check() // should panic
}()
e := <-sawPanic
if e == nil {
t.Fatal("did not see panic from check in other goroutine")
}
if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
t.Errorf("expected on see panic about running on the wrong goroutine; got %v", e)
}
}
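gotrack.go itself is not part of this hunk; the check being exercised records the goroutine that created the lock and panics when check runs on a different one (only while DebugGoroutines is enabled). A simplified sketch of the mechanism, which derives the goroutine ID from runtime.Stack output; the names are illustrative and the DebugGoroutines gate is omitted:

package gotracksketch

import (
	"bytes"
	"runtime"
	"strconv"
)

// curGoroutineID extracts the current goroutine's ID from the first line of
// runtime.Stack output, which looks like "goroutine 123 [running]:".
func curGoroutineID() uint64 {
	buf := make([]byte, 64)
	n := runtime.Stack(buf, false)
	fields := bytes.Fields(buf[:n])
	if len(fields) < 2 {
		panic("unexpected runtime.Stack output")
	}
	id, err := strconv.ParseUint(string(fields[1]), 10, 64)
	if err != nil {
		panic("parsing goroutine ID: " + err.Error())
	}
	return id
}

// goroutineLock remembers the goroutine it was created on.
type goroutineLock uint64

func newGoroutineLock() goroutineLock { return goroutineLock(curGoroutineID()) }

// check panics when called from a different goroutine, which is what the test
// above asserts via recover().
func (g goroutineLock) check() {
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}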


@@ -1,495 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
//
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
// net/http or golang.org/x/net/http2.
package h2c
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/textproto"
"os"
"strings"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
)
var (
http2VerboseLogs bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
}
}
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
// that should be h2c traffic. There are two ways to begin an h2c connection
// (RFC 7540 Sections 3.2 and 3.4): (1) starting with prior knowledge, which
// works by opening an h2c connection with a string of bytes that is valid
// HTTP/1 but unlikely to occur in practice, and (2) upgrading from HTTP/1 to
// h2c, which works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occurs, we hijack the HTTP/1 connection,
// convert it to an HTTP/2 connection, and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
}
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. If a request is an h2c connection, it's hijacked and redirected to
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
// compatible parts of the Go http library to parse and recognize h2c requests.
// Once a request is recognized as h2c, we hijack the connection and convert it
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
// understands HTTP/2 except for the h2c part of it.)
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
return &h2cHandler{
Handler: h,
s: s,
}
}
// ServeHTTP implements the h2c support that is enabled by h2c.NewHandler.
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
if http2VerboseLogs {
log.Print("h2c: attempting h2c with prior knowledge.")
}
conn, err := initH2CWithPriorKnowledge(w)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c with prior knowledge: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
if conn, err := h2cUpgrade(w, r); err == nil {
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
s.Handler.ServeHTTP(w, r)
return
}
// initH2CWithPriorKnowledge implements creating an h2c connection with prior
// knowledge (RFC 7540 Section 3.4) and creates a net.Conn suitable for
// http2.ServeConn. All we have to do is look for the client preface that is
// supposed to be part of the body, and re-forward the client preface on the
// net.Conn this function creates.
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
hijacker, ok := w.(http.Hijacker)
if !ok {
panic("Hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
panic(fmt.Sprintf("Hijack failed: %v", err))
}
const expectedBody = "SM\r\n\r\n"
buf := make([]byte, len(expectedBody))
n, err := io.ReadFull(rw, buf)
if err != nil {
return nil, fmt.Errorf("could not read from the buffer: %s", err)
}
if string(buf[:n]) == expectedBody {
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(strings.NewReader(http2.ClientPreface), rw),
BufWriter: rw.Writer,
}
return c, nil
}
conn.Close()
if http2VerboseLogs {
log.Printf(
"h2c: missing the request body portion of the client preface. Wanted: %v Got: %v",
[]byte(expectedBody),
buf[0:n],
)
}
return nil, errors.New("invalid client preface")
}
// drainClientPreface reads a single instance of the HTTP/2 client preface from
// the supplied reader.
func drainClientPreface(r io.Reader) error {
var buf bytes.Buffer
prefaceLen := int64(len(http2.ClientPreface))
n, err := io.CopyN(&buf, r, prefaceLen)
if err != nil {
return err
}
if n != prefaceLen || buf.String() != http2.ClientPreface {
return fmt.Errorf("Client never sent: %s", http2.ClientPreface)
}
return nil
}
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
if !isH2CUpgrade(r.Header) {
return nil, errors.New("non-conforming h2c headers")
}
// Initial bytes we put into conn to fool http2 server
initBytes, _, err := convertH1ReqToH2(r)
if err != nil {
return nil, err
}
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, errors.New("hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijack failed: %v", err)
}
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
"Connection: Upgrade\r\n" +
"Upgrade: h2c\r\n\r\n"))
rw.Flush()
// A conforming client will now send the H2 client preface, which we need to
// drain since one has already been included in initBytes above.
if err := drainClientPreface(rw); err != nil {
return nil, err
}
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(initBytes, rw),
BufWriter: newSettingsAckSwallowWriter(rw.Writer),
}
return c, nil
}
// convertH1ReqToH2 converts the data contained in the HTTP/1 upgrade request
// into its HTTP/2 equivalent in byte form.
func convertH1ReqToH2(r *http.Request) (*bytes.Buffer, []http2.Setting, error) {
h2Bytes := bytes.NewBuffer([]byte((http2.ClientPreface)))
framer := http2.NewFramer(h2Bytes, nil)
settings, err := getH2Settings(r.Header)
if err != nil {
return nil, nil, err
}
if err := framer.WriteSettings(settings...); err != nil {
return nil, nil, err
}
headerBytes, err := getH2HeaderBytes(r, getMaxHeaderTableSize(settings))
if err != nil {
return nil, nil, err
}
maxFrameSize := int(getMaxFrameSize(settings))
needOneHeader := len(headerBytes) < maxFrameSize
err = framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: 1,
BlockFragment: headerBytes,
EndHeaders: needOneHeader,
})
if err != nil {
return nil, nil, err
}
for i := maxFrameSize; i < len(headerBytes); i += maxFrameSize {
if len(headerBytes)-i > maxFrameSize {
if err := framer.WriteContinuation(1,
false, // endHeaders
headerBytes[i:i+maxFrameSize]); err != nil {
return nil, nil, err
}
} else {
if err := framer.WriteContinuation(1,
true, // endHeaders
headerBytes[i:]); err != nil {
return nil, nil, err
}
}
}
return h2Bytes, settings, nil
}
// getMaxFrameSize returns the SETTINGS_MAX_FRAME_SIZE value. If it is not
// present, the default of 16384 is returned, as specified by RFC 7540 Section 6.5.2.
func getMaxFrameSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingMaxFrameSize {
return setting.Val
}
}
return 16384
}
// getMaxHeaderTableSize returns the SETTINGS_HEADER_TABLE_SIZE value. If it is
// not present, the default of 4096 is returned, as specified by RFC 7540 Section 6.5.2.
func getMaxHeaderTableSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingHeaderTableSize {
return setting.Val
}
}
return 4096
}
// bufWriter is a Writer interface that also has a Flush method.
type bufWriter interface {
io.Writer
Flush() error
}
// rwConn implements net.Conn but overrides Read and Write so that reads and
// writes are forwarded to the provided io.Reader and bufWriter.
type rwConn struct {
net.Conn
io.Reader
BufWriter bufWriter
}
// Read forwards reads to the underlying Reader.
func (c *rwConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
// Write forwards writes to the underlying bufWriter and immediately flushes.
func (c *rwConn) Write(p []byte) (int, error) {
n, err := c.BufWriter.Write(p)
if err := c.BufWriter.Flush(); err != nil {
return 0, err
}
return n, err
}
// settingsAckSwallowWriter is a writer that normally forwards bytes to its
// underlying Writer, but swallows the first SettingsAck frame that it sees.
type settingsAckSwallowWriter struct {
Writer *bufio.Writer
buf []byte
didSwallow bool
}
// newSettingsAckSwallowWriter returns a new settingsAckSwallowWriter.
func newSettingsAckSwallowWriter(w *bufio.Writer) *settingsAckSwallowWriter {
return &settingsAckSwallowWriter{
Writer: w,
buf: make([]byte, 0),
didSwallow: false,
}
}
// Write implements io.Writer interface. Normally forwards bytes to w.Writer,
// except for the first Settings ACK frame that it sees.
func (w *settingsAckSwallowWriter) Write(p []byte) (int, error) {
if !w.didSwallow {
w.buf = append(w.buf, p...)
// Process all the frames we have collected into w.buf
for {
// Keep buffering until we have a full frame header, which is 9 bytes.
if len(w.buf) < 9 {
break
}
// Check if we have collected a whole frame.
fh, err := http2.ReadFrameHeader(bytes.NewBuffer(w.buf))
if err != nil {
// Corrupted frame, fail current Write
return 0, err
}
fSize := fh.Length + 9
if uint32(len(w.buf)) < fSize {
// We have not collected a whole frame yet. Stop processing buf and withhold
// forwarding bytes to w.Writer until we have the full frame.
break
}
// We have now collected a whole frame.
if fh.Type == http2.FrameSettings && fh.Flags.Has(http2.FlagSettingsAck) {
// If Settings ACK frame, do not forward to underlying writer, remove
// bytes from w.buf, and record that we have swallowed Settings Ack
// frame.
w.didSwallow = true
w.buf = w.buf[fSize:]
continue
}
// Not settings ack frame. Forward bytes to w.Writer.
if _, err := w.Writer.Write(w.buf[:fSize]); err != nil {
// Couldn't forward bytes. Fail current Write.
return 0, err
}
w.buf = w.buf[fSize:]
}
return len(p), nil
}
return w.Writer.Write(p)
}
// Flush calls w.Writer.Flush.
func (w *settingsAckSwallowWriter) Flush() error {
return w.Writer.Flush()
}
// isH2CUpgrade returns true if the headers properly request an upgrade to h2c
// as specified by RFC 7540 Section 3.2.
func isH2CUpgrade(h http.Header) bool {
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
}
// getH2Settings returns the []http2.Setting that are encoded in the
// HTTP2-Settings header.
func getH2Settings(h http.Header) ([]http2.Setting, error) {
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
if !ok {
return nil, errors.New("missing HTTP2-Settings header")
}
if len(vals) != 1 {
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
}
settings, err := decodeSettings(vals[0])
if err != nil {
return nil, fmt.Errorf("Invalid HTTP2-Settings: %q", vals[0])
}
return settings, nil
}
// decodeSettings decodes the base64url header value of the HTTP2-Settings
// header. RFC 7540 Section 3.2.1.
func decodeSettings(headerVal string) ([]http2.Setting, error) {
b, err := base64.RawURLEncoding.DecodeString(headerVal)
if err != nil {
return nil, err
}
if len(b)%6 != 0 {
return nil, errors.New("HTTP2-Settings value length is not a multiple of 6 bytes")
}
settings := make([]http2.Setting, 0)
for i := 0; i < len(b)/6; i++ {
settings = append(settings, http2.Setting{
ID: http2.SettingID(binary.BigEndian.Uint16(b[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(b[i*6+2 : i*6+6]),
})
}
return settings, nil
}
// getH2HeaderBytes returns the headers in r as a []byte encoded by HPACK.
func getH2HeaderBytes(r *http.Request, maxHeaderTableSize uint32) ([]byte, error) {
headerBytes := bytes.NewBuffer(nil)
hpackEnc := hpack.NewEncoder(headerBytes)
hpackEnc.SetMaxDynamicTableSize(maxHeaderTableSize)
// Section 8.1.2.3
err := hpackEnc.WriteField(hpack.HeaderField{
Name: ":method",
Value: r.Method,
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":scheme",
Value: "http",
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":authority",
Value: r.Host,
})
if err != nil {
return nil, err
}
path := r.URL.Path
if r.URL.RawQuery != "" {
path = strings.Join([]string{path, r.URL.RawQuery}, "?")
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":path",
Value: path,
})
if err != nil {
return nil, err
}
// TODO Implement Section 8.3
for header, values := range r.Header {
// Skip non h2 headers
if isNonH2Header(header) {
continue
}
for _, v := range values {
err := hpackEnc.WriteField(hpack.HeaderField{
Name: strings.ToLower(header),
Value: v,
})
if err != nil {
return nil, err
}
}
}
return headerBytes.Bytes(), nil
}
// nonH2Headers lists the connection-specific headers from RFC 7540 Section
// 8.1.2.2 that are not supposed to be transferred to HTTP/2. The Http2-Settings
// header is also skipped since it has already been used to create the HTTP/2
// SETTINGS frame.
var nonH2Headers = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
"Http2-Settings",
}
// isNonH2Header returns true if header should not be transferred to HTTP/2.
func isNonH2Header(header string) bool {
for _, nonH2h := range nonH2Headers {
if header == nonH2h {
return true
}
}
return false
}
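For the prior-knowledge path handled above, nothing from this package is needed on the client side: golang.org/x/net/http2's Transport can speak h2c when AllowHTTP is set and DialTLS is overridden to dial plain TCP. A minimal client sketch follows; the server address localhost:8080 matches the ExampleNewHandler in the test file below and is otherwise an assumption.

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{
		Transport: &http2.Transport{
			// AllowHTTP permits "http://" URLs; the connection still speaks HTTP/2.
			AllowHTTP: true,
			// Dial plain TCP instead of TLS; this is what makes the request h2c.
			DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) {
				return net.Dial(network, addr)
			},
		},
	}
	resp, err := client.Get("http://localhost:8080/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s %s\n%s", resp.Proto, resp.Status, body)
}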


@@ -1,58 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package h2c
import (
"bufio"
"bytes"
"fmt"
"log"
"net/http"
"testing"
"golang.org/x/net/http2"
)
func TestSettingsAckSwallowWriter(t *testing.T) {
var buf bytes.Buffer
swallower := newSettingsAckSwallowWriter(bufio.NewWriter(&buf))
fw := http2.NewFramer(swallower, nil)
fw.WriteSettings(http2.Setting{http2.SettingMaxFrameSize, 2})
fw.WriteSettingsAck()
fw.WriteData(1, true, []byte{})
swallower.Flush()
fr := http2.NewFramer(nil, bufio.NewReader(&buf))
f, err := fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
if f.Header().Type != http2.FrameSettings {
t.Fatalf("Expected first frame to be SETTINGS. Got: %v", f.Header().Type)
}
f, err = fr.ReadFrame()
if err != nil {
t.Fatal(err)
}
if f.Header().Type != http2.FrameData {
t.Fatalf("Expected first frame to be DATA. Got: %v", f.Header().Type)
}
}
func ExampleNewHandler() {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Hello world")
})
h2s := &http2.Server{
// ...
}
h1s := &http.Server{
Addr: ":8080",
Handler: NewHandler(handler, h2s),
}
log.Fatal(h1s.ListenAndServe())
}


@@ -1,6 +0,0 @@
h2demo
h2demo.linux
client-id.dat
client-secret.dat
token.dat
ca-certificates.crt


@@ -1,11 +0,0 @@
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
FROM scratch
LABEL maintainer "golang-dev@googlegroups.com"
COPY ca-certificates.crt /etc/ssl/certs/
COPY h2demo /
ENTRYPOINT ["/h2demo", "-prod"]


@@ -1,134 +0,0 @@
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
FROM golang:1.9
LABEL maintainer "golang-dev@googlegroups.com"
ENV CGO_ENABLED=0
# BEGIN deps (run `make update-deps` to update)
# Repo cloud.google.com/go at 1d0c2da (2018-01-30)
ENV REV=1d0c2da40456a9b47f5376165f275424acc15c09
RUN go get -d cloud.google.com/go/compute/metadata `#and 6 other pkgs` &&\
(cd /go/src/cloud.google.com/go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo github.com/golang/protobuf at 9255415 (2018-01-25)
ENV REV=925541529c1fa6821df4e44ce2723319eb2be768
RUN go get -d github.com/golang/protobuf/proto `#and 6 other pkgs` &&\
(cd /go/src/github.com/golang/protobuf && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo github.com/googleapis/gax-go at 317e000 (2017-09-15)
ENV REV=317e0006254c44a0ac427cc52a0e083ff0b9622f
RUN go get -d github.com/googleapis/gax-go &&\
(cd /go/src/github.com/googleapis/gax-go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo go4.org at 034d17a (2017-05-25)
ENV REV=034d17a462f7b2dcd1a4a73553ec5357ff6e6c6e
RUN go get -d go4.org/syncutil/singleflight &&\
(cd /go/src/go4.org && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/build at 8aa9ee0 (2018-02-01)
ENV REV=8aa9ee0e557fd49c14113e5ba106e13a5b455460
RUN go get -d golang.org/x/build/autocertcache &&\
(cd /go/src/golang.org/x/build && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/crypto at 1875d0a (2018-01-27)
ENV REV=1875d0a70c90e57f11972aefd42276df65e895b9
RUN go get -d golang.org/x/crypto/acme `#and 2 other pkgs` &&\
(cd /go/src/golang.org/x/crypto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/oauth2 at 30785a2 (2018-01-04)
ENV REV=30785a2c434e431ef7c507b54617d6a951d5f2b4
RUN go get -d golang.org/x/oauth2 `#and 5 other pkgs` &&\
(cd /go/src/golang.org/x/oauth2 && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/text at e19ae14 (2017-12-27)
ENV REV=e19ae1496984b1c655b8044a65c0300a3c878dd3
RUN go get -d golang.org/x/text/secure/bidirule `#and 4 other pkgs` &&\
(cd /go/src/golang.org/x/text && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/api at 7d0e2d3 (2018-01-30)
ENV REV=7d0e2d350555821bef5a5b8aecf0d12cc1def633
RUN go get -d google.golang.org/api/gensupport `#and 9 other pkgs` &&\
(cd /go/src/google.golang.org/api && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/genproto at 4eb30f4 (2018-01-25)
ENV REV=4eb30f4778eed4c258ba66527a0d4f9ec8a36c45
RUN go get -d google.golang.org/genproto/googleapis/api/annotations `#and 3 other pkgs` &&\
(cd /go/src/google.golang.org/genproto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/grpc at 0bd008f (2018-01-25)
ENV REV=0bd008f5fadb62d228f12b18d016709e8139a7af
RUN go get -d google.golang.org/grpc `#and 23 other pkgs` &&\
(cd /go/src/google.golang.org/grpc && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Optimization to speed up iterative development, not necessary for correctness:
RUN go install cloud.google.com/go/compute/metadata \
cloud.google.com/go/iam \
cloud.google.com/go/internal \
cloud.google.com/go/internal/optional \
cloud.google.com/go/internal/version \
cloud.google.com/go/storage \
github.com/golang/protobuf/proto \
github.com/golang/protobuf/protoc-gen-go/descriptor \
github.com/golang/protobuf/ptypes \
github.com/golang/protobuf/ptypes/any \
github.com/golang/protobuf/ptypes/duration \
github.com/golang/protobuf/ptypes/timestamp \
github.com/googleapis/gax-go \
go4.org/syncutil/singleflight \
golang.org/x/build/autocertcache \
golang.org/x/crypto/acme \
golang.org/x/crypto/acme/autocert \
golang.org/x/oauth2 \
golang.org/x/oauth2/google \
golang.org/x/oauth2/internal \
golang.org/x/oauth2/jws \
golang.org/x/oauth2/jwt \
golang.org/x/text/secure/bidirule \
golang.org/x/text/transform \
golang.org/x/text/unicode/bidi \
golang.org/x/text/unicode/norm \
google.golang.org/api/gensupport \
google.golang.org/api/googleapi \
google.golang.org/api/googleapi/internal/uritemplates \
google.golang.org/api/googleapi/transport \
google.golang.org/api/internal \
google.golang.org/api/iterator \
google.golang.org/api/option \
google.golang.org/api/storage/v1 \
google.golang.org/api/transport/http \
google.golang.org/genproto/googleapis/api/annotations \
google.golang.org/genproto/googleapis/iam/v1 \
google.golang.org/genproto/googleapis/rpc/status \
google.golang.org/grpc \
google.golang.org/grpc/balancer \
google.golang.org/grpc/balancer/base \
google.golang.org/grpc/balancer/roundrobin \
google.golang.org/grpc/codes \
google.golang.org/grpc/connectivity \
google.golang.org/grpc/credentials \
google.golang.org/grpc/encoding \
google.golang.org/grpc/encoding/proto \
google.golang.org/grpc/grpclb/grpc_lb_v1/messages \
google.golang.org/grpc/grpclog \
google.golang.org/grpc/internal \
google.golang.org/grpc/keepalive \
google.golang.org/grpc/metadata \
google.golang.org/grpc/naming \
google.golang.org/grpc/peer \
google.golang.org/grpc/resolver \
google.golang.org/grpc/resolver/dns \
google.golang.org/grpc/resolver/passthrough \
google.golang.org/grpc/stats \
google.golang.org/grpc/status \
google.golang.org/grpc/tap \
google.golang.org/grpc/transport
# END deps
COPY . /go/src/golang.org/x/net/
RUN go install -tags "h2demo netgo" -ldflags "-linkmode=external -extldflags '-static -pthread'" golang.org/x/net/http2/h2demo


@@ -1,55 +0,0 @@
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
MUTABLE_VERSION ?= latest
VERSION ?= $(shell git rev-parse --short HEAD)
IMAGE_STAGING := gcr.io/go-dashboard-dev/h2demo
IMAGE_PROD := gcr.io/symbolic-datum-552/h2demo
DOCKER_IMAGE_build0=build0/h2demo:latest
DOCKER_CTR_build0=h2demo-build0
build0: *.go Dockerfile.0
docker build --force-rm -f Dockerfile.0 --tag=$(DOCKER_IMAGE_build0) ../..
h2demo: build0
docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0)
docker cp $(DOCKER_CTR_build0):/go/bin/$@ $@
docker rm $(DOCKER_CTR_build0)
ca-certificates.crt:
docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0)
docker cp $(DOCKER_CTR_build0):/etc/ssl/certs/$@ $@
docker rm $(DOCKER_CTR_build0)
update-deps:
go install golang.org/x/build/cmd/gitlock
gitlock --update=Dockerfile.0 --ignore=golang.org/x/net --tags=h2demo golang.org/x/net/http2/h2demo
docker-prod: Dockerfile h2demo ca-certificates.crt
docker build --force-rm --tag=$(IMAGE_PROD):$(VERSION) .
docker tag $(IMAGE_PROD):$(VERSION) $(IMAGE_PROD):$(MUTABLE_VERSION)
docker-staging: Dockerfile h2demo ca-certificates.crt
docker build --force-rm --tag=$(IMAGE_STAGING):$(VERSION) .
docker tag $(IMAGE_STAGING):$(VERSION) $(IMAGE_STAGING):$(MUTABLE_VERSION)
push-prod: docker-prod
gcloud docker -- push $(IMAGE_PROD):$(MUTABLE_VERSION)
gcloud docker -- push $(IMAGE_PROD):$(VERSION)
push-staging: docker-staging
gcloud docker -- push $(IMAGE_STAGING):$(MUTABLE_VERSION)
gcloud docker -- push $(IMAGE_STAGING):$(VERSION)
deploy-prod: push-prod
kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_PROD):$(VERSION)
deploy-staging: push-staging
kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_STAGING):$(VERSION)
.PHONY: clean
clean:
$(RM) h2demo
$(RM) ca-certificates.crt
FORCE:


@@ -1,16 +0,0 @@
Client:
-- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
-- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
Make CA:
$ openssl genrsa -out rootCA.key 2048
$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
... install that to Firefox
Make cert:
$ openssl genrsa -out server.key 2048
$ openssl req -new -key server.key -out server.csr
$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
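A minimal sketch of serving HTTP/2 with the server.crt and server.key generated above; http2.ConfigureServer is the same call h2demo.go makes, and the listen address :4430 is an assumption matching the demo's default -https_addr.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":4430",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "Hello over %s\n", r.Proto)
		}),
	}
	// Explicitly enable HTTP/2 (Go 1.6+ also does this automatically for
	// ListenAndServeTLS, so this call mostly serves as documentation).
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	// server.crt and server.key are the files produced by the openssl
	// commands above.
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}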


@@ -1,28 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: h2demo-deployment
spec:
replicas: 1
template:
metadata:
labels:
app: h2demo
annotations:
container.seccomp.security.alpha.kubernetes.io/h2demo: docker/default
container.apparmor.security.beta.kubernetes.io/h2demo: runtime/default
spec:
containers:
- name: h2demo
image: gcr.io/symbolic-datum-552/h2demo:latest
imagePullPolicy: Always
command: ["/h2demo", "-prod"]
ports:
- containerPort: 80
- containerPort: 443
resources:
requests:
cpu: "1"
memory: "1Gi"
limits:
memory: "2Gi"


@@ -1,546 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build h2demo
package main
import (
"bytes"
"context"
"crypto/tls"
"flag"
"fmt"
"hash/crc32"
"image"
"image/jpeg"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"path"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/storage"
"go4.org/syncutil/singleflight"
"golang.org/x/build/autocertcache"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/net/http2"
)
var (
prod = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")
httpsAddr = flag.String("https_addr", "localhost:4430", "TLS address to listen on ('host:port' or ':port'). Required.")
httpAddr = flag.String("http_addr", "", "Plain HTTP address to listen on ('host:port', or ':port'). Empty means no HTTP.")
hostHTTP = flag.String("http_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -http_addr.")
hostHTTPS = flag.String("https_host", "", "Optional host or host:port to use for https:// links to this service. By default, this is implied from -https_addr.")
)
func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, `<html>
<body>
<h1>Go + HTTP/2</h1>
<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
<ul>
<li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
<li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
</ul>
<p>See code & instructions for connecting at <a href="https://github.com/golang/net/tree/master/http2">https://github.com/golang/net/tree/master/http2</a>.</p>
</body></html>`)
}
func home(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
io.WriteString(w, `<html>
<body>
<h1>Go + HTTP/2</h1>
<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>
<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
<p>
The code is at <a href="https://golang.org/x/net/http2">golang.org/x/net/http2</a> and
is used transparently by the Go standard library from Go 1.6 and later.
</p>
<p>Contact info: <i>bradfitz@golang.org</i>, or <a
href="https://golang.org/s/http2bug">file a bug</a>.</p>
<h2>Handlers for testing</h2>
<ul>
<li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
<li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
<li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
<li>GET <a href="/serverpush">/serverpush</a> to see a page with server push</li>
<li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
<li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
<li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
<li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
<li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
<li>PUT something to <a href="/ECHO">/ECHO</a> and it will be streamed back to you capitalized</li>
</ul>
</body></html>`)
}
func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
fmt.Fprintf(w, "Method: %s\n", r.Method)
fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
fmt.Fprintf(w, "Host: %s\n", r.Host)
fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
fmt.Fprintf(w, "URL: %#v\n", r.URL)
fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
fmt.Fprintf(w, "\nHeaders:\n")
r.Header.Write(w)
}
func crcHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != "PUT" {
http.Error(w, "PUT required.", 400)
return
}
crc := crc32.NewIEEE()
n, err := io.Copy(crc, r.Body)
if err == nil {
w.Header().Set("Content-Type", "text/plain")
fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
}
}
type capitalizeReader struct {
r io.Reader
}
func (cr capitalizeReader) Read(p []byte) (n int, err error) {
n, err = cr.r.Read(p)
for i, b := range p[:n] {
if b >= 'a' && b <= 'z' {
p[i] = b - ('a' - 'A')
}
}
return
}
type flushWriter struct {
w io.Writer
}
func (fw flushWriter) Write(p []byte) (n int, err error) {
n, err = fw.w.Write(p)
if f, ok := fw.w.(http.Flusher); ok {
f.Flush()
}
return
}
func echoCapitalHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != "PUT" {
http.Error(w, "PUT required.", 400)
return
}
if f, ok := w.(http.Flusher); ok {
f.Flush()
}
io.Copy(flushWriter{w}, capitalizeReader{r.Body})
}
var (
fsGrp singleflight.Group
fsMu sync.Mutex // guards fsCache
fsCache = map[string]http.Handler{}
)
// fileServer returns a file-serving handler that proxies URL.
// It lazily fetches URL on the first access and caches its contents forever.
func fileServer(url string, latency time.Duration) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if latency > 0 {
time.Sleep(latency)
}
hi, err := fsGrp.Do(url, func() (interface{}, error) {
fsMu.Lock()
if h, ok := fsCache[url]; ok {
fsMu.Unlock()
return h, nil
}
fsMu.Unlock()
res, err := http.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
modTime := time.Now()
var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
})
fsMu.Lock()
fsCache[url] = h
fsMu.Unlock()
return h, nil
})
if err != nil {
http.Error(w, err.Error(), 500)
return
}
hi.(http.Handler).ServeHTTP(w, r)
})
}
func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
clientGone := w.(http.CloseNotifier).CloseNotify()
w.Header().Set("Content-Type", "text/plain")
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))
for {
fmt.Fprintf(w, "%v\n", time.Now())
w.(http.Flusher).Flush()
select {
case <-ticker.C:
case <-clientGone:
log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
return
}
}
}
func registerHandlers() {
tiles := newGopherTilesHandler()
push := newPushHandler()
mux2 := http.NewServeMux()
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
switch {
case r.URL.Path == "/gophertiles":
tiles.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x
return
case strings.HasPrefix(r.URL.Path, "/serverpush"):
push.ServeHTTP(w, r) // allow HTTP/2 + HTTP/1.x
return
case r.TLS == nil: // do not allow HTTP/1.x for anything else
http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound)
return
}
if r.ProtoMajor == 1 {
if r.URL.Path == "/reqinfo" {
reqInfoHandler(w, r)
return
}
homeOldHTTP(w, r)
return
}
mux2.ServeHTTP(w, r)
})
mux2.HandleFunc("/", home)
mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png", 0))
mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz", 0))
mux2.HandleFunc("/reqinfo", reqInfoHandler)
mux2.HandleFunc("/crc32", crcHandler)
mux2.HandleFunc("/ECHO", echoCapitalHandler)
mux2.HandleFunc("/clockstream", clockStreamHandler)
mux2.Handle("/gophertiles", tiles)
mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/", http.StatusFound)
})
stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
buf := make([]byte, 2<<20)
w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
})
}
var pushResources = map[string]http.Handler{
"/serverpush/static/jquery.min.js": fileServer("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js", 100*time.Millisecond),
"/serverpush/static/godocs.js": fileServer("https://golang.org/lib/godoc/godocs.js", 100*time.Millisecond),
"/serverpush/static/playground.js": fileServer("https://golang.org/lib/godoc/playground.js", 100*time.Millisecond),
"/serverpush/static/style.css": fileServer("https://golang.org/lib/godoc/style.css", 100*time.Millisecond),
}
func newPushHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for path, handler := range pushResources {
if r.URL.Path == path {
handler.ServeHTTP(w, r)
return
}
}
cacheBust := time.Now().UnixNano()
if pusher, ok := w.(http.Pusher); ok {
for path := range pushResources {
url := fmt.Sprintf("%s?%d", path, cacheBust)
if err := pusher.Push(url, nil); err != nil {
log.Printf("Failed to push %v: %v", path, err)
}
}
}
time.Sleep(100 * time.Millisecond) // fake network latency + parsing time
if err := pushTmpl.Execute(w, struct {
CacheBust int64
HTTPSHost string
HTTPHost string
}{
CacheBust: cacheBust,
HTTPSHost: httpsHost(),
HTTPHost: httpHost(),
}); err != nil {
log.Printf("Executing server push template: %v", err)
}
})
}
func newGopherTilesHandler() http.Handler {
const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
res, err := http.Get(gopherURL)
if err != nil {
log.Fatal(err)
}
if res.StatusCode != 200 {
log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
}
slurp, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
log.Fatal(err)
}
im, err := jpeg.Decode(bytes.NewReader(slurp))
if err != nil {
if len(slurp) > 1024 {
slurp = slurp[:1024]
}
log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
}
type subImager interface {
SubImage(image.Rectangle) image.Image
}
const tileSize = 32
xt := im.Bounds().Max.X / tileSize
yt := im.Bounds().Max.Y / tileSize
var tile [][][]byte // y -> x -> jpeg bytes
for yi := 0; yi < yt; yi++ {
var row [][]byte
for xi := 0; xi < xt; xi++ {
si := im.(subImager).SubImage(image.Rectangle{
Min: image.Point{xi * tileSize, yi * tileSize},
Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
})
buf := new(bytes.Buffer)
if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
log.Fatal(err)
}
row = append(row, buf.Bytes())
}
tile = append(tile, row)
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ms, _ := strconv.Atoi(r.FormValue("latency"))
const nanosPerMilli = 1e6
if r.FormValue("x") != "" {
x, _ := strconv.Atoi(r.FormValue("x"))
y, _ := strconv.Atoi(r.FormValue("y"))
if ms <= 1000 {
time.Sleep(time.Duration(ms) * nanosPerMilli)
}
if x >= 0 && x < xt && y >= 0 && y < yt {
http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
return
}
}
io.WriteString(w, "<html><body onload='showtimes()'>")
fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
for _, ms := range []int{0, 30, 200, 1000} {
d := time.Duration(ms) * nanosPerMilli
fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
httpsHost(), ms, d,
httpHost(), ms, d,
)
}
io.WriteString(w, "<p>\n")
cacheBust := time.Now().UnixNano()
for y := 0; y < yt; y++ {
for x := 0; x < xt; x++ {
fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
tileSize, tileSize, x, y, cacheBust, ms)
}
io.WriteString(w, "<br/>\n")
}
io.WriteString(w, `<p><div id='loadtimes'></div></p>
<script>
function showtimes() {
var times = 'Times from connection start:<br>'
times += 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.connectStart) + 'ms<br>'
times += 'DOM complete (images loaded): ' + (window.performance.timing.domComplete - window.performance.timing.connectStart) + 'ms<br>'
document.getElementById('loadtimes').innerHTML = times
}
</script>
<hr><a href='/'>&lt;&lt; Back to Go HTTP/2 demo server</a></body></html>`)
})
}
func httpsHost() string {
if *hostHTTPS != "" {
return *hostHTTPS
}
if v := *httpsAddr; strings.HasPrefix(v, ":") {
return "localhost" + v
} else {
return v
}
}
func httpHost() string {
if *hostHTTP != "" {
return *hostHTTP
}
if v := *httpAddr; strings.HasPrefix(v, ":") {
return "localhost" + v
} else {
return v
}
}
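// serveProdTLS serves HTTPS on :443, obtaining certificates from the autocert manager and
// configuring HTTP/2 to use the priority write scheduler.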
func serveProdTLS(autocertManager *autocert.Manager) error {
srv := &http.Server{
TLSConfig: &tls.Config{
GetCertificate: autocertManager.GetCertificate,
},
}
http2.ConfigureServer(srv, &http2.Server{
NewWriteScheduler: func() http2.WriteScheduler {
return http2.NewPriorityWriteScheduler(nil)
},
})
ln, err := net.Listen("tcp", ":443")
if err != nil {
return err
}
return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
}
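// tcpKeepAliveListener enables TCP keep-alives (with a 3 minute period) on accepted connections.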
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
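// serveProd runs the production setup: port 80 serves the autocert HTTP challenge handler
// wrapped around the default mux, and port 443 serves TLS via serveProdTLS.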
func serveProd() error {
log.Printf("running in production mode")
storageClient, err := storage.NewClient(context.Background())
if err != nil {
log.Fatalf("storage.NewClient: %v", err)
}
autocertManager := &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("http2.golang.org"),
Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"),
}
errc := make(chan error, 2)
go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }()
go func() { errc <- serveProdTLS(autocertManager) }()
return <-errc
}
const idleTimeout = 5 * time.Minute
const activeTimeout = 10 * time.Minute
// TODO: put this into the standard library and actually send
// PING frames and GOAWAY, etc: golang.org/issue/14204
func idleTimeoutHook() func(net.Conn, http.ConnState) {
var mu sync.Mutex
m := map[net.Conn]*time.Timer{}
return func(c net.Conn, cs http.ConnState) {
mu.Lock()
defer mu.Unlock()
if t, ok := m[c]; ok {
delete(m, c)
t.Stop()
}
var d time.Duration
switch cs {
case http.StateNew, http.StateIdle:
d = idleTimeout
case http.StateActive:
d = activeTimeout
default:
return
}
m[c] = time.AfterFunc(d, func() {
log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d)
go c.Close()
})
}
}
func main() {
var srv http.Server
flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
flag.Parse()
srv.Addr = *httpsAddr
srv.ConnState = idleTimeoutHook()
registerHandlers()
if *prod {
*hostHTTP = "http2.golang.org"
*hostHTTPS = "http2.golang.org"
log.Fatal(serveProd())
}
url := "https://" + httpsHost() + "/"
log.Printf("Listening on " + url)
http2.ConfigureServer(&srv, &http2.Server{})
if *httpAddr != "" {
go func() {
log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)")
log.Fatal(http.ListenAndServe(*httpAddr, nil))
}()
}
go func() {
log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}()
select {}
}

View File

@ -1,302 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
)
var (
proj = flag.String("project", "symbolic-datum-552", "name of Project")
zone = flag.String("zone", "us-central1-a", "GCE zone")
mach = flag.String("machinetype", "n1-standard-1", "Machine type")
instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
)
func readFile(v string) string {
slurp, err := ioutil.ReadFile(v)
if err != nil {
log.Fatalf("Error reading %s: %v", v, err)
}
return strings.TrimSpace(string(slurp))
}
var config = &oauth2.Config{
// The client-id and secret should be for an "Installed Application" when using
// the CLI. Later we'll use a web application with a callback.
ClientID: readFile("client-id.dat"),
ClientSecret: readFile("client-secret.dat"),
Endpoint: google.Endpoint,
Scopes: []string{
compute.DevstorageFullControlScope,
compute.ComputeScope,
"https://www.googleapis.com/auth/sqlservice",
"https://www.googleapis.com/auth/sqlservice.admin",
},
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
}
const baseConfig = `#cloud-config
coreos:
units:
- name: h2demo.service
command: start
content: |
[Unit]
Description=HTTP2 Demo
[Service]
ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
ExecStart=/opt/bin/h2demo --prod
RestartSec=5s
Restart=always
Type=simple
[Install]
WantedBy=multi-user.target
`
func main() {
flag.Parse()
if *proj == "" {
log.Fatalf("Missing --project flag")
}
prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
const tokenFileName = "token.dat"
tokenFile := tokenCacheFile(tokenFileName)
tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
token, err := tokenSource.Token()
if err != nil {
if *writeObject != "" {
log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
}
log.Printf("Error getting token from %s: %v", tokenFileName, err)
log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
fmt.Print("\nEnter auth code: ")
sc := bufio.NewScanner(os.Stdin)
sc.Scan()
authCode := strings.TrimSpace(sc.Text())
token, err = config.Exchange(oauth2.NoContext, authCode)
if err != nil {
log.Fatalf("Error exchanging auth code for a token: %v", err)
}
if err := tokenFile.WriteToken(token); err != nil {
log.Fatalf("Error writing to %s: %v", tokenFileName, err)
}
tokenSource = oauth2.ReuseTokenSource(token, nil)
}
oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
if *writeObject != "" {
writeCloudStorageObject(oauthClient)
return
}
computeService, err := compute.New(oauthClient)
if err != nil {
log.Fatalf("compute.New: %v", err)
}
natIP := *staticIP
if natIP == "" {
// Try to find it by name.
aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
if err != nil {
log.Fatal(err)
}
// http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
IPLoop:
for _, asl := range aggAddrList.Items {
for _, addr := range asl.Addresses {
if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
natIP = addr.Address
break IPLoop
}
}
}
}
cloudConfig := baseConfig
if *sshPub != "" {
key := strings.TrimSpace(readFile(*sshPub))
cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
}
if os.Getenv("USER") == "bradfitz" {
cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
}
const maxCloudConfig = 32 << 10 // per compute API docs
if len(cloudConfig) > maxCloudConfig {
log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
}
instance := &compute.Instance{
Name: *instName,
Description: "Go Builder",
MachineType: machType,
Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
Tags: &compute.Tags{
Items: []string{"http-server", "https-server"},
},
Metadata: &compute.Metadata{
Items: []*compute.MetadataItems{
{
Key: "user-data",
Value: &cloudConfig,
},
},
},
NetworkInterfaces: []*compute.NetworkInterface{
{
AccessConfigs: []*compute.AccessConfig{
{
Type: "ONE_TO_ONE_NAT",
Name: "External NAT",
NatIP: natIP,
},
},
Network: prefix + "/global/networks/default",
},
},
ServiceAccounts: []*compute.ServiceAccount{
{
Email: "default",
Scopes: []string{
compute.DevstorageFullControlScope,
compute.ComputeScope,
},
},
},
}
log.Printf("Creating instance...")
op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
if err != nil {
log.Fatalf("Failed to create instance: %v", err)
}
opName := op.Name
log.Printf("Created. Waiting on operation %v", opName)
OpLoop:
for {
time.Sleep(2 * time.Second)
op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
if err != nil {
log.Fatalf("Failed to get op %s: %v", opName, err)
}
switch op.Status {
case "PENDING", "RUNNING":
log.Printf("Waiting on operation %v", opName)
continue
case "DONE":
if op.Error != nil {
for _, operr := range op.Error.Errors {
log.Printf("Error: %+v", operr)
}
log.Fatalf("Failed to start.")
}
log.Printf("Success. %+v", op)
break OpLoop
default:
log.Fatalf("Unknown status %q: %+v", op.Status, op)
}
}
inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
if err != nil {
log.Fatalf("Error getting instance after creation: %v", err)
}
ij, _ := json.MarshalIndent(inst, "", " ")
log.Printf("Instance: %s", ij)
}
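// instanceDisk returns the VM's boot disk definition: a 50 GB auto-deleting persistent disk
// created from the pinned CoreOS image.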
func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
diskName := *instName + "-disk"
return &compute.AttachedDisk{
AutoDelete: true,
Boot: true,
Type: "PERSISTENT",
InitializeParams: &compute.AttachedDiskInitializeParams{
DiskName: diskName,
SourceImage: imageURL,
DiskSizeGb: 50,
},
}
}
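// writeCloudStorageObject uploads stdin to the Google Cloud Storage bucket/object named by
// --write_object, optionally marking it public-read.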
func writeCloudStorageObject(httpClient *http.Client) {
content := os.Stdin
const maxSlurp = 1 << 20
var buf bytes.Buffer
n, err := io.CopyN(&buf, content, maxSlurp)
if err != nil && err != io.EOF {
log.Fatalf("Error reading from stdin: %v, %v", n, err)
}
contentType := http.DetectContentType(buf.Bytes())
req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
if err != nil {
log.Fatal(err)
}
req.Header.Set("x-goog-api-version", "2")
if *publicObject {
req.Header.Set("x-goog-acl", "public-read")
}
req.Header.Set("Content-Type", contentType)
res, err := httpClient.Do(req)
if err != nil {
log.Fatal(err)
}
if res.StatusCode != 200 {
res.Write(os.Stderr)
log.Fatalf("Failed.")
}
log.Printf("Success.")
os.Exit(0)
}
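// tokenCacheFile persists an oauth2.Token as JSON in the named file and implements
// oauth2.TokenSource.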
type tokenCacheFile string
func (f tokenCacheFile) Token() (*oauth2.Token, error) {
slurp, err := ioutil.ReadFile(string(f))
if err != nil {
return nil, err
}
t := new(oauth2.Token)
if err := json.Unmarshal(slurp, t); err != nil {
return nil, err
}
return t, nil
}
func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
jt, err := json.Marshal(t)
if err != nil {
return err
}
return ioutil.WriteFile(string(f), jt, 0600)
}

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
-----END RSA PRIVATE KEY-----

View File

@ -1,26 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
kPe6XoSbiLm/kxk32T0=
-----END CERTIFICATE-----

View File

@ -1 +0,0 @@
E2CE26BF3285059C

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
90p6/CbU71bGbfpM2PHot2fm
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
-----END RSA PRIVATE KEY-----

View File

@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: h2demo
spec:
externalTrafficPolicy: Local
ports:
- port: 80
targetPort: 80
name: http
- port: 443
targetPort: 443
name: https
selector:
app: h2demo
type: LoadBalancer
loadBalancerIP: 130.211.116.44

File diff suppressed because it is too large

View File

@ -1,97 +0,0 @@
# h2i
**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
days of telnetting to your HTTP/1.n servers? We're bringing you
back.
Features:
- send raw HTTP/2 frames
- PING
- SETTINGS
- HEADERS
- etc
- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
- tab completion of commands, options
Not yet features, but soon:
- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
- request bodies (DATA frames)
- send invalid frames for testing server implementations (supported by underlying Framer)
Later:
- act like a server
## Installation
```
$ go get golang.org/x/net/http2/h2i
$ h2i <host>
```
## Demo
```
$ h2i
Usage: h2i <hostname>
-insecure
Whether to skip TLS cert validation
-nextproto string
Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
$ h2i google.com
Connecting to google.com:443 ...
Connected to 74.125.224.41:443
Negotiated protocol "h2-14"
[FrameHeader SETTINGS len=18]
[MAX_CONCURRENT_STREAMS = 100]
[INITIAL_WINDOW_SIZE = 1048576]
[MAX_FRAME_SIZE = 16384]
[FrameHeader WINDOW_UPDATE len=4]
Window-Increment = 983041
h2i> PING h2iSayHI
[FrameHeader PING flags=ACK len=8]
Data = "h2iSayHI"
h2i> headers
(as HTTP/1.1)> GET / HTTP/1.1
(as HTTP/1.1)> Host: ip.appspot.com
(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
(as HTTP/1.1)>
Opening Stream-ID 1:
:authority = ip.appspot.com
:method = GET
:path = /
:scheme = https
user-agent = h2i/brad-n-blake
[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
:status = "200"
alternate-protocol = "443:quic,p=1"
content-length = "15"
content-type = "text/html"
date = "Fri, 01 May 2015 23:06:56 GMT"
server = "Google Frontend"
[FrameHeader DATA flags=END_STREAM stream=1 len=15]
"173.164.155.78\n"
[FrameHeader PING len=8]
Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
h2i> ping
[FrameHeader PING flags=ACK len=8]
Data = "h2i_ping"
h2i> ping
[FrameHeader PING flags=ACK len=8]
Data = "h2i_ping"
h2i> ping
[FrameHeader GOAWAY len=22]
Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
ReadFrame: EOF
```
## Status
A quick few-hour hack, with much still to do. Feel free to file issues for
bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
and I aren't yet accepting pull requests until things settle down.

View File

@ -1,522 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
/*
The h2i command is an interactive HTTP/2 console.
Usage:
$ h2i [flags] <hostname>
Interactive commands in the console: (all parts case-insensitive)
ping [data]
settings ack
settings FOO=n BAR=z
headers (open a new stream by typing HTTP/1.1)
*/
package main
import (
"bufio"
"bytes"
"crypto/tls"
"errors"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
)
// Flags
var (
flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.")
flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)")
)
type command struct {
run func(*h2i, []string) error // required
// complete optionally specifies tokens (case-insensitive) which are
// valid for this subcommand.
complete func() []string
}
var commands = map[string]command{
"ping": {run: (*h2i).cmdPing},
"settings": {
run: (*h2i).cmdSettings,
complete: func() []string {
return []string{
"ACK",
http2.SettingHeaderTableSize.String(),
http2.SettingEnablePush.String(),
http2.SettingMaxConcurrentStreams.String(),
http2.SettingInitialWindowSize.String(),
http2.SettingMaxFrameSize.String(),
http2.SettingMaxHeaderListSize.String(),
}
},
},
"quit": {run: (*h2i).cmdQuit},
"headers": {run: (*h2i).cmdHeaders},
}
func usage() {
fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
flag.PrintDefaults()
}
// withPort adds ":443" if another port isn't already present.
func withPort(host string) string {
if _, _, err := net.SplitHostPort(host); err != nil {
return net.JoinHostPort(host, "443")
}
return host
}
// withoutPort strips the port from addr if present.
func withoutPort(addr string) string {
if h, _, err := net.SplitHostPort(addr); err == nil {
return h
}
return addr
}
// h2i is the app's state.
type h2i struct {
host string
tc *tls.Conn
framer *http2.Framer
term *terminal.Terminal
// owned by the command loop:
streamID uint32
hbuf bytes.Buffer
henc *hpack.Encoder
// owned by the readFrames loop:
peerSetting map[http2.SettingID]uint32
hdec *hpack.Decoder
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() != 1 {
usage()
os.Exit(2)
}
log.SetFlags(0)
host := flag.Arg(0)
app := &h2i{
host: host,
peerSetting: make(map[http2.SettingID]uint32),
}
app.henc = hpack.NewEncoder(&app.hbuf)
if err := app.Main(); err != nil {
if app.term != nil {
app.logf("%v\n", err)
} else {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
os.Exit(1)
}
fmt.Fprintf(os.Stdout, "\n")
}
func (app *h2i) Main() error {
cfg := &tls.Config{
ServerName: withoutPort(app.host),
NextProtos: strings.Split(*flagNextProto, ","),
InsecureSkipVerify: *flagInsecure,
}
hostAndPort := *flagDial
if hostAndPort == "" {
hostAndPort = withPort(app.host)
}
log.Printf("Connecting to %s ...", hostAndPort)
tc, err := tls.Dial("tcp", hostAndPort, cfg)
if err != nil {
return fmt.Errorf("Error dialing %s: %v", hostAndPort, err)
}
log.Printf("Connected to %v", tc.RemoteAddr())
defer tc.Close()
if err := tc.Handshake(); err != nil {
return fmt.Errorf("TLS handshake: %v", err)
}
if !*flagInsecure {
if err := tc.VerifyHostname(app.host); err != nil {
return fmt.Errorf("VerifyHostname: %v", err)
}
}
state := tc.ConnectionState()
log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
return fmt.Errorf("Could not negotiate protocol mutually")
}
if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
return err
}
app.framer = http2.NewFramer(tc, tc)
oldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
return err
}
defer terminal.Restore(0, oldState)
var screen = struct {
io.Reader
io.Writer
}{os.Stdin, os.Stdout}
app.term = terminal.NewTerminal(screen, "h2i> ")
lastWord := regexp.MustCompile(`.+\W(\w+)$`)
app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
if key != '\t' {
return
}
if pos != len(line) {
// TODO: we're being lazy for now, only supporting tab completion at the end.
return
}
// Auto-complete for the command itself.
if !strings.Contains(line, " ") {
var name string
name, _, ok = lookupCommand(line)
if !ok {
return
}
return name, len(name), true
}
_, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
if !ok || c.complete == nil {
return
}
if strings.HasSuffix(line, " ") {
app.logf("%s", strings.Join(c.complete(), " "))
return line, pos, true
}
m := lastWord.FindStringSubmatch(line)
if m == nil {
return line, len(line), true
}
soFar := m[1]
var match []string
for _, cand := range c.complete() {
if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
continue
}
match = append(match, cand)
}
if len(match) == 0 {
return
}
if len(match) > 1 {
// TODO: auto-complete any common prefix
app.logf("%s", strings.Join(match, " "))
return line, pos, true
}
newLine = line[:len(line)-len(soFar)] + match[0]
return newLine, len(newLine), true
}
errc := make(chan error, 2)
go func() { errc <- app.readFrames() }()
go func() { errc <- app.readConsole() }()
return <-errc
}
func (app *h2i) logf(format string, args ...interface{}) {
fmt.Fprintf(app.term, format+"\r\n", args...)
}
func (app *h2i) readConsole() error {
if s := *flagSettings; s != "omit" {
var args []string
if s != "empty" {
args = strings.Split(s, ",")
}
_, c, ok := lookupCommand("settings")
if !ok {
panic("settings command not found")
}
c.run(app, args)
}
for {
line, err := app.term.ReadLine()
if err == io.EOF {
return nil
}
if err != nil {
return fmt.Errorf("terminal.ReadLine: %v", err)
}
f := strings.Fields(line)
if len(f) == 0 {
continue
}
cmd, args := f[0], f[1:]
if _, c, ok := lookupCommand(cmd); ok {
err = c.run(app, args)
} else {
app.logf("Unknown command %q", line)
}
if err == errExitApp {
return nil
}
if err != nil {
return err
}
}
}
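// lookupCommand resolves a possibly abbreviated command name to its command; an ambiguous
// prefix matches nothing.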
func lookupCommand(prefix string) (name string, c command, ok bool) {
prefix = strings.ToLower(prefix)
if c, ok = commands[prefix]; ok {
return prefix, c, ok
}
for full, candidate := range commands {
if strings.HasPrefix(full, prefix) {
if c.run != nil {
return "", command{}, false // ambiguous
}
c = candidate
name = full
}
}
return name, c, c.run != nil
}
var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
func (a *h2i) cmdQuit(args []string) error {
if len(args) > 0 {
a.logf("the QUIT command takes no argument")
return nil
}
return errExitApp
}
func (a *h2i) cmdSettings(args []string) error {
if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
return a.framer.WriteSettingsAck()
}
var settings []http2.Setting
for _, arg := range args {
if strings.EqualFold(arg, "ACK") {
a.logf("Error: ACK must be only argument with the SETTINGS command")
return nil
}
eq := strings.Index(arg, "=")
if eq == -1 {
a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
return nil
}
sid, ok := settingByName(arg[:eq])
if !ok {
a.logf("Error: unknown setting name %q", arg[:eq])
return nil
}
val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
if err != nil {
a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
return nil
}
settings = append(settings, http2.Setting{
ID: sid,
Val: uint32(val),
})
}
a.logf("Sending: %v", settings)
return a.framer.WriteSettings(settings...)
}
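// settingByName maps a textual setting name such as "INITIAL_WINDOW_SIZE" to its http2.SettingID.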
func settingByName(name string) (http2.SettingID, bool) {
for _, sid := range [...]http2.SettingID{
http2.SettingHeaderTableSize,
http2.SettingEnablePush,
http2.SettingMaxConcurrentStreams,
http2.SettingInitialWindowSize,
http2.SettingMaxFrameSize,
http2.SettingMaxHeaderListSize,
} {
if strings.EqualFold(sid.String(), name) {
return sid, true
}
}
return 0, false
}
func (app *h2i) cmdPing(args []string) error {
if len(args) > 1 {
app.logf("invalid PING usage: only accepts 0 or 1 args")
return nil // nil means don't end the program
}
var data [8]byte
if len(args) == 1 {
copy(data[:], args[0])
} else {
copy(data[:], "h2i_ping")
}
return app.framer.WritePing(false, data)
}
func (app *h2i) cmdHeaders(args []string) error {
if len(args) > 0 {
app.logf("Error: HEADERS doesn't yet take arguments.")
// TODO: flags for restricting window size, to force CONTINUATION
// frames.
return nil
}
var h1req bytes.Buffer
app.term.SetPrompt("(as HTTP/1.1)> ")
defer app.term.SetPrompt("h2i> ")
for {
line, err := app.term.ReadLine()
if err != nil {
return err
}
h1req.WriteString(line)
h1req.WriteString("\r\n")
if line == "" {
break
}
}
req, err := http.ReadRequest(bufio.NewReader(&h1req))
if err != nil {
app.logf("Invalid HTTP/1.1 request: %v", err)
return nil
}
if app.streamID == 0 {
app.streamID = 1
} else {
app.streamID += 2
}
app.logf("Opening Stream-ID %d:", app.streamID)
hbf := app.encodeHeaders(req)
if len(hbf) > 16<<10 {
app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
return nil
}
return app.framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: app.streamID,
BlockFragment: hbf,
EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
EndHeaders: true, // for now
})
}
func (app *h2i) readFrames() error {
for {
f, err := app.framer.ReadFrame()
if err != nil {
return fmt.Errorf("ReadFrame: %v", err)
}
app.logf("%v", f)
switch f := f.(type) {
case *http2.PingFrame:
app.logf(" Data = %q", f.Data)
case *http2.SettingsFrame:
f.ForeachSetting(func(s http2.Setting) error {
app.logf(" %v", s)
app.peerSetting[s.ID] = s.Val
return nil
})
case *http2.WindowUpdateFrame:
app.logf(" Window-Increment = %v", f.Increment)
case *http2.GoAwayFrame:
app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode)
case *http2.DataFrame:
app.logf(" %q", f.Data())
case *http2.HeadersFrame:
if f.HasPriority() {
app.logf(" PRIORITY = %v", f.Priority)
}
if app.hdec == nil {
// TODO: if the user uses h2i to send a SETTINGS frame advertising
// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
// and stuff here instead of using the 4k default. But for now:
tableSize := uint32(4 << 10)
app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
}
app.hdec.Write(f.HeaderBlockFragment())
case *http2.PushPromiseFrame:
if app.hdec == nil {
// TODO: if the user uses h2i to send a SETTINGS frame advertising
// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
// and stuff here instead of using the 4k default. But for now:
tableSize := uint32(4 << 10)
app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
}
app.hdec.Write(f.HeaderBlockFragment())
}
}
}
// called from readLoop
func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
if f.Sensitive {
app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value)
}
app.logf(" %s = %q", f.Name, f.Value)
}
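// encodeHeaders HPACK-encodes the request's pseudo-headers and lower-cased header fields into
// a header block fragment.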
func (app *h2i) encodeHeaders(req *http.Request) []byte {
app.hbuf.Reset()
// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
host := req.Host
if host == "" {
host = req.URL.Host
}
path := req.RequestURI
if path == "" {
path = "/"
}
app.writeHeader(":authority", host) // probably not right for all sites
app.writeHeader(":method", req.Method)
app.writeHeader(":path", path)
app.writeHeader(":scheme", "https")
for k, vv := range req.Header {
lowKey := strings.ToLower(k)
if lowKey == "host" {
continue
}
for _, v := range vv {
app.writeHeader(lowKey, v)
}
}
return app.hbuf.Bytes()
}
func (app *h2i) writeHeader(name, value string) {
app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
app.logf(" %s = %s", name, value)
}

View File

@ -1,386 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bytes"
"encoding/hex"
"fmt"
"math/rand"
"reflect"
"strings"
"testing"
)
func TestEncoderTableSizeUpdate(t *testing.T) {
tests := []struct {
size1, size2 uint32
wantHex string
}{
// Should emit 2 table size updates (2048 and 4096)
{2048, 4096, "3fe10f 3fe11f 82"},
// Should emit 1 table size update (2048)
{16384, 2048, "3fe10f 82"},
}
for _, tt := range tests {
var buf bytes.Buffer
e := NewEncoder(&buf)
e.SetMaxDynamicTableSize(tt.size1)
e.SetMaxDynamicTableSize(tt.size2)
if err := e.WriteField(pair(":method", "GET")); err != nil {
t.Fatal(err)
}
want := removeSpace(tt.wantHex)
if got := hex.EncodeToString(buf.Bytes()); got != want {
t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
}
}
}
func TestEncoderWriteField(t *testing.T) {
var buf bytes.Buffer
e := NewEncoder(&buf)
var got []HeaderField
d := NewDecoder(4<<10, func(f HeaderField) {
got = append(got, f)
})
tests := []struct {
hdrs []HeaderField
}{
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
}},
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
}},
{[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
}},
}
for i, tt := range tests {
buf.Reset()
got = got[:0]
for _, hf := range tt.hdrs {
if err := e.WriteField(hf); err != nil {
t.Fatal(err)
}
}
_, err := d.Write(buf.Bytes())
if err != nil {
t.Errorf("%d. Decoder Write = %v", i, err)
}
if !reflect.DeepEqual(got, tt.hdrs) {
t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
}
}
}
func TestEncoderSearchTable(t *testing.T) {
e := NewEncoder(nil)
e.dynTab.add(pair("foo", "bar"))
e.dynTab.add(pair("blake", "miz"))
e.dynTab.add(pair(":method", "GET"))
tests := []struct {
hf HeaderField
wantI uint64
wantMatch bool
}{
// Name and Value match
{pair("foo", "bar"), uint64(staticTable.len()) + 3, true},
{pair("blake", "miz"), uint64(staticTable.len()) + 2, true},
{pair(":method", "GET"), 2, true},
// Only name match because Sensitive == true. This is allowed to match
// any ":method" entry. The current implementation uses the last entry
// added in newStaticTable.
{HeaderField{":method", "GET", true}, 3, false},
// Only Name matches
{pair("foo", "..."), uint64(staticTable.len()) + 3, false},
{pair("blake", "..."), uint64(staticTable.len()) + 2, false},
// As before, this is allowed to match any ":method" entry.
{pair(":method", "..."), 3, false},
// None match
{pair("foo-", "bar"), 0, false},
}
for _, tt := range tests {
if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
}
}
}
func TestAppendVarInt(t *testing.T) {
tests := []struct {
n byte
i uint64
want []byte
}{
// Fits in a byte:
{1, 0, []byte{0}},
{2, 2, []byte{2}},
{3, 6, []byte{6}},
{4, 14, []byte{14}},
{5, 30, []byte{30}},
{6, 62, []byte{62}},
{7, 126, []byte{126}},
{8, 254, []byte{254}},
// Multiple bytes:
{5, 1337, []byte{31, 154, 10}},
}
for _, tt := range tests {
got := appendVarInt(nil, tt.n, tt.i)
if !bytes.Equal(got, tt.want) {
t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
}
}
}
func TestAppendHpackString(t *testing.T) {
tests := []struct {
s, wantHex string
}{
// Huffman encoded
{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
// Not Huffman encoded
{"a", "01 61"},
// zero length
{"", "00"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendHpackString(nil, tt.s)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
}
}
}
func TestAppendIndexed(t *testing.T) {
tests := []struct {
i uint64
wantHex string
}{
// 1 byte
{1, "81"},
{126, "fe"},
// 2 bytes
{127, "ff00"},
{128, "ff01"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendIndexed(nil, tt.i)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
}
}
}
func TestAppendNewName(t *testing.T) {
tests := []struct {
f HeaderField
indexing bool
wantHex string
}{
// Incremental indexing
{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
// Without indexing
{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
// Never indexed
{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendNewName(nil, tt.f, tt.indexing)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
}
}
}
func TestAppendIndexedName(t *testing.T) {
tests := []struct {
f HeaderField
i uint64
indexing bool
wantHex string
}{
// Incremental indexing
{HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
// Without indexing
{HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
// Never indexed
{HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
{HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
}
}
}
func TestAppendTableSize(t *testing.T) {
tests := []struct {
i uint32
wantHex string
}{
// Fits into 1 byte
{30, "3e"},
// Extra byte
{31, "3f00"},
{32, "3f01"},
}
for _, tt := range tests {
want := removeSpace(tt.wantHex)
buf := appendTableSize(nil, tt.i)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
}
}
}
func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
var buf bytes.Buffer
e := NewEncoder(&buf)
tests := []struct {
v uint32
wantUpdate bool
wantMinSize uint32
wantMaxSize uint32
}{
// Set new table size to 2048
{2048, true, 2048, 2048},
// Set new table size to 16384, but still limited to
// 4096
{16384, true, 2048, 4096},
}
for _, tt := range tests {
e.SetMaxDynamicTableSize(tt.v)
if got := e.tableSizeUpdate; tt.wantUpdate != got {
t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
}
if got := e.minSize; tt.wantMinSize != got {
t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
}
if got := e.dynTab.maxSize; tt.wantMaxSize != got {
t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
}
}
}
func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
e := NewEncoder(nil)
// 4095 < initialHeaderTableSize means maxSize is truncated to
// 4095.
e.SetMaxDynamicTableSizeLimit(4095)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
if got, want := e.maxSizeLimit, uint32(4095); got != want {
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
}
if got, want := e.tableSizeUpdate, true; got != want {
t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
}
// maxSize will be truncated to maxSizeLimit
e.SetMaxDynamicTableSize(16384)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
// 8192 > current maxSizeLimit, so maxSize does not change.
e.SetMaxDynamicTableSizeLimit(8192)
if got, want := e.dynTab.maxSize, uint32(4095); got != want {
t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
}
if got, want := e.maxSizeLimit, uint32(8192); got != want {
t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
}
}
func removeSpace(s string) string {
return strings.Replace(s, " ", "", -1)
}
func BenchmarkEncoderSearchTable(b *testing.B) {
e := NewEncoder(nil)
// A sample of possible header fields.
// This is not based on any actual data from HTTP/2 traces.
var possible []HeaderField
for _, f := range staticTable.ents {
if f.Value == "" {
possible = append(possible, f)
continue
}
// Generate 5 random values, except for cookie and set-cookie,
// which we know can have many values in practice.
num := 5
if f.Name == "cookie" || f.Name == "set-cookie" {
num = 25
}
for i := 0; i < num; i++ {
f.Value = fmt.Sprintf("%s-%d", f.Name, i)
possible = append(possible, f)
}
}
for k := 0; k < 10; k++ {
f := HeaderField{
Name: fmt.Sprintf("x-header-%d", k),
Sensitive: rand.Int()%2 == 0,
}
for i := 0; i < 5; i++ {
f.Value = fmt.Sprintf("%s-%d", f.Name, i)
possible = append(possible, f)
}
}
// Add a random sample to the dynamic table. This very loosely simulates
// a history of 100 requests with 20 header fields per request.
for r := 0; r < 100*20; r++ {
f := possible[rand.Int31n(int32(len(possible)))]
// Skip if this is in the staticTable verbatim.
if _, has := staticTable.search(f); !has {
e.dynTab.add(f)
}
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
for _, f := range possible {
e.searchTable(f)
}
}
}

View File

@ -1,770 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bytes"
"encoding/hex"
"fmt"
"math/rand"
"reflect"
"strings"
"testing"
"time"
)
func (d *Decoder) mustAt(idx int) HeaderField {
if hf, ok := d.at(uint64(idx)); !ok {
panic(fmt.Sprintf("bogus index %d", idx))
} else {
return hf
}
}
func TestDynamicTableAt(t *testing.T) {
d := NewDecoder(4096, nil)
at := d.mustAt
if got, want := at(2), (pair(":method", "GET")); got != want {
t.Errorf("at(2) = %v; want %v", got, want)
}
d.dynTab.add(pair("foo", "bar"))
d.dynTab.add(pair("blake", "miz"))
if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 2) = %v; want %v", got, want)
}
if got, want := at(3), (pair(":method", "POST")); got != want {
t.Errorf("at(3) = %v; want %v", got, want)
}
}
func TestDynamicTableSizeEvict(t *testing.T) {
d := NewDecoder(4096, nil)
if want := uint32(0); d.dynTab.size != want {
t.Fatalf("size = %d; want %d", d.dynTab.size, want)
}
add := d.dynTab.add
add(pair("blake", "eats pizza"))
if want := uint32(15 + 32); d.dynTab.size != want {
t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
}
add(pair("foo", "bar"))
if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
}
d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
if want := uint32(6 + 32); d.dynTab.size != want {
t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
}
if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want {
t.Errorf("at(dyn 1) = %v; want %v", got, want)
}
add(pair("long", strings.Repeat("x", 500)))
if want := uint32(0); d.dynTab.size != want {
t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
}
}
func TestDecoderDecode(t *testing.T) {
tests := []struct {
name string
in []byte
want []HeaderField
wantDynTab []HeaderField // newest entry first
}{
// C.2.1 Literal Header Field with Indexing
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
{"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
[]HeaderField{pair("custom-key", "custom-header")},
[]HeaderField{pair("custom-key", "custom-header")},
},
// C.2.2 Literal Header Field without Indexing
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
{"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
[]HeaderField{pair(":path", "/sample/path")},
[]HeaderField{}},
// C.2.3 Literal Header Field never Indexed
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
{"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
[]HeaderField{{"password", "secret", true}},
[]HeaderField{}},
// C.2.4 Indexed Header Field
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
{"C.2.4", []byte("\x82"),
[]HeaderField{pair(":method", "GET")},
[]HeaderField{}},
}
for _, tt := range tests {
d := NewDecoder(4096, nil)
hf, err := d.DecodeFull(tt.in)
if err != nil {
t.Errorf("%s: %v", tt.name, err)
continue
}
if !reflect.DeepEqual(hf, tt.want) {
t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
}
gotDynTab := d.dynTab.reverseCopy()
if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
}
}
}
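// reverseCopy returns the dynamic table entries newest-first, matching the order used in the
// test expectations.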
func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
hf = make([]HeaderField, len(dt.table.ents))
for i := range hf {
hf[i] = dt.table.ents[len(dt.table.ents)-1-i]
}
return
}
type encAndWant struct {
enc []byte
want []HeaderField
wantDynTab []HeaderField
wantDynSize uint32
}
// C.3 Request Examples without Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
func TestDecodeC3_NoHuffman(t *testing.T) {
testDecodeSeries(t, 4096, []encAndWant{
{dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
},
[]HeaderField{
pair(":authority", "www.example.com"),
},
57,
},
{dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
},
[]HeaderField{
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
110,
},
{dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
},
[]HeaderField{
pair("custom-key", "custom-value"),
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
164,
},
})
}
// C.4 Request Examples with Huffman Coding
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
func TestDecodeC4_Huffman(t *testing.T) {
testDecodeSeries(t, 4096, []encAndWant{
{dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
},
[]HeaderField{
pair(":authority", "www.example.com"),
},
57,
},
{dehex("8286 84be 5886 a8eb 1064 9cbf"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "http"),
pair(":path", "/"),
pair(":authority", "www.example.com"),
pair("cache-control", "no-cache"),
},
[]HeaderField{
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
110,
},
{dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
[]HeaderField{
pair(":method", "GET"),
pair(":scheme", "https"),
pair(":path", "/index.html"),
pair(":authority", "www.example.com"),
pair("custom-key", "custom-value"),
},
[]HeaderField{
pair("custom-key", "custom-value"),
pair("cache-control", "no-cache"),
pair(":authority", "www.example.com"),
},
164,
},
})
}
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
// "This section shows several consecutive header lists, corresponding
// to HTTP responses, on the same connection. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur."
func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
testDecodeSeries(t, 256, []encAndWant{
{dehex(`
4803 3330 3258 0770 7269 7661 7465 611d
4d6f 6e2c 2032 3120 4f63 7420 3230 3133
2032 303a 3133 3a32 3120 474d 546e 1768
7474 7073 3a2f 2f77 7777 2e65 7861 6d70
6c65 2e63 6f6d
`),
[]HeaderField{
pair(":status", "302"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
pair(":status", "302"),
},
222,
},
{dehex("4803 3330 37c1 c0bf"),
[]HeaderField{
pair(":status", "307"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair(":status", "307"),
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
},
222,
},
{dehex(`
88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
3230 3133 2032 303a 3133 3a32 3220 474d
54c0 5a04 677a 6970 7738 666f 6f3d 4153
444a 4b48 514b 425a 584f 5157 454f 5049
5541 5851 5745 4f49 553b 206d 6178 2d61
6765 3d33 3630 303b 2076 6572 7369 6f6e
3d31
`),
[]HeaderField{
pair(":status", "200"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
pair("location", "https://www.example.com"),
pair("content-encoding", "gzip"),
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
},
[]HeaderField{
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
pair("content-encoding", "gzip"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
},
215,
},
})
}
// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
// "This section shows the same examples as the previous section, but
// using Huffman encoding for the literal values. The HTTP/2 setting
// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
// octets, causing some evictions to occur. The eviction mechanism
// uses the length of the decoded literal values, so the same
// evictions occurs as in the previous section."
func TestDecodeC6_ResponsesHuffman(t *testing.T) {
testDecodeSeries(t, 256, []encAndWant{
{dehex(`
4882 6402 5885 aec3 771a 4b61 96d0 7abe
9410 54d4 44a8 2005 9504 0b81 66e0 82a6
2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
e9ae 82ae 43d3
`),
[]HeaderField{
pair(":status", "302"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
pair(":status", "302"),
},
222,
},
{dehex("4883 640e ffc1 c0bf"),
[]HeaderField{
pair(":status", "307"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("location", "https://www.example.com"),
},
[]HeaderField{
pair(":status", "307"),
pair("location", "https://www.example.com"),
pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
pair("cache-control", "private"),
},
222,
},
{dehex(`
88c1 6196 d07a be94 1054 d444 a820 0595
040b 8166 e084 a62d 1bff c05a 839b d9ab
77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
9587 3160 65c0 03ed 4ee5 b106 3d50 07
`),
[]HeaderField{
pair(":status", "200"),
pair("cache-control", "private"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
pair("location", "https://www.example.com"),
pair("content-encoding", "gzip"),
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
},
[]HeaderField{
pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
pair("content-encoding", "gzip"),
pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
},
215,
},
})
}
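// testDecodeSeries decodes each step's header block in sequence, checking the emitted fields,
// the resulting dynamic table contents, and the table size.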
func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
d := NewDecoder(size, nil)
for i, step := range steps {
hf, err := d.DecodeFull(step.enc)
if err != nil {
t.Fatalf("Error at step index %d: %v", i, err)
}
if !reflect.DeepEqual(hf, step.want) {
t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
}
gotDynTab := d.dynTab.reverseCopy()
if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
}
if d.dynTab.size != step.wantDynSize {
t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
}
}
}
func TestHuffmanDecodeExcessPadding(t *testing.T) {
tests := [][]byte{
{0xff}, // Padding Exceeds 7 bits
{0x1f, 0xff}, // {"a", 1 byte excess padding}
{0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding}
{0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding}
{0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding}
{'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol.
}
for i, in := range tests {
var buf bytes.Buffer
if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err)
}
}
}
func TestHuffmanDecodeEOS(t *testing.T) {
in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"}
var buf bytes.Buffer
if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
t.Errorf("error = %v; want ErrInvalidHuffman", err)
}
}
func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {
in := []byte{0x00, 0x01} // {"0", "0", "0"}
var buf bytes.Buffer
if err := huffmanDecode(&buf, 2, in); err != ErrStringLength {
t.Errorf("error = %v; want ErrStringLength", err)
}
}
func TestHuffmanDecodeCorruptPadding(t *testing.T) {
in := []byte{0x00}
var buf bytes.Buffer
if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {
t.Errorf("error = %v; want ErrInvalidHuffman", err)
}
}
func TestHuffmanDecode(t *testing.T) {
tests := []struct {
inHex, want string
}{
{"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
{"a8eb 1064 9cbf", "no-cache"},
{"25a8 49e9 5ba9 7d7f", "custom-key"},
{"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
{"6402", "302"},
{"aec3 771a 4b", "private"},
{"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
{"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
{"9bd9 ab", "gzip"},
{"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
}
for i, tt := range tests {
var buf bytes.Buffer
in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
if err != nil {
t.Errorf("%d. hex input error: %v", i, err)
continue
}
if _, err := HuffmanDecode(&buf, in); err != nil {
t.Errorf("%d. decode error: %v", i, err)
continue
}
if got := buf.String(); tt.want != got {
t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
}
}
}
func BenchmarkHuffmanDecode(b *testing.B) {
b.StopTimer()
enc, err := hex.DecodeString(strings.Replace("94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
" ", "", -1))
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.StartTimer()
var buf bytes.Buffer
for i := 0; i < b.N; i++ {
buf.Reset()
if _, err := HuffmanDecode(&buf, enc); err != nil {
b.Fatalf("decode error: %v", err)
}
if string(buf.Bytes()) != "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1" {
b.Fatalf("bogus output %q", buf.Bytes())
}
}
}
func TestAppendHuffmanString(t *testing.T) {
tests := []struct {
in, want string
}{
{"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
{"no-cache", "a8eb 1064 9cbf"},
{"custom-key", "25a8 49e9 5ba9 7d7f"},
{"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
{"302", "6402"},
{"private", "aec3 771a 4b"},
{"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
{"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
{"gzip", "9bd9 ab"},
{"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
}
for i, tt := range tests {
buf := []byte{}
want := strings.Replace(tt.want, " ", "", -1)
buf = AppendHuffmanString(buf, tt.in)
if got := hex.EncodeToString(buf); want != got {
t.Errorf("%d. encode = %q; want %q", i, got, want)
}
}
}
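// TestHuffmanMaxStrLen verifies that huffmanDecode enforces its maxLen
// argument (0 means no limit). For orientation (an illustrative note, not
// part of the original file), the exported round trip pairs
// AppendHuffmanString with HuffmanDecodeToString:
//
//	enc := AppendHuffmanString(nil, "www.example.com")
//	s, err := HuffmanDecodeToString(enc) // "www.example.com", nil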
func TestHuffmanMaxStrLen(t *testing.T) {
const msg = "Some string"
huff := AppendHuffmanString(nil, msg)
testGood := func(max int) {
var out bytes.Buffer
if err := huffmanDecode(&out, max, huff); err != nil {
t.Errorf("For maxLen=%d, unexpected error: %v", max, err)
}
if out.String() != msg {
t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg)
}
}
testGood(0)
testGood(len(msg))
testGood(len(msg) + 1)
var out bytes.Buffer
if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {
t.Errorf("err = %v; want ErrStringLength", err)
}
}
func TestHuffmanRoundtripStress(t *testing.T) {
const Len = 50 // of uncompressed string
input := make([]byte, Len)
var output bytes.Buffer
var huff []byte
n := 5000
if testing.Short() {
n = 100
}
seed := time.Now().UnixNano()
t.Logf("Seed = %v", seed)
src := rand.New(rand.NewSource(seed))
var encSize int64
for i := 0; i < n; i++ {
for l := range input {
input[l] = byte(src.Intn(256))
}
huff = AppendHuffmanString(huff[:0], string(input))
encSize += int64(len(huff))
output.Reset()
if err := huffmanDecode(&output, 0, huff); err != nil {
t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err)
continue
}
if !bytes.Equal(output.Bytes(), input) {
t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes())
}
}
t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)
}
func TestHuffmanDecodeFuzz(t *testing.T) {
const Len = 50 // of compressed
var buf, zbuf bytes.Buffer
n := 5000
if testing.Short() {
n = 100
}
seed := time.Now().UnixNano()
t.Logf("Seed = %v", seed)
src := rand.New(rand.NewSource(seed))
numFail := 0
for i := 0; i < n; i++ {
zbuf.Reset()
if i == 0 {
// Start with at least one invalid one.
zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8")
} else {
for l := 0; l < Len; l++ {
zbuf.WriteByte(byte(src.Intn(256)))
}
}
buf.Reset()
if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {
if err == ErrInvalidHuffman {
numFail++
continue
}
t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err)
continue
}
}
t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n)
if numFail < 1 {
t.Error("expected at least one invalid huffman encoding (test starts with one)")
}
}
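// TestReadVarInt exercises readVarInt, which decodes the integer
// representation of RFC 7541 Section 5.1: values below 2^n-1 fit in the
// n-bit prefix; larger values set the prefix to 2^n-1 and continue in
// 7-bit groups whose high bit flags continuation. As an illustrative
// sketch (a hypothetical helper, not part of this package), encoding 1337
// with a 5-bit prefix works out as:
//
//	func encodeVarIntSketch(n byte, i uint64) []byte {
//		max := uint64(1)<<n - 1 // 31 for n=5
//		if i < max {
//			return []byte{byte(i)} // fits in the prefix
//		}
//		out := []byte{byte(max)} // 31; dummy high prefix bits are ORed in elsewhere
//		i -= max                 // 1337-31 = 1306
//		for i >= 128 {
//			out = append(out, byte(i%128)|0x80) // 154 (0x9a)
//			i /= 128
//		}
//		return append(out, byte(i)) // 10
//	}
//
// which yields {31, 154, 10}: the value 1337 decoded from 3 bytes, as in
// the "Ignoring top bits" cases below.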
func TestReadVarInt(t *testing.T) {
type res struct {
i uint64
consumed int
err error
}
tests := []struct {
n byte
p []byte
want res
}{
// Fits in a byte:
{1, []byte{0}, res{0, 1, nil}},
{2, []byte{2}, res{2, 1, nil}},
{3, []byte{6}, res{6, 1, nil}},
{4, []byte{14}, res{14, 1, nil}},
{5, []byte{30}, res{30, 1, nil}},
{6, []byte{62}, res{62, 1, nil}},
{7, []byte{126}, res{126, 1, nil}},
{8, []byte{254}, res{254, 1, nil}},
// Doesn't fit in a byte:
{1, []byte{1}, res{0, 0, errNeedMore}},
{2, []byte{3}, res{0, 0, errNeedMore}},
{3, []byte{7}, res{0, 0, errNeedMore}},
{4, []byte{15}, res{0, 0, errNeedMore}},
{5, []byte{31}, res{0, 0, errNeedMore}},
{6, []byte{63}, res{0, 0, errNeedMore}},
{7, []byte{127}, res{0, 0, errNeedMore}},
{8, []byte{255}, res{0, 0, errNeedMore}},
// Ignoring top bits:
{5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
{5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
{5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
// Extra byte:
{5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
// Short a byte:
{5, []byte{191, 154}, res{0, 0, errNeedMore}},
// integer overflow:
{1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
}
for _, tt := range tests {
i, remain, err := readVarInt(tt.n, tt.p)
consumed := len(tt.p) - len(remain)
got := res{i, consumed, err}
if got != tt.want {
t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
}
}
}
// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56
func TestHuffmanFuzzCrash(t *testing.T) {
got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8"))
if got != "" {
t.Errorf("Got %q; want empty string", got)
}
if err != ErrInvalidHuffman {
t.Errorf("Err = %v; want ErrInvalidHuffman", err)
}
}
func pair(name, value string) HeaderField {
return HeaderField{Name: name, Value: value}
}
func dehex(s string) []byte {
s = strings.Replace(s, " ", "", -1)
s = strings.Replace(s, "\n", "", -1)
b, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return b
}
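// TestEmitEnabled verifies that calling SetEmitEnabled(false) from within
// the emit callback suppresses callbacks for the remaining fields in the
// header block: two fields are encoded, but only one callback fires.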
func TestEmitEnabled(t *testing.T) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
numCallback := 0
var dec *Decoder
dec = NewDecoder(8<<20, func(HeaderField) {
numCallback++
dec.SetEmitEnabled(false)
})
if !dec.EmitEnabled() {
t.Errorf("initial emit enabled = false; want true")
}
if _, err := dec.Write(buf.Bytes()); err != nil {
t.Error(err)
}
if numCallback != 1 {
t.Errorf("num callbacks = %d; want 1", numCallback)
}
if dec.EmitEnabled() {
t.Errorf("emit enabled = true; want false")
}
}
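// TestSaveBufLimit checks that SetMaxStringLength is enforced: a small
// literal field decodes normally, while a string literal longer than the
// limit makes Write fail with ErrStringLength.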
func TestSaveBufLimit(t *testing.T) {
const maxStr = 1 << 10
var got []HeaderField
dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
got = append(got, hf)
})
dec.SetMaxStringLength(maxStr)
var frag []byte
frag = append(frag[:0], encodeTypeByte(false, false))
frag = appendVarInt(frag, 7, 3)
frag = append(frag, "foo"...)
frag = appendVarInt(frag, 7, 3)
frag = append(frag, "bar"...)
if _, err := dec.Write(frag); err != nil {
t.Fatal(err)
}
want := []HeaderField{{Name: "foo", Value: "bar"}}
if !reflect.DeepEqual(got, want) {
t.Errorf("After small writes, got %v; want %v", got, want)
}
frag = append(frag[:0], encodeTypeByte(false, false))
frag = appendVarInt(frag, 7, maxStr*3)
frag = append(frag, make([]byte, maxStr*3)...)
_, err := dec.Write(frag)
if err != ErrStringLength {
t.Fatalf("Write error = %v; want ErrStringLength", err)
}
}
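// TestDynamicSizeUpdate verifies that a dynamic table size update is only
// accepted at the beginning of a header block: decoding the same encoded
// block twice with an intervening Close succeeds, but writing it a third
// time while the current block is still open must fail.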
func TestDynamicSizeUpdate(t *testing.T) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
enc.SetMaxDynamicTableSize(255)
enc.WriteField(HeaderField{Name: "foo", Value: "bar"})
d := NewDecoder(4096, func(_ HeaderField) {})
_, err := d.Write(buf.Bytes())
if err != nil {
t.Fatalf("unexpected error: got = %v", err)
}
d.Close()
// Start a new header
_, err = d.Write(buf.Bytes())
if err != nil {
t.Fatalf("unexpected error: got = %v", err)
}
// must fail since the dynamic table update must be at the beginning
_, err = d.Write(buf.Bytes())
if err == nil {
t.Fatalf("dynamic table size update not at the beginning of a header block")
}
}

View File

@ -1,214 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bufio"
"regexp"
"strconv"
"strings"
"testing"
)
func TestHeaderFieldTable(t *testing.T) {
table := &headerFieldTable{}
table.init()
table.addEntry(pair("key1", "value1-1"))
table.addEntry(pair("key2", "value2-1"))
table.addEntry(pair("key1", "value1-2"))
table.addEntry(pair("key3", "value3-1"))
table.addEntry(pair("key4", "value4-1"))
table.addEntry(pair("key2", "value2-2"))
// Tests will be run twice: once before evicting anything, and
// again after evicting the three oldest entries.
tests := []struct {
f HeaderField
beforeWantStaticI uint64
beforeWantMatch bool
afterWantStaticI uint64
afterWantMatch bool
}{
{HeaderField{"key1", "value1-1", false}, 1, true, 0, false},
{HeaderField{"key1", "value1-2", false}, 3, true, 0, false},
{HeaderField{"key1", "value1-3", false}, 3, false, 0, false},
{HeaderField{"key2", "value2-1", false}, 2, true, 3, false},
{HeaderField{"key2", "value2-2", false}, 6, true, 3, true},
{HeaderField{"key2", "value2-3", false}, 6, false, 3, false},
{HeaderField{"key4", "value4-1", false}, 5, true, 2, true},
// Name match only, because sensitive.
{HeaderField{"key4", "value4-1", true}, 5, false, 2, false},
// Key not found.
{HeaderField{"key5", "value5-x", false}, 0, false, 0, false},
}
staticToDynamic := func(i uint64) uint64 {
if i == 0 {
return 0
}
return uint64(table.len()) - i + 1 // dynamic is the reversed table
}
searchStatic := func(f HeaderField) (uint64, bool) {
old := staticTable
staticTable = table
defer func() { staticTable = old }()
return staticTable.search(f)
}
searchDynamic := func(f HeaderField) (uint64, bool) {
return table.search(f)
}
for _, test := range tests {
gotI, gotMatch := searchStatic(test.f)
if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
}
gotI, gotMatch = searchDynamic(test.f)
wantDynamicI := staticToDynamic(test.beforeWantStaticI)
if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {
t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
}
}
table.evictOldest(3)
for _, test := range tests {
gotI, gotMatch := searchStatic(test.f)
if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
}
gotI, gotMatch = searchDynamic(test.f)
wantDynamicI := staticToDynamic(test.afterWantStaticI)
if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {
t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch)
}
}
}
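// TestHeaderFieldTable_LookupMapEviction verifies that evicting every
// entry also empties the byName and byNameValue lookup maps.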
func TestHeaderFieldTable_LookupMapEviction(t *testing.T) {
table := &headerFieldTable{}
table.init()
table.addEntry(pair("key1", "value1-1"))
table.addEntry(pair("key2", "value2-1"))
table.addEntry(pair("key1", "value1-2"))
table.addEntry(pair("key3", "value3-1"))
table.addEntry(pair("key4", "value4-1"))
table.addEntry(pair("key2", "value2-2"))
// evict all pairs
table.evictOldest(table.len())
if l := table.len(); l > 0 {
t.Errorf("table.len() = %d, want 0", l)
}
if l := len(table.byName); l > 0 {
t.Errorf("len(table.byName) = %d, want 0", l)
}
if l := len(table.byNameValue); l > 0 {
t.Errorf("len(table.byNameValue) = %d, want 0", l)
}
}
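// TestStaticTable cross-checks the package's static table against the
// table as reproduced from the HPACK spec (RFC 7541, Appendix A).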
func TestStaticTable(t *testing.T) {
fromSpec := `
+-------+-----------------------------+---------------+
| 1 | :authority | |
| 2 | :method | GET |
| 3 | :method | POST |
| 4 | :path | / |
| 5 | :path | /index.html |
| 6 | :scheme | http |
| 7 | :scheme | https |
| 8 | :status | 200 |
| 9 | :status | 204 |
| 10 | :status | 206 |
| 11 | :status | 304 |
| 12 | :status | 400 |
| 13 | :status | 404 |
| 14 | :status | 500 |
| 15 | accept-charset | |
| 16 | accept-encoding | gzip, deflate |
| 17 | accept-language | |
| 18 | accept-ranges | |
| 19 | accept | |
| 20 | access-control-allow-origin | |
| 21 | age | |
| 22 | allow | |
| 23 | authorization | |
| 24 | cache-control | |
| 25 | content-disposition | |
| 26 | content-encoding | |
| 27 | content-language | |
| 28 | content-length | |
| 29 | content-location | |
| 30 | content-range | |
| 31 | content-type | |
| 32 | cookie | |
| 33 | date | |
| 34 | etag | |
| 35 | expect | |
| 36 | expires | |
| 37 | from | |
| 38 | host | |
| 39 | if-match | |
| 40 | if-modified-since | |
| 41 | if-none-match | |
| 42 | if-range | |
| 43 | if-unmodified-since | |
| 44 | last-modified | |
| 45 | link | |
| 46 | location | |
| 47 | max-forwards | |
| 48 | proxy-authenticate | |
| 49 | proxy-authorization | |
| 50 | range | |
| 51 | referer | |
| 52 | refresh | |
| 53 | retry-after | |
| 54 | server | |
| 55 | set-cookie | |
| 56 | strict-transport-security | |
| 57 | transfer-encoding | |
| 58 | user-agent | |
| 59 | vary | |
| 60 | via | |
| 61 | www-authenticate | |
+-------+-----------------------------+---------------+
`
bs := bufio.NewScanner(strings.NewReader(fromSpec))
re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
for bs.Scan() {
l := bs.Text()
if !strings.Contains(l, "|") {
continue
}
m := re.FindStringSubmatch(l)
if m == nil {
continue
}
i, err := strconv.Atoi(m[1])
if err != nil {
t.Errorf("Bogus integer on line %q", l)
continue
}
if i < 1 || i > staticTable.len() {
t.Errorf("Bogus index %d on line %q", i, l)
continue
}
if got, want := staticTable.ents[i-1].Name, m[2]; got != want {
t.Errorf("header index %d name = %q; want %q", i, got, want)
}
if got, want := staticTable.ents[i-1].Value, m[3]; got != want {
t.Errorf("header index %d value = %q; want %q", i, got, want)
}
}
if err := bs.Err(); err != nil {
t.Error(err)
}
}

View File

@ -1,280 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"errors"
"flag"
"fmt"
"net/http"
"os/exec"
"strconv"
"strings"
"testing"
"time"
"golang.org/x/net/http2/hpack"
)
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")
func condSkipFailingTest(t *testing.T) {
if !*knownFailing {
t.Skip("Skipping known-failing test without --known_failing")
}
}
func init() {
inTests = true
DebugGoroutines = true
flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging")
}
func TestSettingString(t *testing.T) {
tests := []struct {
s Setting
want string
}{
{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
}
for i, tt := range tests {
got := fmt.Sprint(tt.s)
if got != tt.want {
t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
}
}
}
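// twriter is an io.Writer that forwards writes to the test's log,
// dropping any write that contains one of the server tester's logFilter
// phrases.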
type twriter struct {
t testing.TB
st *serverTester // optional
}
func (w twriter) Write(p []byte) (n int, err error) {
if w.st != nil {
ps := string(p)
for _, phrase := range w.st.logFilter {
if strings.Contains(ps, phrase) {
return len(p), nil // no logging
}
}
}
w.t.Logf("%s", p)
return len(p), nil
}
// encodeHeaderNoImplicit is like encodeHeader, but doesn't add the implicit pseudo-headers.
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
for len(headers) > 0 {
k, v := headers[0], headers[1]
headers = headers[2:]
if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
}
}
return buf.Bytes()
}
// Verify that curl has http2.
func requireCurl(t *testing.T) {
out, err := dockerLogs(curl(t, "--version"))
if err != nil {
t.Skipf("failed to determine curl features; skipping test")
}
if !strings.Contains(string(out), "HTTP2") {
t.Skip("curl doesn't support HTTP2; skipping test")
}
}
func curl(t *testing.T, args ...string) (container string) {
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
if err != nil {
t.Skipf("Failed to run curl in docker: %v, %s", err, out)
}
return strings.TrimSpace(string(out))
}
// Verify that h2load exists.
func requireH2load(t *testing.T) {
out, err := dockerLogs(h2load(t, "--version"))
if err != nil {
t.Skipf("failed to probe h2load; skipping test: %s", out)
}
if !strings.Contains(string(out), "h2load nghttp2/") {
t.Skipf("h2load not present; skipping test. (Output=%q)", out)
}
}
func h2load(t *testing.T, args ...string) (container string) {
out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
if err != nil {
t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
}
return strings.TrimSpace(string(out))
}
type puppetCommand struct {
fn func(w http.ResponseWriter, r *http.Request)
done chan<- bool
}
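// handlerPuppet lets a test drive a blocked HTTP handler step by step:
// the handler runs act, and the test sends it functions to execute via
// do until done is called.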
type handlerPuppet struct {
ch chan puppetCommand
}
func newHandlerPuppet() *handlerPuppet {
return &handlerPuppet{
ch: make(chan puppetCommand),
}
}
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
for cmd := range p.ch {
cmd.fn(w, r)
cmd.done <- true
}
}
func (p *handlerPuppet) done() { close(p.ch) }
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
done := make(chan bool)
p.ch <- puppetCommand{fn, done}
<-done
}
func dockerLogs(container string) ([]byte, error) {
out, err := exec.Command("docker", "wait", container).CombinedOutput()
if err != nil {
return out, err
}
exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
if err != nil {
return out, errors.New("unexpected exit status from docker wait")
}
out, err = exec.Command("docker", "logs", container).CombinedOutput()
exec.Command("docker", "rm", container).Run()
if err == nil && exitStatus != 0 {
err = fmt.Errorf("exit status %d: %s", exitStatus, out)
}
return out, err
}
func kill(container string) {
exec.Command("docker", "kill", container).Run()
exec.Command("docker", "rm", container).Run()
}
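// Note (illustrative only, not from the original file): a test built on
// the docker helpers above would typically gate on the image being
// present and then inspect the container's output, e.g.
//
//	requireCurl(t)
//	container := curl(t, "--http2", "-s", st.ts.URL)
//	defer kill(container)
//	out, err := dockerLogs(container)
//
// where st is the serverTester used throughout these tests.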
func cleanDate(res *http.Response) {
if d := res.Header["Date"]; len(d) == 1 {
d[0] = "XXX"
}
}
func TestSorterPoolAllocs(t *testing.T) {
ss := []string{"a", "b", "c"}
h := http.Header{
"a": nil,
"b": nil,
"c": nil,
}
sorter := new(sorter)
if allocs := testing.AllocsPerRun(100, func() {
sorter.SortStrings(ss)
}); allocs >= 1 {
t.Logf("SortStrings allocs = %v; want <1", allocs)
}
if allocs := testing.AllocsPerRun(5, func() {
if len(sorter.Keys(h)) != 3 {
t.Fatal("wrong result")
}
}); allocs > 0 {
t.Logf("Keys allocs = %v; want <1", allocs)
}
}
// waitCondition reports whether fn eventually returned true,
// checking immediately and then every checkEvery amount,
// until waitFor has elapsed, at which point it returns false.
func waitCondition(waitFor, checkEvery time.Duration, fn func() bool) bool {
deadline := time.Now().Add(waitFor)
for time.Now().Before(deadline) {
if fn() {
return true
}
time.Sleep(checkEvery)
}
return false
}
// waitErrCondition is like waitCondition but with errors instead of bools.
func waitErrCondition(waitFor, checkEvery time.Duration, fn func() error) error {
deadline := time.Now().Add(waitFor)
var err error
for time.Now().Before(deadline) {
if err = fn(); err == nil {
return nil
}
time.Sleep(checkEvery)
}
return err
}
// Tests that http2.Server.IdleTimeout is initialized from
// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was
// added in Go 1.8.
func TestConfigureServerIdleTimeout_Go18(t *testing.T) {
const timeout = 5 * time.Second
const notThisOne = 1 * time.Second
// With a zero http2.Server, verify that it copies IdleTimeout:
{
s1 := &http.Server{
IdleTimeout: timeout,
ReadTimeout: notThisOne,
}
s2 := &Server{}
if err := ConfigureServer(s1, s2); err != nil {
t.Fatal(err)
}
if s2.IdleTimeout != timeout {
t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
}
}
// And that it falls back to ReadTimeout:
{
s1 := &http.Server{
ReadTimeout: timeout,
}
s2 := &Server{}
if err := ConfigureServer(s1, s2); err != nil {
t.Fatal(err)
}
if s2.IdleTimeout != timeout {
t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
}
}
// Verify that s1's IdleTimeout doesn't overwrite an existing setting:
{
s1 := &http.Server{
IdleTimeout: notThisOne,
}
s2 := &Server{
IdleTimeout: timeout,
}
if err := ConfigureServer(s1, s2); err != nil {
t.Fatal(err)
}
if s2.IdleTimeout != timeout {
t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
}
}
}

View File

@ -1,130 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"errors"
"io"
"io/ioutil"
"testing"
)
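// TestPipeClose verifies that only the first CloseWithError error sticks:
// a later CloseWithError is ignored and Read keeps returning the first
// error.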
func TestPipeClose(t *testing.T) {
var p pipe
p.b = new(bytes.Buffer)
a := errors.New("a")
b := errors.New("b")
p.CloseWithError(a)
p.CloseWithError(b)
_, err := p.Read(make([]byte, 1))
if err != a {
t.Errorf("err = %v want %v", err, a)
}
}
func TestPipeDoneChan(t *testing.T) {
var p pipe
done := p.Done()
select {
case <-done:
t.Fatal("done too soon")
default:
}
p.CloseWithError(io.EOF)
select {
case <-done:
default:
t.Fatal("should be done")
}
}
func TestPipeDoneChan_ErrFirst(t *testing.T) {
var p pipe
p.CloseWithError(io.EOF)
done := p.Done()
select {
case <-done:
default:
t.Fatal("should be done")
}
}
func TestPipeDoneChan_Break(t *testing.T) {
var p pipe
done := p.Done()
select {
case <-done:
t.Fatal("done too soon")
default:
}
p.BreakWithError(io.EOF)
select {
case <-done:
default:
t.Fatal("should be done")
}
}
func TestPipeDoneChan_Break_ErrFirst(t *testing.T) {
var p pipe
p.BreakWithError(io.EOF)
done := p.Done()
select {
case <-done:
default:
t.Fatal("should be done")
}
}
func TestPipeCloseWithError(t *testing.T) {
p := &pipe{b: new(bytes.Buffer)}
const body = "foo"
io.WriteString(p, body)
a := errors.New("test error")
p.CloseWithError(a)
all, err := ioutil.ReadAll(p)
if string(all) != body {
t.Errorf("read bytes = %q; want %q", all, body)
}
if err != a {
t.Logf("read error = %v, %v", err, a)
}
// Read and Write should fail.
if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 {
t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite)
}
if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite)
}
}
func TestPipeBreakWithError(t *testing.T) {
p := &pipe{b: new(bytes.Buffer)}
io.WriteString(p, "foo")
a := errors.New("test err")
p.BreakWithError(a)
all, err := ioutil.ReadAll(p)
if string(all) != "" {
t.Errorf("read bytes = %q; want empty string", all)
}
if err != a {
t.Logf("read error = %v, %v", err, a)
}
if p.b != nil {
t.Errorf("buffer should be nil after BreakWithError")
}
// Write should succeed silently.
if n, err := p.Write([]byte("abc")); err != nil || n != 3 {
t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err)
}
if p.b != nil {
t.Errorf("buffer should be nil after Write")
}
// Read should fail.
if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {
t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n)
}
}

View File

@ -1,519 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"sync"
"testing"
"time"
)
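// TestServer_Push_Success exercises the happy path: the handler for "/"
// pushes one GET and one HEAD resource, and the test then checks the
// expected PUSH_PROMISE, HEADERS and DATA frames on streams 1, 2 and 4.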
func TestServer_Push_Success(t *testing.T) {
const (
mainBody = "<html>index page</html>"
pushedBody = "<html>pushed page</html>"
userAgent = "testagent"
cookie = "testcookie"
)
var stURL string
checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error {
if got, want := r.Method, wantMethod; got != want {
return fmt.Errorf("promised Req.Method=%q, want %q", got, want)
}
if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) {
return fmt.Errorf("promised Req.Header=%q, want %q", got, want)
}
if got, want := "https://"+r.Host, stURL; got != want {
return fmt.Errorf("promised Req.Host=%q, want %q", got, want)
}
if r.Body == nil {
return fmt.Errorf("nil Body")
}
if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {
return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err)
}
return nil
}
errc := make(chan error, 3)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
switch r.URL.RequestURI() {
case "/":
// Push "/pushed?get" as a GET request, using an absolute URL.
opt := &http.PushOptions{
Header: http.Header{
"User-Agent": {userAgent},
},
}
if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil {
errc <- fmt.Errorf("error pushing /pushed?get: %v", err)
return
}
// Push "/pushed?head" as a HEAD request, using a path.
opt = &http.PushOptions{
Method: "HEAD",
Header: http.Header{
"User-Agent": {userAgent},
"Cookie": {cookie},
},
}
if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil {
errc <- fmt.Errorf("error pushing /pushed?head: %v", err)
return
}
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Content-Length", strconv.Itoa(len(mainBody)))
w.WriteHeader(200)
io.WriteString(w, mainBody)
errc <- nil
case "/pushed?get":
wantH := http.Header{}
wantH.Set("User-Agent", userAgent)
if err := checkPromisedReq(r, "GET", wantH); err != nil {
errc <- fmt.Errorf("/pushed?get: %v", err)
return
}
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody)))
w.WriteHeader(200)
io.WriteString(w, pushedBody)
errc <- nil
case "/pushed?head":
wantH := http.Header{}
wantH.Set("User-Agent", userAgent)
wantH.Set("Cookie", cookie)
if err := checkPromisedReq(r, "HEAD", wantH); err != nil {
errc <- fmt.Errorf("/pushed?head: %v", err)
return
}
w.WriteHeader(204)
errc <- nil
default:
errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
}
})
stURL = st.ts.URL
// Send one request, which should push two responses.
st.greet()
getSlash(st)
for k := 0; k < 3; k++ {
select {
case <-time.After(2 * time.Second):
t.Errorf("timeout waiting for handler %d to finish", k)
case err := <-errc:
if err != nil {
t.Fatal(err)
}
}
}
checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error {
pp, ok := f.(*PushPromiseFrame)
if !ok {
return fmt.Errorf("got a %T; want *PushPromiseFrame", f)
}
if !pp.HeadersEnded() {
return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame")
}
if got, want := pp.PromiseID, promiseID; got != want {
return fmt.Errorf("got PromiseID %v; want %v", got, want)
}
gotH := st.decodeHeader(pp.HeaderBlockFragment())
if !reflect.DeepEqual(gotH, wantH) {
return fmt.Errorf("got promised headers %v; want %v", gotH, wantH)
}
return nil
}
checkHeaders := func(f Frame, wantH [][2]string) error {
hf, ok := f.(*HeadersFrame)
if !ok {
return fmt.Errorf("got a %T; want *HeadersFrame", f)
}
gotH := st.decodeHeader(hf.HeaderBlockFragment())
if !reflect.DeepEqual(gotH, wantH) {
return fmt.Errorf("got response headers %v; want %v", gotH, wantH)
}
return nil
}
checkData := func(f Frame, wantData string) error {
df, ok := f.(*DataFrame)
if !ok {
return fmt.Errorf("got a %T; want *DataFrame", f)
}
if gotData := string(df.Data()); gotData != wantData {
return fmt.Errorf("got response data %q; want %q", gotData, wantData)
}
return nil
}
// Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA
// Stream 2 has HEADERS + DATA
// Stream 4 has HEADERS
expected := map[uint32][]func(Frame) error{
1: {
func(f Frame) error {
return checkPushPromise(f, 2, [][2]string{
{":method", "GET"},
{":scheme", "https"},
{":authority", st.ts.Listener.Addr().String()},
{":path", "/pushed?get"},
{"user-agent", userAgent},
})
},
func(f Frame) error {
return checkPushPromise(f, 4, [][2]string{
{":method", "HEAD"},
{":scheme", "https"},
{":authority", st.ts.Listener.Addr().String()},
{":path", "/pushed?head"},
{"cookie", cookie},
{"user-agent", userAgent},
})
},
func(f Frame) error {
return checkHeaders(f, [][2]string{
{":status", "200"},
{"content-type", "text/html"},
{"content-length", strconv.Itoa(len(mainBody))},
})
},
func(f Frame) error {
return checkData(f, mainBody)
},
},
2: {
func(f Frame) error {
return checkHeaders(f, [][2]string{
{":status", "200"},
{"content-type", "text/html"},
{"content-length", strconv.Itoa(len(pushedBody))},
})
},
func(f Frame) error {
return checkData(f, pushedBody)
},
},
4: {
func(f Frame) error {
return checkHeaders(f, [][2]string{
{":status", "204"},
})
},
},
}
consumed := map[uint32]int{}
for k := 0; len(expected) > 0; k++ {
f, err := st.readFrame()
if err != nil {
for id, left := range expected {
t.Errorf("stream %d: missing %d frames", id, len(left))
}
t.Fatalf("readFrame %d: %v", k, err)
}
id := f.Header().StreamID
label := fmt.Sprintf("stream %d, frame %d", id, consumed[id])
if len(expected[id]) == 0 {
t.Fatalf("%s: unexpected frame %#+v", label, f)
}
check := expected[id][0]
expected[id] = expected[id][1:]
if len(expected[id]) == 0 {
delete(expected, id)
}
if err := check(f); err != nil {
t.Fatalf("%s: %v", label, err)
}
consumed[id]++
}
}
func TestServer_Push_SuccessNoRace(t *testing.T) {
// Regression test for issue #18326. Ensure the request handler can mutate
// pushed request headers without racing with the PUSH_PROMISE write.
errc := make(chan error, 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
switch r.URL.RequestURI() {
case "/":
opt := &http.PushOptions{
Header: http.Header{"User-Agent": {"testagent"}},
}
if err := w.(http.Pusher).Push("/pushed", opt); err != nil {
errc <- fmt.Errorf("error pushing: %v", err)
return
}
w.WriteHeader(200)
errc <- nil
case "/pushed":
// Update request header, ensure there is no race.
r.Header.Set("User-Agent", "newagent")
r.Header.Set("Cookie", "cookie")
w.WriteHeader(200)
errc <- nil
default:
errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
}
})
// Send one request, which should push one response.
st.greet()
getSlash(st)
for k := 0; k < 2; k++ {
select {
case <-time.After(2 * time.Second):
t.Errorf("timeout waiting for handler %d to finish", k)
case err := <-errc:
if err != nil {
t.Fatal(err)
}
}
}
}
func TestServer_Push_RejectRecursivePush(t *testing.T) {
// Expect two requests, but might get three if there's a bug and the second push succeeds.
errc := make(chan error, 3)
handler := func(w http.ResponseWriter, r *http.Request) error {
baseURL := "https://" + r.Host
switch r.URL.Path {
case "/":
if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil {
return fmt.Errorf("first Push()=%v, want nil", err)
}
return nil
case "/push1":
if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want {
return fmt.Errorf("Push()=%v, want %v", got, want)
}
return nil
default:
return fmt.Errorf("unexpected path: %q", r.URL.Path)
}
}
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
errc <- handler(w, r)
})
defer st.Close()
st.greet()
getSlash(st)
if err := <-errc; err != nil {
t.Errorf("First request failed: %v", err)
}
if err := <-errc; err != nil {
t.Errorf("Second request failed: %v", err)
}
}
func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) {
// Expect one request, but might get two if there's a bug and the push succeeds.
errc := make(chan error, 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
errc <- doPush(w.(http.Pusher), r)
})
defer st.Close()
st.greet()
if err := st.fr.WriteSettings(settings...); err != nil {
st.t.Fatalf("WriteSettings: %v", err)
}
st.wantSettingsAck()
getSlash(st)
if err := <-errc; err != nil {
t.Error(err)
}
// Should not get a PUSH_PROMISE frame.
hf := st.wantHeaders()
if !hf.StreamEnded() {
t.Error("stream should end after headers")
}
}
func TestServer_Push_RejectIfDisabled(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
return fmt.Errorf("Push()=%v, want %v", got, want)
}
return nil
},
Setting{SettingEnablePush, 0})
}
func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want {
return fmt.Errorf("Push()=%v, want %v", got, want)
}
return nil
},
Setting{SettingMaxConcurrentStreams, 0})
}
func TestServer_Push_RejectWrongScheme(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil {
return errors.New("Push() should have failed (push target URL is http)")
}
return nil
})
}
func TestServer_Push_RejectMissingHost(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if err := p.Push("https:pushed", nil); err == nil {
return errors.New("Push() should have failed (push target URL missing host)")
}
return nil
})
}
func TestServer_Push_RejectRelativePath(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if err := p.Push("../test", nil); err == nil {
return errors.New("Push() should have failed (push target is a relative path)")
}
return nil
})
}
func TestServer_Push_RejectForbiddenMethod(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil {
return errors.New("Push() should have failed (cannot promise a POST)")
}
return nil
})
}
func TestServer_Push_RejectForbiddenHeader(t *testing.T) {
testServer_Push_RejectSingleRequest(t,
func(p http.Pusher, r *http.Request) error {
header := http.Header{
"Content-Length": {"10"},
"Content-Encoding": {"gzip"},
"Trailer": {"Foo"},
"Te": {"trailers"},
"Host": {"test.com"},
":authority": {"test.com"},
}
if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil {
return errors.New("Push() should have failed (forbidden headers)")
}
return nil
})
}
func TestServer_Push_StateTransitions(t *testing.T) {
const body = "foo"
gotPromise := make(chan bool)
finishedPush := make(chan bool)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
switch r.URL.RequestURI() {
case "/":
if err := w.(http.Pusher).Push("/pushed", nil); err != nil {
t.Errorf("Push error: %v", err)
}
// Don't finish this request until the push finishes so we don't
// nondeterministically interleave output frames with the push.
<-finishedPush
case "/pushed":
<-gotPromise
}
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Content-Length", strconv.Itoa(len(body)))
w.WriteHeader(200)
io.WriteString(w, body)
})
defer st.Close()
st.greet()
if st.stream(2) != nil {
t.Fatal("stream 2 should be empty")
}
if got, want := st.streamState(2), stateIdle; got != want {
t.Fatalf("streamState(2)=%v, want %v", got, want)
}
getSlash(st)
// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.
st.wantPushPromise()
if got, want := st.streamState(2), stateHalfClosedRemote; got != want {
t.Fatalf("streamState(2)=%v, want %v", got, want)
}
// We stall the HTTP handler for "/pushed" until the above check. If we don't
// stall the handler, then the handler might write HEADERS and DATA and finish
// the stream before we check st.streamState(2) -- should that happen, we'll
// see stateClosed and fail the above check.
close(gotPromise)
st.wantHeaders()
if df := st.wantData(); !df.StreamEnded() {
t.Fatal("expected END_STREAM flag on DATA")
}
if got, want := st.streamState(2), stateClosed; got != want {
t.Fatalf("streamState(2)=%v, want %v", got, want)
}
close(finishedPush)
}
func TestServer_Push_RejectAfterGoAway(t *testing.T) {
var readyOnce sync.Once
ready := make(chan struct{})
errc := make(chan error, 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
select {
case <-ready:
case <-time.After(5 * time.Second):
errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
}
if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
errc <- fmt.Errorf("Push()=%v, want %v", got, want)
}
errc <- nil
})
defer st.Close()
st.greet()
getSlash(st)
// Send GOAWAY and wait for it to be processed.
st.fr.WriteGoAway(1, ErrCodeNo, nil)
go func() {
for {
select {
case <-ready:
return
default:
}
st.sc.serveMsgCh <- func(loopNum int) {
if !st.sc.pushEnabled {
readyOnce.Do(func() { close(ready) })
}
}
}
}()
if err := <-errc; err != nil {
t.Error(err)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,541 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"fmt"
"sort"
"testing"
)
func defaultPriorityWriteScheduler() *priorityWriteScheduler {
return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)
}
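// checkPriorityWellFormed verifies basic invariants of the priority tree:
// every node is stored under its own id, and every node with a parent
// appears in that parent's kids list.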
func checkPriorityWellFormed(ws *priorityWriteScheduler) error {
for id, n := range ws.nodes {
if id != n.id {
return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id)
}
if n.parent == nil {
if n.next != nil || n.prev != nil {
return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id)
}
continue
}
found := false
for k := n.parent.kids; k != nil; k = k.next {
if k.id == id {
found = true
break
}
}
if !found {
return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id)
}
}
return nil
}
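// fmtTree formats the scheduler's priority tree as a single
// space-separated line, rendering the nodes in increasing id order with
// fmtNode.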
func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string {
var ids []int
for _, n := range ws.nodes {
ids = append(ids, int(n.id))
}
sort.Ints(ids)
var buf bytes.Buffer
for _, id := range ids {
if buf.Len() != 0 {
buf.WriteString(" ")
}
if id == 0 {
buf.WriteString(fmtNode(&ws.root))
} else {
buf.WriteString(fmtNode(ws.nodes[uint32(id)]))
}
}
return buf.String()
}
func fmtNodeParentSkipRoot(n *priorityNode) string {
switch {
case n.id == 0:
return ""
case n.parent == nil:
return fmt.Sprintf("%d{parent:nil}", n.id)
default:
return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id)
}
}
func fmtNodeWeightParentSkipRoot(n *priorityNode) string {
switch {
case n.id == 0:
return ""
case n.parent == nil:
return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight)
default:
return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id)
}
}
func TestPriorityTwoStreams(t *testing.T) {
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{})
want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After open\ngot %q\nwant %q", got, want)
}
// Move 1's parent to 2.
ws.AdjustStream(1, PriorityParam{
StreamDep: 2,
Weight: 32,
Exclusive: false,
})
want = "1{weight:32,parent:2} 2{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityAdjustExclusiveZero(t *testing.T) {
// 1, 2, and 3 are all children of the 0 stream.
// Exclusive reprioritization to any of the streams should bring
// the rest of the streams under the reprioritized stream.
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{})
ws.OpenStream(3, OpenStreamOptions{})
want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After open\ngot %q\nwant %q", got, want)
}
ws.AdjustStream(2, PriorityParam{
StreamDep: 0,
Weight: 20,
Exclusive: true,
})
want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityAdjustOwnParent(t *testing.T) {
// Assigning a node as its own parent should have no effect.
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{})
ws.AdjustStream(2, PriorityParam{
StreamDep: 2,
Weight: 20,
Exclusive: true,
})
want := "1{weight:15,parent:0} 2{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityClosedStreams(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler)
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
// Close the first three streams. We lose 1, but keep 2 and 3.
ws.CloseStream(1)
ws.CloseStream(2)
ws.CloseStream(3)
want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After close\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
// Adding a stream as an exclusive child of 1 gives it default
// priorities, since 1 is gone.
ws.OpenStream(5, OpenStreamOptions{})
ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})
// Adding a stream as an exclusive child of 2 should work, since 2 is not gone.
ws.OpenStream(6, OpenStreamOptions{})
ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true})
want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After add streams\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityClosedStreamsDisabled(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
// Close the first two streams. We keep only 3.
ws.CloseStream(1)
ws.CloseStream(2)
want := "3{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After close\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityIdleStreams(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler)
ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
ws.OpenStream(4, OpenStreamOptions{})
ws.OpenStream(5, OpenStreamOptions{})
ws.OpenStream(6, OpenStreamOptions{})
ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15})
ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15})
ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15})
want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After open\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPriorityIdleStreamsDisabled(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)
ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle
ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle
ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle
ws.OpenStream(4, OpenStreamOptions{})
want := "4{weight:15,parent:0}"
if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {
t.Errorf("After open\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPrioritySection531NonExclusive(t *testing.T) {
// Example from RFC 7540 Section 5.3.1.
// A,B,C,D = 1,2,3,4
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
ws.OpenStream(4, OpenStreamOptions{})
ws.AdjustStream(4, PriorityParam{
StreamDep: 1,
Weight: 15,
Exclusive: false,
})
want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}"
if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPrioritySection531Exclusive(t *testing.T) {
// Example from RFC 7540 Section 5.3.1.
// A,B,C,D = 1,2,3,4
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
ws.OpenStream(4, OpenStreamOptions{})
ws.AdjustStream(4, PriorityParam{
StreamDep: 1,
Weight: 15,
Exclusive: true,
})
want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}"
if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func makeSection533Tree() *priorityWriteScheduler {
// Initial tree from RFC 7540 Section 5.3.3.
// A,B,C,D,E,F = 1,2,3,4,5,6
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
return ws
}
func TestPrioritySection533NonExclusive(t *testing.T) {
// Example from RFC 7540 Section 5.3.3.
// A,B,C,D,E,F = 1,2,3,4,5,6
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
ws.AdjustStream(1, PriorityParam{
StreamDep: 4,
Weight: 15,
Exclusive: false,
})
want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}"
if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
func TestPrioritySection533Exclusive(t *testing.T) {
// Example from RFC 7540 Section 5.3.3.
// A,B,C,D,E,F = 1,2,3,4,5,6
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 1})
ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
ws.OpenStream(5, OpenStreamOptions{PusherID: 3})
ws.OpenStream(6, OpenStreamOptions{PusherID: 4})
ws.AdjustStream(1, PriorityParam{
StreamDep: 4,
Weight: 15,
Exclusive: true,
})
want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}"
if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {
t.Errorf("After adjust\ngot %q\nwant %q", got, want)
}
if err := checkPriorityWellFormed(ws); err != nil {
t.Error(err)
}
}
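// checkPopAll pops every queued frame from ws, verifying that the stream
// IDs come out in the given order and that the scheduler is empty
// afterwards; it returns an error describing the first mismatch.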
func checkPopAll(ws WriteScheduler, order []uint32) error {
for k, id := range order {
wr, ok := ws.Pop()
if !ok {
return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order)
}
if got := wr.StreamID(); got != id {
return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order)
}
}
wr, ok := ws.Pop()
if ok {
return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order)
}
return nil
}
func TestPriorityPopFrom533Tree(t *testing.T) {
ws := makeSection533Tree()
ws.Push(makeWriteHeadersRequest(3 /*C*/))
ws.Push(makeWriteNonStreamRequest())
ws.Push(makeWriteHeadersRequest(5 /*E*/))
ws.Push(makeWriteHeadersRequest(1 /*A*/))
t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))
if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil {
t.Error(err)
}
}
func TestPriorityPopFromLinearTree(t *testing.T) {
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
ws.OpenStream(3, OpenStreamOptions{PusherID: 2})
ws.OpenStream(4, OpenStreamOptions{PusherID: 3})
ws.Push(makeWriteHeadersRequest(3))
ws.Push(makeWriteHeadersRequest(4))
ws.Push(makeWriteHeadersRequest(1))
ws.Push(makeWriteHeadersRequest(2))
ws.Push(makeWriteNonStreamRequest())
ws.Push(makeWriteNonStreamRequest())
t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot))
if err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil {
t.Error(err)
}
}
func TestPriorityFlowControl(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false})
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
sc := &serverConn{maxFrameSize: 16}
st1 := &stream{id: 1, sc: sc}
st2 := &stream{id: 2, sc: sc}
ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})
ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil})
ws.AdjustStream(2, PriorityParam{StreamDep: 1})
// No flow-control bytes available.
if wr, ok := ws.Pop(); ok {
t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr)
}
// Add enough flow-control bytes to write st2 in two Pop calls.
// Should write data from st2 even though it's lower priority than st1.
for i := 1; i <= 2; i++ {
st2.flow.add(8)
wr, ok := ws.Pop()
if !ok {
t.Fatalf("Pop(%d)=false, want true", i)
}
if got, want := wr.DataSize(), 8; got != want {
t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want)
}
}
}
func TestPriorityThrottleOutOfOrderWrites(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true})
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{PusherID: 1})
sc := &serverConn{maxFrameSize: 4096}
st1 := &stream{id: 1, sc: sc}
st2 := &stream{id: 2, sc: sc}
st1.flow.add(4096)
st2.flow.add(4096)
ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil})
ws.AdjustStream(2, PriorityParam{StreamDep: 1})
// We have enough flow-control bytes to write st2 in a single Pop call.
// However, due to out-of-order write throttling, the first call should
// only write 1KB.
wr, ok := ws.Pop()
if !ok {
t.Fatalf("Pop(st2.first)=false, want true")
}
if got, want := wr.StreamID(), uint32(2); got != want {
t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want)
}
if got, want := wr.DataSize(), 1024; got != want {
t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want)
}
// Now add data on st1. This should take precedence.
ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil})
wr, ok = ws.Pop()
if !ok {
t.Fatalf("Pop(st1)=false, want true")
}
if got, want := wr.StreamID(), uint32(1); got != want {
t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want)
}
if got, want := wr.DataSize(), 4096; got != want {
t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want)
}
// Should go back to writing 1KB from st2.
wr, ok = ws.Pop()
if !ok {
t.Fatalf("Pop(st2.last)=false, want true")
}
if got, want := wr.StreamID(), uint32(2); got != want {
t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
}
if got, want := wr.DataSize(), 1024; got != want {
t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
}
}
func TestPriorityWeights(t *testing.T) {
ws := defaultPriorityWriteScheduler()
ws.OpenStream(1, OpenStreamOptions{})
ws.OpenStream(2, OpenStreamOptions{})
sc := &serverConn{maxFrameSize: 8}
st1 := &stream{id: 1, sc: sc}
st2 := &stream{id: 2, sc: sc}
st1.flow.add(40)
st2.flow.add(40)
ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})
// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
// The maximum frame size is 8 bytes. The write sequence should be:
// st1, total bytes so far is (st1=8, st2=0)
// st2, total bytes so far is (st1=8, st2=8)
// st1, total bytes so far is (st1=16, st2=8)
// st1, total bytes so far is (st1=24, st2=8) // 3x bandwidth
// st1, total bytes so far is (st1=32, st2=8) // 4x bandwidth
// st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
// st1, total bytes so far is (st1=40, st2=16)
// st2, total bytes so far is (st1=40, st2=24)
// st2, total bytes so far is (st1=40, st2=32)
// st2, total bytes so far is (st1=40, st2=40)
if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
t.Error(err)
}
}
func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
MaxClosedNodesInTree: 0,
MaxIdleNodesInTree: 0,
})
ws.OpenStream(1, OpenStreamOptions{})
ws.CloseStream(1)
ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})
if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
t.Error(err)
}
}

View File

@ -1,44 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "testing"
func TestRandomScheduler(t *testing.T) {
ws := NewRandomWriteScheduler()
ws.Push(makeWriteHeadersRequest(3))
ws.Push(makeWriteHeadersRequest(4))
ws.Push(makeWriteHeadersRequest(1))
ws.Push(makeWriteHeadersRequest(2))
ws.Push(makeWriteNonStreamRequest())
ws.Push(makeWriteNonStreamRequest())
// Pop all frames. Should get the non-stream requests first,
// followed by the stream requests in any order.
var order []FrameWriteRequest
for {
wr, ok := ws.Pop()
if !ok {
break
}
order = append(order, wr)
}
t.Logf("got frames: %v", order)
if len(order) != 6 {
t.Fatalf("got %d frames, expected 6", len(order))
}
if order[0].StreamID() != 0 || order[1].StreamID() != 0 {
t.Fatal("expected non-stream frames first", order[0], order[1])
}
got := make(map[uint32]bool)
for _, wr := range order[2:] {
got[wr.StreamID()] = true
}
for id := uint32(1); id <= 4; id++ {
if !got[id] {
t.Errorf("frame not found for stream %d", id)
}
}
}

View File

@ -1,125 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
"reflect"
"testing"
)
func makeWriteNonStreamRequest() FrameWriteRequest {
return FrameWriteRequest{writeSettingsAck{}, nil, nil}
}
func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest {
st := &stream{id: streamID}
return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil}
}
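// checkConsume calls wr.Consume(nbytes) and compares the consumed and
// remaining requests, plus the count returned, against want, which holds
// zero, one, or two expected FrameWriteRequests.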
func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error {
consumed, rest, n := wr.Consume(nbytes)
var wantConsumed, wantRest FrameWriteRequest
switch len(want) {
case 0:
case 1:
wantConsumed = want[0]
case 2:
wantConsumed = want[0]
wantRest = want[1]
}
if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) {
return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want))
}
return nil
}
func TestFrameWriteRequestNonData(t *testing.T) {
wr := makeWriteNonStreamRequest()
if got, want := wr.DataSize(), 0; got != want {
t.Errorf("DataSize: got %v, want %v", got, want)
}
// Non-DATA frames are always consumed whole.
if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil {
t.Errorf("Consume:\n%v", err)
}
}
func TestFrameWriteRequestData(t *testing.T) {
st := &stream{
id: 1,
sc: &serverConn{maxFrameSize: 16},
}
const size = 32
wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)}
if got, want := wr.DataSize(), size; got != want {
t.Errorf("DataSize: got %v, want %v", got, want)
}
// No flow-control bytes available: cannot consume anything.
if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil {
t.Errorf("Consume(limited by flow control):\n%v", err)
}
// Add enough flow-control bytes to consume the entire frame,
// but we're now restricted by st.sc.maxFrameSize.
st.flow.add(size)
want := []FrameWriteRequest{
{
write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false},
stream: st,
done: nil,
},
{
write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true},
stream: st,
done: wr.done,
},
}
if err := checkConsume(wr, math.MaxInt32, want); err != nil {
t.Errorf("Consume(limited by maxFrameSize):\n%v", err)
}
rest := want[1]
// Consume 8 bytes from the remaining frame.
want = []FrameWriteRequest{
{
write: &writeData{st.id, make([]byte, 8), false},
stream: st,
done: nil,
},
{
write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
stream: st,
done: wr.done,
},
}
if err := checkConsume(rest, 8, want); err != nil {
t.Errorf("Consume(8):\n%v", err)
}
rest = want[1]
// Consume all remaining bytes.
want = []FrameWriteRequest{
{
write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},
stream: st,
done: wr.done,
},
}
if err := checkConsume(rest, math.MaxInt32, want); err != nil {
t.Errorf("Consume(remainder):\n%v", err)
}
}
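
To make the arithmetic in the test above concrete: a 32-byte DATA write with 32 flow-control tokens and maxFrameSize 16 is handed out as 16-, 8-, and 8-byte chunks, where the middle 8 comes from the explicit Consume(rest, 8) call rather than any fixed rule. Below is a hedged in-package sketch of that splitting loop; splitSizes and its limits parameter are invented for illustration.

// splitSizes applies Consume repeatedly with the given per-step limits and
// records the size of each DATA chunk handed back. A limit of math.MaxInt32
// means "take as much as flow control and maxFrameSize allow".
func splitSizes(wr FrameWriteRequest, limits []int32) []int {
	var sizes []int
	for _, lim := range limits {
		consumed, rest, n := wr.Consume(lim)
		if n == 0 {
			break // nothing consumable, e.g. no flow-control tokens left
		}
		sizes = append(sizes, consumed.DataSize())
		if n == 1 {
			break // the whole remaining request was consumed
		}
		wr = rest
	}
	return sizes
}

With the flow-control and maxFrameSize values from the test, splitSizes(wr, []int32{math.MaxInt32, 8, math.MaxInt32}) would be expected to return [16 8 8].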
func TestFrameWriteRequest_StreamID(t *testing.T) {
const streamID = 123
wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)}
if got := wr.StreamID(); got != streamID {
t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID)
}
}


@ -1,356 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"testing"
)

var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
// The global map of sentence coverage for the http2 spec.
var defaultSpecCoverage specCoverage
var loadSpecOnce sync.Once
func loadSpec() {
if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
panic(err)
} else {
defaultSpecCoverage = readSpecCov(f)
f.Close()
}
}
// covers marks all sentences for section sec in defaultSpecCoverage. Sentences
// not "covered" will be included in the report output by TestSpecCoverage.
func covers(sec, sentences string) {
loadSpecOnce.Do(loadSpec)
defaultSpecCoverage.cover(sec, sentences)
}
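
The hook above is meant to be driven from other tests in this package: a call site passes the spec section number and the exact sentence text, and any sentence never passed to covers is listed by the TestSpecCoverage report below. The following is a hedged illustration of such a call site; the test name, section number, and sentence are placeholders rather than real spec text, so an actual call would need the exact wording from the spec or cover panics.

func TestSomePlaceholderRequirement(t *testing.T) {
	// Placeholder section and sentence; cover() panics unless the sentence
	// matches the spec text for that section exactly.
	covers("9.9", `An endpoint MUST do the placeholder thing.`)
	// ... exercise the behavior that satisfies that sentence ...
}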
type specPart struct {
section string
sentence string
}
func (ss specPart) Less(oo specPart) bool {
atoi := func(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
panic(err)
}
return n
}
a := strings.Split(ss.section, ".")
b := strings.Split(oo.section, ".")
for len(a) > 0 {
if len(b) == 0 {
return false
}
x, y := atoi(a[0]), atoi(b[0])
if x == y {
a, b = a[1:], b[1:]
continue
}
return x < y
}
if len(b) > 0 {
return true
}
return false
}
type bySpecSection []specPart
func (a bySpecSection) Len() int { return len(a) }
func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type specCoverage struct {
coverage map[specPart]bool
d *xml.Decoder
}
func joinSection(sec []int) string {
s := fmt.Sprintf("%d", sec[0])
for _, n := range sec[1:] {
s = fmt.Sprintf("%s.%d", s, n)
}
return s
}
// readSection consumes the XML tokens of one <section> element, identified by
// the dotted path sec, recursing into nested sections and recording every
// sentence of prose in sc.coverage.
func (sc specCoverage) readSection(sec []int) {
var (
buf = new(bytes.Buffer)
sub = 0
)
for {
tk, err := sc.d.Token()
if err != nil {
if err == io.EOF {
return
}
panic(err)
}
switch v := tk.(type) {
case xml.StartElement:
if skipElement(v) {
if err := sc.d.Skip(); err != nil {
panic(err)
}
if v.Name.Local == "section" {
sub++
}
break
}
switch v.Name.Local {
case "section":
sub++
sc.readSection(append(sec, sub))
case "xref":
buf.Write(sc.readXRef(v))
}
case xml.CharData:
if len(sec) == 0 {
break
}
buf.Write(v)
case xml.EndElement:
if v.Name.Local == "section" {
sc.addSentences(joinSection(sec), buf.String())
return
}
}
}
}
// readXRef reads an <xref> element and returns its inline text, or, if the
// element is empty, a bracketed reference built from its attributes.
func (sc specCoverage) readXRef(se xml.StartElement) []byte {
var b []byte
for {
tk, err := sc.d.Token()
if err != nil {
panic(err)
}
switch v := tk.(type) {
case xml.CharData:
if b != nil {
panic("unexpected CharData")
}
b = []byte(string(v))
case xml.EndElement:
if v.Name.Local != "xref" {
panic("expected </xref>")
}
if b != nil {
return b
}
sig := attrSig(se)
switch sig {
case "target":
return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
case "fmt-of,rel,target", "fmt-,,rel,target":
return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
case "fmt-of,sec,target", "fmt-,,sec,target":
return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
case "fmt-of,rel,sec,target":
return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
default:
panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
}
default:
panic(fmt.Sprintf("unexpected tag %q", v))
}
}
}
var skipAnchor = map[string]bool{
"intro": true,
"Overview": true,
}
var skipTitle = map[string]bool{
"Acknowledgements": true,
"Change Log": true,
"Document Organization": true,
"Conventions and Terminology": true,
}
func skipElement(s xml.StartElement) bool {
switch s.Name.Local {
case "artwork":
return true
case "section":
for _, attr := range s.Attr {
switch attr.Name.Local {
case "anchor":
if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
return true
}
case "title":
if skipTitle[attr.Value] {
return true
}
}
}
}
return false
}
func readSpecCov(r io.Reader) specCoverage {
sc := specCoverage{
coverage: map[specPart]bool{},
d: xml.NewDecoder(r)}
sc.readSection(nil)
return sc
}
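
As a concrete illustration of how readSection keys the coverage map: each nested <section> appends one more component to the dotted path, and every sentence of its prose becomes one entry. The sketch below feeds an invented two-section document through readSpecCov; the helper name and XML are made up, while the real input is testdata/draft-ietf-httpbis-http2.xml.

// sketchReadSpecCov shows the shape of the coverage map for a tiny document:
// three entries, keyed ("1", "First sentence."), ("1", "Second sentence.")
// and ("1.1", "Nested sentence."), all initially uncovered.
func sketchReadSpecCov() {
	const doc = `<rfc>` +
		`<section title="One">First sentence. Second sentence.` +
		`<section title="One-one">Nested sentence.</section>` +
		`</section>` +
		`</rfc>`
	sc := readSpecCov(strings.NewReader(doc))
	fmt.Println(len(sc.coverage)) // 3
}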
func (sc specCoverage) addSentences(sec string, sentence string) {
for _, s := range parseSentences(sentence) {
sc.coverage[specPart{sec, s}] = false
}
}
func (sc specCoverage) cover(sec string, sentence string) {
for _, s := range parseSentences(sentence) {
p := specPart{sec, s}
if _, ok := sc.coverage[p]; !ok {
panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
}
sc.coverage[specPart{sec, s}] = true
}
}
var whitespaceRx = regexp.MustCompile(`\s+`)
// parseSentences normalizes whitespace in a block of prose and splits it into
// individual sentences, each ending with a period.
func parseSentences(sens string) []string {
sens = strings.TrimSpace(sens)
if sens == "" {
return nil
}
ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
for i, s := range ss {
s = strings.TrimSpace(s)
if !strings.HasSuffix(s, ".") {
s += "."
}
ss[i] = s
}
return ss
}
func TestSpecParseSentences(t *testing.T) {
tests := []struct {
ss string
want []string
}{
{"Sentence 1. Sentence 2.",
[]string{
"Sentence 1.",
"Sentence 2.",
}},
{"Sentence 1. \nSentence 2.\tSentence 3.",
[]string{
"Sentence 1.",
"Sentence 2.",
"Sentence 3.",
}},
}
for i, tt := range tests {
got := parseSentences(tt.ss)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%d: got = %q, want %q", i, got, tt.want)
}
}
}
func TestSpecCoverage(t *testing.T) {
if !*coverSpec {
t.Skip()
}
loadSpecOnce.Do(loadSpec)
var (
list []specPart
cv = defaultSpecCoverage.coverage
total = len(cv)
complete = 0
)
for sp, touched := range defaultSpecCoverage.coverage {
if touched {
complete++
} else {
list = append(list, sp)
}
}
sort.Stable(bySpecSection(list))
if testing.Short() && len(list) > 5 {
list = list[:5]
}
for _, p := range list {
t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
}
t.Logf("%d/%d (%d%%) sentences covered", complete, total, complete*100/total)
}
func attrSig(se xml.StartElement) string {
var names []string
for _, attr := range se.Attr {
if attr.Name.Local == "fmt" {
names = append(names, "fmt-"+attr.Value)
} else {
names = append(names, attr.Name.Local)
}
}
sort.Strings(names)
return strings.Join(names, ",")
}
func attrValue(se xml.StartElement, attr string) string {
for _, a := range se.Attr {
if a.Name.Local == attr {
return a.Value
}
}
panic("unknown attribute " + attr)
}
func TestSpecPartLess(t *testing.T) {
tests := []struct {
sec1, sec2 string
want bool
}{
{"6.2.1", "6.2", false},
{"6.2", "6.2.1", true},
{"6.10", "6.10.1", true},
{"6.10", "6.1.1", false}, // 10, not 1
{"6.1", "6.1", false}, // equal, so not less
}
for _, tt := range tests {
got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
if got != tt.want {
t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
}
}
}